repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
pycbc
|
pycbc-master/pycbc/inference/models/brute_marg.py
|
# Copyright (C) 2020 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module provides model classes that do brute force marginalization
using at the likelihood level.
"""
import math
import logging
import numpy
from multiprocessing import Pool
from scipy.special import logsumexp
from .gaussian_noise import BaseGaussianNoise
from .tools import draw_sample
_model = None
class likelihood_wrapper(object):
    """Picklable callable that evaluates a model's log likelihood ratio.

    The model is stashed in the module-level ``_model`` global so that
    worker processes spawned by ``multiprocessing.Pool`` can share it
    without pickling the model itself with every call.
    """
    def __init__(self, model):
        # Publish the model through the module global for pool workers.
        global _model
        _model = model

    def __call__(self, params):
        """Update the model with ``params`` and return (loglr, stats)."""
        global _model
        _model.update(**params)
        return _model.loglr, _model.current_stats
class BruteParallelGaussianMarginalize(BaseGaussianNoise):
    """Model that brute-force marginalizes over phase at the likelihood level.

    A pool of worker processes evaluates the wrapped ``base_model``'s loglr
    on a grid of ``coa_phase`` values; the marginal loglr is the log of the
    mean likelihood ratio over that grid.

    Parameters
    ----------
    variable_params : (tuple of) string(s)
        Parameters the model is sampled in.
    cores : int, optional
        Size of the process pool used for each likelihood call.
        Default is 10.
    base_model : str
        Name (key into ``pycbc.inference.models.models``) of the model to
        wrap.
    marginalize_phase : int, optional
        Number of phase grid points to marginalize over. If None/0, no
        phase marginalization is set up.
    \**kwds :
        All other keyword arguments are passed to both this class's parent
        and the wrapped model.
    """
    name = "brute_parallel_gaussian_marginalize"

    def __init__(self, variable_params,
                 cores=10,
                 base_model=None,
                 marginalize_phase=None,
                 **kwds):
        super().__init__(variable_params, **kwds)
        # look up the wrapped model class by name in the model registry
        from pycbc.inference.models import models
        self.model = models[base_model](variable_params, **kwds)
        self.call = likelihood_wrapper(self.model)
        # size of pool for each likelihood call
        self.pool = Pool(int(cores))
        # Only one for now, but can be easily extended
        self.phase = None
        if marginalize_phase:
            samples = int(marginalize_phase)
            # NOTE(review): linspace includes both endpoints, so 0 and 2*pi
            # (the same phase) are both evaluated; endpoint=False would give
            # a uniform grid -- confirm whether this is intentional.
            self.phase = numpy.linspace(0, 2.0 * numpy.pi, samples)

    @property
    def _extra_stats(self):
        # the wrapped model's stats, plus the maximum-likelihood phase and
        # (if not already present) the maximum-likelihood loglr
        stats = self.model._extra_stats
        stats.append('maxl_phase')
        if 'maxl_loglr' not in stats:
            stats.append('maxl_loglr')
        return stats

    def _loglr(self):
        """Return the loglr marginalized over the phase grid.

        Evaluates the wrapped model at each phase grid point in parallel,
        records the stats of the maximum-likelihood grid point, and returns
        ``logsumexp(loglr) - log(nphases)``.

        NOTE(review): implicitly returns None when no phase marginalization
        was configured -- confirm callers never hit that path.
        """
        if self.phase is not None:
            params = []
            for p in self.phase:
                pref = self.current_params.copy()
                pref['coa_phase'] = p
                params.append(pref)
            vals = list(self.pool.map(self.call, params))
            loglr = numpy.array([v[0] for v in vals])
            # get the maxl values
            if 'maxl_loglr' not in self.model._extra_stats:
                maxl_loglrs = loglr
            else:
                maxl_loglrs = numpy.array([v[1]['maxl_loglr'] for v in vals])
            maxidx = maxl_loglrs.argmax()
            maxstats = vals[maxidx][1]
            maxphase = self.phase[maxidx]
            # set the stats
            for stat in maxstats:
                setattr(self._current_stats, stat, maxstats[stat])
            self._current_stats.maxl_phase = maxphase
            self._current_stats.maxl_loglr = maxl_loglrs[maxidx]
            # calculate the marginal loglr and return
            return logsumexp(loglr) - numpy.log(len(self.phase))
class BruteLISASkyModesMarginalize(BaseGaussianNoise):
    """Model that marginalizes over the LISA sky-mode symmetry points.

    The wrapped ``base_model``'s loglr is evaluated at each of the sky
    points related to the current parameters by the longitude/latitude
    (and optionally polarization) symmetry transformations, and the
    results are combined into a single marginal loglr.

    Parameters
    ----------
    variable_params : (tuple of) string(s)
        Parameters the model is sampled in.
    cores : int, optional
        If greater than 1, a multiprocessing pool of this size evaluates
        the sky modes in parallel; otherwise they are evaluated serially.
        Default is 1.
    loop_polarization : bool, optional
        If True, also explicitly check the polarization + pi/2 points,
        doubling the number of sky modes from 8 to 16. Default is False.
    base_model : str
        Name (key into ``pycbc.inference.models.models``) of the model to
        wrap.
    \**kwds :
        All other keyword arguments are passed to both this class's parent
        and the wrapped model. Must include ``config_object`` (set by
        :py:meth:`from_config`).
    """
    name = "brute_lisa_sky_modes_marginalize"

    def __init__(self, variable_params,
                 cores=1,
                 loop_polarization=False,
                 base_model=None,
                 **kwds):
        super().__init__(variable_params, **kwds)
        from pycbc.inference.models import models
        # forward any extra model options from the config file to the
        # wrapped model
        kwds.update(models[base_model].extra_args_from_config(
            kwds['config_object'],
            "model",
            skip_args=[])
        )
        self.model = models[base_model](variable_params, **kwds)
        self.call = likelihood_wrapper(self.model)
        # size of pool for each likelihood call
        if cores > 1:
            self.pool = Pool(int(cores))
            self.mapfunc = self.pool.map
        else:
            self.pool = None
            self.mapfunc = map
        # Do I explicitly check the polarization + pi/2 points
        # We could also add other arguments here, ie only check longitude
        # or latitude symmetry points.
        if loop_polarization:
            self.num_sky_modes = 16
        else:
            self.num_sky_modes = 8
        # When True, _loglr returns the per-mode loglr array instead of the
        # marginalized value (used by ``reconstruct``).
        self.reconstruct_sky_points = False

    @property
    def _extra_stats(self):
        # pass through the wrapped model's stats unchanged
        stats = self.model._extra_stats
        return stats

    def _loglr(self):
        """Return the loglr marginalized over the sky-mode symmetries.

        Evaluates the wrapped model at every sky mode and combines the
        results with a max-subtracted (numerically stable) logsumexp minus
        ``log(num_sky_modes)``. If ``reconstruct_sky_points`` is True, the
        raw per-mode loglr array is returned instead. The per-mode loglr
        values are also recorded as ``llr_mode_{i}`` stats.
        """
        params = []
        for sym_num in range(self.num_sky_modes):
            pref = self.current_params.copy()
            self._apply_sky_point_rotation(pref, sym_num)
            params.append(pref)
        vals = list(self.mapfunc(self.call, params))
        loglr = numpy.array([v[0] for v in vals])
        if self.reconstruct_sky_points:
            return loglr
        # max-subtracted logsumexp for numerical stability
        max_llr_idx = loglr.argmax()
        max_llr = loglr[max_llr_idx]
        marg_lrfac = sum([math.exp(llr - max_llr) for llr in loglr])
        marg_llr = max_llr + math.log(marg_lrfac/self.num_sky_modes)
        # set the stats
        for sym_num in range(self.num_sky_modes):
            setattr(self._current_stats, f'llr_mode_{sym_num}', loglr[sym_num])
        return marg_llr

    def _apply_sky_point_rotation(self, pref, sky_num):
        """ Apply the sky point rotation for mode sky_num to parameters pref

        Modes 0-7 combine four longitude shifts (multiples of pi/2, also
        applied to the polarization) with an optional latitude reflection
        (which also flips inclination and polarization); modes 8-15 apply
        an additional pi/2 polarization shift. ``pref`` is modified in
        place.
        """
        lambdal = pref['eclipticlongitude']
        beta = pref['eclipticlatitude']
        psi = pref['polarization']
        inc = pref['inclination']
        # decompose the mode number: pol flip (//8), latitude (//4),
        # longitude (%4)
        pol_num = sky_num // 8
        sky_num = sky_num % 8
        long_num = sky_num % 4
        lat_num = sky_num // 4
        # Apply latitude symmetry mode
        if lat_num:
            beta = - beta
            inc = numpy.pi - inc
            psi = numpy.pi - psi
        # Apply longitudonal symmetry mode
        lambdal = (lambdal + long_num * 0.5 * numpy.pi) % (2*numpy.pi)
        psi = (psi + long_num * 0.5 * numpy.pi) % (2*numpy.pi)
        # Apply additional polarization mode (shouldn't be needed)
        if pol_num:
            psi = psi + (math.pi / 2.)
        pref['eclipticlongitude'] = lambdal
        pref['eclipticlatitude'] = beta
        pref['polarization'] = psi
        pref['inclination'] = inc

    @classmethod
    def from_config(cls, cp, **kwargs):
        """Initialize from a config file, stashing the config parser as
        ``config_object`` so the wrapped model can read its options."""
        kwargs['config_object'] = cp
        return super(BruteLISASkyModesMarginalize, cls).from_config(
            cp,
            **kwargs
        )

    def reconstruct(self, seed=None):
        """ Reconstruct a point from unwrapping the 8-fold sky symmetry
        """
        if seed:
            numpy.random.seed(seed)
        rec = {}
        logging.info('Reconstruct LISA sky mode symmetry')
        self.reconstruct_sky_points = True
        # with reconstruct_sky_points set, loglr is the per-mode array
        loglr = self.loglr
        # draw one mode with probability proportional to its likelihood
        xl = draw_sample(loglr)
        logging.info('Found point %d', xl)
        # Undo rotations
        # NOTE(review): this re-applies the *forward* rotation for mode xl;
        # the latitude reflection is self-inverse but the longitude shift
        # is not -- confirm this is the intended "unwrapping".
        pref = self.current_params.copy()
        self._apply_sky_point_rotation(pref, xl)
        for val in ['polarization', 'eclipticlongitude', 'eclipticlatitude',
                    'inclination']:
            rec[val] = pref[val]
        rec['loglr'] = loglr[xl]
        rec['loglikelihood'] = self.lognl + rec['loglr']
        self.reconstruct_sky_points = False
        return self.model.reconstruct(seed=seed, rec=rec)
| 7,614
| 32.108696
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/jump/normal.py
|
# Copyright (C) 2019 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Jump proposals that use a normal distribution."""
import numpy
from epsie import proposals as epsie_proposals
from epsie.proposals import Boundaries
from pycbc import VARARGS_DELIM
class EpsieNormal(epsie_proposals.Normal):
    """Adds ``from_config`` method to epsie's normal proposal."""

    @classmethod
    def from_config(cls, cp, section, tag):
        """Loads a proposal from a config file.

        This calls :py:func:`epsie_from_config` with ``cls`` set to
        :py:class:`epsie.proposals.Normal` and ``with_boundaries`` set to
        False. See that function for details on options that can be read.

        Example::

            [jump_proposal-mchirp+q]
            name = normal
            var-q = 0.1

        Parameters
        ----------
        cp : WorkflowConfigParser instance
            Config file to read from.
        section : str
            The name of the section to look in.
        tag : str
            :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names
            to create proposals for.

        Returns
        -------
        :py:class:`epsie.proposals.Normal`:
            A normal proposal for use with ``epsie`` samplers.
        """
        return epsie_from_config(cls, cp, section, tag, with_boundaries=False)
class EpsieAdaptiveNormal(epsie_proposals.AdaptiveNormal):
    """Adds ``from_config`` method to epsie's adaptive normal proposal."""

    @classmethod
    def from_config(cls, cp, section, tag):
        """Create an adaptive normal proposal from a config file.

        Delegates to :py:func:`epsie_adaptive_from_config` with ``cls``
        set to :py:class:`epsie.proposals.AdaptiveNormal`; see that
        function for details on the options that are read. The parameter
        bounds are handed to the proposal as ``prior_widths``.

        Example::

            [jump_proposal-mchirp+q]
            name = adaptive_normal
            adaptation-duration = 1000
            min-q = 1
            max-q = 8
            min-mchirp = 20
            max-mchirp = 80

        Parameters
        ----------
        cp : WorkflowConfigParser instance
            Config file to read from.
        section : str
            The name of the section to look in.
        tag : str
            :py:const:`pycbc.VARARGS_DELIM` separated list of parameter
            names to create proposals for.

        Returns
        -------
        :py:class:`epsie.proposals.AdaptiveNormal`:
            An adaptive normal proposal for use with ``epsie`` samplers.
        """
        proposal = epsie_adaptive_from_config(
            cls, cp, section, tag, boundary_arg_name='prior_widths')
        return proposal
class EpsieATAdaptiveNormal(epsie_proposals.ATAdaptiveNormal):
    """Adds ``from_config`` method to epsie's ATAdaptiveProposal."""

    @classmethod
    def from_config(cls, cp, section, tag):
        """Loads a proposal from a config file.

        This calls :py:func:`epsie_at_adaptive_from_config` with ``cls`` set
        to :py:class:`epsie.proposals.ATAdaptiveNormal` and
        ``with_boundaries`` set to False. See that function for details on
        options that can be read.

        Example::

            [jump_proposal-mchirp+q]
            name = adaptive_proposal
            diagonal =

        Parameters
        ----------
        cp : WorkflowConfigParser instance
            Config file to read from.
        section : str
            The name of the section to look in.
        tag : str
            :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names
            to create proposals for.

        Returns
        -------
        :py:class:`epsie.proposals.ATAdaptiveNormal`:
            An adaptive proposal for use with ``epsie`` samplers.
        """
        return epsie_at_adaptive_from_config(cls, cp, section, tag,
                                             with_boundaries=False)
def epsie_from_config(cls, cp, section, tag, with_boundaries=False):
    r"""Generic function for loading epsie proposals from a config file.

    This should be used for proposals that are not adaptive.

    The section that is read should have the format ``[{section}-{tag}]``,
    where ``{tag}`` is a :py:const:`pycbc.VARARGS_DELIM` separated list
    of the parameters to create the jump proposal for.

    Options that are read:

    * name : str
        Required. Must match the name of the proposal.
    * var-{param} : float
        Optional. Variance to use for parameter {param}. If ``with_boundaries``
        is True, then any parameter not specified will use a default variance
        of :math:`(\Delta p/10)^2`, where :math:`\Delta p` is the boundary
        width for that parameter. If ``with_boundaries`` is False, will use
        a default value of 1.
    * min-{param} : float
    * max-{param} : float
        The bounds on each parameter. Required if ``with_boundaries`` is set to
        True, in which case bounds must be provided for every parameter.

    Parameters
    ----------
    cls : epsie.proposals.BaseProposal
        The epsie proposal class to initialize.
    cp : WorkflowConfigParser instance
        Config file to read from.
    section : str
        The name of the section to look in.
    tag : str
        :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names
        to create proposals for.
    with_boundaries : bool, optional
        Try to load boundaries from the section and pass a ``boundaries``
        argument to the class's initialization. This should be set to true
        for bounded proposals. Default is False.

    Returns
    -------
    cls :
        The class initialized with the options read from the config file.
    """
    # check that the name matches
    # NOTE(review): ``assert`` is stripped when Python runs with -O; an
    # explicit raise would be sturdier for validating user config.
    assert cp.get_opt_tag(section, "name", tag) == cls.name, (
        "name in specified section must match mine")
    params, opts = load_opts(cp, section, tag, skip=['name'])
    args = {'parameters': params}
    if with_boundaries:
        # pops the min/max options for every parameter out of opts
        boundaries = get_param_boundaries(params, opts)
        args['boundaries'] = boundaries
    if 'discrete' in cls.name.split('_'):
        # pops any successive-{param} options out of opts
        args.update({'successive':
                     get_epsie_discrete_successive_settings(params, opts)})
    # if there are any options left, assume they are for setting the variance
    if opts:
        cov = get_variance(params, opts)
    elif with_boundaries:
        # default variance: (boundary width / 10)**2 for each parameter
        cov = numpy.array([abs(boundaries[p])/10. for p in params])**2.
    else:
        # let the proposal class fall back to its own default
        cov = None
    args['cov'] = cov
    # no other options should remain
    if opts:
        raise ValueError("unrecognized options {}"
                         .format(', '.join(opts.keys())))
    return cls(**args)
def epsie_adaptive_from_config(cls, cp, section, tag, with_boundaries=True,
                               boundary_arg_name='boundaries'):
    """Generic function for loading adaptive epsie proposals from a config
    file.

    The section that is read should have the format ``[{section}-{tag}]``,
    where ``{tag}`` is a :py:const:`pycbc.VARARGS_DELIM` separated list
    of the parameters to create the jump proposal for.

    Options that are read:

    * name : str
        Required. Must match the name of the proposal.
    * adaptation-duration : int
        Required. Sets the ``adaptation_duration``.
    * min-{param} : float
    * max-{param} : float
        The bounds on each parameter. Required if ``with_boundaries`` is set to
        True, in which case bounds must be provided for every parameter.
    * var-{param} : float
        Optional. Initial variance to use. If not provided, will use a
        default based on the bounds (see
        :py:class:`epsie.proposals.AdaptiveSupport` for details).
    * adaptation-decay : int
        Optional. Sets the ``adaptation_decay``. If not provided, will use
        the class's default.
    * start-iteration : int
        Optional. Sets the ``start_iteration``. If not provided, will use
        the class's default.
    * target-rate : float
        Optional. Sets the ``target_rate``. If not provided, will use
        the class's default.

    Parameters
    ----------
    cls : epsie.proposals.BaseProposal
        The epsie proposal class to initialize. The class should have
        :py:class:`epsie.proposals.normal.AdaptiveSupport`.
    cp : WorkflowConfigParser instance
        Config file to read from.
    section : str
        The name of the section to look in.
    tag : str
        :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names
        to create proposals for.
    with_boundaries : bool, optional
        Try to load boundaries from the section and pass a ``boundaries``
        argument to the class's initialization. Default is True.
    boundary_arg_name : str, optional
        The name of the argument for the boundaries (only used if
        ``with_boundaries`` is True). Provided because some adaptive proposals
        that only need the boundary widths call this ``prior_widths``. Default
        is ``'boundaries'``.

    Returns
    -------
    cls :
        The class initialized with the options read from the config file.
    """
    # check that the name matches
    # NOTE(review): ``assert`` is stripped when Python runs with -O; an
    # explicit raise would be sturdier for validating user config.
    assert cp.get_opt_tag(section, "name", tag) == cls.name, (
        "name in specified section must match mine")
    params, opts = load_opts(cp, section, tag, skip=['name'])
    args = {'parameters': params}
    # get the bounds
    if with_boundaries:
        # pops the min/max options for every parameter out of opts
        args[boundary_arg_name] = get_param_boundaries(params, opts)
    if 'discrete' in cls.name.split('_'):
        # pops any successive-{param} options out of opts
        args.update({'successive':
                     get_epsie_discrete_successive_settings(params, opts)})
    # get the adaptation parameters
    args.update(get_epsie_adaptation_settings(opts))
    # if there are any other options, assume they are for setting the
    # initial standard deviation
    if opts:
        var = get_variance(params, opts)
        args['initial_std'] = var**0.5
    # at this point, there should be no options left
    if opts:
        raise ValueError('unrecognized options {} in section {}'
                         .format(', '.join(opts.keys()),
                                 '-'.join([section, tag])))
    return cls(**args)
def epsie_at_adaptive_from_config(cls, cp, section, tag,
                                  with_boundaries=False):
    """Generic function for loading AT Adaptive Normal proposals from a config
    file.

    The section that is read should have the format ``[{section}-{tag}]``,
    where ``{tag}`` is a :py:const:`pycbc.VARARGS_DELIM` separated list
    of the parameters to create the jump proposal for.

    Options that are read:

    * name : str
        Required. Must match the name of the proposal.
    * adaptation-duration : int
        Sets the ``adaptation_duration``. If not provided will use the class's
        default.
    * diagonal : bool, optional
        Determines whether only to adapt the variance. If True will only train
        the diagonal elements.
    * componentwise : bool, optional
        Whether to include a componentwise scaling of the parameters.
        By default set to False. Componentwise scaling `ndim` times more
        expensive than global scaling.
    * min-{param} : float
    * max-{param} : float
        The bounds on each parameter. Required if ``with_boundaries`` is set to
        True, in which case bounds must be provided for every parameter.
    * start-iteration : int
        Optional. Sets the ``start_iteration``. If not provided, will use
        the class's default.
    * target-rate : float
        Optional. Sets the ``target_rate``. If not provided, will use
        the class's default.

    Parameters
    ----------
    cls : epsie.proposals.BaseProposal
        The epsie proposal class to initialize. The class should have
        :py:class:`epsie.proposals.normal.AdaptiveSupport`.
    cp : WorkflowConfigParser instance
        Config file to read from.
    section : str
        The name of the section to look in.
    tag : str
        :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names
        to create proposals for.
    with_boundaries : bool, optional
        Try to load boundaries from the section and pass a ``boundaries``
        argument to the class's initialization. Default is False.

    Returns
    -------
    cls :
        The class initialized with the options read from the config file.
    """
    # check that the name matches
    # NOTE(review): ``assert`` is stripped when Python runs with -O; an
    # explicit raise would be sturdier for validating user config.
    assert cp.get_opt_tag(section, "name", tag) == cls.name, (
        "name in specified section must match mine")
    params, opts = load_opts(cp, section, tag, skip=['name'])
    args = {'parameters': params}
    # get the bounds
    if with_boundaries:
        args['boundaries'] = get_param_boundaries(params, opts)
    if 'discrete' in cls.name.split('_'):
        args.update({'successive':
                     get_epsie_discrete_successive_settings(params, opts)})
    # get the adaptation parameters; passing the name lets
    # adaptation-duration be omitted for "at adaptive" proposals
    args.update(get_epsie_adaptation_settings(opts, cls.name))
    # bounded and angular adaptive proposals support diagonal-only
    diagonal = opts.pop('diagonal', None)
    if not any(p in cls.name.split('_') for p in ['bounded', 'angular']):
        # NOTE(review): a ``diagonal`` option given to a bounded or angular
        # proposal is popped but silently discarded -- confirm intended.
        args.update({'diagonal': diagonal is not None})
    componentwise = opts.pop('componentwise', None)
    if componentwise is not None:
        args.update({'componentwise': True})
    if opts:
        raise ValueError("unrecognized options {}"
                         .format(', '.join(opts.keys())))
    return cls(**args)
def load_opts(cp, section, tag, skip=None):
    """Loads config options for jump proposals.

    All ``-`` in option names are converted to ``_`` before returning.

    Parameters
    ----------
    cp : WorkflowConfigParser instance
        Config file to read from.
    section : str
        The name of the section to look in.
    tag : str
        :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names
        to create proposals for.
    skip : list, optional
        List of option names to skip loading.

    Returns
    -------
    params : list
        List of parameter names the jump proposal is for.
    opts : dict
        Dictionary of option names -> values, where all values are strings.
    """
    skipset = set(skip) if skip is not None else set()
    params = tag.split(VARARGS_DELIM)
    # the section to read is "{section}-{tag}"
    readsection = '-'.join([section, tag])
    opts = {}
    for opt in cp.options(readsection):
        if opt in skipset:
            continue
        opts[opt.replace('-', '_')] = cp.get(readsection, opt)
    return params, opts
def get_variance(params, opts, default=1.):
    """Pop per-parameter variances for jump proposals out of ``opts``.

    For each parameter ``p`` in ``params``, looks for a ``var_p`` entry in
    ``opts``. When found, the entry is removed from ``opts`` and converted
    to a float; otherwise ``default`` is used.

    Parameters
    ----------
    params : list of str
        List of parameter names to look for.
    opts : dict
        Dictionary of option -> value that was loaded from a config file
        section. Matched entries are popped in place.
    default : float, optional
        Default value to use for parameters that do not have variances
        provided. Default is 1.

    Returns
    -------
    numpy.array
        Array of variances to use. Order is the same as the parameter names
        given in ``params``.
    """
    variances = []
    for param in params:
        raw = opts.pop('var_{}'.format(param), default)
        variances.append(float(raw))
    return numpy.array(variances)
def get_param_boundaries(params, opts):
    """Gets parameter boundaries for jump proposals.

    The syntax for the options should be ``(min|max)_{param} = value``. Both
    a minimum and maximum should be provided for every parameter in
    ``params``. If the opts are created using ``load_opts``, then the options
    can be formatted as ``(min|max)-{param}``, since that function will turn
    all ``-`` to ``_`` in option names.

    Arguments will be popped from the given ``opts`` dictionary.

    Parameters
    ----------
    params : list of str
        List of parameter names to get boundaries for.
    opts : dict
        Dictionary of option -> value that was loaded from a config file
        section.

    Returns
    -------
    dict :
        Dictionary of parameter names -> :py:class:`epsie.proposals.Boundaries`

    Raises
    ------
    ValueError
        If a minimum or maximum bound is missing for any parameter.
    """
    boundaries = {}
    for param in params:
        minbound = opts.pop('min_{}'.format(param), None)
        if minbound is None:
            # note the trailing space: the two literals are concatenated
            raise ValueError("Must provide a minimum bound for {p}. "
                             "Syntax is min_{p} = val".format(p=param))
        maxbound = opts.pop('max_{}'.format(param), None)
        if maxbound is None:
            raise ValueError("Must provide a maximum bound for {p}. "
                             "Syntax is max_{p} = val".format(p=param))
        boundaries[param] = Boundaries((float(minbound), float(maxbound)))
    return boundaries
def get_epsie_adaptation_settings(opts, name=None):
    """Get settings for Epsie adaptive proposals from a config file.

    This requires that ``adaptation_duration`` is in the given dictionary,
    unless the proposal is an "at adaptive" one (``name`` contains both
    ``at`` and ``adaptive``), in which case the duration may be omitted and
    ``None`` is passed through so the class can pick its own default.
    It will also look for ``adaptation_decay``, ``start_iteration``, and
    ``target_rate``, but these are optional. Arguments will be popped from
    the given dictionary.

    Parameters
    ----------
    opts : dict
        Dictionary of option -> value that was loaded from a config file
        section.
    name : str (optional)
        Proposal name; used to decide whether ``adaptation_duration`` may
        be omitted.

    Returns
    -------
    dict :
        Dictionary of argument name -> values.

    Raises
    ------
    ValueError
        If ``adaptation_duration`` is not provided and the proposal is not
        an "at adaptive" proposal.
    """
    args = {}
    adaptation_duration = opts.pop('adaptation_duration', None)
    if adaptation_duration is None:
        # Only the "at adaptive" proposals can determine the duration
        # themselves; everything else must have it in the config. Raising
        # here (even when no name is given) gives a clear error instead of
        # a TypeError later when the proposal class is instantiated.
        if name is not None and all(
                p in name.split('_') for p in ['at', 'adaptive']):
            args.update({'adaptation_duration': None})
        else:
            raise ValueError("No adaptation_duration specified")
    else:
        args.update({'adaptation_duration': int(adaptation_duration)})
    # optional args
    adaptation_decay = opts.pop('adaptation_decay', None)
    if adaptation_decay is not None:
        args.update({'adaptation_decay': int(adaptation_decay)})
    start_iteration = opts.pop('start_iteration', None)
    if start_iteration is not None:
        args.update({'start_iteration': int(start_iteration)})
    target_rate = opts.pop('target_rate', None)
    if target_rate is not None:
        args.update({'target_rate': float(target_rate)})
    return args
def get_epsie_discrete_successive_settings(params, opts):
    """Get settings for Epsie successive discrete proposal successive jumps
    from a config file.

    A parameter allows successive jumps (i.e. jumps from an integer back to
    the same integer) only if a ``successive_{param}`` option is present;
    when absent, successive jumps are disabled for that parameter. Matched
    options are popped from the given dictionary.

    Example::

        [jump_proposal-k+n]
        name = discrete
        successive-k =

    This example sets successive jumps for ``k`` but does not do so for ``n``.

    Parameters
    ----------
    params : list of str
        List of parameter names to get the successive option for.
    opts : dict
        Dictionary of option -> value that was loaded from a config file
        section.

    Returns
    -------
    dict :
        Dictionary of parameter names -> bools
    """
    settings = {}
    for param in params:
        optname = 'successive_{}'.format(param)
        # presence of the option (even with an empty value) enables it
        settings[param] = opts.pop(optname, None) is not None
    return settings
| 20,231
| 35.454054
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/jump/discrete.py
|
# Copyright (C) 2020 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Jump proposals that use a bounded normal distribution."""
from epsie import proposals as epsie_proposals
from .normal import (epsie_from_config, epsie_adaptive_from_config)
class EpsieNormalDiscrete(epsie_proposals.NormalDiscrete):
    """Adds ``from_config`` method to epsie's normal discrete proposal."""

    @classmethod
    def from_config(cls, cp, section, tag):
        r"""Loads a proposal from a config file.

        This calls :py:func:`epsie_from_config` with ``cls`` set to
        :py:class:`epsie.proposals.NormalDiscrete` and ``with_boundaries`` set
        to False. See that function for details on options that can be read.

        Example::

            [jump_proposal-index]
            name = discrete

        Parameters
        ----------
        cp : WorkflowConfigParser instance
            Config file to read from.
        section : str
            The name of the section to look in.
        tag : str
            :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names
            to create proposals for.

        Returns
        -------
        :py:class:`epsie.proposals.NormalDiscrete`:
            A discrete normal proposal for use with ``epsie`` samplers.
        """
        return epsie_from_config(cls, cp, section, tag, with_boundaries=False)
class EpsieBoundedDiscrete(epsie_proposals.BoundedDiscrete):
    """Adds ``from_config`` method to epsie's bounded discrete proposal."""

    @classmethod
    def from_config(cls, cp, section, tag):
        r"""Create a bounded discrete proposal from a config file.

        Delegates to :py:func:`epsie_from_config` with ``with_boundaries``
        set to True, so ``min``/``max`` bounds must be given for every
        parameter; see that function for all the options that are read.

        Example::

            [jump_proposal-index]
            name = bounded_discrete
            min-index = 0
            max-index = 19

        Parameters
        ----------
        cp : WorkflowConfigParser instance
            Config file to read from.
        section : str
            The name of the section to look in.
        tag : str
            :py:const:`pycbc.VARARGS_DELIM` separated list of parameter
            names to create proposals for.

        Returns
        -------
        :py:class:`epsie.proposals.BoundedDiscrete`:
            A bounded discrete proposal for use with ``epsie`` samplers.
        """
        return epsie_from_config(
            cls, cp, section, tag, with_boundaries=True)
class EpsieAdaptiveNormalDiscrete(epsie_proposals.AdaptiveNormalDiscrete):
    """Adds ``from_config`` method to epsie's adaptive normal discrete
    proposal."""

    @classmethod
    def from_config(cls, cp, section, tag):
        """Loads a proposal from a config file.

        This calls :py:func:`epsie_adaptive_from_config` with ``cls`` set to
        :py:class:`epsie.proposals.AdaptiveNormalDiscrete`. See that function
        for details on options that can be read. The parameter bounds are
        passed to the proposal as ``prior_widths``.

        Example::

            [jump_proposal-index]
            name = adaptive_normal_discrete
            adaptation-duration = 1000
            min-index = 0
            max-index = 42

        Parameters
        ----------
        cp : WorkflowConfigParser instance
            Config file to read from.
        section : str
            The name of the section to look in.
        tag : str
            :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names
            to create proposals for.

        Returns
        -------
        :py:class:`epsie.proposals.AdaptiveNormalDiscrete`:
            An adaptive normal discrete proposal for use with ``epsie``
            samplers.
        """
        return epsie_adaptive_from_config(cls, cp, section, tag,
                                          boundary_arg_name='prior_widths')
class EpsieAdaptiveBoundedDiscrete(epsie_proposals.AdaptiveBoundedDiscrete):
    """Adds ``from_config`` method to epsie's adaptive bounded discrete
    proposal."""

    @classmethod
    def from_config(cls, cp, section, tag):
        """Loads a proposal from a config file.

        This calls :py:func:`epsie_adaptive_from_config` with ``cls`` set to
        :py:class:`epsie.proposals.AdaptiveBoundedDiscrete`. See that function
        for details on options that can be read.

        Example::

            [jump_proposal-index]
            name = adaptive_bounded_discrete
            adaptation-duration = 1000
            min-index = 0
            max-index = 42

        Parameters
        ----------
        cp : WorkflowConfigParser instance
            Config file to read from.
        section : str
            The name of the section to look in.
        tag : str
            :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names
            to create proposals for.

        Returns
        -------
        :py:class:`epsie.proposals.AdaptiveBoundedDiscrete`:
            An adaptive bounded discrete proposal for use with ``epsie``
            samplers.
        """
        return epsie_adaptive_from_config(cls, cp, section, tag)
| 5,794
| 33.494048
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/jump/angular.py
|
# Copyright (C) 2020 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Jump proposals that use cyclic boundaries on [0, 2pi)."""
from epsie import proposals as epsie_proposals
from .normal import (epsie_from_config, epsie_adaptive_from_config,
epsie_at_adaptive_from_config)
class EpsieAngular(epsie_proposals.Angular):
    """Adds ``from_config`` method to epsie's angular proposal."""

    @classmethod
    def from_config(cls, cp, section, tag):
        """Create an angular proposal from a config file.

        Delegates to :py:func:`epsie_from_config` with ``with_boundaries``
        set to False (the angular proposal is always cyclic on
        ``[0, 2*pi)``); see that function for the options that are read.

        Example::

            [jump_proposal-ra]
            name = angular
            var-ra = 0.01

        Parameters
        ----------
        cp : WorkflowConfigParser instance
            Config file to read from.
        section : str
            The name of the section to look in.
        tag : str
            :py:const:`pycbc.VARARGS_DELIM` separated list of parameter
            names to create proposals for.

        Returns
        -------
        :py:class:`epsie.proposals.Angular`:
            An angular proposal for use with ``epsie`` samplers.
        """
        return epsie_from_config(
            cls, cp, section, tag, with_boundaries=False)
class EpsieAdaptiveAngular(epsie_proposals.AdaptiveAngular):
    """Adds ``from_config`` method to epsie's adaptive angular proposal."""

    @classmethod
    def from_config(cls, cp, section, tag):
        r"""Loads a proposal from a config file.

        This calls :py:func:`epsie_adaptive_from_config` with ``cls`` set to
        :py:class:`epsie.proposals.AdaptiveAngular` and
        ``with_boundaries`` set to False (since the boundaries for the angular
        proposals are always :math:`[0, 2\pi)`). See that function
        for details on options that can be read.

        Example::

            [jump_proposal-ra]
            name = adaptive_angular
            adaptation-duration = 1000

        Parameters
        ----------
        cp : WorkflowConfigParser instance
            Config file to read from.
        section : str
            The name of the section to look in.
        tag : str
            :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names
            to create proposals for.

        Returns
        -------
        :py:class:`epsie.proposals.AdaptiveAngular`:
            An adaptive angular proposal for use with ``epsie`` samplers.
        """
        return epsie_adaptive_from_config(cls, cp, section, tag,
                                          with_boundaries=False)
class EpsieATAdaptiveAngular(epsie_proposals.ATAdaptiveAngular):
    """Adds ``from_config`` method to epsie's adaptive angular proposal."""

    @classmethod
    def from_config(cls, cp, section, tag):
        r"""Loads a proposal from a config file.

        This calls :py:func:`epsie_at_adaptive_from_config` with ``cls`` set
        to :py:class:`epsie.proposals.ATAdaptiveAngular` and
        ``with_boundaries`` set to False (since the boundaries for the angular
        proposals are always :math:`[0, 2\pi)`). See that function
        for details on options that can be read.

        Example::

            [jump_proposal-ra]
            name = adaptive_angular_proposal
            adaptation-duration = 1000

        Parameters
        ----------
        cp : WorkflowConfigParser instance
            Config file to read from.
        section : str
            The name of the section to look in.
        tag : str
            :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names
            to create proposals for.

        Returns
        -------
        :py:class:`epsie.proposals.ATAdaptiveAngular`:
            An adaptive angular proposal for use with ``epsie`` samplers.
        """
        return epsie_at_adaptive_from_config(cls, cp, section, tag,
                                             with_boundaries=False)
| 4,775
| 34.909774
| 78
|
py
|
pycbc
|
pycbc-master/pycbc/inference/jump/__init__.py
|
# Copyright (C) 2019 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Provides custom jump proposals for samplers."""
from .normal import (EpsieNormal, EpsieAdaptiveNormal, EpsieATAdaptiveNormal)
from .bounded_normal import (EpsieBoundedNormal, EpsieAdaptiveBoundedNormal,
EpsieATAdaptiveBoundedNormal)
from .angular import (EpsieAngular, EpsieAdaptiveAngular,
EpsieATAdaptiveAngular)
from .discrete import (EpsieNormalDiscrete, EpsieBoundedDiscrete,
EpsieAdaptiveNormalDiscrete,
EpsieAdaptiveBoundedDiscrete)
# Registry mapping a proposal's ``name`` (as written in config files) to the
# class implementing it; consulted by ``epsie_proposals_from_config`` below.
epsie_proposals = {
    EpsieNormal.name: EpsieNormal,
    EpsieAdaptiveNormal.name: EpsieAdaptiveNormal,
    EpsieATAdaptiveNormal.name: EpsieATAdaptiveNormal,
    EpsieBoundedNormal.name: EpsieBoundedNormal,
    EpsieAdaptiveBoundedNormal.name: EpsieAdaptiveBoundedNormal,
    EpsieATAdaptiveBoundedNormal.name: EpsieATAdaptiveBoundedNormal,
    EpsieAngular.name: EpsieAngular,
    EpsieAdaptiveAngular.name: EpsieAdaptiveAngular,
    EpsieATAdaptiveAngular.name: EpsieATAdaptiveAngular,
    EpsieNormalDiscrete.name: EpsieNormalDiscrete,
    EpsieAdaptiveNormalDiscrete.name: EpsieAdaptiveNormalDiscrete,
    EpsieBoundedDiscrete.name: EpsieBoundedDiscrete,
    EpsieAdaptiveBoundedDiscrete.name: EpsieAdaptiveBoundedDiscrete,
}
def epsie_proposals_from_config(cp, section='jump_proposal'):
    """Load all epsie jump proposals defined in a config file.

    Every sub-section whose header starts with ``section`` (default
    ``'jump_proposal'``) yields one proposal; the tag part of the header
    lists the parameters the proposal applies to.

    Example::

        [jump_proposal-mtotal+q]
        name = adaptive_normal
        adaptation-duration = 1000
        min-q = 1
        max-q = 8
        min-mtotal = 20
        max-mtotal = 160
        [jump_proposal-spin1_a]
        name = normal

    Parameters
    ----------
    cp : WorkflowConfigParser instance
        The config file to read.
    section : str, optional
        The section name to read jump proposals from. Default is
        ``'jump_proposal'``.

    Returns
    -------
    list :
        List of the proposal instances.
    """
    proposals = []
    for tag in cp.get_subsections(section):
        # the 'name' option selects which proposal class to instantiate
        propname = cp.get_opt_tag(section, "name", tag)
        propcls = epsie_proposals[propname]
        proposals.append(propcls.from_config(cp, section, tag))
    return proposals
| 3,211
| 36.788235
| 77
|
py
|
pycbc
|
pycbc-master/pycbc/inference/jump/bounded_normal.py
|
# Copyright (C) 2020 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Jump proposals that use a bounded normal distribution."""
from epsie import proposals as epsie_proposals
from .normal import (epsie_from_config, epsie_adaptive_from_config,
epsie_at_adaptive_from_config)
class EpsieBoundedNormal(epsie_proposals.BoundedNormal):
    """Adds ``from_config`` method to epsie's bounded normal proposal."""

    @classmethod
    def from_config(cls, cp, section, tag):
        r"""Construct a bounded normal proposal from a config file.

        Delegates to :py:func:`epsie_from_config` with ``cls`` set to
        :py:class:`epsie.proposals.BoundedNormal` and ``with_boundaries``
        set to True, so min/max bounds are read for every parameter. See
        that function for details on options that can be read.

        Example::

            [jump_proposal-mchrip+q]
            name = bounded_normal
            min-q = 1
            max-q = 8
            min-mchirp = 20
            max-mchirp = 80

        Parameters
        ----------
        cp : WorkflowConfigParser instance
            Config file to read from.
        section : str
            The name of the section to look in.
        tag : str
            :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names
            to create proposals for.

        Returns
        -------
        :py:class:`epsie.proposals.BoundedNormal`:
            A bounded normal proposal for use with ``epsie`` samplers.
        """
        proposal = epsie_from_config(cls, cp, section, tag,
                                     with_boundaries=True)
        return proposal
class EpsieAdaptiveBoundedNormal(epsie_proposals.AdaptiveBoundedNormal):
    """Adds ``from_config`` method to epsie's adaptive normal proposal."""

    @classmethod
    def from_config(cls, cp, section, tag):
        """Construct an adaptive bounded normal proposal from a config file.

        Delegates to :py:func:`epsie_adaptive_from_config` with ``cls`` set
        to :py:class:`epsie.proposals.AdaptiveBoundedNormal`. See that
        function for details on options that can be read.

        Example::

            [jump_proposal-q]
            name = adaptive_bounded_normal
            adaptation-duration = 1000
            min-q = 1
            max-q = 8

        Parameters
        ----------
        cp : WorkflowConfigParser instance
            Config file to read from.
        section : str
            The name of the section to look in.
        tag : str
            :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names
            to create proposals for.

        Returns
        -------
        :py:class:`epsie.proposals.AdaptiveBoundedNormal`:
            An adaptive normal proposal for use with ``epsie`` samplers.
        """
        proposal = epsie_adaptive_from_config(cls, cp, section, tag)
        return proposal
class EpsieATAdaptiveBoundedNormal(epsie_proposals.ATAdaptiveBoundedNormal):
    """Adds ``from_config`` method to epsie's adaptive bounded proposal."""
    @classmethod
    def from_config(cls, cp, section, tag):
        """Loads a proposal from a config file.
        This calls :py:func:`epsie_at_adaptive_from_config` with ``cls`` set
        to :py:class:`epsie.proposals.ATAdaptiveBoundedNormal` and
        ``with_boundaries`` set to True. See that function
        for details on options that can be read.
        Example::
            [jump_proposal-q]
            name = adaptive_bounded_proposal
            min-q = 1
            max-q = 8
        Parameters
        ----------
        cp : WorkflowConfigParser instance
            Config file to read from.
        section : str
            The name of the section to look in.
        tag : str
            :py:const:`pycbc.VARARGS_DELIM` separated list of parameter names
            to create proposals for.
        Returns
        -------
        :py:class:`epsie.proposals.ATAdaptiveBoundedNormal`:
            An adaptive bounded proposal for use with ``epsie`` samplers.
        """
        return epsie_at_adaptive_from_config(cls, cp, section, tag,
                                             with_boundaries=True)
| 4,652
| 33.723881
| 78
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/waveform.py
|
# Copyright (C) 2012 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Convenience functions to genenerate gravitational wave templates and
waveforms.
"""
import os
import lal, numpy, copy
from pycbc.types import TimeSeries, FrequencySeries, zeros, Array
from pycbc.types import real_same_precision_as, complex_same_precision_as
import pycbc.scheme as _scheme
import inspect
from pycbc.fft import fft
from pycbc import pnutils, libutils
from pycbc.waveform import utils as wfutils
from pycbc.waveform import parameters
from pycbc.conversions import get_final_from_initial, tau_from_final_mass_spin
from pycbc.filter import interpolate_complex_frequency, resample_to_delta_t
import pycbc
from .spa_tmplt import spa_tmplt, spa_tmplt_norm, spa_tmplt_end, \
spa_tmplt_precondition, spa_amplitude_factor, \
spa_length_in_time
class NoWaveformError(Exception):
    """Raised when generating a waveform would just return all zeros,
    e.g., if a requested `f_final` is <= `f_lower`.
    """
class FailedWaveformError(Exception):
    """Raised when a waveform fails to generate."""
# If this is set to True, waveform generation codes will try to regenerate
# waveforms with known failure conditions to try to avoid the failure. For
# example SEOBNRv3 waveforms would be regenerated with double the sample rate.
# If this is set to False waveform failures will always raise exceptions
fail_tolerant_waveform_generation = True
# Default values for every recognized waveform parameter; props() starts
# from this dict and overlays template attributes and kwargs.
# NOTE(review): this calls default_dict() on fd_waveform_params, adds the
# td_waveform_params ParameterList to it, then calls default_dict() on the
# sum — confirm against pycbc.waveform.parameters that this mixed expression
# is intended rather than (fd + td).default_dict().
default_args = \
    (parameters.fd_waveform_params.default_dict() +
     parameters.td_waveform_params).default_dict()
# Defaults and required arguments for sine-Gaussian burst generation.
default_sgburst_args = {'eccentricity':0, 'polarization':0}
sgburst_required_args = ['q','frequency','hrss']
# td, fd, filter waveforms generated on the CPU
# (populated from lalsimulation below, if it is importable)
_lalsim_td_approximants = {}
_lalsim_fd_approximants = {}
_lalsim_enum = {}
_lalsim_sgburst_approximants = {}
def _check_lal_pars(p):
    """ Create a laldict object from the dictionary of waveform parameters
    Parameters
    ----------
    p: dictionary
        The dictionary of lalsimulation paramaters
    Returns
    -------
    laldict: LalDict
        The lal type dictionary to pass to the lalsimulation waveform functions.
    """
    lal_pars = lal.CreateDict()
    #nonGRparams can be straightforwardly added if needed, however they have to
    # be invoked one by one
    # PN order settings; -1 is the sentinel for "use the approximant default"
    if p['phase_order']!=-1:
        lalsimulation.SimInspiralWaveformParamsInsertPNPhaseOrder(lal_pars,int(p['phase_order']))
    if p['amplitude_order']!=-1:
        lalsimulation.SimInspiralWaveformParamsInsertPNAmplitudeOrder(lal_pars,int(p['amplitude_order']))
    if p['spin_order']!=-1:
        lalsimulation.SimInspiralWaveformParamsInsertPNSpinOrder(lal_pars,int(p['spin_order']))
    if p['tidal_order']!=-1:
        lalsimulation.SimInspiralWaveformParamsInsertPNTidalOrder(lal_pars, p['tidal_order'])
    if p['eccentricity_order']!=-1:
        lalsimulation.SimInspiralWaveformParamsInsertPNEccentricityOrder(lal_pars, p['eccentricity_order'])
    # Tidal deformability / matter parameters; None means "leave unset"
    if p['lambda1'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(lal_pars, p['lambda1'])
    if p['lambda2'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(lal_pars, p['lambda2'])
    if p['lambda_octu1'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarLambda1(lal_pars, p['lambda_octu1'])
    if p['lambda_octu2'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarLambda2(lal_pars, p['lambda_octu2'])
    if p['quadfmode1'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertTidalQuadrupolarFMode1(lal_pars, p['quadfmode1'])
    if p['quadfmode2'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertTidalQuadrupolarFMode2(lal_pars, p['quadfmode2'])
    if p['octufmode1'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarFMode1(lal_pars, p['octufmode1'])
    if p['octufmode2'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarFMode2(lal_pars, p['octufmode2'])
    if p['dquad_mon1'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertdQuadMon1(lal_pars, p['dquad_mon1'])
    if p['dquad_mon2'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertdQuadMon2(lal_pars, p['dquad_mon2'])
    # Miscellaneous options; these are truthiness-checked, so empty strings
    # and zeros are treated the same as "not given"
    if p['numrel_data']:
        lalsimulation.SimInspiralWaveformParamsInsertNumRelData(lal_pars, str(p['numrel_data']))
    if p['modes_choice']:
        lalsimulation.SimInspiralWaveformParamsInsertModesChoice(lal_pars, p['modes_choice'])
    if p['frame_axis']:
        lalsimulation.SimInspiralWaveformParamsInsertFrameAxis(lal_pars, p['frame_axis'])
    if p['side_bands']:
        lalsimulation.SimInspiralWaveformParamsInsertSideband(lal_pars, p['side_bands'])
    # Restrict the (l, m) modes used; expects a list of (l, m) tuples
    # (see parse_mode_array)
    if p['mode_array'] is not None:
        ma = lalsimulation.SimInspiralCreateModeArray()
        for l,m in p['mode_array']:
            lalsimulation.SimInspiralModeArrayActivateMode(ma, l, m)
        lalsimulation.SimInspiralWaveformParamsInsertModeArray(lal_pars, ma)
    #TestingGR parameters:
    if p['dchi0'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi0(lal_pars,p['dchi0'])
    if p['dchi1'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi1(lal_pars,p['dchi1'])
    if p['dchi2'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi2(lal_pars,p['dchi2'])
    if p['dchi3'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi3(lal_pars,p['dchi3'])
    if p['dchi4'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi4(lal_pars,p['dchi4'])
    if p['dchi5'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi5(lal_pars,p['dchi5'])
    if p['dchi5l'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi5L(lal_pars,p['dchi5l'])
    if p['dchi6'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi6(lal_pars,p['dchi6'])
    if p['dchi6l'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi6L(lal_pars,p['dchi6l'])
    if p['dchi7'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertNonGRDChi7(lal_pars,p['dchi7'])
    if p['dalpha1'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertNonGRDAlpha1(lal_pars,p['dalpha1'])
    if p['dalpha2'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertNonGRDAlpha2(lal_pars,p['dalpha2'])
    if p['dalpha3'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertNonGRDAlpha3(lal_pars,p['dalpha3'])
    if p['dalpha4'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertNonGRDAlpha4(lal_pars,p['dalpha4'])
    if p['dalpha5'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertNonGRDAlpha5(lal_pars,p['dalpha5'])
    if p['dbeta1'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertNonGRDBeta1(lal_pars,p['dbeta1'])
    if p['dbeta2'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertNonGRDBeta2(lal_pars,p['dbeta2'])
    if p['dbeta3'] is not None:
        lalsimulation.SimInspiralWaveformParamsInsertNonGRDBeta3(lal_pars,p['dbeta3'])
    return lal_pars
def _lalsim_td_waveform(**p):
    """Shim around lalsimulation.SimInspiralChooseTDWaveform.

    Returns the plus and cross polarizations as pycbc TimeSeries. If
    generation raises a RuntimeError and fail_tolerant_waveform_generation
    is True, approximant-specific workarounds are attempted (currently only
    for SEOBNRv3); otherwise the error propagates.
    """
    lal_pars = _check_lal_pars(p)
    #nonGRparams can be straightforwardly added if needed, however they have to
    # be invoked one by one
    try:
        hp1, hc1 = lalsimulation.SimInspiralChooseTDWaveform(
               float(pnutils.solar_mass_to_kg(p['mass1'])),
               float(pnutils.solar_mass_to_kg(p['mass2'])),
               float(p['spin1x']), float(p['spin1y']), float(p['spin1z']),
               float(p['spin2x']), float(p['spin2y']), float(p['spin2z']),
               pnutils.megaparsecs_to_meters(float(p['distance'])),
               float(p['inclination']), float(p['coa_phase']),
               float(p['long_asc_nodes']), float(p['eccentricity']), float(p['mean_per_ano']),
               float(p['delta_t']), float(p['f_lower']), float(p['f_ref']),
               lal_pars,
               _lalsim_enum[p['approximant']])
    except RuntimeError:
        if not fail_tolerant_waveform_generation:
            raise
        # For some cases failure modes can occur. Here we add waveform-specific
        # instructions to try to work with waveforms that are known to fail.
        if 'SEOBNRv3' in p['approximant']:
            # Try doubling the sample time and redoing.
            # Don't want to get stuck in a loop though!
            # delta_t_orig records the caller's sample spacing so we can
            # bound the recursion and resample the result back at the end.
            if 'delta_t_orig' not in p:
                p['delta_t_orig'] = p['delta_t']
            p['delta_t'] = p['delta_t'] / 2.
            # give up once delta_t has been halved more than 3 times
            # (ratio 16 > 9)
            if p['delta_t_orig'] / p['delta_t'] > 9:
                raise
            hp, hc = _lalsim_td_waveform(**p)
            p['delta_t'] = p['delta_t_orig']
            # downsample back to the originally requested delta_t
            hp = resample_to_delta_t(hp, hp.delta_t*2)
            hc = resample_to_delta_t(hc, hc.delta_t*2)
            return hp, hc
        raise
    #lal.DestroyDict(lal_pars)
    # wrap the raw lal vectors as pycbc TimeSeries
    hp = TimeSeries(hp1.data.data[:], delta_t=hp1.deltaT, epoch=hp1.epoch)
    hc = TimeSeries(hc1.data.data[:], delta_t=hc1.deltaT, epoch=hc1.epoch)
    return hp, hc
# arguments that must be present before this generator can be called
_lalsim_td_waveform.required = parameters.cbc_td_required
def _spintaylor_aligned_prec_swapper(**p):
    """
    SpinTaylorF2 is only single spin, it also struggles with anti-aligned spin
    waveforms. This construct chooses between the aligned-twospin TaylorF2
    model and the precessing singlespin SpinTaylorF2 model: aligned spins
    use TaylorF2, any in-plane spin component selects SpinTaylorF2. For
    nonaligned doublespin systems the underlying generator will fail.
    """
    saved_approximant = p['approximant']
    aligned = (p['spin1x'] == 0 and p['spin1y'] == 0
               and p['spin2x'] == 0 and p['spin2y'] == 0)
    p['approximant'] = 'TaylorF2' if aligned else 'SpinTaylorF2'
    hp, hc = _lalsim_fd_waveform(**p)
    # restore the caller's approximant name before returning
    p['approximant'] = saved_approximant
    return hp, hc
def _lalsim_fd_waveform(**p):
    """Shim around lalsimulation.SimInspiralChooseFDWaveform.

    Returns the plus and cross polarizations as pycbc FrequencySeries.
    """
    lal_pars = _check_lal_pars(p)
    hp1, hc1 = lalsimulation.SimInspiralChooseFDWaveform(
               float(pnutils.solar_mass_to_kg(p['mass1'])),
               float(pnutils.solar_mass_to_kg(p['mass2'])),
               float(p['spin1x']), float(p['spin1y']), float(p['spin1z']),
               float(p['spin2x']), float(p['spin2y']), float(p['spin2z']),
               pnutils.megaparsecs_to_meters(float(p['distance'])),
               float(p['inclination']), float(p['coa_phase']),
               float(p['long_asc_nodes']), float(p['eccentricity']), float(p['mean_per_ano']),
               p['delta_f'], float(p['f_lower']), float(p['f_final']), float(p['f_ref']),
               lal_pars,
               _lalsim_enum[p['approximant']])
    # wrap the raw lal vectors as pycbc FrequencySeries
    hp = FrequencySeries(hp1.data.data[:], delta_f=hp1.deltaF,
                            epoch=hp1.epoch)
    hc = FrequencySeries(hc1.data.data[:], delta_f=hc1.deltaF,
                        epoch=hc1.epoch)
    #lal.DestroyDict(lal_pars)
    return hp, hc
# arguments that must be present before this generator can be called
_lalsim_fd_waveform.required = parameters.cbc_fd_required
def _lalsim_sgburst_waveform(**p):
    """Shim around lalsimulation.SimBurstSineGaussian.

    Returns the plus and cross polarizations as pycbc TimeSeries.
    """
    hp_lal, hc_lal = lalsimulation.SimBurstSineGaussian(
        float(p['q']),
        float(p['frequency']),
        float(p['hrss']),
        float(p['eccentricity']),
        float(p['polarization']),
        float(p['delta_t']))
    hp = TimeSeries(hp_lal.data.data[:], delta_t=hp_lal.deltaT,
                    epoch=hp_lal.epoch)
    hc = TimeSeries(hc_lal.data.data[:], delta_t=hc_lal.deltaT,
                    epoch=hc_lal.epoch)
    return hp, hc
# Populate waveform approximants from lalsimulation if the library is
# available
try:
    import lalsimulation
    # register every approximant with a time-domain implementation
    for approx_enum in range(0, lalsimulation.NumApproximants):
        if lalsimulation.SimInspiralImplementedTDApproximants(approx_enum):
            approx_name = lalsimulation.GetStringFromApproximant(approx_enum)
            _lalsim_enum[approx_name] = approx_enum
            _lalsim_td_approximants[approx_name] = _lalsim_td_waveform
    # register every approximant with a frequency-domain implementation
    for approx_enum in range(0, lalsimulation.NumApproximants):
        if lalsimulation.SimInspiralImplementedFDApproximants(approx_enum):
            approx_name = lalsimulation.GetStringFromApproximant(approx_enum)
            _lalsim_enum[approx_name] = approx_enum
            _lalsim_fd_approximants[approx_name] = _lalsim_fd_waveform
    # sine-Gaussian burst
    # NOTE(review): this loop gates on SimInspiralImplementedFDApproximants,
    # identical to the FD loop above — confirm a burst-specific check was
    # not intended here.
    for approx_enum in range(0, lalsimulation.NumApproximants):
        if lalsimulation.SimInspiralImplementedFDApproximants(approx_enum):
            approx_name = lalsimulation.GetStringFromApproximant(approx_enum)
            _lalsim_enum[approx_name] = approx_enum
            _lalsim_sgburst_approximants[approx_name] = _lalsim_sgburst_waveform
except ImportError:
    # keep the name bound so later references fail with a helpful message
    lalsimulation = libutils.import_optional('lalsimulation')
# Per-scheme generator registries. The CPU td dict is copied so that
# scheme-specific additions elsewhere do not alias the lalsim registry.
cpu_sgburst = _lalsim_sgburst_approximants
cpu_td = dict(_lalsim_td_approximants.items())
cpu_fd = _lalsim_fd_approximants
# Waveforms written in CUDA
_cuda_td_approximants = {}
_cuda_fd_approximants = {}
if pycbc.HAVE_CUDA:
    from pycbc.waveform.pycbc_phenomC_tmplt import imrphenomc_tmplt
    from pycbc.waveform.SpinTaylorF2 import spintaylorf2 as cuda_spintaylorf2
    _cuda_fd_approximants["IMRPhenomC"] = imrphenomc_tmplt
    _cuda_fd_approximants["SpinTaylorF2"] = cuda_spintaylorf2
# CUDA schemes can fall back to any CPU lalsim generator
cuda_td = dict(list(_lalsim_td_approximants.items()) + list(_cuda_td_approximants.items()))
cuda_fd = dict(list(_lalsim_fd_approximants.items()) + list(_cuda_fd_approximants.items()))
# List the various available approximants ####################################
def print_td_approximants():
    """Print the names of all available time-domain approximants to stdout,
    grouped by backend (lalsimulation, then CUDA)."""
    print("LalSimulation Approximants")
    for approx in _lalsim_td_approximants.keys():
        print(" " + approx)
    print("CUDA Approximants")
    for approx in _cuda_td_approximants.keys():
        print(" " + approx)
def print_fd_approximants():
    """Print the names of all available frequency-domain approximants to
    stdout, grouped by backend (lalsimulation, then CUDA)."""
    print("LalSimulation Approximants")
    for approx in _lalsim_fd_approximants.keys():
        print(" " + approx)
    print("CUDA Approximants")
    for approx in _cuda_fd_approximants.keys():
        print(" " + approx)
def print_sgburst_approximants():
    """Print the names of all available sine-Gaussian burst approximants
    to stdout."""
    print("LalSimulation Approximants")
    for approx in _lalsim_sgburst_approximants.keys():
        print(" " + approx)
def td_approximants(scheme=_scheme.mgr.state):
    """Return a list containing the available time domain approximants for
    the given processing scheme.
    """
    return [apx for apx in td_wav[type(scheme)]]
def fd_approximants(scheme=_scheme.mgr.state):
    """Return a list containing the available fourier domain approximants for
    the given processing scheme.
    """
    return [apx for apx in fd_wav[type(scheme)]]
def sgburst_approximants(scheme=_scheme.mgr.state):
    """Return a list containing the available time domain sgbursts for
    the given processing scheme.
    """
    return [apx for apx in sgburst_wav[type(scheme)]]
def filter_approximants(scheme=_scheme.mgr.state):
    """Return a list of fourier domain approximants including those
    written specifically as templates.
    """
    return [apx for apx in filter_wav[type(scheme)]]
# Input parameter handling ###################################################
def get_obj_attrs(obj):
    """ Return a dictionary built from the attributes of the given object.

    Supported inputs are numpy record rows, objects with a ``__dict__`` or
    ``__slots__``, plain dicts, and (as a fallback) any object whose public,
    non-method attributes are readable via ``dir``. ``None`` yields an empty
    dict.

    Parameters
    ----------
    obj : object or None
        The object to read attributes from.

    Returns
    -------
    dict
        Mapping of attribute name -> value. Always a fresh dict, so callers
        may update the result without mutating ``obj``.
    """
    pr = {}
    if obj is not None:
        # numpy.record is the public alias of numpy.core.records.record
        if isinstance(obj, numpy.record):
            for name in obj.dtype.names:
                pr[name] = getattr(obj, name)
        elif hasattr(obj, '__dict__') and obj.__dict__:
            # copy: returning the live __dict__ would let callers (e.g.
            # props(), which update()s this result) mutate obj itself
            pr = obj.__dict__.copy()
        elif hasattr(obj, '__slots__'):
            for slot in obj.__slots__:
                if hasattr(obj, slot):
                    pr[slot] = getattr(obj, slot)
        elif isinstance(obj, dict):
            pr = obj.copy()
        else:
            for name in dir(obj):
                try:
                    value = getattr(obj, name)
                    if not name.startswith('__') and not inspect.ismethod(value):
                        pr[name] = value
                except Exception:
                    # some attributes may raise on access; skip them
                    continue
    return pr
def parse_mode_array(input_params):
    """Normalize the ``mode_array`` entry of a parameter dict into a list of
    ``(l, m)`` tuples of ints.

    Accepted forms are a list of int tuples (e.g. ``[(2, 2), (3, 3)]``), a
    space-separated string (e.g. ``'22 33 44'``), or a scalar/array of
    numbers (e.g. ``[22., 33., 44.]``). Dicts without ``mode_array`` (or
    with it set to None) pass through unchanged.
    """
    if 'mode_array' in input_params and input_params['mode_array'] is not None:
        modes = input_params['mode_array']
        if isinstance(modes, str):
            modes = modes.split()
        if not isinstance(modes, (numpy.ndarray, list)):
            modes = [modes]
        for idx, mode in enumerate(modes):
            if isinstance(mode, (float, int)):
                # e.g. 22. -> '22'
                mode = str(int(mode))
            if isinstance(mode, str):
                # e.g. '22' -> (2, 2); relies on single-digit l and m
                ell, em = mode
                mode = (int(ell), int(em))
            modes[idx] = mode
        input_params['mode_array'] = modes
    return input_params
def props(obj, **kwargs):
    """Build the complete input-parameter dict for waveform generation.

    Combines, in increasing priority: the module-wide defaults, the
    attributes of ``obj`` (e.g. a template table row), and explicit keyword
    arguments.
    """
    template_params = get_obj_attrs(obj)
    # keyword arguments override values in the template object
    template_params.update(kwargs)
    # defaults have lowest priority
    combined = default_args.copy()
    combined.update(template_params)
    # if mode array present and is a string, convert to a list of tuples
    return parse_mode_array(combined)
def check_args(args, required_args):
    """Raise ValueError naming every required argument that is absent
    or None in ``args``."""
    missing = [arg for arg in required_args
               if arg not in args or args[arg] is None]
    if missing:
        raise ValueError("Please provide {}".format(', '.join(missing)))
# Input parameter handling for bursts ########################################
def props_sgburst(obj, **kwargs):
    """Build the input-parameter dict for sine-Gaussian burst generation.

    Combines, in increasing priority: the sgburst defaults, the public
    non-method attributes of ``obj``, and explicit keyword arguments.

    Parameters
    ----------
    obj : object or None
        Template-like object whose attributes are harvested; may be None.
    kwargs : dict
        Explicit parameter values; these override attributes of ``obj``.

    Returns
    -------
    dict
        The combined parameters.
    """
    pr = {}
    if obj is not None:
        for name in dir(obj):
            try:
                value = getattr(obj, name)
                if not name.startswith('__') and not inspect.ismethod(value):
                    pr[name] = value
            except Exception:
                # attribute access may raise (e.g. properties); skip those
                continue

    # Get the parameters to generate the waveform
    # Note that keyword arguments override values in the template object
    input_params = default_sgburst_args.copy()
    input_params.update(pr)
    input_params.update(kwargs)
    return input_params
# Waveform generation ########################################################
# Registries of frequency-domain generators evaluated at explicit frequency
# points, without (fd_sequence) and with (fd_det_sequence) detector response,
# plus detector-frame full-series generators (fd_det).
fd_sequence = {}
fd_det_sequence = {}
fd_det = {}
def _lalsim_fd_sequence(**p):
    """ Shim to interface to lalsimulation SimInspiralChooseFDWaveformSequence
    """
    lal_pars = _check_lal_pars(p)
    hp, hc = lalsimulation.SimInspiralChooseFDWaveformSequence(
               float(p['coa_phase']),
               float(pnutils.solar_mass_to_kg(p['mass1'])),
               float(pnutils.solar_mass_to_kg(p['mass2'])),
               float(p['spin1x']), float(p['spin1y']), float(p['spin1z']),
               float(p['spin2x']), float(p['spin2y']), float(p['spin2z']),
               float(p['f_ref']),
               pnutils.megaparsecs_to_meters(float(p['distance'])),
               float(p['inclination']),
               lal_pars,
               _lalsim_enum[p['approximant']],
               p['sample_points'].lal())
    return Array(hp.data.data), Array(hc.data.data)
# arguments that must be present before this generator can be called
_lalsim_fd_sequence.required = parameters.cbc_fd_required
# every lalsim approximant can be evaluated at a frequency sequence
for apx in _lalsim_enum:
    fd_sequence[apx] = _lalsim_fd_sequence
def get_fd_waveform_sequence(template=None, **kwds):
    """Return values of the waveform evaluated at the sequence of frequency
    points. The waveform generator doesn't include detector response.
    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. A common example would be a row in an xml table.
    {params}
    Returns
    -------
    hplustilde: Array
        The plus phase of the waveform in frequency domain evaluated at the
        frequency points.
    hcrosstilde: Array
        The cross phase of the waveform in frequency domain evaluated at the
        frequency points.
    """
    input_params = props(template, **kwds)
    # sequence generators take explicit sample points, so these are dummies
    input_params['delta_f'] = -1
    input_params['f_lower'] = -1
    approx = input_params['approximant']
    if approx not in fd_sequence:
        raise ValueError("Approximant %s not available" %
                            (approx))
    wav_gen = fd_sequence[approx]
    required = getattr(wav_gen, 'required', parameters.fd_required)
    check_args(input_params, required)
    return wav_gen(**input_params)
def get_fd_det_waveform_sequence(template=None, **kwds):
    """Return values of the waveform evaluated at the sequence of frequency
    points. The waveform generator includes detector response.
    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. A common example would be a row in an xml table.
    {params}
    Returns
    -------
    dict
        The detector-frame waveform (with detector response) in frequency
        domain evaluated at the frequency points. Keys are requested data
        channels, values are FrequencySeries.
    """
    input_params = props(template, **kwds)
    # sequence generators take explicit sample points, so these are dummies
    input_params['delta_f'] = -1
    input_params['f_lower'] = -1
    approx = input_params['approximant']
    if approx not in fd_det_sequence:
        raise ValueError("Approximant %s not available" %
                            (approx))
    wav_gen = fd_det_sequence[approx]
    required = getattr(wav_gen, 'required', parameters.fd_required)
    check_args(input_params, required)
    return wav_gen(**input_params)
# Fill the {params} placeholder in the sequence generators' docstrings with
# the standard frequency-sequence parameter descriptions.
get_fd_waveform_sequence.__doc__ = get_fd_waveform_sequence.__doc__.format(
    params=parameters.fd_waveform_sequence_params.docstr(prefix=" ",
           include_label=False).lstrip(' '))
get_fd_det_waveform_sequence.__doc__ = get_fd_det_waveform_sequence.__doc__.format(
    params=parameters.fd_waveform_sequence_params.docstr(prefix=" ",
           include_label=False).lstrip(' '))
def get_td_waveform(template=None, **kwargs):
    """Return the plus and cross polarizations of a time domain waveform.
    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. A common example would be a row in an xml table.
    {params}
    Returns
    -------
    hplus: TimeSeries
        The plus polarization of the waveform.
    hcross: TimeSeries
        The cross polarization of the waveform.
    """
    input_params = props(template, **kwargs)
    # pick the generator registry for the active processing scheme
    scheme_gens = td_wav[type(_scheme.mgr.state)]
    approx = input_params['approximant']
    if approx not in scheme_gens:
        raise ValueError("Approximant %s not available" %
                            (approx))
    generator = scheme_gens[approx]
    required = getattr(generator, 'required', parameters.td_required)
    check_args(input_params, required)
    return generator(**input_params)
get_td_waveform.__doc__ = get_td_waveform.__doc__.format(
    params=parameters.td_waveform_params.docstr(prefix=" ",
           include_label=False).lstrip(' '))
def get_fd_waveform(template=None, **kwargs):
    """Return a frequency domain gravitational waveform.
    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. A common example would be a row in an xml table.
    {params}
    Returns
    -------
    hplustilde: FrequencySeries
        The plus phase of the waveform in frequency domain.
    hcrosstilde: FrequencySeries
        The cross phase of the waveform in frequency domain.
    """
    input_params = props(template, **kwargs)
    wav_gen = fd_wav[type(_scheme.mgr.state)]
    if input_params['approximant'] not in wav_gen:
        raise ValueError("Approximant %s not available" %
                            (input_params['approximant']))
    # The KeyError handler below covers both a missing 'f_final_func' key
    # and an unrecognized cutoff name in named_frequency_cutoffs; in either
    # case no f_final is derived and generation proceeds as-is.
    try:
        ffunc = input_params.pop('f_final_func')
        if ffunc != '':
            # convert the frequency function to a value
            input_params['f_final'] = pnutils.named_frequency_cutoffs[ffunc](
                                        input_params)
            # if the f_final is < f_lower, raise a NoWaveformError
            if 'f_final' in input_params and \
                    (input_params['f_lower']+input_params['delta_f'] >=
                     input_params['f_final']):
                raise NoWaveformError("cannot generate waveform: f_lower >= "
                                      "f_final")
    except KeyError:
        pass
    wav_gen = wav_gen[input_params['approximant']]
    if hasattr(wav_gen, 'required'):
        required = wav_gen.required
    else:
        required = parameters.fd_required
    check_args(input_params, required)
    return wav_gen(**input_params)
# fill the {params} placeholder with the standard FD parameter descriptions
get_fd_waveform.__doc__ = get_fd_waveform.__doc__.format(
    params=parameters.fd_waveform_params.docstr(prefix=" ",
           include_label=False).lstrip(' '))
def get_fd_waveform_from_td(**params):
    """ Return time domain version of fourier domain approximant.
    This returns a frequency domain version of a fourier domain approximant,
    with padding and tapering at the start of the waveform.

    Parameters
    ----------
    params: dict
        The parameters defining the waveform to generator.
        See `get_td_waveform`.

    Returns
    -------
    hp: pycbc.types.FrequencySeries
        Plus polarization time series
    hc: pycbc.types.FrequencySeries
        Cross polarization time series
    """
    # determine the duration to use
    full_duration = duration = get_waveform_filter_length_in_time(**params)
    nparams = params.copy()
    # walk f_lower down until the estimated duration comfortably exceeds the
    # nominal one, leaving room at the start for tapering
    while full_duration < duration * 1.5:
        full_duration = get_waveform_filter_length_in_time(**nparams)
        nparams['f_lower'] -= 1

    # BUGFIX: this previously tested the misspelled key 'f_fref', which is
    # never present, so any caller-supplied f_ref was unconditionally
    # overwritten with f_lower. Only default f_ref when it was not given.
    if 'f_ref' not in nparams:
        nparams['f_ref'] = params['f_lower']

    # We'll try to do the right thing and figure out what the frequency
    # end is. Otherwise, we'll just assume 2048 Hz.
    # (consider removing as we hopefully have better estimates for more
    # approximants
    try:
        f_end = get_waveform_end_frequency(**params)
        delta_t = (0.5 / pnutils.nearest_larger_binary_number(f_end))
    except Exception:
        # best effort: fall back to a 4096 Hz sample rate
        delta_t = 1.0 / 2048

    nparams['delta_t'] = delta_t
    hp, hc = get_td_waveform(**nparams)

    # Resize to the right duration
    tsamples = int(1.0 / params['delta_f'] / delta_t)

    if tsamples < len(hp):
        raise ValueError("The frequency spacing (df = {}) is too low to "
                         "generate the {} approximant from the time "
                         "domain".format(params['delta_f'], params['approximant']))

    hp.resize(tsamples)
    hc.resize(tsamples)

    # apply the tapering, we will use a safety factor here to allow for
    # somewhat innacurate duration difference estimation.
    window = (full_duration - duration) * 0.8
    hp = wfutils.td_taper(hp, hp.start_time, hp.start_time + window)
    hc = wfutils.td_taper(hc, hc.start_time, hc.start_time + window)

    # avoid wraparound
    hp = hp.to_frequencyseries().cyclic_time_shift(hp.start_time)
    hc = hc.to_frequencyseries().cyclic_time_shift(hc.start_time)
    return hp, hc
def get_fd_det_waveform(template=None, **kwargs):
    """Return a frequency domain gravitational waveform.

    The waveform generator includes the detector response.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. An example would be a row in an xml table.
    {params}

    Returns
    -------
    dict
        The detector-frame waveform (with detector response) in frequency
        domain. Keys are requested data channels, values are FrequencySeries.
    """
    input_params = props(template, **kwargs)
    # A negative f_lower signals the generator to choose its own start
    input_params.setdefault('f_lower', -1)
    apx = input_params['approximant']
    if apx not in fd_det:
        raise ValueError("Approximant %s not available" % (apx))
    generator = fd_det[apx]
    # Generators may declare their own required-argument list
    needed = getattr(generator, 'required', parameters.fd_required)
    check_args(input_params, needed)
    return generator(**input_params)
get_fd_det_waveform.__doc__ = get_fd_det_waveform.__doc__.format(
params=parameters.fd_waveform_params.docstr(prefix=" ",
include_label=False).lstrip(' '))
def _base_get_td_waveform_from_fd(template=None, rwrap=None, **params):
    """ The base function to calculate time domain version of fourier
    domain approximant which not include or includes detector response.
    Called by `get_td_waveform_from_fd` and `get_td_det_waveform_from_fd_det`.
    """
    kwds = props(template, **params)
    nparams = kwds.copy()
    # Pick a cyclic-shift buffer length if the caller did not supply one.
    if rwrap is None:
        # In the `pycbc.waveform.parameters` module, spin1z and
        # spin2z have the default value 0. Users must have input
        # masses, so no else is needed.
        mass_spin_params = set(['mass1', 'mass2', 'spin1z', 'spin2z'])
        if mass_spin_params.issubset(set(nparams.keys())):
            m_final, spin_final = get_final_from_initial(
                mass1=nparams['mass1'], mass2=nparams['mass2'],
                spin1z=nparams['spin1z'], spin2z=nparams['spin2z'])
            # ten remnant ringdown damping times as the wraparound buffer
            rwrap = tau_from_final_mass_spin(m_final, spin_final) * 10
            if rwrap < 5:
                # Long enough for very massive BBHs in XG detectors,
                # up to (3000, 3000) solar mass, while still not a
                # computational burden for 2G cases.
                rwrap = 5
    if nparams['approximant'] not in _filter_time_lengths:
        raise ValueError("Approximant %s _filter_time_lengths function \
            not available" % (nparams['approximant']))
    # determine the duration to use
    full_duration = duration = get_waveform_filter_length_in_time(**nparams)
    # Walk f_lower down until the waveform is at least 50% longer than the
    # nominal duration, leaving room for start-of-waveform conditioning.
    while full_duration < duration * 1.5:
        full_duration = get_waveform_filter_length_in_time(**nparams)
        nparams['f_lower'] *= 0.99
        # do not extend beyond the observation span when one is given
        if 't_obs_start' in nparams and \
                full_duration >= nparams['t_obs_start']:
            break
    # Reference the source at the originally requested start frequency so
    # the physics is unchanged by the extension above.
    if 'f_ref' not in nparams:
        nparams['f_ref'] = params['f_lower']
    # factor to ensure the vectors are all large enough. We don't need to
    # completely trust our duration estimator in this case, at a small
    # increase in computational cost
    fudge_duration = (max(0, full_duration) + .1 + rwrap) * 1.5
    fsamples = int(fudge_duration / nparams['delta_t'])
    N = pnutils.nearest_larger_binary_number(fsamples)
    fudge_duration = N * nparams['delta_t']
    nparams['delta_f'] = 1.0 / fudge_duration
    tsize = int(1.0 / nparams['delta_t'] / nparams['delta_f'])
    fsize = tsize // 2 + 1
    if nparams['approximant'] not in fd_det:
        hp, hc = get_fd_waveform(**nparams)
        # Resize to the right sample rate
        hp.resize(fsize)
        hc.resize(fsize)
        # avoid wraparound
        hp = hp.cyclic_time_shift(-rwrap)
        hc = hc.cyclic_time_shift(-rwrap)
        # inverse transform with a taper between the extended and the
        # requested starting frequencies
        hp = wfutils.fd_to_td(hp, delta_t=params['delta_t'],
                              left_window=(nparams['f_lower'],
                                           params['f_lower']))
        hc = wfutils.fd_to_td(hc, delta_t=params['delta_t'],
                              left_window=(nparams['f_lower'],
                                           params['f_lower']))
        return hp, hc
    else:
        # detector-response generators return a dict of per-ifo series
        wfs = get_fd_det_waveform(**nparams)
        for ifo in wfs.keys():
            wfs[ifo].resize(fsize)
            # avoid wraparound
            wfs[ifo] = wfs[ifo].cyclic_time_shift(-rwrap)
            wfs[ifo] = wfutils.fd_to_td(wfs[ifo], delta_t=kwds['delta_t'],
                                        left_window=(nparams['f_lower'],
                                                     kwds['f_lower']))
        return wfs
def get_td_waveform_from_fd(rwrap=None, **params):
    """ Return the time domain version of a fourier domain approximant.

    The frequency domain waveform is generated, padded and tapered at the
    start, and inverse transformed to the time domain.

    Parameters
    ----------
    rwrap: float
        Cyclic time shift parameter in seconds; a fudge factor ensuring
        the whole time series is contiguous in the array rather than
        wrapped around the end.
    params: dict
        The parameters defining the waveform to generate.
        See `get_fd_waveform`.

    Returns
    -------
    hp: pycbc.types.TimeSeries
        The plus polarization.
    hc: pycbc.types.TimeSeries
        The cross polarization.
    """
    # No template object in the polarization-only case.
    return _base_get_td_waveform_from_fd(None, rwrap, **params)
def get_td_det_waveform_from_fd_det(template=None, rwrap=None, **params):
    """ Return the time domain version of a fourier domain approximant
    that includes the detector response, padded and tapered at the start
    of the waveform.

    Parameters
    ----------
    rwrap: float
        Cyclic time shift parameter in seconds; a fudge factor ensuring
        the whole time series is contiguous in the array rather than
        wrapped around the end.
    params: dict
        The parameters defining the waveform to generate.
        See `get_fd_det_waveform`.

    Returns
    -------
    dict
        The detector-frame waveform (with detector response) in the time
        domain. Keys are the requested data channels.
    """
    return _base_get_td_waveform_from_fd(template, rwrap, **params)
get_td_det_waveform_from_fd_det.__doc__ = \
get_td_det_waveform_from_fd_det.__doc__.format(
params=parameters.td_waveform_params.docstr(prefix=" ",
include_label=False).lstrip(' '))
def get_interpolated_fd_waveform(dtype=numpy.complex64, return_hc=True,
                                 **params):
    """ Return a fourier domain waveform approximant, using interpolation

    The waveform is first generated at a coarse frequency resolution chosen
    from its estimated duration and then interpolated onto the requested
    `delta_f` grid, which is cheaper than direct generation for long
    templates.
    """
    def rulog2(val):
        # round up to the next power of two
        return 2.0 ** numpy.ceil(numpy.log2(float(val)))
    orig_approx = params['approximant']
    params['approximant'] = params['approximant'].replace('_INTERP', '')
    df = params['delta_f']
    if 'duration' not in params:
        duration = get_waveform_filter_length_in_time(**params)
    elif params['duration'] > 0:
        duration = params['duration']
    else:
        err_msg = "Waveform duration must be greater than 0."
        raise ValueError(err_msg)
    #FIXME We should try to get this length directly somehow
    # I think this number should be conservative
    ringdown_padding = 0.5
    df_min = 1.0 / rulog2(duration + ringdown_padding)
    # FIXME: I don't understand this, but waveforms with df_min < 0.5 will chop
    # off the inspiral when using ringdown_padding - 0.5.
    # Also, if ringdown_padding is set to a very small
    # value we can see cases where the ringdown is chopped.
    if df_min > 0.5:
        df_min = 0.5
    params['delta_f'] = df_min
    hp, hc = get_fd_waveform(**params)
    hp = hp.astype(dtype)
    if return_hc:
        hc = hc.astype(dtype)
    else:
        hc = None
    f_end = get_waveform_end_frequency(**params)
    if f_end is None:
        f_end = (len(hp) - 1) * hp.delta_f
    if 'f_final' in params and params['f_final'] > 0:
        f_end_params = params['f_final']
        if f_end is not None:
            f_end = min(f_end_params, f_end)
    # truncate the coarse waveform just above its end frequency before
    # interpolating, so no work is spent on empty bins
    n_min = int(rulog2(f_end / df_min)) + 1
    if n_min < len(hp):
        hp = hp[:n_min]
        if hc is not None:
            hc = hc[:n_min]
    # zero-padding offset accounting for the ringdown padding added above
    offset = int(ringdown_padding * (len(hp)-1)*2 * hp.delta_f)
    hp = interpolate_complex_frequency(hp, df, zeros_offset=offset, side='left')
    if hc is not None:
        hc = interpolate_complex_frequency(hc, df, zeros_offset=offset,
                                           side='left')
    params['approximant'] = orig_approx
    return hp, hc
def get_sgburst_waveform(template=None, **kwargs):
    """Return the plus and cross polarizations of a time domain
    sine-Gaussian burst waveform.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to subsitute
        for keyword arguments. A common example would be a row in an xml table.
    approximant : string
        A string that indicates the chosen approximant. See `td_approximants`
        for available options.
    q : float
        The quality factor of a sine-Gaussian burst
    frequency : float
        The centre-frequency of a sine-Gaussian burst
    delta_t : float
        The time step used to generate the waveform
    hrss : float
        The strain rss
    amplitude: float
        The strain amplitude

    Returns
    -------
    hplus: TimeSeries
        The plus polarization of the waveform.
    hcross: TimeSeries
        The cross polarization of the waveform.
    """
    input_params = props_sgburst(template, **kwargs)
    # Reject the call on the first required argument that was not supplied
    missing = [a for a in sgburst_required_args if a not in input_params]
    if missing:
        raise ValueError("Please provide " + str(missing[0]))
    return _lalsim_sgburst_waveform(**input_params)
# Waveform filter routines ###################################################

# Organize Filter Generators: per-scheme dictionaries of special-purpose
# filter generators (currently only the stationary phase SPA template).
_inspiral_fd_filters = {}
_cuda_fd_filters = {}
_cuda_fd_filters['SPAtmplt'] = spa_tmplt
_inspiral_fd_filters['SPAtmplt'] = spa_tmplt
# Dispatch on the active processing scheme (CPU vs CUDA)
filter_wav = _scheme.ChooseBySchemeDict()
filter_wav.update( {_scheme.CPUScheme:_inspiral_fd_filters,
                    _scheme.CUDAScheme:_cuda_fd_filters,
                   } )
# Organize functions for function conditioning/precalculated values
_filter_norms = {}
_filter_ends = {}
_filter_preconditions = {}
_template_amplitude_norms = {}
_filter_time_lengths = {}
def seobnrv2_final_frequency(**kwds):
    """Return the SEOBNRv2 final (ending) frequency for the given masses
    and aligned spin components, via pnutils.
    """
    args = (kwds['mass1'], kwds['mass2'], kwds['spin1z'], kwds['spin2z'])
    return pnutils.get_final_freq("SEOBNRv2", *args)
def get_imr_length(approx, **kwds):
    """Call through to pnutils to obtain IMR waveform durations
    """
    keys = ('mass1', 'mass2', 'spin1z', 'spin2z', 'f_lower')
    m1, m2, s1z, s2z, f_low = (float(kwds[k]) for k in keys)
    # 10% margin of error is incorporated in the pnutils function
    return pnutils.get_imr_duration(m1, m2, s1z, s2z, f_low, approximant=approx)
def seobnrv2_length_in_time(**kwds):
    """Stub for holding the calculation of SEOBNRv2* waveform duration.
    """
    return get_imr_length("SEOBNRv2", **kwds)

def seobnrv4_length_in_time(**kwds):
    """Stub for holding the calculation of SEOBNRv4* waveform duration.
    """
    return get_imr_length("SEOBNRv4", **kwds)

def imrphenomd_length_in_time(**kwds):
    """Stub for holding the calculation of IMRPhenomD waveform duration.
    """
    return get_imr_length("IMRPhenomD", **kwds)

def imrphenomhm_length_in_time(**kwargs):
    """Estimates the duration of IMRPhenom waveforms that include higher modes.
    """
    # Default maximum node number for IMRPhenomHM is 4
    # The relevant lower order approximant here is IMRPhenomD
    return get_hm_length_in_time("IMRPhenomD", 4, **kwargs)

def seobnrv4hm_length_in_time(**kwargs):
    """ Estimates the duration of SEOBNRv4HM waveforms that include higher modes.
    """
    # Default maximum node number for SEOBNRv4HM is 5
    # The relevant lower order approximant here is SEOBNRv4
    return get_hm_length_in_time('SEOBNRv4', 5, **kwargs)
def get_hm_length_in_time(lor_approx, maxm_default, **kwargs):
    """Estimate the duration of a higher-mode approximant.

    The estimate rescales f_lower by 2/|m|_max and calls the duration
    function of the lower-order (2,2)-mode approximant `lor_approx`.
    `maxm_default` is the largest |m| assumed when no mode_array is given.
    """
    kwargs = parse_mode_array(kwargs)
    mode_array = kwargs.get('mode_array')
    if mode_array is not None:
        # highest |m| among the requested (l, m) modes
        maxm = max(m for _, m in mode_array)
    else:
        maxm = maxm_default
    try:
        flow = kwargs['f_lower']
    except KeyError:
        raise ValueError("must provide a f_lower")
    kwargs['f_lower'] = flow * 2./maxm
    return get_imr_length(lor_approx, **kwargs)
# Register the conditioning / precomputation helpers per approximant.
_filter_norms["SPAtmplt"] = spa_tmplt_norm
_filter_preconditions["SPAtmplt"] = spa_tmplt_precondition
_filter_ends["SPAtmplt"] = spa_tmplt_end
_filter_ends["TaylorF2"] = spa_tmplt_end
#_filter_ends["SEOBNRv1_ROM_EffectiveSpin"] = seobnrv2_final_frequency
#_filter_ends["SEOBNRv1_ROM_DoubleSpin"] =  seobnrv2_final_frequency
#_filter_ends["SEOBNRv2_ROM_EffectiveSpin"] = seobnrv2_final_frequency
#_filter_ends["SEOBNRv2_ROM_DoubleSpin"] =  seobnrv2_final_frequency
#_filter_ends["SEOBNRv2_ROM_DoubleSpin_HI"] = seobnrv2_final_frequency
# PhenomD returns higher frequencies than this, so commenting this out for now
#_filter_ends["IMRPhenomC"] = seobnrv2_final_frequency
#_filter_ends["IMRPhenomD"] = seobnrv2_final_frequency
_template_amplitude_norms["SPAtmplt"] = spa_amplitude_factor
# Duration estimators, keyed by approximant name.
_filter_time_lengths["SPAtmplt"] = spa_length_in_time
_filter_time_lengths["TaylorF2"] = spa_length_in_time
_filter_time_lengths["SpinTaylorT5"] = spa_length_in_time
_filter_time_lengths["SEOBNRv1_ROM_EffectiveSpin"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv1_ROM_DoubleSpin"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv2_ROM_EffectiveSpin"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv2_ROM_DoubleSpin"] = seobnrv2_length_in_time
_filter_time_lengths["EOBNRv2_ROM"] = seobnrv2_length_in_time
_filter_time_lengths["EOBNRv2HM_ROM"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv2_ROM_DoubleSpin_HI"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv4_ROM"] = seobnrv4_length_in_time
_filter_time_lengths["SEOBNRv4HM_ROM"] = seobnrv4hm_length_in_time
_filter_time_lengths["SEOBNRv4"] = seobnrv4_length_in_time
_filter_time_lengths["SEOBNRv4P"] = seobnrv4_length_in_time
_filter_time_lengths["IMRPhenomC"] = imrphenomd_length_in_time
_filter_time_lengths["IMRPhenomD"] = imrphenomd_length_in_time
_filter_time_lengths["IMRPhenomPv2"] = imrphenomd_length_in_time
_filter_time_lengths["IMRPhenomD_NRTidal"] = imrphenomd_length_in_time
_filter_time_lengths["IMRPhenomPv2_NRTidal"] = imrphenomd_length_in_time
_filter_time_lengths["IMRPhenomHM"] = imrphenomhm_length_in_time
_filter_time_lengths["IMRPhenomPv3HM"] = imrphenomhm_length_in_time
_filter_time_lengths["IMRPhenomXHM"] = imrphenomhm_length_in_time
_filter_time_lengths["IMRPhenomXPHM"] = imrphenomhm_length_in_time
_filter_time_lengths["SpinTaylorF2"] = spa_length_in_time
_filter_time_lengths["TaylorF2NL"] = spa_length_in_time
_filter_time_lengths["PreTaylorF2"] = spa_length_in_time
# Also add generators for switching between approximants
apx_name = "SpinTaylorF2_SWAPPER"
cpu_fd[apx_name] = _spintaylor_aligned_prec_swapper
_filter_time_lengths[apx_name] = _filter_time_lengths["SpinTaylorF2"]
from . nltides import nonlinear_tidal_spa
cpu_fd["TaylorF2NL"] = nonlinear_tidal_spa
from .premerger import premerger_taylorf2
cpu_fd['PreTaylorF2'] = premerger_taylorf2
from .multiband import multiband_fd_waveform
cpu_fd['multiband'] = multiband_fd_waveform
# Load external waveforms #####################################################
# Plugin hook: colon-separated module names, each providing add_me().
if 'PYCBC_WAVEFORM' in os.environ:
    mods = os.environ['PYCBC_WAVEFORM'].split(':')
    for mod in mods:
        mhandle = __import__(mod, fromlist=[''])
        mhandle.add_me(cpu_fd=cpu_fd,
                       cpu_td=cpu_td,
                       filter_time_lengths=_filter_time_lengths)
def td_fd_waveform_transform(approximant):
    '''Register domain-transformed variants of a waveform approximant.

    A time-domain-only approximant gains a frequency domain version via
    `get_fd_waveform_from_td`. A frequency domain approximant gains an
    interpolated '<name>_INTERP' variant and a time domain version via
    `get_td_waveform_from_fd`.

    Parameters
    ----------
    approximant: string
        The name of a waveform approximant.
    '''
    # Snapshot membership before registering anything new
    known_fd = approximant in cpu_fd
    known_td = approximant in cpu_td
    if known_td and not known_fd:
        # We can make a fd version of td approximants
        cpu_fd[approximant] = get_fd_waveform_from_td
    if known_fd:
        # Interpolation is possible for waveforms with a time length
        apx_int = approximant + '_INTERP'
        cpu_fd[apx_int] = get_interpolated_fd_waveform
        _filter_time_lengths[apx_int] = _filter_time_lengths[approximant]
        # We can also make a td version of this.
        # This will override any existing approximants with the same name
        # (ex. IMRPhenomXX)
        cpu_td[approximant] = get_td_waveform_from_fd
# Apply the td/fd transforms to every approximant with a duration estimator
# (iterate a copy since the transform inserts new keys).
for apx in copy.copy(_filter_time_lengths):
    td_fd_waveform_transform(apx)

# Scheme-dispatched generator dictionaries.
td_wav = _scheme.ChooseBySchemeDict()
fd_wav = _scheme.ChooseBySchemeDict()
td_wav.update({_scheme.CPUScheme:cpu_td,_scheme.CUDAScheme:cuda_td})
fd_wav.update({_scheme.CPUScheme:cpu_fd,_scheme.CUDAScheme:cuda_fd})
sgburst_wav = {_scheme.CPUScheme:cpu_sgburst}
def get_waveform_filter(out, template=None, **kwargs):
    """Return a frequency domain waveform filter for the specified approximant

    The result is written into the pre-allocated array ``out`` and returned
    with ``chirp_length`` and ``length_in_time`` attributes attached.
    Dispatches, in order of preference, to a dedicated filter generator,
    a frequency domain generator, or a time domain generator followed by
    an FFT.
    """
    n = len(out)
    input_params = props(template, **kwargs)
    if input_params['approximant'] in filter_approximants(_scheme.mgr.state):
        # dedicated filter generator writes directly into out
        wav_gen = filter_wav[type(_scheme.mgr.state)]
        htilde = wav_gen[input_params['approximant']](out=out, **input_params)
        htilde.resize(n)
        htilde.chirp_length = get_waveform_filter_length_in_time(**input_params)
        htilde.length_in_time = htilde.chirp_length
        return htilde
    if input_params['approximant'] in fd_approximants(_scheme.mgr.state):
        wav_gen = fd_wav[type(_scheme.mgr.state)]
        duration = get_waveform_filter_length_in_time(**input_params)
        hp, _ = wav_gen[input_params['approximant']](duration=duration,
                                               return_hc=False, **input_params)
        hp.resize(n)
        # copy into the caller's buffer and rebind the series to it
        out[0:len(hp)] = hp[:]
        hp.data = out
        hp.length_in_time = hp.chirp_length = duration
        return hp
    elif input_params['approximant'] in td_approximants(_scheme.mgr.state):
        wav_gen = td_wav[type(_scheme.mgr.state)]
        hp, _ = wav_gen[input_params['approximant']](**input_params)
        # taper the time series hp if required
        if 'taper' in input_params.keys() and \
                input_params['taper'] is not None:
            hp = wfutils.taper_timeseries(hp, input_params['taper'],
                                          return_lal=False)
        return td_waveform_to_fd_waveform(hp, out=out)
    else:
        raise ValueError("Approximant %s not available" %
                         (input_params['approximant']))
def td_waveform_to_fd_waveform(waveform, out=None, length=None,
                               buffer_length=100):
    """ Convert a time domain into a frequency domain waveform by FFT.
    As a waveform is assumed to "wrap" in the time domain one must be
    careful to ensure the waveform goes to 0 at both "boundaries". To
    ensure this is done correctly the waveform must have the epoch set such
    the merger time is at t=0 and the length of the waveform should be
    shorter than the desired length of the FrequencySeries (times 2 - 1)
    so that zeroes can be suitably pre- and post-pended before FFTing.
    If given, out is a memory array to be used as the output of the FFT.
    If not given memory is allocated internally.
    If present the length of the returned FrequencySeries is determined
    from the length out. If out is not given the length can be provided
    expicitly, or it will be chosen as the nearest power of 2. If choosing
    length explicitly the waveform length + buffer_length is used when
    choosing the nearest binary number so that some zero padding is always
    added.
    """
    # Figure out lengths and set out if needed
    if out is None:
        if length is None:
            N = pnutils.nearest_larger_binary_number(len(waveform) + \
                                                     buffer_length)
            n = int(N//2) + 1
        else:
            n = length
            N = (n-1)*2
        out = zeros(n, dtype=complex_same_precision_as(waveform))
    else:
        n = len(out)
        N = (n-1)*2
    delta_f = 1. / (N * waveform.delta_t)
    # total duration of the waveform
    tmplt_length = len(waveform) * waveform.delta_t
    if len(waveform) > N:
        err_msg = "The time domain template is longer than the intended "
        err_msg += "duration in the frequency domain. This situation is "
        err_msg += "not supported in this function. Please shorten the "
        err_msg += "waveform appropriately before calling this function or "
        err_msg += "increase the allowed waveform length. "
        err_msg += "Waveform length (in samples): {}".format(len(waveform))
        err_msg += ". Intended length: {}.".format(N)
        raise ValueError(err_msg)
    # for IMR templates the zero of time is at max amplitude (merger)
    # thus the start time is minus the duration of the template from
    # lower frequency cutoff to merger, i.e. minus the 'chirp time'
    tChirp = - float( waveform.start_time )  # conversion from LIGOTimeGPS
    waveform.resize(N)
    # rotate so the sample at the (negative) start time lands at index 0,
    # i.e. the merger sits at the wrap point of the periodic array
    k_zero = int(waveform.start_time / waveform.delta_t)
    waveform.roll(k_zero)
    htilde = FrequencySeries(out, delta_f=delta_f, copy=False)
    fft(waveform.astype(real_same_precision_as(htilde)), htilde)
    htilde.length_in_time = tmplt_length
    htilde.chirp_length = tChirp
    return htilde
def get_two_pol_waveform_filter(outplus, outcross, template, **kwargs):
    """Return a frequency domain waveform filter for the specified approximant.
    Unlike get_waveform_filter this function returns both h_plus and h_cross
    components of the waveform, which are needed for searches where h_plus
    and h_cross are not related by a simple phase shift.
    """
    n = len(outplus)
    # If we don't have an inclination column alpha3 might be used
    if not hasattr(template, 'inclination') and 'inclination' not in kwargs:
        if hasattr(template, 'alpha3'):
            kwargs['inclination'] = template.alpha3
    input_params = props(template, **kwargs)
    if input_params['approximant'] in fd_approximants(_scheme.mgr.state):
        wav_gen = fd_wav[type(_scheme.mgr.state)]
        hp, hc = wav_gen[input_params['approximant']](**input_params)
        hp.resize(n)
        hc.resize(n)
        # copy both polarizations into the caller-provided buffers
        outplus[0:len(hp)] = hp[:]
        hp = FrequencySeries(outplus, delta_f=hp.delta_f, copy=False)
        outcross[0:len(hc)] = hc[:]
        hc = FrequencySeries(outcross, delta_f=hc.delta_f, copy=False)
        hp.chirp_length = get_waveform_filter_length_in_time(**input_params)
        hp.length_in_time = hp.chirp_length
        hc.chirp_length = hp.chirp_length
        hc.length_in_time = hp.length_in_time
        return hp, hc
    elif input_params['approximant'] in td_approximants(_scheme.mgr.state):
        # N: number of time samples required
        N = (n-1)*2
        delta_f = 1.0 / (N * input_params['delta_t'])
        wav_gen = td_wav[type(_scheme.mgr.state)]
        hp, hc = wav_gen[input_params['approximant']](**input_params)
        # taper the time series hp if required
        if 'taper' in input_params.keys() and \
                input_params['taper'] is not None:
            hp = wfutils.taper_timeseries(hp, input_params['taper'],
                                          return_lal=False)
            hc = wfutils.taper_timeseries(hc, input_params['taper'],
                                          return_lal=False)
        # total duration of the waveform
        tmplt_length = len(hp) * hp.delta_t
        # for IMR templates the zero of time is at max amplitude (merger)
        # thus the start time is minus the duration of the template from
        # lower frequency cutoff to merger, i.e. minus the 'chirp time'
        tChirp = - float( hp.start_time )  # conversion from LIGOTimeGPS
        hp.resize(N)
        hc.resize(N)
        # rotate so the merger sits at the wrap point of the periodic array
        k_zero = int(hp.start_time / hp.delta_t)
        hp.roll(k_zero)
        hc.roll(k_zero)
        hp_tilde = FrequencySeries(outplus, delta_f=delta_f, copy=False)
        hc_tilde = FrequencySeries(outcross, delta_f=delta_f, copy=False)
        fft(hp.astype(real_same_precision_as(hp_tilde)), hp_tilde)
        fft(hc.astype(real_same_precision_as(hc_tilde)), hc_tilde)
        hp_tilde.length_in_time = tmplt_length
        hp_tilde.chirp_length = tChirp
        hc_tilde.length_in_time = tmplt_length
        hc_tilde.chirp_length = tChirp
        return hp_tilde, hc_tilde
    else:
        raise ValueError("Approximant %s not available" %
                         (input_params['approximant']))
def waveform_norm_exists(approximant):
    """Return True if a precalculated filter normalization function is
    registered for the given approximant, False otherwise.

    Parameters
    ----------
    approximant : string
        Name of the waveform approximant.

    Returns
    -------
    bool
    """
    # The membership test already yields the boolean; no if/else needed.
    return approximant in _filter_norms
def get_template_amplitude_norm(template=None, **kwargs):
    """ Return additional constant template normalization. This only affects
    the effective distance calculation. Returns None for all templates with a
    physically meaningful amplitude.
    """
    input_params = props(template, **kwargs)
    # Read the approximant from the merged parameters so a value supplied
    # via the template object (rather than as a keyword) is also honored.
    # Previously this read kwargs directly and raised KeyError in that case.
    approximant = input_params['approximant']

    if approximant in _template_amplitude_norms:
        return _template_amplitude_norms[approximant](**input_params)
    else:
        return None
def get_waveform_filter_precondition(approximant, length, delta_f):
    """Return the data preconditioning factor for this approximant.
    """
    # Guard clause: no registered precondition means no factor.
    if approximant not in _filter_preconditions:
        return None
    return _filter_preconditions[approximant](length, delta_f)
def get_waveform_filter_norm(approximant, psd, length, delta_f, f_lower):
    """ Return the normalization vector for the approximant
    """
    # Guard clause: approximants without a registered norm return None.
    if approximant not in _filter_norms:
        return None
    return _filter_norms[approximant](psd, length, delta_f, f_lower)
def get_waveform_end_frequency(template=None, **kwargs):
    """Return the stop frequency of a template

    Returns None when no end-frequency calculator is registered for the
    approximant.
    """
    input_params = props(template, **kwargs)
    # Read the approximant from the merged parameters so a value supplied
    # via the template object (rather than as a keyword) is also honored.
    # Previously this read kwargs directly and raised KeyError in that case.
    approximant = input_params['approximant']

    if approximant in _filter_ends:
        return _filter_ends[approximant](**input_params)
    else:
        return None
def get_waveform_filter_length_in_time(approximant, template=None, **kwargs):
    """For filter templates, return the length in time of the template.
    """
    kwargs = props(template, **kwargs)
    # Guard clause: unknown approximants have no duration estimator.
    if approximant not in _filter_time_lengths:
        return None
    return _filter_time_lengths[approximant](**kwargs)
# Public API of this module.
__all__ = ["get_td_waveform", "get_td_det_waveform_from_fd_det",
           "get_fd_waveform", "get_fd_waveform_sequence",
           "get_fd_det_waveform", "get_fd_det_waveform_sequence",
           "get_fd_waveform_from_td",
           "print_td_approximants", "print_fd_approximants",
           "td_approximants", "fd_approximants",
           "get_waveform_filter", "filter_approximants",
           "get_waveform_filter_norm", "get_waveform_end_frequency",
           "waveform_norm_exists", "get_template_amplitude_norm",
           "get_waveform_filter_length_in_time", "get_sgburst_waveform",
           "print_sgburst_approximants", "sgburst_approximants",
           "td_waveform_to_fd_waveform", "get_two_pol_waveform_filter",
           "NoWaveformError", "FailedWaveformError", "get_td_waveform_from_fd",
           'cpu_fd', 'cpu_td', 'fd_sequence', 'fd_det_sequence', 'fd_det',
           '_filter_time_lengths']
| 57,449
| 39.860597
| 107
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/pycbc_phenomC_tmplt.py
|
# Copyright (C) 2012 Prayush Kumar
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with with program; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import lal
import numpy
from numpy import sqrt, log, float128
from pycuda.elementwise import ElementwiseKernel
from pycbc.libutils import pkg_config_header_strings
from pycbc.types import FrequencySeries, zeros, Array, complex64
# CUDA preamble shared by the kernel below: pulls in LAL's physical
# constants (e.g. LAL_PI).
preamble = """
#include <lal/LALConstants.h>
"""
phenomC_text = """
/* ********* Main paper : Phys Rev D82, 064016 (2010) ********* */
const double f = (double) (i + kmin ) * delta_f;
const double fd = (double) m_sec * f;
const double v = (double) cbrt(piM*f);
const double v2 = v * v;
const double v3 = v * v * v;
const double v4 = v2 * v2;
const double v5 = v2 * v3;
const double v6 = v3 * v3;
const double v7 = v3 * v4;
const double w = (double) cbrt( m_sec * f );
const double w3 = (double) w * w * w;
/* ******************************************************* */
/* *********************** Phasing *********************** */
/* This is defined in Eq 5.1 - 5.9, 3.13 of the main paper */
/* ******************************************************* */
double phSPA = 1. + pfa2 * v2 + pfa3 * v3 + pfa4 * v4 +
(1. + log(v3)) * pfa5 * v5 + (pfa6 + pfa6log * log(v3))*v6 +
pfa7 * v7;
phSPA *= (pfaN / v5);
phSPA -= (LAL_PI/4.0);
double phPM = (a1/(w3 * w * w)) + (a2/w3) + (a3/w) + a4 + (a5 * w * w) +(a6 * w3);
phPM /= eta;
double phRD = b1 + b2*fd;
double wPlusf1 = 0.5*(1. + tanh( (4*(fd - Mf1)/d1) ));
double wMinusf1 = 0.5*(1. - tanh( (4*(fd - Mf1)/d1) ));
double wPlusf2 = 0.5*(1. + tanh( (4*(fd - Mf2)/d2) ));
double wMinusf2 = 0.5*(1. - tanh( (4*(fd - Mf2)/d2) ));
double phasing = (phSPA * ((double) wMinusf1)) + (phPM * ((double) wPlusf1 * wMinusf2)) +
(phRD * ((double) wPlusf2));
/* ******************************************************* */
/* ********************** Amplitude **************** */
/* *** This is defined in Eq 5.11 - 5.13, 3.10, 3.6 ****** */
/* ******************************************************* */
double xdot = 1. + xdota2 * v2 + xdota3 * v3 + xdota4 * v4 + xdota5 * v5 +
(xdota6 + xdota6log * log(v2)) * v6 + xdota7 * v7;
xdot *= (xdotaN * v5 * v5);
double omgdot = 0.0, ampfac = 0.0;
double ampSPA = 0.0, ampSPAre = 0.0, ampSPAim = 0.0;
/* If xdot becomes negative, take ampSPA = 0.0 */
/* This is valid because it becomes negative much after ISCO */
if( xdot > 0.0 )
{
omgdot = 1.5 * v * xdot;
ampfac = sqrt( LAL_PI / omgdot );
ampSPAre = ampfac * AN * v2 * (1. + A2 * v2 + A3 * v3 + A4 * v4 +
A5 * v5 + (A6 + A6log * log(v2)) * v6);
ampSPAim = ampfac * AN * v2 * (A5imag * v5 + A6imag * v6);
ampSPA = sqrt( ampSPAre * ampSPAre + ampSPAim * ampSPAim );
}
double ampPM = ampSPA + (g1 * pow(fd, 5./6.));
const double sig = Mfrd * del2 / Q;
double sig2 = sig * sig;
double L = sig2 / ((fd - Mfrd) * (fd - Mfrd) + sig2/4.);
double ampRD = del1 * L * pow( fd, -7./6.);
double wPlusf0 = 0.5*(1. + tanh( (4*(fd - Mf0)/d0) ));
double wMinusf0 = 0.5*(1. - tanh( (4*(fd - Mf0)/d0) ));
double amplitude = (ampPM * ((double) wMinusf0)) + (ampRD * ((double) wPlusf0));
amplitude /= distance;
/* ************** htilde **************** */
htilde[i]._M_re = amplitude * cos( phasing );
htilde[i]._M_im = -1.0 * amplitude * sin( phasing );
"""
phenomC_kernel = ElementwiseKernel("""pycuda::complex<double> *htilde, int kmin, double delta_f,
double eta, double Xi, double distance,
double m_sec, double piM, double Mfrd,
double pfaN, double pfa2, double pfa3, double pfa4,
double pfa5, double pfa6, double pfa6log, double pfa7,
double a1, double a2, double a3, double a4,
double a5, double a6, double b1, double b2,
double Mf1, double Mf2, double Mf0,
double d1, double d2, double d0,
double xdota2, double xdota3, double xdota4,
double xdota5, double xdota6, double xdota6log,
double xdota7, double xdotaN, double AN,
double A2, double A3, double A4, double A5,
double A5imag, double A6, double A6log, double A6imag,
double g1, double del1, double del2, double Q""",
phenomC_text, "phenomC_kernel",
preamble=preamble, options=pkg_config_header_strings(['lal']))
def FinalSpin( Xi, eta ):
    """Compute the dimensionless spin of the black hole formed after merger,
    using Eq. 5-6 of arXiv:0710.3345.
    """
    # Fit coefficients from arXiv:0710.3345
    s4, s5 = -0.129, -0.384
    t0, t2, t3 = -2.686, -3.454, 2.353
    eta_xi = eta * Xi
    eta_sq = eta * eta
    # Assemble the fit term by term
    spin = Xi
    spin += s4 * Xi * eta_xi + s5 * eta_xi * eta + t0 * eta_xi
    spin += 2. * (3. ** 0.5) * eta
    spin += t2 * eta_sq + t3 * eta_sq * eta
    if spin > 1.0:
        raise ValueError("Value of final spin > 1.0. Aborting")
    return spin
def fRD( a, M):
    """Calculate the ring-down frequency for the final Kerr BH. Using Eq. 5.5 of Main paper"""
    # Dimensional prefactor converting the fit to Hz for mass M (solar masses)
    prefac = lal.C_SI**3.0 / (2.0*lal.PI*lal.G_SI*M*lal.MSUN_SI)
    return prefac * (1.5251 - 1.1568*(1.0-a)**0.1292)
def Qa( a ):
    """Calculate the quality factor of ring-down, using Eq 5.6 of Main paper"""
    quality = 0.7 + 1.4187 * (1.0 - a) ** -0.4990
    return quality
#Functions to calculate the Tanh window, defined in Eq 5.8 of the main paper
def imrphenomc_tmplt(**kwds):
    """ Return an IMRPhenomC waveform using CUDA to generate the phase and amplitude
    Main Paper: arXiv:1005.3306

    Required keyword arguments: 'f_lower', 'f_final', 'delta_f',
    'distance' (Mpc), 'mass1', 'mass2' (solar masses), 'spin1z',
    'spin2z'.  An optional 'out' Array (complex128, length >= kmax) may
    be supplied to hold the output in place.

    Returns the plus and cross polarizations as FrequencySeries
    (hc = 1j * hp).  Raises TypeError if 'out' is not a complex128
    Array of sufficient length.
    """
    # Pull out the input arguments
    f_min = float128(kwds['f_lower'])
    f_max = float128(kwds['f_final'])
    delta_f = float128(kwds['delta_f'])
    distance = float128(kwds['distance'])
    mass1 = float128(kwds['mass1'])
    mass2 = float128(kwds['mass2'])
    spin1z = float128(kwds['spin1z'])
    spin2z = float128(kwds['spin2z'])

    out = kwds.get('out', None)

    # Calculate binary parameters
    M = mass1 + mass2
    eta = mass1 * mass2 / (M * M)
    # Effective (mass-weighted) aligned spin and its powers
    Xi = (mass1 * spin1z / M) + (mass2 * spin2z / M)
    Xisum = 2.*Xi
    Xiprod = Xi*Xi
    Xi2 = Xi*Xi
    eta2 = eta*eta

    m_sec = M * lal.MTSUN_SI
    piM = lal.PI * m_sec

    ## The units of distance given as input is taken to be Mpc. Converting to SI
    distance *= (1.0e6 * lal.PC_SI / (2. * sqrt(5. / (64.*lal.PI)) * M * lal.MRSUN_SI * M * lal.MTSUN_SI))

    # Check if the value of f_max is correctly given, else replace with the fCut
    # used in the PhenomB code in lalsimulation. The various coefficients come
    # from Eq.(4.18) of http://arxiv.org/pdf/0710.2335 and
    # Table I of http://arxiv.org/pdf/0712.0343
    if not f_max:
        f_max = (1.7086 * eta * eta - 0.26592 * eta + 0.28236) / piM

    # Transform the eta, chi to Lambda parameters, using Eq 5.14, Table II of
    # the main paper.
    z101 = -2.417e-03
    z102 = -1.093e-03
    z111 = -1.917e-02
    z110 = 7.267e-02
    z120 = -2.504e-01
    z201 = 5.962e-01
    z202 = -5.600e-02
    z211 = 1.520e-01
    z210 = -2.970e+00
    z220 = 1.312e+01
    z301 = -3.283e+01
    z302 = 8.859e+00
    z311 = 2.931e+01
    z310 = 7.954e+01
    z320 = -4.349e+02
    z401 = 1.619e+02
    z402 = -4.702e+01
    z411 = -1.751e+02
    z410 = -3.225e+02
    z420 = 1.587e+03
    z501 = -6.320e+02
    z502 = 2.463e+02
    z511 = 1.048e+03
    z510 = 3.355e+02
    z520 = -5.115e+03
    z601 = -4.809e+01
    z602 = -3.643e+02
    z611 = -5.215e+02
    z610 = 1.870e+03
    z620 = 7.354e+02
    z701 = 4.149e+00
    z702 = -4.070e+00
    z711 = -8.752e+01
    z710 = -4.897e+01
    z720 = 6.665e+02
    z801 = -5.472e-02
    z802 = 2.094e-02
    z811 = 3.554e-01
    z810 = 1.151e-01
    z820 = 9.640e-01
    z901 = -1.235e+00
    z902 = 3.423e-01
    z911 = 6.062e+00
    z910 = 5.949e+00
    z920 = -1.069e+01

    # Calculate alphas, gamma, deltas from Table II and Eq 5.14 of Main paper
    a1 = z101 * Xi + z102 * Xi2 + z111 * eta * Xi + z110 * eta + z120 * eta2
    a2 = z201 * Xi + z202 * Xi2 + z211 * eta * Xi + z210 * eta + z220 * eta2
    a3 = z301 * Xi + z302 * Xi2 + z311 * eta * Xi + z310 * eta + z320 * eta2
    a4 = z401 * Xi + z402 * Xi2 + z411 * eta * Xi + z410 * eta + z420 * eta2
    a5 = z501 * Xi + z502 * Xi2 + z511 * eta * Xi + z510 * eta + z520 * eta2
    a6 = z601 * Xi + z602 * Xi2 + z611 * eta * Xi + z610 * eta + z620 * eta2
    g1 = z701 * Xi + z702 * Xi2 + z711 * eta * Xi + z710 * eta + z720 * eta2
    del1 = z801 * Xi + z802 * Xi2 + z811 * eta * Xi + z810 * eta + z820 * eta2
    del2 = z901 * Xi + z902 * Xi2 + z911 * eta * Xi + z910 * eta + z920 * eta2

    # Get the spin of the final BH and the ring-down quantities
    afin = FinalSpin( Xi, eta )
    Q = Qa( abs(afin) )

    # Get the fRD (ring-down frequency, in Hz and in geometric units)
    frd = fRD( abs(afin), M)
    Mfrd = frd * m_sec

    # Define the frequencies where SPA->PM->RD
    f1 = 0.1 * frd
    Mf1 = m_sec * f1
    f2 = frd
    Mf2 = m_sec * f2
    d1 = 0.005
    d2 = 0.005
    f0 = 0.98 * frd
    Mf0 = m_sec * f0
    d0 = 0.015

    # Now use this frequency for calculation of betas
    # calculate beta1 and beta2, that appear in Eq 5.7 in the main paper.
    b2 = ((-5./3.)* a1 * pow(Mfrd,(-8./3.)) - a2/(Mfrd*Mfrd) - \
          (a3/3.)*pow(Mfrd,(-4./3.)) + (2./3.)* a5 * pow(Mfrd,(-1./3.)) + a6)/eta

    psiPMrd = (a1 * pow(Mfrd,(-5./3.)) + a2/Mfrd + a3 * pow(Mfrd,(-1./3.)) + \
               a4 + a5 * pow(Mfrd,(2./3.)) + a6 * Mfrd)/eta
    b1 = psiPMrd - (b2 * Mfrd)

    ### Calculate the PN coefficients, Eq A3 - A5 of main paper ###
    pfaN = 3.0/(128.0 * eta)
    pfa2 = (3715./756.) + (55.*eta/9.0)
    pfa3 = -16.0*lal.PI + (113./3.)*Xi - 38.*eta*Xisum/3.
    pfa4 = (152.93365/5.08032) - 50.*Xi2 + eta*(271.45/5.04 + 1.25*Xiprod) + \
           3085.*eta2/72.
    pfa5 = lal.PI*(386.45/7.56 - 65.*eta/9.) - \
           Xi*(735.505/2.268 + 130.*eta/9.) + Xisum*(1285.0*eta/8.1 + 170.*eta2/9.) - \
           10.*Xi2*Xi/3. + 10.*eta*Xi*Xiprod
    pfa6 = 11583.231236531/4.694215680 - 640.0*lal.PI*lal.PI/3. - \
           6848.0*lal.GAMMA/21. - 684.8*log(64.)/6.3 + \
           eta*(2255.*lal.PI*lal.PI/12. - 15737.765635/3.048192) + \
           76.055*eta2/1.728 - (127.825*eta2*eta/1.296) + \
           2920.*lal.PI*Xi/3. - (175. - 1490.*eta)*Xi2/3. - \
           (1120.*lal.PI/3. - 1085.*Xi/3.)*eta*Xisum + \
           (269.45*eta/3.36 - 2365.*eta2/6.)*Xiprod
    pfa6log = -6848./63.
    pfa7 = lal.PI*(770.96675/2.54016 + 378.515*eta/1.512 - 740.45*eta2/7.56) - \
           Xi*(20373.952415/3.048192 + 1509.35*eta/2.24 - 5786.95*eta2/4.32) + \
           Xisum*(4862.041225*eta/1.524096 + 1189.775*eta2/1.008 - 717.05*eta2*eta/2.16 - 830.*eta*Xi2/3. + 35.*eta2*Xiprod/3.) - \
           560.*lal.PI*Xi2 + 20.*lal.PI*eta*Xiprod + \
           Xi2*Xi*(945.55/1.68 - 85.*eta) + Xi*Xiprod*(396.65*eta/1.68 + 255.*eta2)

    # PN expansion coefficients of the frequency evolution x-dot
    xdotaN = 64.*eta/5.
    xdota2 = -7.43/3.36 - 11.*eta/4.
    xdota3 = 4.*lal.PI - 11.3*Xi/1.2 + 19.*eta*Xisum/6.
    xdota4 = 3.4103/1.8144 + 5*Xi2 + eta*(13.661/2.016 - Xiprod/8.) + 5.9*eta2/1.8
    xdota5 = -lal.PI*(41.59/6.72 + 189.*eta/8.) - Xi*(31.571/1.008 - 116.5*eta/2.4) + \
             Xisum*(21.863*eta/1.008 - 79.*eta2/6.) - 3*Xi*Xi2/4. + \
             9.*eta*Xi*Xiprod/4.
    xdota6 = 164.47322263/1.39708800 - 17.12*lal.GAMMA/1.05 + \
             16.*lal.PI*lal.PI/3 - 8.56*log(16.)/1.05 + \
             eta*(45.1*lal.PI*lal.PI/4.8 - 561.98689/2.17728) + \
             5.41*eta2/8.96 - 5.605*eta*eta2/2.592 - 80.*lal.PI*Xi/3. + \
             eta*Xisum*(20.*lal.PI/3. - 113.5*Xi/3.6) + \
             Xi2*(64.153/1.008 - 45.7*eta/3.6) - \
             Xiprod*(7.87*eta/1.44 - 30.37*eta2/1.44)
    xdota6log = -856./105.
    xdota7 = -lal.PI*(4.415/4.032 - 358.675*eta/6.048 - 91.495*eta2/1.512) - \
             Xi*(252.9407/2.7216 - 845.827*eta/6.048 + 415.51*eta2/8.64) + \
             Xisum*(158.0239*eta/5.4432 - 451.597*eta2/6.048 + 20.45*eta2*eta/4.32 + 107.*eta*Xi2/6. - 5.*eta2*Xiprod/24.) + \
             12.*lal.PI*Xi2 - Xi2*Xi*(150.5/2.4 + eta/8.) + \
             Xi*Xiprod*(10.1*eta/2.4 + 3.*eta2/8.)

    # PN amplitude coefficients
    AN = 8.*eta*sqrt(lal.PI/5.)
    A2 = (-107. + 55.*eta)/42.
    A3 = 2.*lal.PI - 4.*Xi/3. + 2.*eta*Xisum/3.
    A4 = -2.173/1.512 - eta*(10.69/2.16 - 2.*Xiprod) + 2.047*eta2/1.512
    A5 = -10.7*lal.PI/2.1 + eta*(3.4*lal.PI/2.1)
    A5imag = -24.*eta
    A6 = 270.27409/6.46800 - 8.56*lal.GAMMA/1.05 + \
         2.*lal.PI*lal.PI/3. + \
         eta*(4.1*lal.PI*lal.PI/9.6 - 27.8185/3.3264) - \
         20.261*eta2/2.772 + 11.4635*eta*eta2/9.9792 - \
         4.28*log(16.)/1.05
    A6log = -428./105.
    A6imag = 4.28*lal.PI/1.05

    ### Define other parameters needed by waveform generation ###
    kmin = int(f_min / delta_f)
    kmax = int(f_max / delta_f)
    n = kmax + 1

    if out is None:
        htilde = FrequencySeries(zeros(n,dtype=numpy.complex128), delta_f=delta_f, copy=False)
    else:
        if type(out) is not Array:
            raise TypeError("Output must be an instance of Array")
        if len(out) < kmax:
            raise TypeError("Output array is too small")
        # The kernel operates on double-precision complex data, so the
        # supplied buffer must be complex128 to match both the default
        # allocation above and the kernel's pycuda::complex<double>
        # signature.  (The previous check against complex64 would have
        # let a single-precision buffer be reinterpreted as doubles.)
        if out.dtype != numpy.complex128:
            raise TypeError("Output array is the wrong dtype")
        htilde = FrequencySeries(out, delta_f=delta_f, copy=False)

    # Fill htilde[kmin:kmax] on the GPU; the kernel computes the
    # amplitude/phase at each frequency (i + kmin) * delta_f.
    phenomC_kernel(htilde.data[kmin:kmax], kmin, delta_f, eta, Xi, distance,
                   m_sec, piM, Mfrd,
                   pfaN, pfa2, pfa3, pfa4, pfa5, pfa6, pfa6log, pfa7,
                   a1, a2, a3, a4, a5, a6, b1, b2,
                   Mf1, Mf2, Mf0, d1, d2, d0,
                   xdota2, xdota3, xdota4, xdota5, xdota6, xdota6log,
                   xdota7, xdotaN, AN, A2, A3, A4, A5,
                   A5imag, A6, A6log, A6imag,
                   g1, del1, del2, Q )
    hp = htilde
    # The cross polarization is the plus polarization rotated by pi/2
    hc = htilde * 1j
    return hp, hc
| 15,018
| 37.022785
| 128
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/supernovae.py
|
"""Generate core-collapse supernovae waveform for core bounce and
subsequent postbounce oscillations.
"""
import numpy
import h5py
from pycbc.types import TimeSeries
_pc_dict = {}
def get_corecollapse_bounce(**kwargs):
    """Generate a core-bounce + postbounce supernova waveform from a
    principal-component basis stored in an HDF file.

    The waveform parameters are the principal-component coefficients
    (given either as ``coefficients_array`` or as individual ``coeff_*``
    keywords), the number of components to use (``no_of_pcs``) and the
    ``distance`` (Mpc).  Returns plus and cross TimeSeries; the cross
    polarization is identically zero.
    """
    # Load (and cache) the principal-component basis on first use.
    if 'principal_components' in _pc_dict:
        pcs = _pc_dict['principal_components']
    else:
        with h5py.File(kwargs['principal_components_file'], 'r') as pc_file:
            pcs = numpy.array(pc_file['principal_components'])
        _pc_dict['principal_components'] = pcs
    # Gather the PC coefficients.
    if 'coefficients_array' in kwargs:
        coeffs = kwargs['coefficients_array']
    else:
        # NOTE(review): numpy.sort orders the 'coeff_*' keys
        # lexicographically, so 'coeff_10' would sort before 'coeff_2' --
        # confirm coefficient names never reach double digits.
        coeff_keys = numpy.sort(numpy.array(
            [key for key in kwargs if key.startswith('coeff_')]))
        coeffs = numpy.array([kwargs[key] for key in coeff_keys])
    # Truncate both the coefficients and the basis to the requested size.
    n_pcs = int(kwargs['no_of_pcs'])
    coeffs = coeffs[:n_pcs]
    pcs = pcs[:n_pcs]
    assert len(coeffs) == len(pcs)
    # Convert the distance from Mpc to metres and scale the strain.
    dist_si = kwargs['distance'] * 3.08567758128e+22
    strain = numpy.dot(coeffs, pcs) / dist_si
    dt = kwargs['delta_t']
    outhp = TimeSeries(strain, delta_t=dt)
    outhc = TimeSeries(numpy.zeros(len(strain)), delta_t=dt)
    return outhp, outhc
# Approximant names ###########################################################
# Maps the time-domain approximant name exposed to pycbc.waveform onto its
# generator function.
supernovae_td_approximants = {'CoreCollapseBounce': get_corecollapse_bounce}
| 1,893
| 34.735849
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/spa_tmplt_cuda.py
|
#
# Apapted from code in LALSimInpspiralTaylorF2.c
#
# Copyright (C) 2007 Jolien Creighton, B.S. Sathyaprakash, Thomas Cokelaer
# Copyright (C) 2012 Leo Singer, Alex Nitz
# Adapted from code found in:
# - LALSimInspiralTaylorF2.c
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with with program; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
from pycuda.elementwise import ElementwiseKernel
from pycbc.libutils import pkg_config_header_strings
preamble = """
#include <lal/LALConstants.h>
"""
taylorf2_text = """
const float f = (i + kmin ) * delta_f;
const float amp2 = amp * __powf(f, -7.0/6.0);
const float v = __powf(piM*f, 1.0/3.0);
const float v2 = v * v;
const float v3 = v2 * v;
const float v4 = v2 * v2;
const float v5 = v2 * v3;
const float v6 = v3 * v3;
const float v7 = v3 * v4;
float phasing = 0.;
float log4 = 1.386294361;
float logv = __logf(v);
switch (phase_order)
{
case -1:
case 7:
phasing += pfa7 * v7;
case 6:
phasing += (pfa6 + pfl6 * (logv + log4) ) * v6;
case 5:
phasing += (pfa5 + pfl5 * (logv) ) * v5;
case 4:
phasing += pfa4 * v4;
case 3:
phasing += pfa3 * v3;
case 2:
phasing += pfa2 * v2;
case 0:
phasing += 1.;
break;
default:
break;
}
phasing *= pfaN / v5;
phasing -= LAL_PI_4;
phasing -= int(phasing / (LAL_TWOPI)) * LAL_TWOPI;
float pcos;
float psin;
__sincosf(phasing, &psin, &pcos);
htilde[i]._M_re = pcos * amp2;
htilde[i]._M_im = - psin * amp2;
"""
taylorf2_kernel = ElementwiseKernel("""pycuda::complex<float> *htilde, int kmin, int phase_order,
float delta_f, float piM, float pfaN,
float pfa2, float pfa3, float pfa4, float pfa5, float pfl5,
float pfa6, float pfl6, float pfa7, float amp""",
taylorf2_text, "SPAtmplt",
preamble=preamble, options=pkg_config_header_strings(['lal']))
def spa_tmplt_engine(htilde, kmin, phase_order,
                     delta_f, piM, pfaN,
                     pfa2, pfa3, pfa4, pfa5, pfl5,
                     pfa6, pfl6, pfa7, amp_factor):
    """Fill ``htilde`` with the TaylorF2 stationary-phase template by
    launching the CUDA elementwise kernel with the precomputed PN
    phasing coefficients.
    """
    kernel_args = (htilde.data, kmin, phase_order,
                   delta_f, piM, pfaN,
                   pfa2, pfa3, pfa4, pfa5, pfl5,
                   pfa6, pfl6, pfa7, amp_factor)
    taylorf2_kernel(*kernel_args)
| 3,243
| 32.443299
| 98
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/ringdown.py
|
# Copyright (C) 2016 Miriam Cabero Mueller
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Generate ringdown templates in the time and frequency domain.
"""
import numpy, lal
try:
import pykerr
except ImportError:
pykerr = None
from pycbc.types import (TimeSeries, FrequencySeries, float64, complex128,
zeros)
from pycbc.waveform.waveform import get_obj_attrs
from pycbc.conversions import get_lm_f0tau_allmodes
# Required template parameters for each ringdown parameterization.
qnm_required_args = ['f_0', 'tau', 'amp', 'phi']
mass_spin_required_args = ['final_mass','final_spin', 'lmns', 'inclination']
freqtau_required_args = ['lmns']
# Default domain arguments for time- and frequency-domain generation.
td_args = {'delta_t': None, 't_final': None, 'taper': False}
fd_args = {'t_0': 0, 'delta_f': None, 'f_lower': 0, 'f_final': None}

# Nyquist frequency of a 16384 Hz sample rate, and the corresponding
# smallest allowed time step.
max_freq = 16384/2.
min_dt = 1. / (2 * max_freq)
pi = numpy.pi
two_pi = 2 * numpy.pi
pi_sq = numpy.pi * numpy.pi
# Input parameters ############################################################
def props(obj, required, domain_args, **kwargs):
    """Build the input-parameter dictionary for waveform generation.

    Precedence (lowest to highest): ``domain_args`` defaults, attributes
    of the template ``obj``, then explicit ``kwargs``.  Raises
    ValueError if any name listed in ``required`` is missing from the
    result.
    """
    params = dict(domain_args)
    # Template-object attributes override the domain defaults...
    params.update(get_obj_attrs(obj))
    # ...and explicit keyword arguments override both.
    params.update(kwargs)
    for name in required:
        if name not in params:
            raise ValueError('Please provide ' + str(name))
    return params
def format_lmns(lmns):
    """Normalize the ``lmns`` parameter to a list of 3-character strings.

    Accepted inputs are a single whitespace-separated string of lmn
    modes (e.g. ``'223 331'`` for the 220, 221, 222 and 330 modes), a
    list of such strings, or a float encoding a single mode (as produced
    when injection values are cast to float).  Each entry must be three
    digits long with a non-zero overtone count n; otherwise a ValueError
    is raised.
    """
    # Injection values may arrive as floats; cast through int to string.
    if isinstance(lmns, float):
        lmns = str(int(lmns))
    if isinstance(lmns, str):
        # Case 1: a single whitespace-separated string, e.g. '221 331'.
        lmns = lmns.split(' ')
    elif not isinstance(lmns, list):
        # Case 2 (a list of strings) needs no conversion; anything else
        # is rejected.
        raise ValueError('Format of parameter lmns not recognized. See '
                         'approximant documentation for more info.')
    formatted = []
    for lmn in lmns:
        # Cast through int to guarantee a plain digit string.
        lmn = str(int(lmn))
        if len(lmn) != 3:
            raise ValueError('Format of parameter lmns not recognized. See '
                             'approximant documentation for more info.')
        if int(lmn[2]) == 0:
            raise ValueError('Number of overtones (nmodes) must be greater '
                             'than zero in lmn={}.'.format(lmn))
        formatted.append(lmn)
    return formatted
def parse_mode(lmn):
    """Expand an lmn string into its individual overtone modes.

    The third character of ``lmn`` is the number of overtones n; e.g.
    ``'222'`` expands to ``['220', '221']``.
    """
    lm = lmn[:2]
    nmodes = int(lmn[2])
    return [lm + '{}'.format(k) for k in range(nmodes)]
def lm_amps_phases(**kwargs):
    r"""Extract per-overtone amplitudes and phases from input parameters.

    Amplitudes are specified relative to a reference mode (``ref_amp``,
    by default the 220 mode) whose amplitude is given absolutely; every
    other mode's ``amp{lmn}`` is multiplied by the reference amplitude.
    Also collects the ``dphi{lmn}`` and ``dbeta{lmn}`` deviations,
    falling back to the global ``dphi``/``dbeta`` values (default 0) for
    modes that do not set their own.

    Returns dictionaries of amplitudes, phases, dbetas and dphis keyed
    by mode, raising ValueError for any missing required parameter.
    """
    lmns = format_lmns(kwargs['lmns'])
    amps, phis, dbetas, dphis = {}, {}, {}, {}
    # Identify the reference mode; default is the 220 mode.
    ref_amp = kwargs.pop('ref_amp', None)
    if ref_amp is None:
        ref_amp = 'amp220'
    # Global (per-waveform) deviations from circular polarization.
    ref_dbeta = kwargs.pop('dbeta', 0.)
    ref_dphi = kwargs.pop('dphi', 0.)
    if isinstance(ref_amp, str) and ref_amp.startswith('amp'):
        # A mode name was given; its amplitude is absolute.
        ref_mode = ref_amp.replace('amp', '')
        try:
            ref_amp = kwargs.pop(ref_amp)
        except KeyError:
            raise ValueError("Must provide an amplitude for the reference "
                             "mode {}".format(ref_amp))
        amps[ref_mode] = ref_amp
    else:
        ref_mode = None
    for lmn in lmns:
        for mode in parse_mode(lmn):
            if mode != ref_mode:
                # Non-reference amplitudes are relative to the reference.
                try:
                    amps[mode] = kwargs['amp' + mode] * ref_amp
                except KeyError:
                    raise ValueError('amp{} is required'.format(mode))
            try:
                phis[mode] = kwargs['phi' + mode]
            except KeyError:
                raise ValueError('phi{} is required'.format(mode))
            dphis[mode] = kwargs.pop('dphi' + mode, ref_dphi)
            dbetas[mode] = kwargs.pop('dbeta' + mode, ref_dbeta)
    return amps, phis, dbetas, dphis
def lm_freqs_taus(**kwargs):
    """Extract per-overtone frequencies and damping times.

    Returns two dictionaries keyed by mode (from ``f_{lmn}`` and
    ``tau_{lmn}``), raising ValueError for any mode missing either
    parameter.
    """
    lmns = format_lmns(kwargs['lmns'])
    freqs = {}
    taus = {}
    for lmn in lmns:
        for mode in parse_mode(lmn):
            try:
                freqs[mode] = kwargs['f_' + mode]
            except KeyError:
                raise ValueError('f_{} is required'.format(mode))
            try:
                taus[mode] = kwargs['tau_' + mode]
            except KeyError:
                raise ValueError('tau_{} is required'.format(mode))
    return freqs, taus
def lm_arbitrary_harmonics(**kwargs):
    """Collect the arbitrary-harmonic angles ``pol{lmn}`` and
    ``polnm{lmn}`` for every requested mode, defaulting to None for any
    mode that does not provide them.
    """
    pols = {}
    polnms = {}
    for lmn in format_lmns(kwargs['lmns']):
        for mode in parse_mode(lmn):
            pols[mode] = kwargs.pop('pol{}'.format(mode), None)
            polnms[mode] = kwargs.pop('polnm{}'.format(mode), None)
    return pols, polnms
# Functions to obtain t_final, f_final and output vector ######################
def qnm_time_decay(tau, decay):
    """Return the time at which a ringdown's amplitude has fallen to a
    given fraction of its peak.

    Parameters
    ----------
    tau : float
        The damping time of the sinusoid.
    decay : float
        The fraction of the peak amplitude.

    Returns
    -------
    float
        The time at which the time-domain ringdown amplitude equals
        ``decay`` times the peak amplitude.
    """
    # A e^{-t/tau} = A * decay  =>  t = -tau * ln(decay)
    return numpy.log(decay) * -tau
def qnm_freq_decay(f_0, tau, decay):
    """Return the frequency at which a ringdown's frequency-domain
    amplitude has fallen to a given fraction of its peak.

    Parameters
    ----------
    f_0 : float
        The ringdown frequency, which gives the peak amplitude.
    tau : float
        The damping time of the sinusoid.
    decay : float
        The fraction of the peak amplitude.

    Returns
    -------
    float
        The frequency at which the frequency-domain ringdown amplitude
        equals ``decay`` times the peak amplitude.
    """
    q_0 = numpy.pi * f_0 * tau
    alpha = 1. / decay
    alpha_sq = alpha * alpha
    # Closed form obtained analytically assuming 1/decay^2 >> 1 and
    # q_0^2 >> 1.
    q_sq = 0.25 * (alpha_sq + 4. * q_0 * q_0
                   + alpha * numpy.sqrt(alpha_sq + 16. * q_0 * q_0))
    return numpy.sqrt(q_sq) / (numpy.pi * tau)
def lm_tfinal(damping_times):
    """Return the latest end time over the given modes, defined as the
    time at which the amplitude has decayed to 1/1000 of its peak.

    ``damping_times`` may be a single damping time or a dict of them
    keyed by mode.
    """
    if isinstance(damping_times, dict):
        return max(qnm_time_decay(tau, 1./1000)
                   for tau in damping_times.values())
    return qnm_time_decay(damping_times, 1./1000)
def lm_deltat(freqs, damping_times):
    """Return the smallest sample spacing needed to resolve all modes.

    delta_t is the inverse of the frequency at which the ringdown
    amplitude falls to 1/1000 of the peak, floored at ``min_dt``.
    ``freqs`` and ``damping_times`` must both be dicts keyed by mode, or
    both scalars; a dict/scalar mixture raises ValueError.
    """
    freqs_isdict = isinstance(freqs, dict)
    taus_isdict = isinstance(damping_times, dict)
    if freqs_isdict and taus_isdict:
        delta_t = min(1. / qnm_freq_decay(freqs[lmn],
                                          damping_times[lmn], 1./1000)
                      for lmn in freqs)
    elif freqs_isdict:
        raise ValueError('Missing damping times.')
    elif taus_isdict:
        raise ValueError('Missing frequencies.')
    else:
        delta_t = 1. / qnm_freq_decay(freqs, damping_times, 1./1000)
    # Never go below the smallest supported time step.
    return max(delta_t, min_dt)
def lm_ffinal(freqs, damping_times):
    """Return the highest final frequency over the given modes, defined
    as the frequency at which the amplitude falls to 1/1000 of the peak,
    capped at ``max_freq``.

    ``freqs`` and ``damping_times`` must both be dicts keyed by mode, or
    both scalars; a dict/scalar mixture raises ValueError.
    """
    freqs_isdict = isinstance(freqs, dict)
    taus_isdict = isinstance(damping_times, dict)
    if freqs_isdict and taus_isdict:
        f_final = max(qnm_freq_decay(freqs[lmn],
                                     damping_times[lmn], 1./1000)
                      for lmn in freqs)
    elif freqs_isdict:
        raise ValueError('Missing damping times.')
    elif taus_isdict:
        raise ValueError('Missing frequencies.')
    else:
        f_final = qnm_freq_decay(freqs, damping_times, 1./1000)
    # Never exceed the highest representable frequency.
    return min(f_final, max_freq)
def lm_deltaf(damping_times):
    """Return the smallest frequency resolution over the given modes,
    the inverse of the time at which the amplitude has decayed to 1/1000
    of its peak.

    ``damping_times`` may be a single damping time or a dict of them.
    """
    if isinstance(damping_times, dict):
        return min(1. / qnm_time_decay(tau, 1./1000)
                   for tau in damping_times.values())
    return 1. / qnm_time_decay(damping_times, 1./1000)
def td_output_vector(freqs, damping_times, taper=False,
                     delta_t=None, t_final=None):
    """Return two zeroed TimeSeries (plus and cross) long enough to hold
    all the quasi-normal modes described by ``freqs`` and
    ``damping_times``.  With ``taper``, extra room is allocated before
    t=0 for the tapering window and the epoch is shifted accordingly.
    """
    if not delta_t:
        delta_t = lm_deltat(freqs, damping_times)
    if not t_final:
        t_final = lm_tfinal(damping_times)
    nsamples = int(t_final / delta_t) + 1
    if taper:
        # The taper window extends before t=0 by up to the longest
        # damping time; enlarge the vector to fit it.
        if isinstance(damping_times, dict):
            max_tau = max(damping_times.values())
        else:
            max_tau = damping_times
        nsamples += int(max_tau / delta_t)
    outplus = TimeSeries(zeros(nsamples, dtype=float64), delta_t=delta_t)
    outcross = TimeSeries(zeros(nsamples, dtype=float64), delta_t=delta_t)
    if taper:
        start = -max_tau
        # Round the epoch onto the sample grid so t=0 remains a sample.
        start -= start % delta_t
        outplus._epoch = start
        outcross._epoch = start
    return outplus, outcross
def fd_output_vector(freqs, damping_times, delta_f=None, f_final=None):
    """Return two zeroed FrequencySeries (plus and cross) large enough
    to hold all the quasi-normal modes described by ``freqs`` and
    ``damping_times``.
    """
    if not delta_f:
        delta_f = lm_deltaf(damping_times)
    if not f_final:
        f_final = lm_ffinal(freqs, damping_times)
    nsamples = int(f_final / delta_f) + 1
    outplus = FrequencySeries(zeros(nsamples, dtype=complex128),
                              delta_f=delta_f)
    outcross = FrequencySeries(zeros(nsamples, dtype=complex128),
                               delta_f=delta_f)
    return outplus, outcross
# Spherical harmonics and Kerr factor #########################################
def spher_harms(harmonics='spherical', l=None, m=None, n=0,
                inclination=0., azimuthal=0.,
                spin=None, pol=None, polnm=None):
    r"""Return the harmonics of the +m and -m modes.

    Depending on ``harmonics``, the returned pair are the -2
    spin-weighted spherical harmonics, the spheroidal harmonics (which
    need ``spin`` and the pykerr package), or the arbitrary complex
    numbers :math:`e^{i\psi_{\pm}}` built from the ``pol``/``polnm``
    angles.

    Parameters
    ----------
    harmonics : {'spherical', 'spheroidal', 'arbitrary'}, optional
        The type of harmonic to generate. Default is spherical.
    l : int, optional
        The l index; required for spherical/spheroidal harmonics.
    m : int, optional
        The m index; required for spherical/spheroidal harmonics.
    n : int, optional
        The overtone number; only used for spheroidal harmonics.
        Default is 0.
    inclination : float, optional
        Inclination angle for spherical/spheroidal harmonics. Default 0.
    azimuthal : float, optional
        Azimuthal angle for spherical/spheroidal harmonics. Default 0.
    spin : float, optional
        Dimensionless BH spin; required for spheroidal harmonics.
    pol : float, optional
        Angle (radians) for the arbitrary "+m" harmonic; required when
        ``harmonics='arbitrary'``.
    polnm : float, optional
        Angle (radians) for the arbitrary "-m" harmonic; required when
        ``harmonics='arbitrary'``.

    Returns
    -------
    xlm : complex
        The harmonic of the +m mode.
    xlnm : complex
        The harmonic of the -m mode.
    """
    if harmonics == 'spherical':
        xlm = lal.SpinWeightedSphericalHarmonic(inclination, azimuthal,
                                                -2, l, m)
        xlnm = lal.SpinWeightedSphericalHarmonic(inclination, azimuthal,
                                                 -2, l, -m)
        return xlm, xlnm
    if harmonics == 'spheroidal':
        if spin is None:
            raise ValueError("must provide a spin for spheroidal harmonics")
        if pykerr is None:
            raise ImportError("pykerr must be installed for spheroidal "
                              "harmonics")
        xlm = pykerr.spheroidal(inclination, spin, l, m, n, phi=azimuthal)
        xlnm = pykerr.spheroidal(inclination, spin, l, -m, n,
                                 phi=azimuthal)
        return xlm, xlnm
    if harmonics == 'arbitrary':
        if pol is None or polnm is None:
            raise ValueError('must provide a pol and a polnm for arbitrary '
                             'harmonics')
        return numpy.exp(1j * pol), numpy.exp(1j * polnm)
    raise ValueError("harmonics must be either spherical, spheroidal, "
                     "or arbitrary")
def Kerr_factor(final_mass, distance):
    """Return the dimensionless ratio final_mass/distance for Kerr
    ringdowns, with ``final_mass`` in solar masses and ``distance`` in
    Mpc.
    """
    # Solar masses -> geometric length in metres (G M / c^2)
    mass_m = final_mass * lal.MSUN_SI * lal.G_SI / lal.C_SI ** 2
    # Mpc -> metres
    dist_m = distance * 1e6 * lal.PC_SI
    return mass_m / dist_m
######################################################
#### Basic functions to generate damped sinusoid
######################################################
def td_damped_sinusoid(f_0, tau, amp, phi, times,
                       l=2, m=2, n=0, inclination=0., azimuthal=0.,
                       dphi=0., dbeta=0.,
                       harmonics='spherical', final_spin=None,
                       pol=None, polnm=None):
    r"""Return the plus and cross polarizations of a time-domain damped
    sinusoid: one QNM combined with its -m counterpart.

    The +m and -m modes are combined as

    .. math::
        h_{l|m|n}(t) = A_{lmn} X_{lmn} e^{-t/\tau_{lmn}
                            + i(2\pi f_{lmn} t + \phi_{lmn})}
                     + A_{l-mn} X_{l-mn} e^{-t/\tau_{lmn}
                            - i(2\pi f_{lmn} t + \phi_{l-mn})},

    using the convention :math:`f_{l-mn} = -f_{lmn}` and
    :math:`\tau_{l-mn} = \tau_{lmn}`, with
    :math:`h^{+,\times} = (\Re, \Im)\{h_{l|m|n}\}`.  Here the
    :math:`X` are the harmonics from :py:func:`spher_harms`.  The -m
    amplitude and phase are parameterized by the deviations ``dbeta``
    (:math:`A_{l\pm mn} = A^0 \sqrt{2}\,(\cos,\sin)(\pi/4 + \Delta\beta)`)
    and ``dphi`` (:math:`\phi_{l-mn} = l\pi + \Delta\phi - \phi_{lmn}`)
    from circular polarization; both zero gives circular polarization,
    i.e. :math:`h_{l-mn} = (-1)^l h_{lmn}^*`.

    Parameters
    ----------
    f_0 : float
        The central frequency of the damped sinusoid, in Hz.
    tau : float
        The damping time, in seconds.
    amp : float
        The fiducial amplitude :math:`A^0_{lmn}`.
    phi : float
        The reference phase :math:`\phi_{lmn}` of the +m mode.
    times : array
        Monotonically increasing times, with t=0 the ringdown start.
        Negative times are tapered with a 10x-faster exponential ramp.
    l, m, n : int, optional
        Mode and overtone indices; defaults are 2, 2, 0.
    inclination, azimuthal : float, optional
        Angles passed to the harmonics (ignored for 'arbitrary').
        Default 0.
    dphi, dbeta : float, optional
        Phase/amplitude deviations of the -m mode from circular
        polarization. Default 0.
    harmonics : {'spherical', 'spheroidal', 'arbitrary'}, optional
        Which harmonics to use; see :py:func:`spher_harms`. Default is
        spherical.
    final_spin : float, optional
        Dimensionless BH spin; only needed for spheroidal harmonics.
    pol, polnm : float, optional
        Angles for arbitrary harmonics; see :py:func:`spher_harms`.

    Returns
    -------
    hplus : numpy.ndarray
        The plus polarization.
    hcross : numpy.ndarray
        The cross polarization.
    """
    # evaluate the +/-m harmonics
    xlm, xlnm = spher_harms(harmonics=harmonics, l=l, m=m, n=n,
                            inclination=inclination, azimuthal=azimuthal,
                            spin=final_spin, pol=pol, polnm=polnm)
    phase = two_pi * f_0 * times
    # exponential decay for t >= 0; a steep 10x-faster ramp tapers any
    # negative times before the nominal ringdown start
    envelope = numpy.where(times < 0, 10. * times / tau, -times / tau)
    if m == 0:
        # m == 0 has no distinct -m counterpart
        hlm = xlm * amp * numpy.exp(envelope + 1j * (phase + phi))
        return hlm.real, hlm.imag
    # amplitudes of the +/-m modes, measured as a deviation dbeta from
    # circular polarization
    if dbeta == 0:
        amp_p = amp_n = amp
    else:
        beta = pi / 4 + dbeta
        amp_p = 2 ** 0.5 * amp * numpy.cos(beta)
        amp_n = 2 ** 0.5 * amp * numpy.sin(beta)
    # phase of the -m mode
    phi_n = l * pi + dphi - phi
    h_plusm = xlm * amp_p * numpy.exp(envelope + 1j * (phase + phi))
    h_minusm = xlnm * amp_n * numpy.exp(envelope - 1j * (phase - phi_n))
    hlm = h_plusm + h_minusm
    return hlm.real, hlm.imag
def fd_damped_sinusoid(f_0, tau, amp, phi, freqs, t_0=0.,
                       l=2, m=2, n=0, inclination=0., azimuthal=0.,
                       harmonics='spherical', final_spin=None,
                       pol=None, polnm=None):
    r"""Return the frequency domain version of a damped sinusoid.

    This is the analytic Fourier transform of :py:func:`td_damped_sinusoid`
    (without a taper), as if an infinite sample rate were used to resolve
    the step function that turns on the damped sinusoid. See
    :py:func:`td_damped_sinusoid` for details.

    .. note::
        This function does not support using a different amplitude and
        phase for the -m modes; circular polarization is assumed
        (equivalent to setting ``dphi = dbeta = 0`` in
        :py:func:`td_damped_sinusoid`).

    Parameters
    ----------
    f_0 : float
        The central frequency of the damped sinusoid, in Hz.
    tau : float
        The damping time, in seconds.
    amp : float
        The intrinsic amplitude of the QNM (:math:`A^0_{lmn}`).
    phi : float
        The reference phase of the QNM (:math:`\phi_{lmn}`).
    freqs : array
        Array of frequencies to evaluate the damped sinusoid over.
    t_0 : float, optional
        The start time of ringdown. Default (0.) corresponds to the
        ringdown starting at the beginning of the equivalent segment in
        the time domain. A non-zero value shifts the ringdown by the
        corresponding number of seconds from the start of the segment.
    l : int, optional
        The l index; default is 2.
    m : int, optional
        The m index; default is 2.
    n : int, optional
        The overtone index; default is 0.
    inclination : float, optional
        The inclination angle. Ignored if ``harmonics='arbitrary'``.
        Default is 0.
    azimuthal : float, optional
        The azimuthal angle. Ignored if ``harmonics='arbitrary'``.
        Default is 0.
    harmonics : {'spherical', 'spheroidal', 'arbitrary'}, optional
        Which harmonics to use. See :py:func:`spher_harms` for details.
        Default is spherical.
    final_spin : float, optional
        The dimensionless spin of the black hole. Only needed if
        ``harmonics='spheroidal'``.
    pol : float, optional
        Angle to use for +m arbitrary harmonics. Only needed if
        ``harmonics='arbitrary'``. See :py:func:`spher_harms` for details.
    polnm : float, optional
        Angle to use for -m arbitrary harmonics. Only needed if
        ``harmonics='arbitrary'``. See :py:func:`spher_harms` for details.

    Returns
    -------
    hptilde : numpy.ndarray
        The plus polarization.
    hctilde : numpy.ndarray
        The cross polarization.
    """
    # treat unset angles the same as their defaults
    if inclination is None:
        inclination = 0.
    if azimuthal is None:
        azimuthal = 0.
    harm_p, harm_nm = spher_harms(harmonics=harmonics, l=l, m=m, n=n,
                                  inclination=inclination,
                                  azimuthal=azimuthal,
                                  spin=final_spin, pol=pol, polnm=polnm)
    # combine the +/-m harmonics assuming circular polarization, which
    # corresponds to h_{l-m} = (-1)^l h_{lm}^*
    parity = (-1)**l
    plus_harm = harm_p + parity * harm_nm
    cross_harm = harm_p - parity * harm_nm
    # analytic Fourier transform of the damped sinusoid turned on at t = 0
    denom = 1 + (4j * pi * freqs * tau) - \
        (4 * pi_sq * (freqs*freqs - f_0*f_0) * tau*tau)
    prefac = amp * tau / denom
    if t_0 != 0:
        # a time-domain shift is a frequency-domain phase ramp
        prefac = prefac * numpy.exp(-1j * two_pi * freqs * t_0)
    a1 = 1 + 2j * pi * freqs * tau
    a2 = two_pi * f_0 * tau
    cosphi = numpy.cos(phi)
    sinphi = numpy.sin(phi)
    hptilde = prefac * plus_harm * (a1 * cosphi - a2 * sinphi)
    hctilde = prefac * cross_harm * (a1 * sinphi + a2 * cosphi)
    return hptilde, hctilde
######################################################
#### Base multi-mode for all approximants
######################################################
def multimode_base(input_params, domain, freq_tau_approximant=False):
    """Return a superposition of damped sinusoids in either time or frequency
    domains with parameters set by input_params.

    Note that ``input_params`` is modified in place: the ``lmns`` entry is
    reformatted and default ``inclination``/``azimuthal`` values are added
    if they are not provided.

    Parameters
    ----------
    input_params : dict
        Dictionary of parameters to generate the ringdowns with. See
        :py:func:`td_damped_sinusoid` and :py:func:`fd_damped_sinusoid` for
        supported parameters.
    domain : string
        Choose domain of the waveform, either 'td' for time domain
        or 'fd' for frequency domain. If 'td' ('fd'), the damped sinusoids
        will be generated with :py:func:`td_damped_sinusoid`
        (:py:func:`fd_damped_sinusoid`).
    freq_tau_approximant : {False, bool}, optional
        Choose the waveform approximant to use. Either based on
        mass/spin (set to False, default), or on frequencies/damping times
        of the modes (set to True).

    Returns
    -------
    hplus : TimeSeries
        The plus phase of a ringdown with the lm modes specified and
        n overtones in the chosen domain (time or frequency).
    hcross : TimeSeries
        The cross phase of a ringdown with the lm modes specified and
        n overtones in the chosen domain (time or frequency).

    Raises
    ------
    ValueError
        If ``domain`` is neither 'td' nor 'fd'.
    """
    input_params['lmns'] = format_lmns(input_params['lmns'])
    amps, phis, dbetas, dphis = lm_amps_phases(**input_params)
    pols, polnms = lm_arbitrary_harmonics(**input_params)
    # which harmonics to use; default to spherical
    harmonics = input_params.get('harmonics', 'spherical')
    # we'll need the final spin for spheroidal harmonics
    if harmonics == 'spheroidal':
        final_spin = input_params['final_spin']
    else:
        final_spin = None
    # add inclination and azimuthal if they aren't provided
    input_params.setdefault('inclination', 0.)
    input_params.setdefault('azimuthal', 0.)
    # figure out the frequencies and damping times
    if freq_tau_approximant:
        # frequencies and damping times are provided directly
        freqs, taus = lm_freqs_taus(**input_params)
        norm = 1.
    else:
        # derive frequencies and damping times from the final mass and spin
        freqs, taus = get_lm_f0tau_allmodes(input_params['final_mass'],
            input_params['final_spin'], input_params['lmns'])
        # include the Kerr factor (final_mass/distance) if a distance was
        # given
        norm = Kerr_factor(input_params['final_mass'],
            input_params['distance']) if 'distance' in input_params \
            else 1.
    # apply any fractional GR deviations to the mode frequencies and
    # damping times
    for mode, freq in freqs.items():
        dparam = 'delta_f{}'.format(mode)
        if dparam in input_params:
            freqs[mode] += input_params[dparam]*freq
    for mode, tau in taus.items():
        dparam = 'delta_tau{}'.format(mode)
        if dparam in input_params:
            taus[mode] += input_params[dparam]*tau
    # setup the output
    if domain == 'td':
        outplus, outcross = td_output_vector(freqs, taus,
            input_params['taper'], input_params['delta_t'],
            input_params['t_final'])
        sample_times = outplus.sample_times.numpy()
    elif domain == 'fd':
        outplus, outcross = fd_output_vector(freqs, taus,
            input_params['delta_f'], input_params['f_final'])
        # bins below f_lower are left as zeros
        kmin = int(input_params['f_lower'] / input_params['delta_f'])
        sample_freqs = outplus.sample_frequencies.numpy()[kmin:]
    else:
        raise ValueError('unrecognised domain argument {}; '
                         'must be either fd or td'.format(domain))
    # cycle over the modes, generating the waveforms
    for lmn in freqs:
        if amps[lmn] == 0.:
            # mode doesn't contribute; skip
            continue
        if domain == 'td':
            hplus, hcross = td_damped_sinusoid(
                freqs[lmn], taus[lmn], amps[lmn], phis[lmn], sample_times,
                l=int(lmn[0]), m=int(lmn[1]), n=int(lmn[2]),
                inclination=input_params['inclination'],
                azimuthal=input_params['azimuthal'],
                dphi=dphis[lmn], dbeta=dbetas[lmn],
                harmonics=harmonics, final_spin=final_spin,
                pol=pols[lmn], polnm=polnms[lmn])
            outplus += hplus
            outcross += hcross
        elif domain == 'fd':
            hplus, hcross = fd_damped_sinusoid(
                freqs[lmn], taus[lmn], amps[lmn], phis[lmn], sample_freqs,
                l=int(lmn[0]), m=int(lmn[1]), n=int(lmn[2]),
                inclination=input_params['inclination'],
                azimuthal=input_params['azimuthal'],
                harmonics=harmonics, final_spin=final_spin,
                pol=pols[lmn], polnm=polnms[lmn])
            outplus[kmin:] += hplus
            outcross[kmin:] += hcross
    return norm * outplus, norm * outcross
######################################################
#### Approximants
######################################################
def get_td_from_final_mass_spin(template=None, **kwargs):
    """Return time domain ringdown with all the modes specified.

    The mode frequencies and damping times are computed from the final mass
    and spin of the black hole; amplitudes and phases for each mode are
    provided by the user.

    Parameters
    ----------
    template : object
        An object that has attached properties. This can be used to
        substitute for keyword arguments. A common example would be a row
        in an xml table.
    final_mass : float
        Mass of the final black hole in solar masses.
    final_spin : float
        Dimensionless spin of the final black hole.
    distance : {None, float}, optional
        Luminosity distance of the system. If specified, the returned
        ringdown will include the Kerr factor (final_mass/distance).
    lmns : list
        Desired lmn modes as strings; all modes up to l = m = 7 are
        available. The n specifies the number of overtones desired for the
        corresponding lm pair, not the overtone number; maximum n=8. For
        example, lmns = ['223','331'] are the modes 220, 221, 222, and 330.
    ref_amp : str, optional
        Which mode to use as the reference for computing amplitudes; must
        be 'amp220' if distance is given. Default is 'amp220'. The
        amplitude of the reference mode is specified directly, while all
        other amplitudes are specified as ratios with respect to it. An
        amplitude for the reference mode must always be provided, even if
        that mode is not being generated. See table II in
        https://arxiv.org/abs/1107.0854 for an estimate of the reference
        amplitude when a distance is given.
    amplmn : float
        The amplitude of each mode; required for all modes specified plus
        the reference mode. Amplitudes are specified relative to the
        reference mode (see ``ref_amp``).
    philmn : float
        Phase of the lmn overtone, as many as the number of modes.
    inclination : float
        Inclination of the system in radians. Ignored if
        ``harmonics='arbitrary'``. Default is 0.
    azimuthal : float, optional
        The azimuthal angle in radians. Ignored if
        ``harmonics='arbitrary'``. Usually unnecessary since it is
        degenerate with the initial phase ``philmn``. Default is 0.
    dphi[lmn] : float, optional
        The difference in phase between the +m and -m mode. May be given
        per mode (e.g. ``dphi220``) and/or as a single ``dphi`` applied to
        all modes without their own setting. Default is 0 for all modes.
        See :py:func:`td_damped_sinusoid` for details.
    dbeta[lmn] : float, optional
        The angular difference in the amplitudes of the +m and -m mode.
        May be given per mode (e.g. ``dbeta220``) and/or as a single
        ``dbeta``. Default is 0 for all modes. See
        :py:func:`td_damped_sinusoid` for details.
    pollmn : float, optional
        Angle to use for +m arbitrary harmonics of the lmn mode in radians
        (example: ``pol220 = 0.1``). Only needed if
        ``harmonics='arbitrary'``, ignored otherwise. See
        :py:func:`spher_harms` for details.
    polnmlmn : float, optional
        Angle to use for -m arbitrary harmonics of the lmn mode in radians
        (example: ``polnm220 = 0.1``). Only needed if
        ``harmonics='arbitrary'``, ignored otherwise. See
        :py:func:`spher_harms` for details.
    harmonics : {'spherical', 'spheroidal', 'arbitrary'}, optional
        Which harmonics to use; default is spherical. See
        :py:func:`spher_harms` for details.
    delta_flmn: {None, float}, optional
        Fractional GR deviation for the frequency of the lmn mode. If
        given, the lmn frequency becomes
        new_flmn = flmn + delta_flmn * flmn, with flmn the GR predicted
        value for the corresponding mass and spin.
    delta_taulmn: {None, float}, optional
        Fractional GR deviation for the damping time of the lmn mode. If
        given, the lmn tau becomes
        new_taulmn = taulmn + delta_taulmn * taulmn, with taulmn the GR
        predicted value for the corresponding mass and spin.
    delta_t : {None, float}, optional
        The time step used to generate the ringdown. If None, it is set to
        the inverse of the frequency at which the amplitude is 1/1000 of
        the peak amplitude (the minimum of all modes).
    t_final : {None, float}, optional
        The ending time of the output series. If None, it is set to the
        time at which the amplitude is 1/1000 of the peak amplitude (the
        maximum of all modes).
    taper : bool, optional
        Add a rapid ringup with timescale tau/10 at the beginning of the
        waveform to avoid the abrupt turn on of the ringdown; each mode
        and overtone gets its own taper depending on its tau. Default is
        False.

    Returns
    -------
    hplus : TimeSeries
        The plus phase of the ringdown in the time domain.
    hcross : TimeSeries
        The cross phase of the ringdown in the time domain.
    """
    # gather parameters from the template and keyword arguments, then
    # delegate waveform construction to the common multi-mode generator
    params = props(template, mass_spin_required_args, td_args, **kwargs)
    return multimode_base(params, domain='td')
def get_fd_from_final_mass_spin(template=None, **kwargs):
    """Return frequency domain ringdown with all the modes specified.

    The mode frequencies and damping times are computed from the final mass
    and spin of the black hole. The intrinsic parameters (``final_mass``,
    ``final_spin``, ``distance``, ``lmns``, ``ref_amp``, ``amp{lmn}``,
    ``phi{lmn}``, ``inclination``, ``azimuthal``, ``pol{lmn}``,
    ``polnm{lmn}``, ``harmonics``, ``delta_f{lmn}``, ``delta_tau{lmn}``)
    have the same meaning as in :py:func:`get_td_from_final_mass_spin`.

    Parameters
    ----------
    template : object
        An object that has attached properties. This can be used to
        substitute for keyword arguments. A common example would be a row
        in an xml table.
    delta_f : {None, float}, optional
        The frequency step used to generate the ringdown (not to be
        confused with the delta_flmn parameters that simulate GR
        violations). If None, it is set to the inverse of the time at
        which the amplitude is 1/1000 of the peak amplitude (the minimum
        of all modes).
    f_lower : {None, float}, optional
        The starting frequency of the output frequency series. If None,
        it is set to delta_f.
    f_final : {None, float}, optional
        The ending frequency of the output frequency series. If None, it
        is set to the frequency at which the amplitude is 1/1000 of the
        peak amplitude (the maximum of all modes).

    Returns
    -------
    hplustilde : FrequencySeries
        The plus phase of the ringdown in the frequency domain.
    hcrosstilde : FrequencySeries
        The cross phase of the ringdown in the frequency domain.
    """
    # gather parameters, then build the waveform with the common
    # multi-mode generator
    params = props(template, mass_spin_required_args, fd_args, **kwargs)
    return multimode_base(params, domain='fd')
def get_td_from_freqtau(template=None, **kwargs):
    """Return time domain ringdown with all the modes specified.

    Unlike :py:func:`get_td_from_final_mass_spin`, the central frequency
    and damping time of each mode are given directly rather than being
    derived from a final mass and spin. The amplitude, phase, orientation,
    and harmonics parameters (``ref_amp``, ``amp{lmn}``, ``phi{lmn}``,
    ``inclination``, ``azimuthal``, ``dphi``/``dphi{lmn}``,
    ``dbeta``/``dbeta{lmn}``, ``pol{lmn}``, ``polnm{lmn}``,
    ``harmonics``), as well as ``delta_t``, ``t_final``, and ``taper``,
    have the same meaning as in :py:func:`get_td_from_final_mass_spin`.

    Parameters
    ----------
    template : object
        An object that has attached properties. This can be used to
        substitute for keyword arguments. A common example would be a row
        in an xml table.
    lmns : list
        Desired lmn modes as strings; all modes up to l = m = 7 are
        available. The n specifies the number of overtones desired for the
        corresponding lm pair, not the overtone number; maximum n=8. For
        example, lmns = ['223','331'] are the modes 220, 221, 222, and 330.
    f_lmn : float
        Central frequency of the lmn overtone, as many as number of modes.
    tau_lmn : float
        Damping time of the lmn overtone, as many as number of modes.
    final_spin : float, optional
        Dimensionless spin of the final black hole. Required if
        ``harmonics='spheroidal'``, ignored otherwise.

    Returns
    -------
    hplus : TimeSeries
        The plus phase of the ringdown in the time domain.
    hcross : TimeSeries
        The cross phase of the ringdown in the time domain.
    """
    # gather parameters, then build the waveform from the frequencies and
    # damping times with the common multi-mode generator
    params = props(template, freqtau_required_args, td_args, **kwargs)
    return multimode_base(params, domain='td', freq_tau_approximant=True)
def get_fd_from_freqtau(template=None, **kwargs):
    """Return frequency domain ringdown with all the modes specified.

    Unlike :py:func:`get_fd_from_final_mass_spin`, the central frequency
    and damping time of each mode are given directly rather than being
    derived from a final mass and spin. The amplitude, phase, orientation,
    and harmonics parameters (``ref_amp``, ``amp{lmn}``, ``phi{lmn}``,
    ``inclination``, ``azimuthal``, ``dphi``/``dphi{lmn}``,
    ``dbeta``/``dbeta{lmn}``, ``pol{lmn}``, ``polnm{lmn}``,
    ``harmonics``) have the same meaning as in
    :py:func:`get_td_from_final_mass_spin`.

    Parameters
    ----------
    template : object
        An object that has attached properties. This can be used to
        substitute for keyword arguments. A common example would be a row
        in an xml table.
    lmns : list
        Desired lmn modes as strings; all modes up to l = m = 7 are
        available. The n specifies the number of overtones desired for the
        corresponding lm pair, not the overtone number; maximum n=8. For
        example, lmns = ['223','331'] are the modes 220, 221, 222, and 330.
    f_lmn : float
        Central frequency of the lmn overtone, as many as number of modes.
    tau_lmn : float
        Damping time of the lmn overtone, as many as number of modes.
    final_spin : float, optional
        Dimensionless spin of the final black hole. Required if
        ``harmonics='spheroidal'``, ignored otherwise.
    delta_f : {None, float}, optional
        The frequency step used to generate the ringdown. If None, it is
        set to the inverse of the time at which the amplitude is 1/1000 of
        the peak amplitude (the minimum of all modes).
    f_lower : {None, float}, optional
        The starting frequency of the output frequency series. If None,
        it is set to delta_f.
    f_final : {None, float}, optional
        The ending frequency of the output frequency series. If None, it
        is set to the frequency at which the amplitude is 1/1000 of the
        peak amplitude (the maximum of all modes).

    Returns
    -------
    hplustilde : FrequencySeries
        The plus phase of the ringdown in the frequency domain.
    hcrosstilde : FrequencySeries
        The cross phase of the ringdown in the frequency domain.
    """
    # gather parameters, then build the waveform from the frequencies and
    # damping times with the common multi-mode generator
    params = props(template, freqtau_required_args, fd_args, **kwargs)
    return multimode_base(params, domain='fd', freq_tau_approximant=True)
# Approximant names ###########################################################
# Maps approximant name -> generator function for each domain. Presumably
# consumed by the waveform-generation interface to look up ringdown models
# by name — confirm against the waveform module.
# Frequency-domain ringdown approximants
ringdown_fd_approximants = {
    'FdQNMfromFinalMassSpin': get_fd_from_final_mass_spin,
    'FdQNMfromFreqTau': get_fd_from_freqtau}
# Time-domain ringdown approximants
ringdown_td_approximants = {
    'TdQNMfromFinalMassSpin': get_td_from_final_mass_spin,
    'TdQNMfromFreqTau': get_td_from_freqtau}
| 54,746
| 43.329555
| 80
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/plugin.py
|
""" Utilities for handling waveform plugins
"""
def add_custom_waveform(approximant, function, domain,
                        sequence=False, has_det_response=False,
                        force=False):
    """ Make custom waveform available to pycbc

    Parameters
    ----------
    approximant : str
        The name of the waveform
    function : function
        The function to generate the waveform
    domain : str
        Either 'frequency' or 'time' to indicate the domain of the waveform.
    sequence : bool, False
        Function evaluates waveform at only chosen points (instead of a
        equal-spaced grid).
    has_det_response : bool, False
        Check if waveform generator has built-in detector response.
    force : bool, False
        If True, overwrite any waveform already registered under the same
        approximant name instead of raising an error.

    Raises
    ------
    RuntimeError
        If the approximant name is already in use and ``force`` is False.
    ValueError
        If ``domain`` is neither 'time' nor 'frequency'.
    """
    from pycbc.waveform.waveform import (cpu_fd, cpu_td, fd_sequence,
                                         fd_det, fd_det_sequence)
    # pick the registry appropriate for the waveform's domain and
    # capabilities
    if domain == 'time':
        registry = cpu_td
    elif domain == 'frequency':
        if sequence:
            registry = fd_det_sequence if has_det_response else fd_sequence
        else:
            registry = fd_det if has_det_response else cpu_fd
    else:
        raise ValueError("Invalid domain ({}), should be "
                         "'time' or 'frequency'".format(domain))
    if not force and approximant in registry:
        raise RuntimeError("Can't load plugin waveform {}, the name is"
                           " already in use.".format(approximant))
    registry[approximant] = function
def add_length_estimator(approximant, function):
    """ Add length estimator for an approximant

    Parameters
    ----------
    approximant : str
        Name of approximant
    function : function
        A function which takes kwargs and returns the waveform length
    """
    from pycbc.waveform.waveform import (_filter_time_lengths,
                                         td_fd_waveform_transform)
    if approximant in _filter_time_lengths:
        msg = ("Can't load length estimator {}, the name is"
               " already in use.".format(approximant))
        raise RuntimeError(msg)
    _filter_time_lengths[approximant] = function
    # register td/fd transformed versions of the approximant now that a
    # length estimate is available for it
    td_fd_waveform_transform(approximant)
def retrieve_waveform_plugins():
    """ Process external waveform plugins
    """
    import pkg_resources
    # frequency-domain entry-point groups and the registration options
    # each group maps to
    fd_groups = [
        ('pycbc.waveform.fd', {}),
        ('pycbc.waveform.fd_det', {'has_det_response': True}),
        ('pycbc.waveform.fd_sequence', {'sequence': True}),
        ('pycbc.waveform.fd_det_sequence',
         {'sequence': True, 'has_det_response': True}),
    ]
    for group, options in fd_groups:
        for plugin in pkg_resources.iter_entry_points(group):
            add_custom_waveform(plugin.name, plugin.resolve(), 'frequency',
                                **options)
    # time-domain waveforms
    for plugin in pkg_resources.iter_entry_points('pycbc.waveform.td'):
        add_custom_waveform(plugin.name, plugin.resolve(), 'time')
    # waveform length estimators
    for plugin in pkg_resources.iter_entry_points('pycbc.waveform.length'):
        add_length_estimator(plugin.name, plugin.resolve())
| 4,171
| 37.275229
| 84
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/compress.py
|
# Copyright (C) 2016 Alex Nitz, Collin Capano
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
""" Utilities for handling frequency compressed an unequally spaced frequency
domain waveforms.
"""
import lal, numpy, logging, h5py
from pycbc import filter
from scipy import interpolate
from pycbc.types import FrequencySeries, real_same_precision_as
from pycbc.waveform import utils
from pycbc.scheme import schemed
def rough_time_estimate(m1, m2, flow, fudge_length=1.1, fudge_min=0.02):
    """A very rough estimate of the duration of the waveform.
    An estimate of the waveform duration starting from flow. This is intended
    to be fast but not necessarily accurate. It should be an overestimate of
    the length. It is derived from a simplification of the 0PN post-newtonian
    terms and includes a fudge factor for possible ringdown, etc.
    Parameters
    ----------
    m1: float
        mass of first component object in solar masses
    m2: float
        mass of second component object in solar masses
    flow: float
        starting frequency of the waveform
    fudge_length: optional, {1.1, float}
        Factor to multiply length estimate by to ensure it is a conservative
        value
    fudge_min: optional, {0.02, float}
        Minimum signal duration that can be returned. This should be long
        enough to encompass the ringdown and errors in the precise end time.
    Returns
    -------
    time: float
        Time from flow until the end of the waveform
    """
    total_mass = m1 + m2
    mass_sec = total_mass * lal.MTSUN_SI
    # Leading-order (0PN) chirp time measured from flow
    chirp_time = (5.0 / 256.0) * total_mass * total_mass * mass_sec \
        / (m1 * m2) / (numpy.pi * mass_sec * flow) ** (8.0 / 3.0)
    # Fall back to a fixed small duration if the estimate went negative;
    # otherwise pad by fudge_min and scale up to stay conservative.
    if chirp_time < 0:
        return .022
    return (chirp_time + fudge_min) * fudge_length
def mchirp_compression(m1, m2, fmin, fmax, min_seglen=0.02, df_multiple=None):
    """Return the frequencies needed to compress a waveform with the given
    chirp mass. This is based on the estimate in rough_time_estimate.
    Parameters
    ----------
    m1: float
        mass of first component object in solar masses
    m2: float
        mass of second component object in solar masses
    fmin : float
        The starting frequency of the compressed waveform.
    fmax : float
        The ending frequency of the compressed waveform.
    min_seglen : float
        The inverse of this gives the maximum frequency step that is used.
    df_multiple : {None, float}
        Make the compressed sampling frequencies a multiple of the given value.
        If None provided, the returned sample points can have any floating
        point value.
    Returns
    -------
    array
        The frequencies at which to evaluate the compressed waveform.
    """
    freqs = []
    current = fmin
    while current < fmax:
        if df_multiple is not None:
            # snap down onto the df_multiple grid
            current = int(current / df_multiple) * df_multiple
        freqs.append(current)
        # advance by the inverse of the (conservative) remaining duration
        current += 1.0 / rough_time_estimate(m1, m2, current,
                                             fudge_min=min_seglen)
    # always include fmax as the final point
    if freqs[-1] < fmax:
        freqs.append(fmax)
    return numpy.array(freqs)
def spa_compression(htilde, fmin, fmax, min_seglen=0.02,
                    sample_frequencies=None):
    """Returns the frequencies needed to compress the given frequency domain
    waveform. This is done by estimating t(f) of the waveform using the
    stationary phase approximation.
    Parameters
    ----------
    htilde : FrequencySeries
        The waveform to compress.
    fmin : float
        The starting frequency of the compressed waveform.
    fmax : float
        The ending frequency of the compressed waveform.
    min_seglen : float
        The inverse of this gives the maximum frequency step that is used.
    sample_frequencies : {None, array}
        The frequencies that the waveform is evaluated at. If None, will
        retrieve the frequencies from the waveform's sample_frequencies
        attribute.
    Returns
    -------
    array
        The frequencies at which to evaluate the compressed waveform.
    """
    if sample_frequencies is None:
        sample_frequencies = htilde.sample_frequencies.numpy()
    # restrict to the requested band [fmin, fmax)
    kmin = int(fmin/htilde.delta_f)
    kmax = int(fmax/htilde.delta_f)
    # |t(f)| estimated via the stationary phase approximation
    tf = abs(utils.time_from_frequencyseries(htilde,
            sample_frequencies=sample_frequencies).data[kmin:kmax])
    sample_frequencies = sample_frequencies[kmin:kmax]
    sample_points = []
    f = fmin
    while f < fmax:
        # snap onto the waveform's frequency grid
        f = int(f/htilde.delta_f)*htilde.delta_f
        sample_points.append(f)
        # step by the inverse of the longest remaining time-to-merger,
        # padded by min_seglen so the step never exceeds 1/min_seglen
        jj = numpy.searchsorted(sample_frequencies, f)
        f += 1./(tf[jj:].max()+min_seglen)
    # add the last point
    if sample_points[-1] < fmax:
        sample_points.append(fmax)
    return numpy.array(sample_points)
# Registry mapping a compression-scheme name (as used by callers selecting
# an algorithm) to the function that generates its sample frequencies.
compression_algorithms = {
    'mchirp': mchirp_compression,
    'spa': spa_compression
    }
def _vecdiff(htilde, hinterp, fmin, fmax, psd=None):
    """Magnitude of the difference between the self-overlap of ``htilde``
    and its overlap with ``hinterp`` over the band [fmin, fmax)."""
    self_olap = filter.overlap_cplx(htilde, htilde,
                                    low_frequency_cutoff=fmin,
                                    high_frequency_cutoff=fmax,
                                    normalized=False, psd=psd)
    cross_olap = filter.overlap_cplx(htilde, hinterp,
                                     low_frequency_cutoff=fmin,
                                     high_frequency_cutoff=fmax,
                                     normalized=False, psd=psd)
    return abs(self_olap - cross_olap)
def vecdiff(htilde, hinterp, sample_points, psd=None):
    """Computes a statistic indicating between which sample points a waveform
    and the interpolated waveform differ the most.
    """
    nbins = sample_points.size - 1
    diffs = numpy.zeros(nbins, dtype=float)
    for idx in range(nbins):
        lof = sample_points[idx]
        hif = sample_points[idx + 1]
        diffs[idx] = abs(_vecdiff(htilde, hinterp, lof, hif, psd=psd))
    return diffs
def compress_waveform(htilde, sample_points, tolerance, interpolation,
                      precision, decomp_scratch=None, psd=None):
    """Retrieves the amplitude and phase at the desired sample points, and adds
    frequency points in order to ensure that the interpolated waveform
    has a mismatch with the full waveform that is <= the desired tolerance. The
    mismatch is computed by finding 1-overlap between `htilde` and the
    decompressed waveform; no maximization over phase/time is done, a
    PSD may be used.
    .. note::
        The decompressed waveform is only guaranteed to have a true mismatch
        <= the tolerance for the given `interpolation` and for no PSD.
        However, since no maximization over time/phase is performed when
        adding points, the actual mismatch between the decompressed waveform
        and `htilde` is better than the tolerance, using no PSD. Using a PSD
        does increase the mismatch, and can lead to mismatches > than the
        desired tolerance, but typically by only a factor of a few worse.
    Parameters
    ----------
    htilde : FrequencySeries
        The waveform to compress.
    sample_points : array
        The frequencies at which to store the amplitude and phase. More points
        may be added to this, depending on the desired tolerance.
    tolerance : float
        The maximum mismatch to allow between a decompressed waveform and
        `htilde`.
    interpolation : str
        The interpolation to use for decompressing the waveform when computing
        overlaps.
    precision : str
        The precision being used to generate and store the compressed waveform
        points.
    decomp_scratch : {None, FrequencySeries}
        Optionally provide scratch space for decompressing the waveform. The
        provided frequency series must have the same `delta_f` and length
        as `htilde`.
    psd : {None, FrequencySeries}
        The psd to use for calculating the overlap between the decompressed
        waveform and the original full waveform.
    Returns
    -------
    CompressedWaveform
        The compressed waveform data; see `CompressedWaveform` for details.
    """
    fmin = sample_points.min()
    df = htilde.delta_f
    # indices of the sample points on htilde's frequency grid
    sample_index = (sample_points / df).astype(int)
    amp = utils.amplitude_from_frequencyseries(htilde)
    phase = utils.phase_from_frequencyseries(htilde)
    comp_amp = amp.take(sample_index)
    comp_phase = phase.take(sample_index)
    # if scratch space was provided, fd_decompress takes its df from it,
    # so pass df=None in that case
    if decomp_scratch is None:
        outdf = df
    else:
        outdf = None
    hdecomp = fd_decompress(comp_amp, comp_phase, sample_points,
                            out=decomp_scratch, df=outdf, f_lower=fmin,
                            interpolation=interpolation)
    # trim both series to a common length before computing overlaps
    kmax = min(len(htilde), len(hdecomp))
    htilde = htilde[:kmax]
    hdecomp = hdecomp[:kmax]
    mismatch = 1. - filter.overlap(hdecomp, htilde, psd=psd,
                                   low_frequency_cutoff=fmin)
    if mismatch > tolerance:
        # we'll need the difference in the waveforms as a function of frequency
        vecdiffs = vecdiff(htilde, hdecomp, sample_points, psd=psd)
        # We will find where in the frequency series the interpolated waveform
        # has the smallest overlap with the full waveform, add a sample point
        # there, and re-interpolate. We repeat this until the overall mismatch
        # is <= the desired tolerance
        added_points = []
        while mismatch > tolerance:
            minpt = vecdiffs.argmax()
            # add a point at the frequency halfway between minpt and minpt+1
            add_freq = sample_points[[minpt, minpt+1]].mean()
            addidx = int(round(add_freq/df))
            # ensure that only new points are added
            if addidx in sample_index:
                # the midpoint collides with an existing sample; walk down
                # the intervals sorted by difference until a fresh index is
                # found, or give up if every interval collides
                diffidx = vecdiffs.argsort()
                addpt = -1
                while addidx in sample_index:
                    addpt -= 1
                    try:
                        minpt = diffidx[addpt]
                    except IndexError:
                        raise ValueError("unable to compress to desired tolerance")
                    add_freq = sample_points[[minpt, minpt+1]].mean()
                    addidx = int(round(add_freq/df))
            # splice the new index into the sorted sample_index array
            new_index = numpy.zeros(sample_index.size+1, dtype=int)
            new_index[:minpt+1] = sample_index[:minpt+1]
            new_index[minpt+1] = addidx
            new_index[minpt+2:] = sample_index[minpt+1:]
            sample_index = new_index
            sample_points = (sample_index * df).astype(
                real_same_precision_as(htilde))
            # get the new compressed points
            comp_amp = amp.take(sample_index)
            comp_phase = phase.take(sample_index)
            # update the vecdiffs and mismatch
            hdecomp = fd_decompress(comp_amp, comp_phase, sample_points,
                                    out=decomp_scratch, df=outdf,
                                    f_lower=fmin, interpolation=interpolation)
            hdecomp = hdecomp[:kmax]
            # only the two intervals touching the new point change; recompute
            # just those and keep the rest of the previous differences
            new_vecdiffs = numpy.zeros(vecdiffs.size+1)
            new_vecdiffs[:minpt] = vecdiffs[:minpt]
            new_vecdiffs[minpt+2:] = vecdiffs[minpt+1:]
            new_vecdiffs[minpt:minpt+2] = vecdiff(htilde, hdecomp,
                                                  sample_points[minpt:minpt+2],
                                                  psd=psd)
            vecdiffs = new_vecdiffs
            mismatch = 1. - filter.overlap(hdecomp, htilde, psd=psd,
                                           low_frequency_cutoff=fmin)
            added_points.append(addidx)
        logging.info("mismatch: %f, N points: %i (%i added)" %(mismatch,
                     len(comp_amp), len(added_points)))
    return CompressedWaveform(sample_points, comp_amp, comp_phase,
                              interpolation=interpolation,
                              tolerance=tolerance, mismatch=mismatch,
                              precision=precision)
# Map from a numpy dtype name to the precision label used throughout this
# module ('single' or 'double').
_precision_map = {
    'float32': 'single',
    'float64': 'double',
    'complex64': 'single',
    'complex128': 'double'
}
# Complex dtype to use when allocating output for each precision label.
_complex_dtypes = {
    'single': numpy.complex64,
    'double': numpy.complex128
}
# Real dtype to use when casting stored sample points for each precision.
_real_dtypes = {
    'single': numpy.float32,
    'double': numpy.float64
}
@schemed("pycbc.waveform.decompress_")
def inline_linear_interp(amp, phase, sample_frequencies, output,
                         df, f_lower, imin, start_index):
    """Generate a frequency-domain waveform via linear interpolation
    from sampled amplitude and phase. The sample frequency locations
    for the amplitude and phase must be the same. This function may
    be less accurate than scipy's linear interpolation, but should be
    much faster. Additionally, it is 'schemed' and so may run under
    either CPU or GPU schemes.
    This function is not ordinarily called directly, but rather by
    giving the argument 'interpolation' the value 'inline_linear'
    when calling the function 'fd_decompress' below.
    Parameters
    ----------
    amp : array
        The amplitude of the waveform at the sample frequencies.
    phase : array
        The phase of the waveform at the sample frequencies.
    sample_frequencies : array
        The frequency (in Hz) of the waveform at the sample frequencies.
    output : {None, FrequencySeries}
        The output array to save the decompressed waveform to. If this contains
        slots for frequencies > the maximum frequency in sample_frequencies,
        the rest of the values are zeroed. If not provided, must provide a df.
    df : {None, float}
        The frequency step to use for the decompressed waveform. Must be
        provided if out is None.
    f_lower : float
        The frequency to start the decompression at. All values at
        frequencies less than this will be 0 in the decompressed waveform.
    imin : int
        The index at which to start in the sampled frequency series. Must
        therefore be 0 <= imin < len(sample_frequencies)
    start_index : int
        The index at which to start in the output frequency;
        i.e., ceil(f_lower/df).
    Returns
    -------
    output : FrequencySeries
        If out was provided, writes to that array. Otherwise, a new
        FrequencySeries with the decompressed waveform.
    """
    # Intentionally empty: the @schemed decorator dispatches the call to the
    # scheme-specific implementation in pycbc.waveform.decompress_{cpu,cuda}.
    return
def fd_decompress(amp, phase, sample_frequencies, out=None, df=None,
                  f_lower=None, interpolation='inline_linear'):
    """Decompresses an FD waveform using the given amplitude, phase, and the
    frequencies at which they are sampled at.
    Parameters
    ----------
    amp : array
        The amplitude of the waveform at the sample frequencies.
    phase : array
        The phase of the waveform at the sample frequencies.
    sample_frequencies : array
        The frequency (in Hz) of the waveform at the sample frequencies.
    out : {None, FrequencySeries}
        The output array to save the decompressed waveform to. If this contains
        slots for frequencies > the maximum frequency in sample_frequencies,
        the rest of the values are zeroed. If not provided, must provide a df.
    df : {None, float}
        The frequency step to use for the decompressed waveform. Must be
        provided if out is None.
    f_lower : {None, float}
        The frequency to start the decompression at. If None, will use whatever
        the lowest frequency is in sample_frequencies. All values at
        frequencies less than this will be 0 in the decompressed waveform.
    interpolation : {'inline_linear', str}
        The interpolation to use for the amplitude and phase. Default is
        'inline_linear'. If 'inline_linear' a custom interpolater is used.
        Otherwise, ``scipy.interpolate.interp1d`` is used; for other options,
        see possible values for that function's ``kind`` argument.
    Returns
    -------
    out : FrequencySeries
        If out was provided, writes to that array. Otherwise, a new
        FrequencySeries with the decompressed waveform.
    """
    # all three input arrays must share one precision
    precision = _precision_map[sample_frequencies.dtype.name]
    if _precision_map[amp.dtype.name] != precision or \
            _precision_map[phase.dtype.name] != precision:
        raise ValueError("amp, phase, and sample_points must all have the "
            "same precision")
    if out is None:
        if df is None:
            raise ValueError("Either provide output memory or a df")
        # allocate enough bins to cover the highest sample frequency
        hlen = int(numpy.ceil(sample_frequencies.max()/df+1))
        out = FrequencySeries(numpy.zeros(hlen,
            dtype=_complex_dtypes[precision]), copy=False,
            delta_f=df)
    else:
        # check for precision compatibility
        if out.precision == 'double' and precision == 'single':
            raise ValueError("cannot cast single precision to double")
        df = out.delta_f
        hlen = len(out)
    if f_lower is None:
        imin = 0  # pylint:disable=unused-variable
        f_lower = sample_frequencies[0]
        start_index = 0
    else:
        if f_lower >= sample_frequencies.max():
            raise ValueError("f_lower is > than the maximum sample frequency")
        if f_lower < sample_frequencies.min():
            raise ValueError("f_lower is < than the minimum sample frequency")
        # index of the sample point at or just below f_lower
        imin = int(numpy.searchsorted(sample_frequencies, f_lower,
            side='right')) - 1  # pylint:disable=unused-variable
        start_index = int(numpy.ceil(f_lower/df))
    if start_index >= hlen:
        raise ValueError('requested f_lower >= largest frequency in out')
    # interpolate the amplitude and the phase
    if interpolation == "inline_linear":
        # Call the scheme-dependent function
        inline_linear_interp(amp, phase, sample_frequencies, out,
                             df, f_lower, imin, start_index)
    else:
        # use scipy for fancier interpolation
        sample_frequencies = numpy.array(sample_frequencies)
        amp = numpy.array(amp)
        phase = numpy.array(phase)
        outfreq = out.sample_frequencies.numpy()
        amp_interp = interpolate.interp1d(sample_frequencies, amp,
                                          kind=interpolation,
                                          bounds_error=False,
                                          fill_value=0.,
                                          assume_sorted=True)
        phase_interp = interpolate.interp1d(sample_frequencies, phase,
                                            kind=interpolation,
                                            bounds_error=False,
                                            fill_value=0.,
                                            assume_sorted=True)
        A = amp_interp(outfreq)
        phi = phase_interp(outfreq)
        # recombine amplitude and phase into the complex strain A*exp(i*phi)
        out.data[:] = A*numpy.cos(phi) + (1j)*A*numpy.sin(phi)
    return out
class CompressedWaveform(object):
    """Class that stores information about a compressed waveform.
    Parameters
    ----------
    sample_points : {array, h5py.Dataset}
        The frequency points at which the compressed waveform is sampled.
    amplitude : {array, h5py.Dataset}
        The amplitude of the waveform at the given `sample_points`.
    phase : {array, h5py.Dataset}
        The phase of the waveform at the given `sample_points`.
    interpolation : {None, str}
        The interpolation that was used when compressing the waveform for
        computing tolerance. This is also the default interpolation used when
        decompressing; see `decompress` for details.
    tolerance : {None, float}
        The tolerance that was used when compressing the waveform.
    mismatch : {None, float}
        The actual mismatch between the decompressed waveform (using the given
        `interpolation`) and the full waveform.
    precision : {'double', str}
        The precision used to generate the compressed waveform's amplitude and
        phase points. Default is 'double'.
    load_to_memory : {True, bool}
        If `sample_points`, `amplitude`, and/or `phase` is an hdf dataset, they
        will be cached in memory the first time they are accessed. Default is
        True.
    Attributes
    ----------
    load_to_memory : bool
        Whether or not to load `sample_points`/`amplitude`/`phase` into memory
        the first time they are accessed, if they are hdf datasets. Can be
        set directly to toggle this behavior.
    interpolation : str
        The interpolation that was used when compressing the waveform, for
        checking the mismatch. Also the default interpolation used when
        decompressing.
    tolerance : {None, float}
        The tolerance that was used when compressing the waveform.
    mismatch : {None, float}
        The mismatch between the decompressed waveform and the original
        waveform.
    precision : {'double', str}
        The precision used to generate and store the compressed waveform
        points. Options are 'double' or 'single'; default is 'double'.
    """
    def __init__(self, sample_points, amplitude, phase,
                 interpolation=None, tolerance=None, mismatch=None,
                 precision='double', load_to_memory=True):
        self._sample_points = sample_points
        self._amplitude = amplitude
        self._phase = phase
        # lazily populated cache of arrays read back from hdf datasets
        self._cache = {}
        self.load_to_memory = load_to_memory
        # if sample points, amplitude, and/or phase are hdf datasets,
        # save their filenames
        self._filenames = {}
        self._groupnames = {}
        for arrname in ['sample_points', 'amplitude', 'phase']:
            try:
                fname = getattr(self, '_{}'.format(arrname)).file.filename
                gname = getattr(self, '_{}'.format(arrname)).name
            except AttributeError:
                # plain arrays have no .file attribute; record None so
                # _get knows there is no backing file to reopen
                fname = None
                gname = None
            self._filenames[arrname] = fname
            self._groupnames[arrname] = gname
        # metadata
        self.interpolation = interpolation
        self.tolerance = tolerance
        self.mismatch = mismatch
        self.precision = precision
    def _get(self, param):
        """Retrieve `param` ('amplitude', 'phase', or 'sample_points') as an
        array, reading from (and optionally caching) the backing hdf dataset
        if there is one."""
        val = getattr(self, '_%s' %param)
        if isinstance(val, h5py.Dataset):
            try:
                val = self._cache[param]
            except KeyError:
                try:
                    val = val[:]
                except ValueError:
                    # this can happen if the file is closed; if so, open it
                    # and get the data
                    fp = h5py.File(self._filenames[param], 'r')
                    val = fp[self._groupnames[param]][:]
                    fp.close()
                if self.load_to_memory:
                    self._cache[param] = val
        return val
    @property
    def amplitude(self):
        """The amplitude of the waveform at the `sample_points`.
        This is always returned as an array; the same logic as for
        `sample_points` is used to determine whether or not to cache in
        memory.
        Returns
        -------
        amplitude : Array
        """
        return self._get('amplitude')
    @property
    def phase(self):
        """The phase of the waveform at the `sample_points`.
        This is always returned as an array; the same logic as for
        `sample_points` is used to determine whether or not to cache in
        memory.
        Returns
        -------
        phase : Array
        """
        return self._get('phase')
    @property
    def sample_points(self):
        """The frequencies at which the compressed waveform is sampled.
        This is
        always returned as an array, even if the stored `sample_points` is an
        hdf dataset. If `load_to_memory` is True and the stored points are
        an hdf dataset, the `sample_points` will cached in memory the first
        time this attribute is accessed.
        Returns
        -------
        sample_points : Array
        """
        return self._get('sample_points')
    def clear_cache(self):
        """Clear self's cache of amplitude, phase, and sample_points."""
        self._cache.clear()
    def decompress(self, out=None, df=None, f_lower=None, interpolation=None):
        """Decompress self.
        Parameters
        ----------
        out : {None, FrequencySeries}
            Write the decompressed waveform to the given frequency series. The
            decompressed waveform will have the same `delta_f` as `out`.
            Either this or `df` must be provided.
        df : {None, float}
            Decompress the waveform such that its `delta_f` has the given
            value. Either this or `out` must be provided.
        f_lower : {None, float}
            The starting frequency at which to decompress the waveform. Cannot
            be less than the minimum frequency in `sample_points`. If `None`
            provided, will default to the minimum frequency in `sample_points`.
        interpolation : {None, str}
            The interpolation to use for decompressing the waveform. If `None`
            provided, will default to `self.interpolation`.
        Returns
        -------
        FrequencySeries
            The decompressed waveform.
        """
        if f_lower is None:
            # use the minimum of the sample points
            f_lower = self.sample_points.min()
        if interpolation is None:
            interpolation = self.interpolation
        return fd_decompress(self.amplitude, self.phase, self.sample_points,
                             out=out, df=df, f_lower=f_lower,
                             interpolation=interpolation)
    def write_to_hdf(self, fp, template_hash, root=None, precision=None):
        """Write the compressed waveform to the given hdf file handler.
        The waveform is written to:
        `fp['[{root}/]compressed_waveforms/{template_hash}/{param}']`,
        where `param` is the `sample_points`, `amplitude`, and `phase`. The
        `interpolation`, `tolerance`, `mismatch` and `precision` are saved
        to the group's attributes.
        Parameters
        ----------
        fp : h5py.File
            An open hdf file to write the compressed waveform to.
        template_hash : {hash, int, str}
            A hash, int, or string to map the template to the waveform.
        root : {None, str}
            Put the `compressed_waveforms` group in the given directory in the
            hdf file. If `None`, `compressed_waveforms` will be the root
            directory.
        precision : {None, str}
            Cast the saved parameters to the given precision before saving. If
            None provided, will use whatever their current precision is. This
            will raise an error if the parameters have single precision but the
            requested precision is double.
        """
        if root is None:
            root = ''
        else:
            root = '%s/'%(root)
        if precision is None:
            precision = self.precision
        elif precision == 'double' and self.precision == 'single':
            raise ValueError("cannot cast single precision to double")
        outdtype = _real_dtypes[precision]
        group = '%scompressed_waveforms/%s' %(root, str(template_hash))
        for param in ['amplitude', 'phase', 'sample_points']:
            fp['%s/%s' %(group, param)] = self._get(param).astype(outdtype)
        fp_group = fp[group]
        fp_group.attrs['mismatch'] = self.mismatch
        fp_group.attrs['interpolation'] = self.interpolation
        fp_group.attrs['tolerance'] = self.tolerance
        fp_group.attrs['precision'] = precision
    @classmethod
    def from_hdf(cls, fp, template_hash, root=None, load_to_memory=True,
                 load_now=False):
        """Load a compressed waveform from the given hdf file handler.
        The waveform is retrieved from:
        `fp['[{root}/]compressed_waveforms/{template_hash}/{param}']`,
        where `param` is the `sample_points`, `amplitude`, and `phase`.
        Parameters
        ----------
        fp : h5py.File
            An open hdf file to write the compressed waveform to.
        template_hash : {hash, int, str}
            The id of the waveform.
        root : {None, str}
            Retrieve the `compressed_waveforms` group from the given string.
            If `None`, `compressed_waveforms` will be assumed to be in the
            top level.
        load_to_memory : {True, bool}
            Set the `load_to_memory` attribute to the given value in the
            returned instance.
        load_now : {False, bool}
            Immediately load the `sample_points`/`amplitude`/`phase` to memory.
        Returns
        -------
        CompressedWaveform
            An instance of this class with parameters loaded from the hdf file.
        """
        if root is None:
            root = ''
        else:
            root = '%s/'%(root)
        group = '%scompressed_waveforms/%s' %(root, str(template_hash))
        fp_group = fp[group]
        sample_points = fp_group['sample_points']
        amp = fp_group['amplitude']
        phase = fp_group['phase']
        if load_now:
            # materialize datasets as arrays immediately
            sample_points = sample_points[:]
            amp = amp[:]
            phase = phase[:]
        return cls(sample_points, amp, phase,
                   interpolation=fp_group.attrs['interpolation'],
                   tolerance=fp_group.attrs['tolerance'],
                   mismatch=fp_group.attrs['mismatch'],
                   precision=fp_group.attrs['precision'],
                   load_to_memory=load_to_memory)
| 30,086
| 39.990463
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/bank.py
|
# Copyright (C) 2012 Alex Nitz, Josh Willis, Andrew Miller
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides classes that describe banks of waveforms
"""
import types
import logging
import os.path
import h5py
from copy import copy
import numpy as np
from ligo.lw import lsctables, utils as ligolw_utils
import pycbc.waveform
import pycbc.pnutils
import pycbc.waveform.compress
from pycbc import DYN_RANGE_FAC
from pycbc.types import FrequencySeries, zeros
import pycbc.io
from pycbc.io.ligolw import LIGOLWContentHandler
import hashlib
def sigma_cached(self, psd):
    """ Cache sigma calculate for use in tandem with the FilterBank class
    """
    # per-template cache of sigmasq values, keyed on id(psd); size-limited
    # so long-running jobs do not accumulate entries for stale psds
    if not hasattr(self, '_sigmasq'):
        from pycbc.opt import LimitedSizeDict
        self._sigmasq = LimitedSizeDict(size_limit=2**5)
    key = id(psd)
    # the psd also tracks which templates have cached against it, so a
    # recycled id() cannot serve a stale value
    if not hasattr(psd, '_sigma_cached_key'):
        psd._sigma_cached_key = {}
    if key not in self._sigmasq or id(self) not in psd._sigma_cached_key:
        psd._sigma_cached_key[id(self)] = True
        # If possible, we precalculate the sigmasq vector for all possible waveforms
        if pycbc.waveform.waveform_norm_exists(self.approximant):
            if not hasattr(psd, 'sigmasq_vec'):
                psd.sigmasq_vec = {}
            if self.approximant not in psd.sigmasq_vec:
                # cumulative norm vector, computed once per (psd, approximant)
                psd.sigmasq_vec[self.approximant] = \
                    pycbc.waveform.get_waveform_filter_norm(
                        self.approximant,
                        psd,
                        len(psd),
                        psd.delta_f,
                        self.min_f_lower
                    )
            if not hasattr(self, 'sigma_scale'):
                # Get an amplitude normalization (mass dependant constant norm)
                amp_norm = pycbc.waveform.get_template_amplitude_norm(
                    self.params, approximant=self.approximant)
                amp_norm = 1 if amp_norm is None else amp_norm
                self.sigma_scale = (DYN_RANGE_FAC * amp_norm) ** 2.0
            # sigmasq over [f_lower, end) from the cumulative vector
            curr_sigmasq = psd.sigmasq_vec[self.approximant]
            kmin = int(self.f_lower / psd.delta_f)
            self._sigmasq[key] = self.sigma_scale * \
                (curr_sigmasq[self.end_idx-1] - curr_sigmasq[kmin])
        else:
            # no precomputed norm available: integrate |h(f)|^2 / psd directly
            if not hasattr(self, 'sigma_view'):
                from pycbc.filter.matchedfilter import get_cutoff_indices
                N = (len(self) -1) * 2
                kmin, kmax = get_cutoff_indices(
                    self.min_f_lower or self.f_lower, self.end_frequency,
                    self.delta_f, N)
                self.sslice = slice(kmin, kmax)
                self.sigma_view = self[self.sslice].squared_norm() * 4.0 * self.delta_f
            if not hasattr(psd, 'invsqrt'):
                psd.invsqrt = 1.0 / psd
            self._sigmasq[key] = self.sigma_view.inner(psd.invsqrt[self.sslice])
    return self._sigmasq[key]
# helper function for parsing approximant strings
def boolargs_from_apprxstr(approximant_strs):
"""Parses a list of strings specifying an approximant and where that
approximant should be used into a list that can be understood by
FieldArray.parse_boolargs.
Parameters
----------
apprxstr : (list of) string(s)
The strings to parse. Each string should be formatted `APPRX:COND`,
where `APPRX` is the approximant and `COND` is a string specifying
where it should be applied (see `FieldArgs.parse_boolargs` for examples
of conditional strings). The last string in the list may exclude a
conditional argument, which is the same as specifying ':else'.
Returns
-------
boolargs : list
A list of tuples giving the approximant and where to apply them. This
can be passed directly to `FieldArray.parse_boolargs`.
"""
if not isinstance(approximant_strs, list):
approximant_strs = [approximant_strs]
return [tuple(arg.split(':')) for arg in approximant_strs]
def add_approximant_arg(parser, default=None, help=None):
    """Adds an approximant argument to the given parser.
    Parameters
    ----------
    parser : ArgumentParser
        The argument parser to add the argument to.
    default : {None, str}
        Specify a default for the approximant argument. Defaults to None.
    help : {None, str}
        Provide a custom help message. If None, will use a descriptive message
        on how to specify the approximant.
    """
    if help is None:
        # default help text; fixed the doubled "be" ("must be be followed")
        # present in the original message
        help=str("The approximant(s) to use. Multiple approximants to use "
                 "in different regions may be provided. If multiple "
                 "approximants are provided, every one but the last must be "
                 "followed by a conditional statement defining where that "
                 "approximant should be used. Conditionals can be any boolean "
                 "test understood by numpy. For example, 'Apprx:(mtotal > 4) & "
                 "(mchirp <= 5)' would use approximant 'Apprx' where total mass "
                 "is > 4 and chirp mass is <= 5. "
                 "Conditionals are applied in order, with each successive one "
                 "only applied to regions not covered by previous arguments. "
                 "For example, `'TaylorF2:mtotal < 4' 'IMRPhenomD:mchirp < 3'` "
                 "would result in IMRPhenomD being used where chirp mass is < 3 "
                 "and total mass is >= 4. The last approximant given may use "
                 "'else' as the conditional or include no conditional. In either "
                 "case, this will cause the last approximant to be used in any "
                 "remaning regions after all the previous conditionals have been "
                 "applied. For the full list of possible parameters to apply "
                 "conditionals to, see WaveformArray.default_fields(). Math "
                 "operations may also be used on parameters; syntax is python, "
                 "with any operation recognized by numpy.")
    parser.add_argument("--approximant", nargs='+', type=str, default=default,
                        metavar='APPRX[:COND]',
                        help=help)
def parse_approximant_arg(approximant_arg, warray):
    """Given an approximant arg (see add_approximant_arg) and a field
    array, figures out what approximant to use for each template in the array.
    Parameters
    ----------
    approximant_arg : list
        The approximant argument to parse. Should be the thing returned by
        ArgumentParser when parsing the argument added by add_approximant_arg.
    warray : FieldArray
        The array to parse. Must be an instance of a FieldArray, or a class
        that inherits from FieldArray.
    Returns
    -------
    array
        A numpy array listing the approximants to use for each element in
        the warray.
    """
    # convert the CLI strings to (approximant, condition) tuples, then let
    # the field array evaluate the conditions; element 0 of the result is
    # the per-template approximant array
    boolargs = boolargs_from_apprxstr(approximant_arg)
    return warray.parse_boolargs(boolargs)[0]
def tuple_to_hash(tuple_to_be_hashed):
    """
    Return a hash for a numpy array, avoids native (unsafe) python3 hash function
    Parameters
    ----------
    tuple_to_be_hashed: tuple
        The tuple which is being hashed
        Must be convertible to a numpy array
    Returns
    -------
    int
        an integer representation of the hashed array
    """
    # 8-byte blake2b digest of the tuple's C-ordered byte representation
    h = hashlib.blake2b(np.array(tuple_to_be_hashed).tobytes('C'),
                        digest_size=8)
    # np.fromstring is deprecated (and removed for binary input in modern
    # numpy); np.frombuffer is the supported equivalent and yields the
    # same integer value
    return np.frombuffer(h.digest(), dtype=int)[0]
class TemplateBank(object):
"""Class to provide some basic helper functions and information
about elements of a template bank.
Parameters
----------
filename : string
The name of the file to load. Must end in '.xml[.gz]' or '.hdf'. If an
hdf file, it should have a 'parameters' in its `attrs` which gives a
list of the names of fields to load from the file. If no 'parameters'
are found, all of the top-level groups in the file will assumed to be
parameters (a warning will be printed to stdout in this case). If an
xml file, it must have a `SnglInspiral` table.
approximant : {None, (list of) string(s)}
Specify the approximant(s) for each template in the bank. If None
provided, will try to load the approximant from the file. The
approximant may either be a single string (in which case the same
approximant will be used for all templates) or a list of strings and
conditionals specifying where to use the approximant. See
`boolargs_from_apprxstr` for syntax.
parameters : {None, (list of) sting(s)}
Specify what parameters to load from the file. If None, all of the
parameters in the file (if an xml file, this is all of the columns in
the SnglInspiral table, if an hdf file, this is given by the
parameters attribute in the file). The list may include parameters that
are derived from the file's parameters, or functions thereof. For a
full list of possible parameters, see `WaveformArray.default_fields`.
If a derived parameter is specified, only the parameters needed to
compute that parameter will be loaded from the file. For example, if
`parameters='mchirp'`, then only `mass1, mass2` will be loaded from
the file. Note that derived parameters can only be used if the
needed parameters are in the file; e.g., you cannot use `chi_eff` if
`spin1z`, `spin2z`, `mass1`, and `mass2` are in the input file.
\**kwds :
Any additional keyword arguments are stored to the `extra_args`
attribute.
Attributes
----------
table : WaveformArray
An instance of a WaveformArray containing all of the information about
the parameters of the bank.
has_compressed_waveforms : {False, bool}
True if compressed waveforms are present in the the (hdf) file; False
otherwise.
indoc : {None, xmldoc}
If an xml file was provided, an in-memory representation of the xml.
Otherwise, None.
filehandler : {None, h5py.File}
If an hdf file was provided, the file handler pointing to the hdf file
(left open after initialization). Otherwise, None.
extra_args : {None, dict}
Any extra keyword arguments that were provided on initialization.
"""
def __init__(self, filename, approximant=None, parameters=None,
**kwds):
self.has_compressed_waveforms = False
ext = os.path.basename(filename)
if ext.endswith(('.xml', '.xml.gz', '.xmlgz')):
self.filehandler = None
self.indoc = ligolw_utils.load_filename(
filename, False, contenthandler=LIGOLWContentHandler)
self.table = lsctables.SnglInspiralTable.get_table(self.indoc)
self.table = pycbc.io.WaveformArray.from_ligolw_table(self.table,
columns=parameters)
# inclination stored in xml alpha3 column
names = list(self.table.dtype.names)
names = tuple([n if n != 'alpha3' else 'inclination' for n in names])
# low frequency cutoff in xml alpha6 column
names = tuple([n if n!= 'alpha6' else 'f_lower' for n in names])
self.table.dtype.names = names
elif ext.endswith(('hdf', '.h5', '.hdf5')):
self.indoc = None
f = h5py.File(filename, 'r')
self.filehandler = f
try:
fileparams = list(f.attrs['parameters'])
except KeyError:
# just assume all of the top-level groups are the parameters
fileparams = list(f.keys())
logging.info("WARNING: no parameters attribute found. "
"Assuming that %s " %(', '.join(fileparams)) +
"are the parameters.")
tmp_params = []
# At this point fileparams might be bytes. Fix if it is
for param in fileparams:
try:
param = param.decode()
tmp_params.append(param)
except AttributeError:
tmp_params.append(param)
fileparams = tmp_params
# use WaveformArray's syntax parser to figure out what fields
# need to be loaded
if parameters is None:
parameters = fileparams
common_fields = list(pycbc.io.WaveformArray(1,
names=parameters).fieldnames)
add_fields = list(set(parameters) &
(set(fileparams) - set(common_fields)))
# load
dtype = []
data = {}
for key in common_fields+add_fields:
data[key] = f[key][:]
dtype.append((key, data[key].dtype))
num = f[fileparams[0]].size
self.table = pycbc.io.WaveformArray(num, dtype=dtype)
for key in data:
self.table[key] = data[key]
# add the compressed waveforms, if they exist
self.has_compressed_waveforms = 'compressed_waveforms' in f
else:
raise ValueError("Unsupported template bank file extension %s" %(
ext))
# if approximant is specified, override whatever was in the file
# (if anything was in the file)
if approximant is not None:
# get the approximant for each template
dtype = h5py.string_dtype(encoding='utf-8')
apprxs = np.array(self.parse_approximant(approximant),
dtype=dtype)
if 'approximant' not in self.table.fieldnames:
self.table = self.table.add_fields(apprxs, 'approximant')
else:
self.table['approximant'] = apprxs
self.extra_args = kwds
self.ensure_hash()
@property
def parameters(self):
"""tuple: The parameters loaded from the input file.
Same as `table.fieldnames`.
"""
return self.table.fieldnames
def ensure_hash(self):
"""Ensure that there is a correctly populated template_hash.
Check for a correctly populated template_hash and create if it doesn't
already exist.
"""
fields = self.table.fieldnames
if 'template_hash' in fields:
return
# The fields to use in making a template hash
hash_fields = ['mass1', 'mass2', 'inclination',
'spin1x', 'spin1y', 'spin1z',
'spin2x', 'spin2y', 'spin2z',]
fields = [f for f in hash_fields if f in fields]
template_hash = np.array([tuple_to_hash(v) for v in zip(*[self.table[p]
for p in fields])])
if not np.unique(template_hash).size == template_hash.size:
raise RuntimeError("Some template hashes clash. This should not "
"happen.")
self.table = self.table.add_fields(template_hash, 'template_hash')
    def write_to_hdf(self, filename, start_index=None, stop_index=None,
                     force=False, skip_fields=None,
                     write_compressed_waveforms=True):
        """Writes self to the given hdf file.

        Parameters
        ----------
        filename : str
            The name of the file to write to. Must be a recognised HDF5
            file extension
        start_index : If a specific slice of the template bank is to be
            written to the hdf file, this would specify the index of the
            first template in the slice
        stop_index : If a specific slice of the template bank is to be
            written to the hdf file, this would specify the index of the
            last template in the slice
        force : {False, bool}
            If the file already exists, it will be overwritten if True.
            Otherwise, an OSError is raised if the file exists.
        skip_fields : {None, (list of) strings}
            Do not write the given fields to the hdf file. Default is None,
            in which case all fields in self.table.fieldnames are written.
        write_compressed_waveforms : {True, bool}
            Write compressed waveforms to the output (hdf) file if this is
            True, which is the default setting. If False, do not write the
            compressed waveforms group, but only the template parameters to
            the output file.

        Returns
        -------
        h5py.File
            The file handler to the output hdf file (left open).
        """
        if not filename.endswith(('.hdf', '.h5', '.hdf5')):
            raise ValueError("Unrecoginized file extension")
        if os.path.exists(filename) and not force:
            raise IOError("File %s already exists" %(filename))
        f = h5py.File(filename, 'w')
        parameters = self.parameters
        if skip_fields is not None:
            if not isinstance(skip_fields, list):
                skip_fields = [skip_fields]
            parameters = [p for p in parameters if p not in skip_fields]
        # save the parameters
        f.attrs['parameters'] = parameters
        # write only the requested slice of the table (None:None = everything)
        write_tbl = self.table[start_index:stop_index]
        for p in parameters:
            f[p] = write_tbl[p]
        # copy each template's compressed waveform (keyed by template hash)
        # from the input file into the output file
        if write_compressed_waveforms and self.has_compressed_waveforms:
            for tmplt_hash in write_tbl.template_hash:
                compressed_waveform = pycbc.waveform.compress.CompressedWaveform.from_hdf(
                    self.filehandler, tmplt_hash,
                    load_now=True)
                compressed_waveform.write_to_hdf(f, tmplt_hash)
        return f
def end_frequency(self, index):
""" Return the end frequency of the waveform at the given index value
"""
if hasattr(self.table[index], 'f_final'):
return self.table[index].f_final
return pycbc.waveform.get_waveform_end_frequency(
self.table[index],
approximant=self.approximant(index),
**self.extra_args)
    def parse_approximant(self, approximant):
        """Parses the given approximant argument, returning the approximant to
        use for each template in self.

        This is done by calling `parse_approximant_arg` using self's table as
        the array; see that function for more details.
        """
        return parse_approximant_arg(approximant, self.table)
def approximant(self, index):
""" Return the name of the approximant ot use at the given index
"""
if 'approximant' not in self.table.fieldnames:
raise ValueError("approximant not found in input file and no "
"approximant was specified on initialization")
apx = self.table["approximant"][index]
if hasattr(apx, 'decode'):
apx = apx.decode()
return apx
    def __len__(self):
        """Return the number of templates in the bank."""
        return len(self.table)
def template_thinning(self, inj_filter_rejector):
"""Remove templates from bank that are far from all injections."""
if not inj_filter_rejector.enabled or \
inj_filter_rejector.chirp_time_window is None:
# Do nothing!
return
injection_parameters = inj_filter_rejector.injection_params.table
fref = inj_filter_rejector.f_lower
threshold = inj_filter_rejector.chirp_time_window
m1= self.table['mass1']
m2= self.table['mass2']
tau0_temp, _ = pycbc.pnutils.mass1_mass2_to_tau0_tau3(m1, m2, fref)
indices = []
sort = tau0_temp.argsort()
tau0_temp = tau0_temp[sort]
for inj in injection_parameters:
tau0_inj, _ = \
pycbc.pnutils.mass1_mass2_to_tau0_tau3(inj.mass1, inj.mass2,
fref)
lid = np.searchsorted(tau0_temp, tau0_inj - threshold)
rid = np.searchsorted(tau0_temp, tau0_inj + threshold)
inj_indices = sort[lid:rid]
indices.append(inj_indices)
indices_combined = np.concatenate(indices)
indices_unique= np.unique(indices_combined)
self.table = self.table[indices_unique]
    def ensure_standard_filter_columns(self, low_frequency_cutoff=None):
        """ Initialize FilterBank common fields

        Parameters
        ----------
        low_frequency_cutoff: {float, None}, Optional
            A low frequency cutoff which overrides any given within the
            template bank file.
        """
        # Make sure we have a template duration field
        if not hasattr(self.table, 'template_duration'):
            self.table = self.table.add_fields(np.zeros(len(self.table),
                                     dtype=np.float32), 'template_duration')

        # Make sure we have a f_lower field
        if low_frequency_cutoff is not None:
            if not hasattr(self.table, 'f_lower'):
                vec = np.zeros(len(self.table), dtype=np.float32)
                self.table = self.table.add_fields(vec, 'f_lower')
            # the explicit cutoff overrides per-template values from the file
            self.table['f_lower'][:] = low_frequency_cutoff

        self.min_f_lower = min(self.table['f_lower'])
        if self.f_lower is None and self.min_f_lower == 0.:
            raise ValueError('Invalid low-frequency cutoff settings')
class LiveFilterBank(TemplateBank):
    """Template bank that generates waveform filters on demand, sized
    dynamically from the waveform's estimated duration, a minimum buffer,
    and the sample rate (used by low-latency analyses).
    """
    def __init__(self, filename, sample_rate, minimum_buffer,
                 approximant=None, increment=8, parameters=None,
                 low_frequency_cutoff=None,
                 **kwds):
        # increment: discreteness (in seconds) of the buffer-length rounding
        self.increment = increment
        self.filename = filename
        self.sample_rate = sample_rate
        self.minimum_buffer = minimum_buffer
        self.f_lower = low_frequency_cutoff

        super(LiveFilterBank, self).__init__(filename, approximant=approximant,
                parameters=parameters, **kwds)
        self.ensure_standard_filter_columns(low_frequency_cutoff=low_frequency_cutoff)

        # map (mass1, mass2, spin1z, spin2z) -> row index for fast lookup;
        # these four parameters are assumed to uniquely identify a template
        self.param_lookup = {}
        for i, p in enumerate(self.table):
            key = (p.mass1, p.mass2, p.spin1z, p.spin2z)
            assert(key not in self.param_lookup) # Uh, oh, template confusion!
            self.param_lookup[key] = i

    def round_up(self, num):
        """Determine the length to use for this waveform by rounding.

        Parameters
        ----------
        num : int
            Proposed size of waveform in samples.

        Returns
        -------
        size: int
            The rounded size to use for the waveform buffer in samples.
            This is calculated using an internal `increment` attribute, which
            determines the discreteness of the rounding.
        """
        inc = self.increment
        # round the duration (seconds) up to a multiple of `inc`, then
        # convert back to samples
        size = np.ceil(num / self.sample_rate / inc) * self.sample_rate * inc
        return size

    def getslice(self, sindex):
        # shallow copy sharing everything but the (sliced) table
        instance = copy(self)
        instance.table = self.table[sindex]
        return instance

    def id_from_param(self, param_tuple):
        """Get the index of this template based on its param tuple

        Parameters
        ----------
        param_tuple : tuple
            Tuple of the parameters which uniquely identify this template

        Returns
        --------
        index : int
            The ordered index that this template has in the template bank.
        """
        return self.param_lookup[param_tuple]

    def __getitem__(self, index):
        if isinstance(index, slice):
            return self.getslice(index)

        return self.get_template(index)

    def get_template(self, index, min_buffer=None):
        """Generate the frequency-domain filter for the template at `index`.

        The buffer length is the waveform's estimated duration plus
        `min_buffer` (default: self.minimum_buffer) plus half a second,
        rounded up via `round_up`.
        """
        approximant = self.approximant(index)
        f_end = self.end_frequency(index)
        flow = self.table[index].f_lower

        # Determine the length of time of the filter, rounded up to
        # nearest power of two
        if min_buffer is None:
            min_buffer = self.minimum_buffer
        min_buffer += 0.5

        from pycbc.waveform.waveform import props
        p = props(self.table[index])
        p.pop('approximant')
        buff_size = pycbc.waveform.get_waveform_filter_length_in_time(approximant, **p)
        if not buff_size:
            raise RuntimeError('Template waveform %s not recognized!' % approximant)

        tlen = self.round_up((buff_size + min_buffer) * self.sample_rate)
        flen = int(tlen / 2 + 1)

        delta_f = self.sample_rate / float(tlen)

        # cap the end frequency at the Nyquist-limited bin
        if f_end is None or f_end >= (flen * delta_f):
            f_end = (flen - 1) * delta_f

        logging.info("Generating %s, %ss, %i, starting from %s Hz",
                     approximant, 1.0 / delta_f, index, flow)

        # Get the waveform filter
        distance = 1.0 / DYN_RANGE_FAC
        htilde = pycbc.waveform.get_waveform_filter(
            zeros(flen, dtype=np.complex64), self.table[index],
            approximant=approximant, f_lower=flow, f_final=f_end,
            delta_f=delta_f, delta_t=1.0 / self.sample_rate, distance=distance,
            **self.extra_args)

        # If available, record the total duration (which may
        # include ringdown) and the duration up to merger since they will be
        # erased by the type conversion below.
        ttotal = template_duration = -1
        time_offset = None
        if hasattr(htilde, 'length_in_time'):
            ttotal = htilde.length_in_time
        if hasattr(htilde, 'chirp_length'):
            template_duration = htilde.chirp_length
        if hasattr(htilde, 'time_offset'):
            time_offset = htilde.time_offset

        self.table[index].template_duration = template_duration

        # re-attach metadata lost by the complex64 cast
        htilde = htilde.astype(np.complex64)
        htilde.f_lower = flow
        htilde.min_f_lower = self.min_f_lower
        htilde.end_idx = int(f_end / htilde.delta_f)
        htilde.params = self.table[index]
        htilde.chirp_length = template_duration
        htilde.length_in_time = ttotal
        htilde.approximant = approximant
        htilde.end_frequency = f_end

        if time_offset:
            htilde.time_offset = time_offset

        # Add sigmasq as a method of this instance
        htilde.sigmasq = types.MethodType(sigma_cached, htilde)

        htilde.id = self.id_from_param((htilde.params.mass1,
                                        htilde.params.mass2,
                                        htilde.params.spin1z,
                                        htilde.params.spin2z))
        return htilde
class FilterBank(TemplateBank):
    """Template bank that generates fixed-length frequency-domain filters,
    optionally decompressing pre-stored compressed waveforms from the input
    hdf file instead of generating them from scratch.
    """
    def __init__(self, filename, filter_length, delta_f, dtype,
                 out=None, max_template_length=None,
                 approximant=None, parameters=None,
                 enable_compressed_waveforms=True,
                 low_frequency_cutoff=None,
                 waveform_decompression_method=None,
                 **kwds):
        self.out = out
        self.dtype = dtype
        self.f_lower = low_frequency_cutoff
        self.filename = filename
        self.delta_f = delta_f
        # time-domain length implied by the one-sided frequency series
        self.N = (filter_length - 1 ) * 2
        self.delta_t = 1.0 / (self.N * self.delta_f)
        self.filter_length = filter_length
        self.max_template_length = max_template_length
        self.enable_compressed_waveforms = enable_compressed_waveforms
        self.waveform_decompression_method = waveform_decompression_method

        super(FilterBank, self).__init__(filename, approximant=approximant,
            parameters=parameters, **kwds)
        self.ensure_standard_filter_columns(low_frequency_cutoff=low_frequency_cutoff)

    def get_decompressed_waveform(self, tempout, index, f_lower=None,
                                  approximant=None, df=None):
        """Returns a frequency domain decompressed waveform for the template
        in the bank corresponding to the index taken in as an argument. The
        decompressed waveform is obtained by interpolating in frequency space,
        the amplitude and phase points for the compressed template that are
        read in from the bank."""
        from pycbc.waveform.waveform import props
        from pycbc.waveform import get_waveform_filter_length_in_time

        # Get the template hash corresponding to the template index taken in as argument
        tmplt_hash = self.table.template_hash[index]

        # Read the compressed waveform from the bank file
        compressed_waveform = pycbc.waveform.compress.CompressedWaveform.from_hdf(
            self.filehandler, tmplt_hash,
            load_now=True)

        # Get the interpolation method to be used to decompress the waveform
        if self.waveform_decompression_method is not None :
            decompression_method = self.waveform_decompression_method
        else :
            decompression_method = compressed_waveform.interpolation
        logging.info("Decompressing waveform using %s", decompression_method)

        if df is not None :
            delta_f = df
        else :
            delta_f = self.delta_f

        # Create memory space for writing the decompressed waveform
        decomp_scratch = FrequencySeries(tempout[0:self.filter_length], delta_f=delta_f, copy=False)

        # Get the decompressed waveform
        hdecomp = compressed_waveform.decompress(out=decomp_scratch, f_lower=f_lower, interpolation=decompression_method)
        p = props(self.table[index])
        p.pop('approximant')
        try:
            tmpltdur = self.table[index].template_duration
        except AttributeError:
            tmpltdur = None
        # fall back to the model's duration estimate if none was stored
        if tmpltdur is None or tmpltdur==0.0 :
            tmpltdur = get_waveform_filter_length_in_time(approximant, **p)
        hdecomp.chirp_length = tmpltdur
        hdecomp.length_in_time = hdecomp.chirp_length
        return hdecomp

    def generate_with_delta_f_and_max_freq(self, t_num, max_freq, delta_f,
                                           low_frequency_cutoff=None,
                                           cached_mem=None):
        """Generate the template with index t_num using custom length."""
        approximant = self.approximant(t_num)
        # Don't want to use INTERP waveforms in here
        if approximant.endswith('_INTERP'):
            approximant = approximant.replace('_INTERP', '')
        # Using SPAtmplt here is bad as the stored cbrt and logv get
        # recalculated as we change delta_f values. Fall back to TaylorF2
        # in lalsimulation.
        if approximant == 'SPAtmplt':
            approximant = 'TaylorF2'
        if cached_mem is None:
            wav_len = int(max_freq / delta_f) + 1
            cached_mem = zeros(wav_len, dtype=np.complex64)
        if self.has_compressed_waveforms and self.enable_compressed_waveforms:
            htilde = self.get_decompressed_waveform(cached_mem, t_num,
                                                    f_lower=low_frequency_cutoff,
                                                    approximant=approximant,
                                                    df=delta_f)
        else :
            htilde = pycbc.waveform.get_waveform_filter(
                cached_mem, self.table[t_num], approximant=approximant,
                f_lower=low_frequency_cutoff, f_final=max_freq, delta_f=delta_f,
                distance=1./DYN_RANGE_FAC, delta_t=1./(2.*max_freq))
        return htilde

    def __getitem__(self, index):
        # Make new memory for templates if we aren't given output memory
        if self.out is None:
            tempout = zeros(self.filter_length, dtype=self.dtype)
        else:
            tempout = self.out

        approximant = self.approximant(index)
        f_end = self.end_frequency(index)
        # cap the end frequency at the last valid frequency bin
        if f_end is None or f_end >= (self.filter_length * self.delta_f):
            f_end = (self.filter_length-1) * self.delta_f

        # Find the start frequency, if variable
        f_low = find_variable_start_frequency(approximant,
                                              self.table[index],
                                              self.f_lower,
                                              self.max_template_length)
        logging.info('%s: generating %s from %s Hz' % (index, approximant, f_low))

        # Clear the storage memory
        poke  = tempout.data # pylint:disable=unused-variable
        tempout.clear()

        # Get the waveform filter
        distance = 1.0 / DYN_RANGE_FAC
        if self.has_compressed_waveforms and self.enable_compressed_waveforms:
            htilde = self.get_decompressed_waveform(tempout, index, f_lower=f_low,
                                                    approximant=approximant, df=None)
        else :
            htilde = pycbc.waveform.get_waveform_filter(
                tempout[0:self.filter_length], self.table[index],
                approximant=approximant, f_lower=f_low, f_final=f_end,
                delta_f=self.delta_f, delta_t=self.delta_t, distance=distance,
                **self.extra_args)

        # If available, record the total duration (which may
        # include ringdown) and the duration up to merger since they will be
        # erased by the type conversion below.
        ttotal = template_duration = None
        if hasattr(htilde, 'length_in_time'):
            ttotal = htilde.length_in_time
        if hasattr(htilde, 'chirp_length'):
            template_duration = htilde.chirp_length

        self.table[index].template_duration = template_duration

        # re-attach metadata lost by the dtype cast
        htilde = htilde.astype(self.dtype)
        htilde.f_lower = f_low
        htilde.min_f_lower = self.min_f_lower
        htilde.end_idx = int(f_end / htilde.delta_f)
        htilde.params = self.table[index]
        htilde.chirp_length = template_duration
        htilde.length_in_time = ttotal
        htilde.approximant = approximant
        htilde.end_frequency = f_end

        # Add sigmasq as a method of this instance
        htilde.sigmasq = types.MethodType(sigma_cached, htilde)
        htilde._sigmasq = {}
        return htilde
def find_variable_start_frequency(approximant, parameters, f_start, max_length,
                                  delta_f = 1):
    """ Find a frequency value above the starting frequency that results in a
    waveform shorter than max_length.
    """
    # no explicit start frequency: use the template's own f_lower
    if f_start is None:
        return parameters.f_lower
    # no length cap: the requested start frequency stands as-is
    if max_length is None:
        return f_start
    # Step the start frequency up in increments of delta_f until the
    # estimated waveform duration drops to max_length or below
    freq = f_start - delta_f
    duration = max_length + 1
    while duration > max_length:
        freq += delta_f
        duration = pycbc.waveform.get_waveform_filter_length_in_time(
            approximant, parameters, f_lower=freq)
    return freq
class FilterBankSkyMax(TemplateBank):
    """Template bank that generates both plus and cross polarization filters
    for each template (used by sky-maximized searches).
    """
    def __init__(self, filename, filter_length, delta_f,
                 dtype, out_plus=None, out_cross=None,
                 max_template_length=None, parameters=None,
                 low_frequency_cutoff=None, **kwds):
        self.out_plus = out_plus
        self.out_cross = out_cross
        self.dtype = dtype
        self.f_lower = low_frequency_cutoff
        self.filename = filename
        self.delta_f = delta_f
        # time-domain length implied by the one-sided frequency series
        self.N = (filter_length - 1 ) * 2
        self.delta_t = 1.0 / (self.N * self.delta_f)
        self.filter_length = filter_length
        self.max_template_length = max_template_length

        super(FilterBankSkyMax, self).__init__(filename, parameters=parameters,
              **kwds)

        self.ensure_standard_filter_columns(low_frequency_cutoff=low_frequency_cutoff)

    def __getitem__(self, index):
        # Make new memory for templates if we aren't given output memory
        if self.out_plus is None:
            tempoutplus = zeros(self.filter_length, dtype=self.dtype)
        else:
            tempoutplus = self.out_plus
        if self.out_cross is None:
            tempoutcross = zeros(self.filter_length, dtype=self.dtype)
        else:
            tempoutcross = self.out_cross
        approximant = self.approximant(index)

        # Get the end of the waveform if applicable (only for SPAtmplt atm)
        f_end = self.end_frequency(index)
        if f_end is None or f_end >= (self.filter_length * self.delta_f):
            f_end = (self.filter_length-1) * self.delta_f

        # Find the start frequency, if variable
        f_low = find_variable_start_frequency(approximant,
                                              self.table[index],
                                              self.f_lower,
                                              self.max_template_length)
        logging.info('%s: generating %s from %s Hz', index, approximant, f_low)

        # What does this do???
        poke1 = tempoutplus.data # pylint:disable=unused-variable
        poke2 = tempoutcross.data # pylint:disable=unused-variable

        # Clear the storage memory
        tempoutplus.clear()
        tempoutcross.clear()

        # Get the waveform filter
        distance = 1.0 / DYN_RANGE_FAC
        hplus, hcross = pycbc.waveform.get_two_pol_waveform_filter(
            tempoutplus[0:self.filter_length],
            tempoutcross[0:self.filter_length], self.table[index],
            approximant=approximant, f_lower=f_low,
            f_final=f_end, delta_f=self.delta_f, delta_t=self.delta_t,
            distance=distance, **self.extra_args)

        if hasattr(hplus, 'chirp_length') and hplus.chirp_length is not None:
            self.table[index].template_duration = hplus.chirp_length

        # re-attach metadata lost by the dtype casts
        hplus = hplus.astype(self.dtype)
        hcross = hcross.astype(self.dtype)
        hplus.f_lower = f_low
        hcross.f_lower = f_low
        hplus.min_f_lower = self.min_f_lower
        hcross.min_f_lower = self.min_f_lower
        hplus.end_frequency = f_end
        hcross.end_frequency = f_end
        hplus.end_idx = int(hplus.end_frequency / hplus.delta_f)
        # NOTE(review): computed from hplus's values; the result is the same
        # since both polarizations share f_end and delta_f — confirm intended
        hcross.end_idx = int(hplus.end_frequency / hplus.delta_f)
        hplus.params = self.table[index]
        hcross.params = self.table[index]
        hplus.approximant = approximant
        hcross.approximant = approximant

        # Add sigmasq as a method of this instance
        hplus.sigmasq = types.MethodType(sigma_cached, hplus)
        hplus._sigmasq = {}
        hcross.sigmasq = types.MethodType(sigma_cached, hcross)
        hcross._sigmasq = {}

        return hplus, hcross
# Public API of this module
__all__ = ('sigma_cached', 'boolargs_from_apprxstr', 'add_approximant_arg',
           'parse_approximant_arg', 'tuple_to_hash', 'TemplateBank',
           'LiveFilterBank', 'FilterBank', 'find_variable_start_frequency',
           'FilterBankSkyMax')
| 39,507
| 41.299786
| 121
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/waveform_modes.py
|
# Copyright (C) 2020 Collin Capano, Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Provides functions and utilities for generating waveforms mode-by-mode.
"""
from string import Formatter
import lal
from pycbc import libutils, pnutils
from pycbc.types import (TimeSeries, FrequencySeries)
from .waveform import (props, _check_lal_pars, check_args)
from . import parameters
lalsimulation = libutils.import_optional('lalsimulation')
def _formatdocstr(docstr):
    """Utility for formatting docstrings with parameter information.
    """
    # Collect every replacement-field name appearing in the docstring and
    # substitute the corresponding parameter's documentation for it.
    subs = {}
    for _, fieldname, _, _ in Formatter().parse(docstr):
        if fieldname is None:
            continue
        pdoc = getattr(parameters, fieldname).docstr(
            prefix="    ", include_label=False)
        subs[fieldname] = pdoc.lstrip(' ')
    return docstr.format(**subs)
def _formatdocstrlist(docstr, paramlist, skip_params=None):
"""Utility for formatting docstrings with parameter information.
"""
if skip_params is None:
skip_params = []
pl = '\n'.join([_p.docstr(prefix=" ", include_label=False)
for _p in paramlist if _p not in skip_params]).lstrip(' ')
return docstr.format(params=pl)
def sum_modes(hlms, inclination, phi):
    """Applies spherical harmonics and sums modes to produce a plus and cross
    polarization.

    Parameters
    ----------
    hlms : dict
        Dictionary of ``(l, m)`` -> complex ``hlm``. The ``hlm`` may be a
        complex number or array, or complex ``TimeSeries``. All modes in the
        dictionary will be summed.
    inclination : float
        The inclination to use.
    phi : float
        The phase to use.

    Returns
    -------
    complex float or array
        The plus and cross polarization as a complex number. The real part
        gives the plus, the negative imaginary part the cross.
    """
    total = None
    for (l, m), hlm in hlms.items():
        # weight this mode by its spin -2 spherical harmonic
        ylm = lal.SpinWeightedSphericalHarmonic(inclination, phi, -2, l, m)
        term = ylm * hlm
        total = term if total is None else total + term
    return total
def default_modes(approximant):
    """Returns the default modes for the given approximant.
    """
    # FIXME: this should be replaced to a call to a lalsimulation function,
    # whenever that's added
    if approximant.startswith('NRSur7dq4'):
        # according to arXiv:1905.09300: all modes up to l = 4
        return [(l, m) for l in (2, 3, 4) for m in range(-l, l+1)]
    if approximant in ('IMRPhenomXPHM', 'IMRPhenomXHM'):
        # according to arXiv:2004.06503
        positive = [(2, 2), (2, 1), (3, 3), (3, 2), (4, 4)]
    elif approximant in ('IMRPhenomPv3HM', 'IMRPhenomHM'):
        # according to arXiv:1911.06050
        positive = [(2, 2), (2, 1), (3, 3), (3, 2), (4, 4), (4, 3)]
    else:
        raise ValueError("I don't know what the default modes are for "
                         "approximant {}, sorry!".format(approximant))
    # append the -m counterpart of every mode
    return positive + [(l, -m) for l, m in positive]
def get_glm(l, m, theta):
    r"""The maginitude of the :math:`{}_{-2}Y_{\ell m}`.

    The spin-weighted spherical harmonics can be written as
    :math:`{}_{-2}Y_{\ell m}(\theta, \phi) = g_{\ell m}(\theta)e^{i m \phi}`.
    This returns the `g_{\ell m}(\theta)` part. Note that this is real.

    Parameters
    ----------
    l : int
        The :math:`\ell` index of the spherical harmonic.
    m : int
        The :math:`m` index of the spherical harmonic.
    theta : float
        The polar angle (in radians).

    Returns
    -------
    float :
        The amplitude of the harmonic at the given polar angle.
    """
    # evaluating at phi = 0 makes e^{im phi} = 1, leaving only g_lm(theta)
    return lal.SpinWeightedSphericalHarmonic(theta, 0., -2, l, m).real
def get_nrsur_modes(**params):
    """Generates NRSurrogate waveform mode-by-mode.

    All waveform parameters should be provided as keyword arguments.
    Recognized parameters are listed below. Unrecognized arguments are ignored.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. A common example would be a row in an xml table.
    approximant : str
        The approximant to generate. Must be one of the ``NRSur*`` models.
    {delta_t}
    {mass1}
    {mass2}
    {spin1x}
    {spin1y}
    {spin1z}
    {spin2x}
    {spin2y}
    {spin2z}
    {f_lower}
    {f_ref}
    {distance}
    {mode_array}

    Returns
    -------
    dict :
        Dictionary of ``(l, m)`` -> ``(h_+, -h_x)`` ``TimeSeries``.
    """
    laldict = _check_lal_pars(params)
    ret = lalsimulation.SimInspiralPrecessingNRSurModes(
        params['delta_t'],
        params['mass1']*lal.MSUN_SI,
        params['mass2']*lal.MSUN_SI,
        params['spin1x'], params['spin1y'], params['spin1z'],
        params['spin2x'], params['spin2y'], params['spin2z'],
        params['f_lower'], params['f_ref'],
        params['distance']*1e6*lal.PC_SI, laldict,
        getattr(lalsimulation, params['approximant'])
        )
    hlms = {}
    # walk the linked list of modes returned by lalsimulation, converting
    # each complex hlm into (real, imag) TimeSeries pairs
    while ret:
        hlm = TimeSeries(ret.mode.data.data, delta_t=ret.mode.deltaT,
                         epoch=ret.mode.epoch)
        hlms[ret.l, ret.m] = (hlm.real(), hlm.imag())
        ret = ret.next
    return hlms
get_nrsur_modes.__doc__ = _formatdocstr(get_nrsur_modes.__doc__)
def get_imrphenomxh_modes(**params):
    """Generates ``IMRPhenomXHM`` waveforms mode-by-mode.

    Each requested mode is generated with a separate call to lalsimulation's
    single-mode generator. Returns a dict of ``(l, m)`` -> (plus-strain,
    cross-strain) ``FrequencySeries`` pairs, without the Y_lm factors applied.
    """
    approx = params['approximant']
    if not approx.startswith('IMRPhenomX'):
        raise ValueError("unsupported approximant")
    mode_array = params.pop('mode_array', None)
    if mode_array is None:
        mode_array = default_modes(approx)
    if 'f_final' not in params:
        # setting to 0 will default to ringdown frequency
        params['f_final'] = 0.
    hlms = {}
    for (l, m) in mode_array:
        params['mode_array'] = [(l, m)]
        laldict = _check_lal_pars(params)
        hlm = lalsimulation.SimIMRPhenomXHMGenerateFDOneMode(
            float(pnutils.solar_mass_to_kg(params['mass1'])),
            float(pnutils.solar_mass_to_kg(params['mass2'])),
            float(params['spin1z']),
            float(params['spin2z']), l, m,
            pnutils.megaparsecs_to_meters(float(params['distance'])),
            params['f_lower'], params['f_final'], params['delta_f'],
            params['coa_phase'], params['f_ref'],
            laldict)
        hlm = FrequencySeries(hlm.data.data, delta_f=hlm.deltaF,
                              epoch=hlm.epoch)
        # Plus, cross strains without Y_lm.
        # (-1)**(l) factor ALREADY included in FDOneMode
        hplm = 0.5 * hlm  # Plus strain
        hclm = 0.5j * hlm  # Cross strain
        if m > 0:
            hclm *= -1
        hlms[l, m] = (hplm, hclm)
    return hlms
_mode_waveform_td = {'NRSur7dq4': get_nrsur_modes,
}
_mode_waveform_fd = {'IMRPhenomXHM': get_imrphenomxh_modes,
}
# 'IMRPhenomXPHM':get_imrphenomhm_modes needs to be implemented
# LAL function do not split strain mode by mode
def fd_waveform_mode_approximants():
    """Frequency domain approximants that will return separate modes."""
    # iterating a dict yields its keys, so sorted() over it is equivalent
    return sorted(_mode_waveform_fd)
def td_waveform_mode_approximants():
    """Time domain approximants that will return separate modes."""
    # iterating a dict yields its keys, so sorted() over it is equivalent
    return sorted(_mode_waveform_td)
def get_fd_waveform_modes(template=None, **kwargs):
    r"""Generates frequency domain waveforms, but does not sum over the modes.

    The returned values are the frequency-domain equivalents of the real and
    imaginary parts of the complex :math:`\mathfrak{{h}}_{{\ell m}}(t)` time
    series. In other words, the returned values are equivalent to the Fourier
    Transform of the two time series returned by
    :py:func:`get_td_waveform_modes`; see that function for more details.

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to subsitute
        for keyword arguments.
    {params}

    Returns
    -------
    ulm : dict
        Dictionary of mode tuples -> fourier transform of the real part of the
        hlm time series, as a :py:class:`pycbc.types.FrequencySeries`.
    vlm : dict
        Dictionary of mode tuples -> fourier transform of the imaginary part of
        the hlm time series, as a :py:class:`pycbc.types.FrequencySeries`.
    """
    params = props(template, **kwargs)
    required = parameters.fd_required
    check_args(params, required)
    apprx = params['approximant']
    # dispatch to the approximant-specific mode generator
    if apprx not in _mode_waveform_fd:
        raise ValueError("I don't support approximant {}, sorry"
                         .format(apprx))
    return _mode_waveform_fd[apprx](**params)
get_fd_waveform_modes.__doc__ = _formatdocstrlist(
get_fd_waveform_modes.__doc__, parameters.fd_waveform_params,
skip_params=['inclination', 'coa_phase'])
def get_td_waveform_modes(template=None, **kwargs):
    r"""Generates time domain waveforms, but does not sum over the modes.

    The returned values are the real and imaginary parts of the complex
    :math:`\mathfrak{{h}}_{{\ell m}}(t)`. These are defined such that the plus
    and cross polarizations :math:`h_{{+,\times}}` are:

    .. math::

       h_{{+,\times}}(\theta, \phi; t) = (\Re, -\Im) \sum_{{\ell m}}
        {{}}_{{-2}}Y_{{\ell m}}(\theta, \phi) \mathfrak{{h}}_{{\ell m}}(t).

    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to subsitute
        for keyword arguments.
    {params}

    Returns
    -------
    ulm : dict
        Dictionary of mode tuples -> real part of the hlm, as a
        :py:class:`pycbc.types.TimeSeries`.
    vlm : dict
        Dictionary of mode tuples -> imaginary part of the hlm, as a
        :py:class:`pycbc.types.TimeSeries`.
    """
    params = props(template, **kwargs)
    required = parameters.td_required
    check_args(params, required)
    apprx = params['approximant']
    # dispatch to the approximant-specific mode generator
    if apprx not in _mode_waveform_td:
        raise ValueError("I don't support approximant {}, sorry"
                         .format(apprx))
    return _mode_waveform_td[apprx](**params)
get_td_waveform_modes.__doc__ = _formatdocstrlist(
get_td_waveform_modes.__doc__, parameters.td_waveform_params,
skip_params=['inclination', 'coa_phase'])
| 11,042
| 33.509375
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/utils.py
|
# Copyright (C) 2013 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""This module contains convenience utilities for manipulating waveforms
"""
from pycbc.types import TimeSeries, FrequencySeries, Array, float32, float64, complex_same_precision_as, real_same_precision_as
import lal
from math import frexp
import numpy
from pycbc.scheme import schemed
from scipy import signal
def ceilpow2(n):
    """Return the smallest power of two that is >= ``n``.

    Convenience function to determine a power-of-2 upper frequency limit.
    For non-positive ``n`` the result is 1.
    """
    mantissa, exponent = frexp(n)
    # Negative input: the smallest usable power of two is 1.
    if mantissa < 0:
        return 1
    # frexp reports an exact power of two as 0.5 * 2**(k+1); undo the bump
    # so e.g. ceilpow2(4) == 4 rather than 8.
    if mantissa == 0.5:
        exponent -= 1
    return 1 << exponent
def coalign_waveforms(h1, h2, psd=None,
                      low_frequency_cutoff=None,
                      high_frequency_cutoff=None,
                      resize=True):
    """ Return two time series which are aligned in time and phase.

    The alignment is only to the nearest sample point and all changes to the
    phase are made to the first input waveform. Waveforms should not be split
    across the vector boundary. If they are, please use roll or cyclic time
    shift to ensure that the entire signal is contiguous in the time series.

    Parameters
    ----------
    h1: pycbc.types.TimeSeries
        The first waveform to align; the time and phase shifts are applied
        to this waveform.
    h2: pycbc.types.TimeSeries
        The second waveform to align.
    psd: {None, pycbc.types.FrequencySeries}
        A psd to weight the alignment
    low_frequency_cutoff: {None, float}
        The low frequency cutoff to weight the matching in Hz.
    high_frequency_cutoff: {None, float}
        The high frequency cutoff to weight the matching in Hz.
    resize: Optional, {True, boolean}
        If true, the vectors will be resized to match each other. If false,
        they must be the same length and even in length

    Returns
    -------
    h1: pycbc.types.TimeSeries
        The shifted waveform to align with h2
    h2: pycbc.type.TimeSeries
        The resized (if necessary) waveform to align with h1.
    """
    from pycbc.filter import matched_filter
    # work on power-of-two length copies so the FFTs are efficient
    mlen = ceilpow2(max(len(h1), len(h2)))
    h1 = h1.copy()
    h2 = h2.copy()
    if resize:
        h1.resize(mlen)
        h2.resize(mlen)
    elif len(h1) != len(h2) or len(h2) % 2 != 0:
        raise ValueError("Time series must be the same size and even if you do "
                         "not allow resizing")
    # complex SNR time series of h2 filtered against h1; its peak location
    # and phase give the sample offset and rotation that best align the two
    snr = matched_filter(h1, h2, psd=psd,
                         low_frequency_cutoff=low_frequency_cutoff,
                         high_frequency_cutoff=high_frequency_cutoff)
    _, l = snr.abs_max_loc()
    # unit-modulus complex factor: rotates h1 by the phase at the SNR peak
    rotation = snr[l] / abs(snr[l])
    h1 = (h1.to_frequencyseries() * rotation).to_timeseries()
    # circularly shift h1 by the peak sample offset
    h1.roll(l)
    h1 = TimeSeries(h1, delta_t=h2.delta_t, epoch=h2.start_time)
    return h1, h2
def phase_from_frequencyseries(htilde, remove_start_phase=True):
    """Return the unwrapped phase of a complex frequency-domain waveform.

    This assumes the waveform has been sampled finely enough that the
    phase cannot change by more than pi radians between each step.

    Parameters
    ----------
    htilde : FrequencySeries
        The waveform to get the phase for; must be a complex frequency series.
    remove_start_phase : {True, bool}
        Subtract the initial phase before returning.

    Returns
    -------
    FrequencySeries
        The phase of the waveform as a function of frequency.
    """
    out_dtype = real_same_precision_as(htilde)
    phase = numpy.unwrap(numpy.angle(htilde.data)).astype(out_dtype)
    if remove_start_phase:
        # shift so the first sample's phase is zero
        phase -= phase[0]
    return FrequencySeries(phase, delta_f=htilde.delta_f,
                           epoch=htilde.epoch, copy=False)
def amplitude_from_frequencyseries(htilde):
    """Return the magnitude of a frequency-domain waveform.

    Parameters
    ----------
    htilde : FrequencySeries
        The waveform to get the amplitude of.

    Returns
    -------
    FrequencySeries
        The amplitude of the waveform as a function of frequency.
    """
    out_dtype = real_same_precision_as(htilde)
    magnitude = numpy.abs(htilde.data).astype(out_dtype)
    return FrequencySeries(magnitude, delta_f=htilde.delta_f,
                           epoch=htilde.epoch, copy=False)
def time_from_frequencyseries(htilde, sample_frequencies=None,
                              discont_threshold=0.99*numpy.pi):
    """Computes time as a function of frequency from the given
    frequency-domain waveform. This assumes the stationary phase
    approximation. Any frequencies lower than the first non-zero value in
    htilde are assigned the time at the first non-zero value. Times for any
    frequencies above the next-to-last non-zero value in htilde will be
    assigned the time of the next-to-last non-zero value.

    .. note::
        Some waveform models (e.g., `SEOBNRv2_ROM_DoubleSpin`) can have
        discontinuities in the phase towards the end of the waveform due to
        numerical error. We therefore exclude any points that occur after a
        discontinuity in the phase, as the time estimate becomes untrustworthy
        beyond that point. What determines a discontinuity in the phase is set
        by the `discont_threshold`. To turn this feature off, just set
        `discont_threshold` to a value larger than pi (due to the unwrapping
        of the phase, no two points can have a difference > pi).

    Parameters
    ----------
    htilde : FrequencySeries
        The waveform to get the time evolution of; must be complex.
    sample_frequencies : {None, array}
        The frequencies at which the waveform is sampled. If None, will
        retrieve from ``htilde.sample_frequencies``.
    discont_threshold : {0.99*pi, float}
        If the difference in the phase changes by more than this threshold,
        it is considered to be a discontinuity. Default is 0.99*pi.

    Returns
    -------
    FrequencySeries
        The time evolution of the waveform as a function of frequency.
    """
    if sample_frequencies is None:
        sample_frequencies = htilde.sample_frequencies.numpy()
    phase = phase_from_frequencyseries(htilde).data
    dphi = numpy.diff(phase)
    # stationary phase approximation: t(f) = -(1/2pi) * dphi/df
    time = -dphi / (2.*numpy.pi*numpy.diff(sample_frequencies))
    nzidx = numpy.nonzero(abs(htilde.data))[0]
    # next-to-last non-zero sample is used for kmax because diff() makes
    # `time` one element shorter than `phase`
    kmin, kmax = nzidx[0], nzidx[-2]
    # exclude everything after a discontinuity
    discont_idx = numpy.where(abs(dphi[kmin:]) >= discont_threshold)[0]
    if discont_idx.size != 0:
        kmax = min(kmax, kmin + discont_idx[0]-1)
    # pad the out-of-band regions with the boundary values
    time[:kmin] = time[kmin]
    time[kmax:] = time[kmax]
    return FrequencySeries(time.astype(real_same_precision_as(htilde)),
                           delta_f=htilde.delta_f, epoch=htilde.epoch,
                           copy=False)
def phase_from_polarizations(h_plus, h_cross, remove_start_phase=True):
    """Return gravitational wave phase

    Return the gravitation-wave phase from the h_plus and h_cross
    polarizations of the waveform. The returned phase is always
    positive and increasing with an initial phase of 0.

    Parameters
    ----------
    h_plus : TimeSeries
        A PyCBC TimeSeries vector that contains the plus polarization of the
        gravitational waveform.
    h_cross : TimeSeries
        A PyCBC TimeSeries vector that contains the cross polarization of the
        gravitational waveform.
    remove_start_phase : {True, bool}
        If True, subtract the phase of the first sample so that the returned
        phase starts at 0.

    Returns
    -------
    GWPhase : TimeSeries
        A TimeSeries containing the gravitational wave phase.

    Examples
    --------
    >>> from pycbc.waveform import get_td_waveform, phase_from_polarizations
    >>> hp, hc = get_td_waveform(approximant="TaylorT4", mass1=10, mass2=10,
                         f_lower=30, delta_t=1.0/4096)
    >>> phase = phase_from_polarizations(hp, hc)
    """
    # unwrap so the phase is continuous rather than wrapped to (-pi, pi]
    p = numpy.unwrap(numpy.arctan2(h_cross.data, h_plus.data)).astype(
        real_same_precision_as(h_plus))
    if remove_start_phase:
        p += -p[0]
    return TimeSeries(p, delta_t=h_plus.delta_t, epoch=h_plus.start_time,
                      copy=False)
def amplitude_from_polarizations(h_plus, h_cross):
    """Return the gravitational-wave amplitude.

    Computes ``sqrt(h_plus**2 + h_cross**2)`` sample by sample.

    Parameters
    ----------
    h_plus : TimeSeries
        Plus polarization of the gravitational waveform.
    h_cross : TimeSeries
        Cross polarization of the gravitational waveform.

    Returns
    -------
    GWAmplitude : TimeSeries
        A TimeSeries containing the gravitational wave amplitude.

    Examples
    --------
    >>> from pycbc.waveform import get_td_waveform, amplitude_from_polarizations
    >>> hp, hc = get_td_waveform(approximant="TaylorT4", mass1=10, mass2=10,
                         f_lower=30, delta_t=1.0/4096)
    >>> amp = amplitude_from_polarizations(hp, hc)
    """
    squared = h_plus.squared_norm() + h_cross.squared_norm()
    return TimeSeries(squared ** 0.5, delta_t=h_plus.delta_t,
                      epoch=h_plus.start_time)
def frequency_from_polarizations(h_plus, h_cross):
    """Return gravitational wave frequency

    Return the gravitation-wave frequency as a function of time
    from the h_plus and h_cross polarizations of the waveform.
    It is 1 bin shorter than the input vectors and the sample times
    are advanced half a bin.

    Parameters
    ----------
    h_plus : TimeSeries
        A PyCBC TimeSeries vector that contains the plus polarization of the
        gravitational waveform.
    h_cross : TimeSeries
        A PyCBC TimeSeries vector that contains the cross polarization of the
        gravitational waveform.

    Returns
    -------
    GWFrequency : TimeSeries
        A TimeSeries containing the gravitational wave frequency as a function
        of time.

    Examples
    --------
    >>> from pycbc.waveform import get_td_waveform, frequency_from_polarizations
    >>> hp, hc = get_td_waveform(approximant="TaylorT4", mass1=10, mass2=10,
                         f_lower=30, delta_t=1.0/4096)
    >>> freq = frequency_from_polarizations(hp, hc)
    """
    phase = phase_from_polarizations(h_plus, h_cross)
    # finite difference of the phase: f = (1/2pi) * dphi/dt
    freq = numpy.diff(phase) / ( 2 * lal.PI * phase.delta_t )
    # differencing places each sample between two inputs, so the
    # effective sample times are advanced by half a bin
    start_time = phase.start_time + phase.delta_t / 2
    return TimeSeries(freq.astype(real_same_precision_as(h_plus)),
                      delta_t=phase.delta_t, epoch=start_time)
# map between tapering string in sim_inspiral table or inspiral
# code option and lalsimulation constants
try:
    import lalsimulation as sim
    # Accept both the sim_inspiral-table style names ('TAPER_START', ...)
    # and the short inspiral-code option names ('start', ...).
    taper_map = {
        'TAPER_NONE' : None,
        'TAPER_START' : sim.SIM_INSPIRAL_TAPER_START,
        'start' : sim.SIM_INSPIRAL_TAPER_START,
        'TAPER_END' : sim.SIM_INSPIRAL_TAPER_END,
        'end' : sim.SIM_INSPIRAL_TAPER_END,
        'TAPER_STARTEND': sim.SIM_INSPIRAL_TAPER_STARTEND,
        'startend' : sim.SIM_INSPIRAL_TAPER_STARTEND
    }
    # LALSimulation's tapering functions are dtype-specific.
    taper_func_map = {
        numpy.dtype(float32): sim.SimInspiralREAL4WaveTaper,
        numpy.dtype(float64): sim.SimInspiralREAL8WaveTaper
    }
except ImportError:
    # lalsimulation is optional; with empty maps taper_timeseries() will
    # reject every tapermethod.
    taper_map = {}
    taper_func_map = {}
def taper_timeseries(tsdata, tapermethod=None, return_lal=False):
    """
    Taper either or both ends of a time series using wrapped
    LALSimulation functions.

    Parameters
    ----------
    tsdata : TimeSeries
        Series to be tapered, dtype must be either float32 or float64
    tapermethod : string
        Should be one of ('TAPER_NONE', 'TAPER_START', 'TAPER_END',
        'TAPER_STARTEND', 'start', 'end', 'startend') - NB 'TAPER_NONE' will
        not change the series!
    return_lal : Boolean
        If True, return a wrapped LAL time series object, else return a
        PyCBC time series.

    Returns
    -------
    TimeSeries or LAL time series
        The tapered data, wrapped per ``return_lal``.

    Raises
    ------
    ValueError
        If ``tapermethod`` is None or not a recognized method name.
    TypeError
        If the input dtype is not float32 or float64.
    """
    if tapermethod is None:
        # BUGFIX: the implicit string concatenation previously omitted a
        # space, yielding "...was calledwith tapermethod=None".
        raise ValueError("Must specify a tapering method (function was called "
                         "with tapermethod=None)")
    if tapermethod not in taper_map:
        raise ValueError("Unknown tapering method %s, valid methods are %s" %
                         (tapermethod, ", ".join(taper_map.keys())))
    if tsdata.dtype not in (float32, float64):
        raise TypeError("Strain dtype must be float32 or float64, not "
                        + str(tsdata.dtype))
    taper_func = taper_func_map[tsdata.dtype]
    # make a LAL TimeSeries to pass to the LALSim function
    ts_lal = tsdata.astype(tsdata.dtype).lal()
    # 'TAPER_NONE' maps to None and is deliberately a no-op
    if taper_map[tapermethod] is not None:
        taper_func(ts_lal.data, taper_map[tapermethod])
    if return_lal:
        return ts_lal
    return TimeSeries(ts_lal.data.data[:], delta_t=ts_lal.deltaT,
                      epoch=ts_lal.epoch)
@schemed("pycbc.waveform.utils_")
def apply_fseries_time_shift(htilde, dt, kmin=0, copy=True):
    """Shifts a frequency domain waveform in time. The waveform is assumed to
    be sampled at equal frequency intervals.
    """
    # Stub only: the @schemed decorator dispatches to the scheme-specific
    # implementation provided by the pycbc.waveform.utils_* backend modules.
def apply_fd_time_shift(htilde, shifttime, kmin=0, fseries=None, copy=True):
    """Shifts a frequency domain waveform in time. The shift applied is
    shifttime - htilde.epoch.

    Parameters
    ----------
    htilde : FrequencySeries
        The waveform frequency series.
    shifttime : float
        The time to shift the frequency series to.
    kmin : {0, int}
        The starting index of htilde to apply the time shift. Default is 0.
    fseries : {None, numpy array}
        The frequencies of each element in htilde. This is only needed if
        htilde is not sampled at equal frequency steps.
    copy : {True, bool}
        Make a copy of htilde before applying the time shift. If False, the
        time shift will be applied to htilde's data.

    Returns
    -------
    FrequencySeries
        A frequency series with the waveform shifted to the new time. If copy
        is True, will be a new frequency series; if copy is False, will be
        the same as htilde.
    """
    dt = float(shifttime - htilde.epoch)
    if dt == 0.:
        # no shift to apply, just copy if desired
        if copy:
            htilde = 1. * htilde
    elif isinstance(htilde, FrequencySeries):
        # FrequencySeries means equally sampled in frequency, use faster shifting
        htilde = apply_fseries_time_shift(htilde, dt, kmin=kmin, copy=copy)
    else:
        # unequal frequency sampling: apply exp(-2*pi*i*f*dt) term by term
        if fseries is None:
            fseries = htilde.sample_frequencies.numpy()
        shift = Array(numpy.exp(-2j*numpy.pi*dt*fseries),
                      dtype=complex_same_precision_as(htilde))
        if copy:
            htilde = 1. * htilde
        htilde *= shift
    return htilde
def td_taper(out, start, end, beta=8, side='left'):
    """Taper a ``TimeSeries`` with half of a Kaiser window.

    Parameters
    ----------
    out : TimeSeries
        The time series to taper; a tapered copy is returned.
    start : float
        The time (in s) at which the taper window begins.
    end : float
        The time (in s) at which the taper window ends.
    beta : int, optional
        The beta parameter of the Kaiser window (see ``scipy.signal.kaiser``).
        Default is 8.
    side : {'left', 'right'}
        'left' ramps the data up from zero between ``start`` and ``end``,
        zeroing everything before ``start``; 'right' ramps it down, zeroing
        everything after ``end``. Default is 'left'.

    Returns
    -------
    TimeSeries
        The tapered time series.
    """
    tapered = out.copy()
    winlen = 2 * int((end - start) / tapered.delta_t)
    kaiser_win = Array(signal.get_window(('kaiser', beta), winlen))
    half = winlen // 2
    xmin = int((start - tapered.start_time) / tapered.delta_t)
    xmax = xmin + half
    if side == 'left':
        # rising half of the window, then zero everything before it
        tapered[xmin:xmax] *= kaiser_win[:half]
        if xmin > 0:
            tapered[:xmin].clear()
    elif side == 'right':
        # falling half of the window, then zero everything after it
        tapered[xmin:xmax] *= kaiser_win[half:]
        if xmax < len(tapered):
            tapered[xmax:].clear()
    else:
        raise ValueError("unrecognized side argument {}".format(side))
    return tapered
def fd_taper(out, start, end, beta=8, side='left'):
    """Taper a ``FrequencySeries`` with half of a Kaiser window.

    Parameters
    ----------
    out : FrequencySeries
        The frequency series to taper; a tapered copy is returned.
    start : float
        The frequency (in Hz) at which the taper window begins.
    end : float
        The frequency (in Hz) at which the taper window ends.
    beta : int, optional
        The beta parameter of the Kaiser window (see ``scipy.signal.kaiser``).
        Default is 8.
    side : {'left', 'right'}
        'left' ramps the data up from zero between ``start`` and ``end``,
        zeroing everything below ``start``; 'right' ramps it down, zeroing
        everything above ``end``. Default is 'left'.

    Returns
    -------
    FrequencySeries
        The tapered frequency series.
    """
    tapered = out.copy()
    winlen = 2 * int((end - start) / tapered.delta_f)
    kaiser_win = Array(signal.get_window(('kaiser', beta), winlen))
    half = winlen // 2
    kmin = int(start / tapered.delta_f)
    kmax = kmin + half
    if side == 'left':
        # rising half of the window, then zero everything below it
        tapered[kmin:kmax] *= kaiser_win[:half]
        tapered[:kmin] *= 0.
    elif side == 'right':
        # falling half of the window, then zero everything above it
        tapered[kmin:kmax] *= kaiser_win[half:]
        tapered[kmax:] *= 0.
    else:
        raise ValueError("unrecognized side argument {}".format(side))
    return tapered
def fd_to_td(htilde, delta_t=None, left_window=None, right_window=None,
             left_beta=8, right_beta=8):
    """Convert a frequency-domain waveform to the time domain.

    A window can optionally be applied with ``fd_taper`` to the left and/or
    right side of the waveform before the inverse transform.

    Parameters
    ----------
    htilde : FrequencySeries
        The waveform to convert.
    delta_t : float, optional
        Make the returned time series have the given ``delta_t``.
    left_window : tuple of float, optional
        (start, end) frequencies of the taper applied on the left side.
        If None, no left taper is applied.
    right_window : tuple of float, optional
        (start, end) frequencies of the taper applied on the right side.
        If None, no right taper is applied.
    left_beta : int, optional
        Kaiser beta for the left taper (see ``fd_taper``). Default is 8.
    right_beta : int, optional
        Kaiser beta for the right taper. Default is 8.

    Returns
    -------
    TimeSeries
        The time-series representation of ``htilde``.
    """
    # apply the left taper first, then the right, matching fd_taper's sides
    for window, side, beta in ((left_window, 'left', left_beta),
                               (right_window, 'right', right_beta)):
        if window is not None:
            lo, hi = window
            htilde = fd_taper(htilde, lo, hi, side=side, beta=beta)
    return htilde.to_timeseries(delta_t=delta_t)
| 19,760
| 35.867537
| 127
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/multiband.py
|
""" Tools and functions to calculate interpolate waveforms using multi-banding
"""
import numpy
from pycbc.types import TimeSeries, zeros
def multiband_fd_waveform(bands=None, lengths=None, overlap=0, **p):
    """ Generate a fourier domain waveform using multibanding

    Speed up generation of a fourier domain waveform using multibanding. This
    allows for multi-rate sampling of the frequency space. Each band is
    smoothed and stitched together to produce the final waveform. The base
    approximant must support 'f_ref' and 'f_final'. The other parameters
    must be chosen carefully by the user.

    Parameters
    ----------
    bands: list or str
        The frequencies to split the waveform by. These should be chosen
        so that the corresponding length include all the waveform's frequencies
        within this band.
    lengths: list or str
        The corresponding length for each frequency band. This sets the
        resolution of the subband and should be chosen carefully so that it is
        sufficiently long to include all of the bands frequency content.
    overlap: float
        The frequency width to apply tapering between bands.
    p: dict
        The remaining keyword arguments passed to the base approximant
        waveform generation.

    Returns
    -------
    hp: pycbc.types.FrequencySeries
        Plus polarization
    hc: pycbc.types.FrequencySeries
        Cross polarization
    """
    from pycbc.waveform import get_fd_waveform

    # allow space-separated strings (e.g. from configuration files)
    if isinstance(bands, str):
        bands = [float(s) for s in bands.split(' ')]
    if isinstance(lengths, str):
        lengths = [float(s) for s in lengths.split(' ')]

    p['approximant'] = p['base_approximant']
    df = p['delta_f']
    fmax = p['f_final']
    flow = p['f_lower']

    # band edges and the frequency resolution of each sub-band
    bands = [flow] + bands + [fmax]
    dfs = [df] + [1.0 / l for l in lengths]
    dt = 1.0 / (2.0 * fmax)
    tlen = int(1.0 / dt / df)
    # BUGFIX: use floor division; "tlen / 2 + 1" is a float under Python 3
    # and a float length breaks resize() below.
    flen = tlen // 2 + 1

    wf_plus = TimeSeries(zeros(tlen, dtype=numpy.float32),
                         copy=False, delta_t=dt, epoch=-1.0/df)
    wf_cross = TimeSeries(zeros(tlen, dtype=numpy.float32),
                          copy=False, delta_t=dt, epoch=-1.0/df)

    # Iterate over the sub-bands
    for i in range(len(lengths)+1):
        # taper only at interior band boundaries, not at flow or fmax
        taper_start = taper_end = False
        if i != 0:
            taper_start = True
        if i != len(lengths):
            taper_end = True

        # Generate waveform for sub-band of full waveform
        start = bands[i]
        stop = bands[i+1]
        p2 = p.copy()
        p2['delta_f'] = dfs[i]
        p2['f_lower'] = start
        p2['f_final'] = stop

        # widen the band by half the overlap on each tapered edge so
        # adjacent bands cross-fade over the same region
        if taper_start:
            p2['f_lower'] -= overlap / 2.0
        if taper_end:
            p2['f_final'] += overlap / 2.0

        tlen = int(1.0 / dt / dfs[i])
        # BUGFIX: floor division here as well (see above)
        flen = tlen // 2 + 1
        hp, hc = get_fd_waveform(**p2)

        # apply window function to smooth over transition regions
        kmin = int(p2['f_lower'] / dfs[i])
        kmax = int(p2['f_final'] / dfs[i])
        taper = numpy.hanning(int(overlap * 2 / dfs[i]))
        for wf, h in zip([wf_plus, wf_cross], [hp, hc]):
            h = h.astype(numpy.complex64)
            # rising half of the hann window at the band start,
            # falling half at the band end
            if taper_start:
                h[kmin:kmin + len(taper) // 2] *= taper[:len(taper)//2]
            if taper_end:
                l, r = kmax - (len(taper) - len(taper) // 2), kmax
                h[l:r] *= taper[len(taper)//2:]

            # add frequency band to total and use fft to interpolate
            h.resize(flen)
            h = h.to_timeseries()
            wf[len(wf)-len(h):] += h

    return (wf_plus.to_frequencyseries().astype(hp.dtype),
            wf_cross.to_frequencyseries().astype(hp.dtype))
| 3,724
| 31.964602
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/sinegauss.py
|
""" Generation of sine-Gaussian bursty type things
"""
import pycbc.types
import numpy
def fd_sine_gaussian(amp, quality, central_frequency, fmin, fmax, delta_f):
    """ Generate a Fourier domain sine-Gaussian

    Parameters
    ----------
    amp: float
        Amplitude of the sine-Gaussian
    quality: float
        The quality factor
    central_frequency: float
        The central frequency of the sine-Gaussian
    fmin: float
        The minimum frequency to generate the sine-Gaussian. This determines
        the length of the output vector.
    fmax: float
        The maximum frequency to generate the sine-Gaussian
    delta_f: float
        The size of the frequency step

    Returns
    -------
    sg: pycbc.types.FrequencySeries
        A Fourier domain sine-Gaussian
    """
    kmin = int(round(fmin / delta_f))
    kmax = int(round(fmax / delta_f))
    freqs = numpy.arange(kmin, kmax) * delta_f
    # Gaussian decay time implied by the quality factor
    tau = quality / 2 / numpy.pi / central_frequency
    peak = amp * numpy.pi ** 0.5 / 2 * tau
    # Gaussian envelope centered on the central frequency
    data = peak * numpy.exp(-(numpy.pi * tau * (freqs - central_frequency))**2.0)
    # extra multiplicative term from the original formula (presumably the
    # folded negative-frequency contribution)
    data = data * (1 + numpy.exp(-quality ** 2.0 * freqs / central_frequency))
    out = numpy.zeros(kmax, dtype=numpy.complex128)
    out[kmin:kmax] = data
    return pycbc.types.FrequencySeries(out, delta_f=delta_f)
| 1,285
| 30.365854
| 76
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/spa_tmplt.py
|
# Adapted from code in LALSimInspiralTaylorF2.c
#
# Copyright (C) 2007 Jolien Creighton, B.S. Sathyaprakash, Thomas Cokelaer
# Copyright (C) 2012 Leo Singer, Alex Nitz
#
# This program is free software you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with with program see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
"""This module contains functions for generating common SPA template precalculated
vectors.
"""
from math import sqrt, log
import warnings
import numpy, lal, pycbc.pnutils
from pycbc.scheme import schemed
from pycbc.types import FrequencySeries, Array, complex64, float32, zeros
from pycbc.waveform.utils import ceilpow2
lalsimulation = pycbc.libutils.import_optional('lalsimulation')
def findchirp_chirptime(m1, m2, fLower, porder):
    """Return the chirp time (s) of a compact binary using the FINDCHIRP
    post-Newtonian expansion.

    Parameters
    ----------
    m1 : float
        Mass of the first component (solar masses).
    m2 : float
        Mass of the second component (solar masses).
    fLower : float
        Low-frequency cutoff (Hz) at which the chirp time is evaluated.
    porder : int
        Twice the PN phase order (0-7); -1 selects the highest implemented
        order (7).

    Returns
    -------
    float
        The chirp time in seconds.
    """
    # variables used to compute chirp time
    m1 = float(m1)
    m2 = float(m2)
    m = m1 + m2
    eta = m1 * m2 / m / m
    c0T = c2T = c3T = c4T = c5T = c6T = c6LogT = c7T = 0.
    # All implemented option
    if porder == -1:
        porder = 7
    if porder >= 7:
        c7T = lal.PI * (14809.0 * eta * eta / 378.0 - 75703.0 * eta / 756.0 - 15419335.0 / 127008.0)
    if porder >= 6:
        # BUGFIX: the eta**3 term (25565/1296) previously appeared twice;
        # the FINDCHIRP 3PN chirp-time coefficient (cf. lalinspiral
        # FindChirpChirpTime / Allen et al. 2012) contains it only once.
        c6T = lal.GAMMA * 6848.0 / 105.0 - 10052469856691.0 / 23471078400.0 +\
            lal.PI * lal.PI * 128.0 / 3.0 + \
            eta * (3147553127.0 / 3048192.0 - lal.PI * lal.PI * 451.0 / 12.0) -\
            eta * eta * 15211.0 / 1728.0 + \
            eta * eta * eta * 25565.0 / 1296.0 + numpy.log(4.0) * 6848.0 / 105.0
        c6LogT = 6848.0 / 105.0
    if porder >= 5:
        c5T = 13.0 * lal.PI * eta / 3.0 - 7729.0 * lal.PI / 252.0
    if porder >= 4:
        c4T = 3058673.0 / 508032.0 + eta * (5429.0 / 504.0 + eta * 617.0 / 72.0)
    # 1.5PN and 1PN terms (and the Newtonian prefactor) are always included
    c3T = -32.0 * lal.PI / 5.0
    c2T = 743.0 / 252.0 + eta * 11.0 / 3.0
    c0T = 5.0 * m * lal.MTSUN_SI / (256.0 * eta)
    # This is the PN parameter v evaluated at the lower freq. cutoff
    xT = pow (lal.PI * m * lal.MTSUN_SI * fLower, 1.0 / 3.0)
    x2T = xT * xT
    x3T = xT * x2T
    x4T = x2T * x2T
    x5T = x2T * x3T
    x6T = x3T * x3T
    x7T = x3T * x4T
    x8T = x4T * x4T
    # Computes the chirp time as tC = t(v_low);
    # tC = t(v_low) - t(v_upper) would be more
    # correct, but the difference is negligible.
    # This formula works for any PN order, because
    # higher order coeffs will be set to zero.
    return c0T * (1 + c2T * x2T + c3T * x3T + c4T * x4T + c5T * x5T +
                  (c6T + c6LogT * numpy.log(xT)) * x6T + c7T * x7T) / x8T
def spa_length_in_time(**kwds):
    """
    Returns the length in time of the template,
    based on the masses, PN order, and low-frequency
    cut-off.

    Required keywords: ``mass1``, ``mass2`` (solar masses),
    ``f_lower`` (Hz), and ``phase_order``.
    """
    m1 = kwds['mass1']
    m2 = kwds['mass2']
    flow = kwds['f_lower']
    porder = int(kwds['phase_order'])
    # Uses the pure-python FindChirpChirpTime port defined above; replacing
    # it with PN coefficients from lalsimulation would be nicer eventually.
    return findchirp_chirptime(m1, m2, flow, porder)
def spa_amplitude_factor(**kwds):
    """Return the leading-order SPA amplitude prefactor for the given
    component masses (``mass1``, ``mass2``); the 1e6 * PC_SI factor places
    the source at 1 Mpc.
    """
    m1 = kwds['mass1']
    m2 = kwds['mass2']
    _, eta = pycbc.pnutils.mass1_mass2_to_mchirp_eta(m1, m2)
    # Newtonian flux and energy-derivative coefficients — TODO confirm
    # against the corresponding lalsimulation expressions
    FTaN = 32. * eta * eta / 5.
    dETaN = 2. * -eta / 2.
    M = m1 + m2
    m_sec = M * lal.MTSUN_SI
    piM = lal.PI * m_sec
    amp0 = 4. * m1 * m2 / (1e6 * lal.PC_SI) * lal.MRSUN_SI * lal.MTSUN_SI * sqrt(lal.PI / 12.)
    # overall f**(-7/6) scaling is applied elsewhere; piM**(-7/6) sets its scale
    fac = numpy.sqrt(-dETaN / FTaN) * amp0 * (piM ** (-7./6.))
    return -fac
# module-level cache for the precondition vector built below
_prec = None
def spa_tmplt_precondition(length, delta_f, kmin=0):
    """Return the amplitude portion of the TaylorF2 approximant, used to precondition
    the strain data. The result is cached, and so should not be modified, only read.
    """
    global _prec
    # rebuild the cache if the frequency resolution changed or it is too short
    if _prec is None or _prec.delta_f != delta_f or len(_prec) < length:
        # f**(-7/6) amplitude evolution; the f=0 sample is dropped to avoid
        # dividing by zero
        v = numpy.arange(0, (kmin + length*2), 1.) * delta_f
        v = numpy.power(v[1:len(v)], -7./6.)
        _prec = FrequencySeries(v, delta_f=delta_f, dtype=float32)
    return _prec[kmin:kmin + length]
def spa_tmplt_norm(psd, length, delta_f, f_lower):
    """Return the cumulative normalization vector of an SPA template:
    element k holds 4 * delta_f * sum(|A(f)|^2 / S_n(f)) up to bin k,
    starting from ``f_lower``.
    """
    amp = spa_tmplt_precondition(length, delta_f)
    k_min = int(f_lower / delta_f)
    # overlap integrand: |A(f)|^2 / S_n(f)
    sigma = (amp[k_min:length].numpy() ** 2. / psd[k_min:length].numpy())
    norm_vec = numpy.zeros(length)
    norm_vec[k_min:length] = sigma.cumsum() * 4. * delta_f
    return norm_vec
def spa_tmplt_end(**kwds):
    """Return the Schwarzschild ISCO frequency (Hz) of the total mass
    ``mass1 + mass2`` — the frequency at which the SPA template ends."""
    return pycbc.pnutils.f_SchwarzISCO(kwds['mass1'] + kwds['mass2'])
def spa_distance(psd, mass1, mass2, lower_frequency_cutoff, snr=8):
    """ Return the distance at a given snr (default=8) of the SPA TaylorF2
    template.
    """
    # frequency bin where the template ends (Schwarzschild ISCO)
    kend = int(spa_tmplt_end(mass1=mass1, mass2=mass2) / psd.delta_f)
    norm1 = spa_tmplt_norm(psd, len(psd), psd.delta_f, lower_frequency_cutoff)
    norm2 = spa_amplitude_factor(mass1=mass1, mass2=mass2) ** 2.0
    # clamp to the PSD length if the template ends above its Nyquist bin
    if kend >= len(psd):
        kend = len(psd) - 2
    return sqrt(norm1[kend] * norm2) / snr
@schemed("pycbc.waveform.spa_tmplt_")
def spa_tmplt_engine(htilde, kmin, phase_order, delta_f, piM, pfaN,
                     pfa2, pfa3, pfa4, pfa5, pfl5,
                     pfa6, pfl6, pfa7, amp_factor):
    """ Calculate the spa tmplt phase
    """
    # Stub: @schemed dispatches to the scheme-specific (e.g. CPU) kernel in
    # pycbc.waveform.spa_tmplt_*, which fills htilde in place.
    err_msg = "This function is a stub that should be overridden using the "
    err_msg += "scheme. You shouldn't be seeing this error!"
    raise ValueError(err_msg)
def spa_tmplt(**kwds):
    """ Generate a minimal TaylorF2 approximant with optimizations for the sin/cos

    Required keywords: ``distance``, ``mass1``, ``mass2``, ``spin1z``,
    ``spin2z``, ``phase_order``, ``spin_order``, and either
    (``f_lower``, ``delta_f``) for a regularly-sampled output or
    ``sample_points`` for evaluation at arbitrary frequencies.
    Optional: ``f_final`` (or deprecated ``f_upper``) and ``out`` (a
    pre-allocated complex64 Array to fill).

    Returns a FrequencySeries (regular sampling) or a numpy array
    (``sample_points``).
    """
    distance = kwds['distance']
    mass1 = kwds['mass1']
    mass2 = kwds['mass2']
    s1z = kwds['spin1z']
    s2z = kwds['spin2z']
    phase_order = int(kwds['phase_order'])
    #amplitude_order = int(kwds['amplitude_order'])
    spin_order = int(kwds['spin_order'])
    if 'out' in kwds:
        out = kwds['out']
    else:
        out = None
    amp_factor = spa_amplitude_factor(mass1=mass1, mass2=mass2) / distance
    # pass the requested PN orders to lalsimulation (-1 means default)
    lal_pars = lal.CreateDict()
    if phase_order != -1:
        lalsimulation.SimInspiralWaveformParamsInsertPNPhaseOrder(
            lal_pars, phase_order)
    if spin_order != -1:
        lalsimulation.SimInspiralWaveformParamsInsertPNSpinOrder(
            lal_pars, spin_order)
    # Calculate the PN terms
    phasing = lalsimulation.SimInspiralTaylorF2AlignedPhasing(
        float(mass1), float(mass2),
        float(s1z), float(s2z),
        lal_pars)
    # normalize all phasing coefficients by the Newtonian term pfaN
    pfaN = phasing.v[0]
    pfa2 = phasing.v[2] / pfaN
    pfa3 = phasing.v[3] / pfaN
    pfa4 = phasing.v[4] / pfaN
    pfa5 = phasing.v[5] / pfaN
    pfa6 = (phasing.v[6] - phasing.vlogv[6] * log(4)) / pfaN
    pfa7 = phasing.v[7] / pfaN
    pfl5 = phasing.vlogv[5] / pfaN
    pfl6 = phasing.vlogv[6] / pfaN
    piM = lal.PI * (mass1 + mass2) * lal.MTSUN_SI
    if 'sample_points' not in kwds:
        f_lower = kwds['f_lower']
        delta_f = kwds['delta_f']
        kmin = int(f_lower / float(delta_f))
        # Get max frequency one way or another
        # f_final is assigned default value 0 in parameters.py
        if 'f_final' in kwds and kwds['f_final'] > 0.:
            fstop = kwds['f_final']
        elif 'f_upper' in kwds:
            fstop = kwds['f_upper']
            warnings.warn('f_upper is deprecated in favour of f_final!',
                          DeprecationWarning)
        else:
            # Schwarzschild ISCO frequency
            vISCO = 1. / sqrt(6.)
            fstop = vISCO * vISCO * vISCO / piM
        if fstop <= f_lower:
            raise ValueError("cannot generate waveform! f_lower >= f_final"
                             f" ({f_lower}, {fstop})")
        kmax = int(fstop / delta_f)
        # pad the output length up to a power-of-2 frequency
        f_max = ceilpow2(fstop)
        n = int(f_max / delta_f) + 1
        if not out:
            htilde = FrequencySeries(zeros(n, dtype=numpy.complex64), delta_f=delta_f, copy=False)
        else:
            # validate the caller-supplied output buffer and clamp kmax to it
            if type(out) is not Array:
                raise TypeError("Output must be an instance of Array")
            if len(out) < kmax:
                kmax = len(out)
            if out.dtype != complex64:
                raise TypeError("Output array is the wrong dtype")
            htilde = FrequencySeries(out, delta_f=delta_f, copy=False)
        # scheme-dispatched kernel fills htilde[kmin:kmax] in place
        spa_tmplt_engine(htilde[kmin:kmax], kmin, phase_order,
                         delta_f, piM, pfaN,
                         pfa2, pfa3, pfa4, pfa5, pfl5,
                         pfa6, pfl6, pfa7, amp_factor)
    else:
        # evaluate the template at an arbitrary set of frequency samples
        from .spa_tmplt_cpu import spa_tmplt_inline_sequence
        htilde = numpy.empty(len(kwds['sample_points']), dtype=numpy.complex64)
        spa_tmplt_inline_sequence(
            piM, pfaN, pfa2, pfa3, pfa4, pfa5, pfl5, pfa6, pfl6, pfa7,
            amp_factor, kwds['sample_points'], htilde)
    return htilde
| 9,385
| 34.153558
| 100
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/premerger.py
|
""" Waveform approximants for the pre-merger detection of gravitational waves
"""
import logging
def premerger_taylorf2(**p):
    """Generate a TaylorF2 waveform truncated at ``f_final`` and cyclically
    time-shifted forward by the remaining time-to-merger from that
    frequency, for pre-merger detection studies.
    """
    from pycbc.waveform import get_fd_waveform
    from pycbc.waveform.spa_tmplt import spa_length_in_time
    from pycbc.waveform.utils import fd_taper

    p.pop('approximant')
    hp, hc = get_fd_waveform(approximant="TaylorF2", **p)

    # chirp time from the truncation frequency (f_final) to merger
    removed = spa_length_in_time(mass1=p['mass1'],
                                 mass2=p['mass2'],
                                 f_lower=p['f_final'],
                                 phase_order=-1)

    shifted = []
    for pol in (hp, hc):
        pol = pol.cyclic_time_shift(removed)
        pol.start_time += removed
        shifted.append(pol)
    hp, hc = shifted

    logging.info("PreTaylorF2, m1=%.1f, m2=%.1f, fmax=%.1f, timeshift=%.1f",
                 p['mass1'], p['mass2'], p['f_final'], removed)

    # zero out everything below the low-frequency cutoff
    kmin = int(p['f_lower'] / p['delta_f'])
    hp[0:kmin] = 0
    hc[0:kmin] = 0

    # optionally roll the upper edge down to zero over `final_taper` Hz
    if 'final_taper' in p:
        width = p['final_taper']
        hp = fd_taper(hp, p['f_final'] - width, p['f_final'], side='right')
        hc = fd_taper(hc, p['f_final'] - width, p['f_final'], side='right')

    hp.time_offset = removed
    hc.time_offset = removed
    return hp, hc
| 1,282
| 30.292683
| 80
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/parameters.py
|
# Copyright (C) 2016 Collin Capano
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Classes to define common parameters used for waveform generation.
"""
from collections import OrderedDict
try:
from collections import UserList
except ImportError:
from UserList import UserList
#
# =============================================================================
#
# Base definitions
#
# =============================================================================
#
class Parameter(str):
    """A string subclass that carries metadata about a waveform parameter.

    Instances compare and hash exactly like their ``name`` string while
    also exposing the parameter's ``dtype``, ``default`` value, plotting
    ``label``, and ``description``.
    """
    def __new__(cls, name, dtype=None, default=None, label=None,
                description="No description."):
        self = super().__new__(cls, name)
        self.name = name
        self.dtype = dtype
        self.default = default
        self.label = label
        self.description = description
        return self

    def docstr(self, prefix='', include_label=True):
        """Return a one-entry docstring summary of the parameter.

        Format is:
        <prefix>``name`` : {``default``, ``dtype``}
        <prefix> ``description`` Label: ``label``.
        """
        # strip the "<type '...'>" / "<class '...'>" wrappers from the
        # dtype's repr so only the bare type name remains
        cleaned = str(self.dtype)
        for wrapper in ("<type '", "<class '"):
            cleaned = cleaned.replace(wrapper, '')
        cleaned = cleaned.replace("'>", '')
        summary = "%s%s : {%s, %s}\n%s %s" % (
            prefix, self.name, str(self.default), cleaned, prefix,
            self.description)
        if include_label:
            summary += " Label: %s" % (self.label)
        return summary
class ParameterList(UserList):
"""A list of parameters. Each element in the list is expected to be a
Parameter instance.
"""
@property
def names(self):
"""Returns a list of the names of each parameter."""
return [x.name for x in self]
@property
def aslist(self):
"""Cast to basic list."""
return list(self)
@property
def asdict(self):
"""Returns a dictionary of the parameters keyed by the parameters."""
return dict([[x, x] for x in self])
def defaults(self):
"""Returns a list of the name and default value of each parameter,
as tuples.
"""
return [(x, x.default) for x in self]
def default_dict(self):
"""Returns a dictionary of the name and default value of each
parameter.
"""
return OrderedDict(self.defaults())
@property
def nodefaults(self):
"""Returns a ParameterList of the parameters that have None for
defaults.
"""
return ParameterList([x for x in self if x.default is None])
@property
def dtypes(self):
"""Returns a list of the name and dtype of each parameter,
as tuples.
"""
return [(x, x.dtype) for x in self]
@property
def dtype_dict(self):
"""Returns a dictionary of the name and dtype of each parameter."""
return OrderedDict(self.dtypes)
@property
def descriptions(self):
"""Returns a list of the name and description of each parameter,
as tuples.
"""
return [(x, x.description) for x in self]
@property
def description_dict(self):
"""Return a dictionary of the name and description of each parameter.
"""
return OrderedDict(self.descriptions)
@property
def labels(self):
"""Returns a list of each parameter and its label, as tuples."""
return [(x, x.label) for x in self]
@property
def label_dict(self):
"""Return a dictionary of the name and label of each parameter.
"""
return OrderedDict(self.labels)
def docstr(self, prefix='', include_label=True):
"""Returns the ``docstr`` of each parameter joined together."""
return '\n'.join([x.docstr(prefix, include_label) for x in self])
#
# =============================================================================
#
# Parameter definitions
#
# =============================================================================
#
#
# CBC intrinsic parameters
#
# component (detector-frame) masses; no defaults, so they must be provided
mass1 = Parameter("mass1",
                dtype=float, default=None, label=r"$m_1~(\mathrm{M}_\odot)$",
                description="The mass of the first component object in the "
                            "binary (in solar masses).")
mass2 = Parameter("mass2",
                dtype=float, default=None, label=r"$m_2~(\mathrm{M}_\odot)$",
                description="The mass of the second component object in the "
                            "binary (in solar masses).")
# Cartesian dimensionless spin components; all default to 0 (non-spinning)
spin1x = Parameter("spin1x",
                dtype=float, default=0., label=r"$\chi_{1x}$",
                description="The x component of the first binary component's "
                            "dimensionless spin.")
spin1y = Parameter("spin1y",
                dtype=float, default=0., label=r"$\chi_{1y}$",
                description="The y component of the first binary component's "
                            "dimensionless spin.")
spin1z = Parameter("spin1z",
                dtype=float, default=0., label=r"$\chi_{1z}$",
                description="The z component of the first binary component's "
                            "dimensionless spin.")
spin2x = Parameter("spin2x",
                dtype=float, default=0., label=r"$\chi_{2x}$",
                description="The x component of the second binary component's "
                            "dimensionless spin.")
spin2y = Parameter("spin2y",
                dtype=float, default=0., label=r"$\chi_{2y}$",
                description="The y component of the second binary component's "
                            "dimensionless spin.")
spin2z = Parameter("spin2z",
                dtype=float, default=0., label=r"$\chi_{2z}$",
                description="The z component of the second binary component's "
                            "dimensionless spin.")
eccentricity = Parameter("eccentricity",
                dtype=float, default=0., label=r"$e$",
                description="Eccentricity.")
# derived parameters (these are not used for waveform generation)
# note: no defaults here — these are computed from mass1/mass2, not passed
# to the waveform generators
mchirp = Parameter("mchirp",
                dtype=float, label=r"$\mathcal{M}~(\mathrm{M}_\odot)$",
                description="The chirp mass of the binary (in solar masses).")
eta = Parameter("eta",
                dtype=float, label=r"$\eta$",
                description="The symmetric mass ratio of the binary.")
mtotal = Parameter("mtotal",
                dtype=float, label=r"$M~(\mathrm{M}_\odot)$",
                description="The total mass of the binary (in solar masses).")
q = Parameter("q",
                dtype=float, label=r"$q$",
                description="The mass ratio, m1/m2, where m1 >= m2.")
# source-frame masses (computed from detector-frame values and redshift;
# not passed to waveform generators)
srcmass1 = Parameter("srcmass1", dtype=float,
                     label=r"$m_1^{\rm{src}}~(\mathrm{M}_\odot)$",
                     description="The mass of the first component object in "
                                 "the source frame (in solar masses).")
# FIX: the Parameter name must match the attribute ("srcmass2"); it was
# "srcmass1", which made the two source-frame mass parameters identical
# as strings.
srcmass2 = Parameter("srcmass2", dtype=float,
                     label=r"$m_2^{\rm{src}}~(\mathrm{M}_\odot)$",
                     description="The mass of the second component object in "
                                 "the source frame (in solar masses).")
srcmchirp = Parameter("srcmchirp", dtype=float,
                      label=r"$\mathcal{M}^{\rm{src}}~(\mathrm{M}_\odot)$",
                      description="The chirp mass of the binary in the "
                                  "source frame (in solar masses).")
# FIX: was named "mtotal", which collided with the detector-frame total
# mass defined above; use the source-frame name.
srcmtotal = Parameter("srcmtotal", dtype=float,
                      label=r"$M^{\rm{src}}~(\mathrm{M}_\odot)$",
                      description="The total mass of the binary in the "
                                  "source frame (in solar masses).")
# masses sorted so that "primary" is always the heavier object
primary_mass = Parameter("primary_mass",
                dtype=float, label=r"$m_{1}$",
                description="Mass of the primary object (in solar masses).")
secondary_mass = Parameter("secondary_mass",
                dtype=float, label=r"$m_{2}$",
                description="Mass of the secondary object (in solar masses).")
# derived parameters for component spins
chi_eff = Parameter("chi_eff",
                dtype=float, label=r"$\chi_\mathrm{eff}$",
                description="Effective spin of the binary.")
chi_p = Parameter("chi_p",
                dtype=float, label=r"$\chi_p$",
                description="Effective precessing spin of the binary.")
# spin components of the primary/secondary (mass-sorted) objects
spin_px = Parameter("spin_px",
                dtype=float, label=r"$\chi_{1x}$",
                description="The x component of the dimensionless spin of the "
                            "primary object.")
spin_py = Parameter("spin_py",
                dtype=float, label=r"$\chi_{1y}$",
                description="The y component of the dimensionless spin of the "
                            "primary object.")
spin_pz = Parameter("spin_pz",
                dtype=float, label=r"$\chi_{1z}$",
                description="The z component of the dimensionless spin of the "
                            "primary object.")
spin_sx = Parameter("spin_sx",
                dtype=float, label=r"$\chi_{2x}$",
                description="The x component of the dimensionless spin of the "
                            "secondary object.")
spin_sy = Parameter("spin_sy",
                dtype=float, label=r"$\chi_{2y}$",
                description="The y component of the dimensionless spin of the "
                            "secondary object.")
spin_sz = Parameter("spin_sz",
                dtype=float, label=r"$\chi_{2z}$",
                description="The z component of the dimensionless spin of the "
                            "secondary object.")
# tidal parameters; default of None means "not set" rather than zero
lambda1 = Parameter("lambda1",
                dtype=float, default=None, label=r"$\Lambda_1$",
                description="The dimensionless tidal deformability parameter of object 1.")
lambda2 = Parameter("lambda2",
                dtype=float, default=None, label=r"$\Lambda_2$",
                description="The dimensionless tidal deformability parameter of object 2.")
dquad_mon1 = Parameter("dquad_mon1",
                dtype=float, default=None, label=r"$qm_1$",
                description="Quadrupole-monopole parameter / m_1^5 -1.")
dquad_mon2 = Parameter("dquad_mon2",
                dtype=float, default=None, label=r"$qm_2$",
                description="Quadrupole-monopole parameter / m_2^5 -1.")
# octupolar tidal deformabilities and f-mode frequencies; None means "not set"
lambda_octu1 = Parameter("lambda_octu1",
                dtype=float, default=None, label=r"$\Lambda_3^{(1)}$",
                description="The octupolar tidal deformability parameter of "
                            "object 1.")
lambda_octu2 = Parameter("lambda_octu2",
                dtype=float, default=None, label=r"$\Lambda_3^{(2)}$",
                description="The octupolar tidal deformability parameter of "
                            "object 2.")
quadfmode1 = Parameter("quadfmode1",
                dtype=float, default=None, label=r"$m_1 \omega_{02}^{(1)}$",
                description="The quadrupolar f-mode angular frequency of "
                            "object 1.")
# FIX: label read "$m_ \omega...$" — the "2" subscript was missing
# (compare quadfmode1's "$m_1 \omega...$").
quadfmode2 = Parameter("quadfmode2",
                dtype=float, default=None, label=r"$m_2 \omega_{02}^{(2)}$",
                description="The quadrupolar f-mode angular frequency of "
                            "object 2.")
octufmode1 = Parameter("octufmode1",
                dtype=float, default=None, label=r"$m_1 \omega_{03}^{(1)}$",
                description="The octupolar f-mode angular frequency of "
                            "object 1.")
# FIX: same missing "2" subscript as quadfmode2.
octufmode2 = Parameter("octufmode2",
                dtype=float, default=None, label=r"$m_2 \omega_{03}^{(2)}$",
                description="The octupolar f-mode angular frequency of "
                            "object 2.")
# derived parameters for component spin magnitude and angles
spin1_a = Parameter("spin1_a",
                    dtype=float, label=r"$a_{1}$",
                    description="The dimensionless spin magnitude "
                                r"$|\vec{s}/m_{1}^2|$.")
spin2_a = Parameter("spin2_a",
                    dtype=float, label=r"$a_{2}$",
                    description="The dimensionless spin magnitude "
                                r"$|\vec{s}/m_{2}^2|$.")
spin1_azimuthal = Parameter(
    "spin1_azimuthal",
    dtype=float, label=r"$\theta_1^\mathrm{azimuthal}$",
    description="The azimuthal spin angle for mass 1.")
spin2_azimuthal = Parameter(
    "spin2_azimuthal",
    dtype=float, label=r"$\theta_2^\mathrm{azimuthal}$",
    description="The azimuthal spin angle for mass 2.")
spin1_polar = Parameter("spin1_polar",
                        dtype=float, label=r"$\theta_1^\mathrm{polar}$",
                        description="The polar spin angle for mass 1.")
spin2_polar = Parameter("spin2_polar",
                        dtype=float, label=r"$\theta_2^\mathrm{polar}$",
                        description="The polar spin angle for mass 2.")
#
# Parameters needed for CBC waveform generation
#
f_lower = Parameter("f_lower",
                dtype=float, default=None, label=r"$f_0$ (Hz)",
                description="The starting frequency of the waveform (in Hz).")
f_final = Parameter("f_final",
                dtype=float, default=0, label=r"$f_{\mathrm{final}}$ (Hz)",
                description="The ending frequency of the waveform. The "
                            "default (0) indicates that the choice is made by "
                            "the respective approximant.")
f_final_func = Parameter("f_final_func",
                dtype=str, default="", label=None,
                description="Use the given frequency function to compute f_final "
                            "based on the parameters of the waveform.")
f_ref = Parameter("f_ref",
                dtype=float, default=0, label=r"$f_{\mathrm{ref}}$ (Hz)",
                description="The reference frequency.")
delta_f = Parameter("delta_f",
                dtype=float, default=None, label=r"$\Delta f$ (Hz)",
                description="The frequency step used to generate the waveform "
                            "(in Hz).")
delta_t = Parameter("delta_t",
                dtype=float, default=None, label=r"$\Delta t$ (s)",
                description="The time step used to generate the waveform "
                            "(in s).")
# note: dtype here is the string "Array", not a type object
sample_points = Parameter("sample_points",
                dtype="Array", default=None, label=None,
                description="An array of the frequencies (in Hz) at which to "
                            "generate the waveform.")
approximant = Parameter("approximant",
                dtype=str, default=None, label=None,
                description="A string that indicates the chosen approximant.")
# post-Newtonian order switches; -1 means "use all implemented orders"
phase_order = Parameter("phase_order",
                dtype=int, default=-1, label=None,
                description="The pN order of the orbital phase. The default "
                            "of -1 indicates that all implemented orders are "
                            "used.")
spin_order = Parameter("spin_order",
                dtype=int, default=-1, label=None,
                description="The pN order of the spin corrections. The "
                            "default of -1 indicates that all implemented "
                            "orders are used.")
tidal_order = Parameter("tidal_order",
                dtype=int, default=-1, label=None,
                description="The pN order of the tidal corrections. The "
                            "default of -1 indicates that all implemented "
                            "orders are used.")
amplitude_order = Parameter("amplitude_order",
                dtype=int, default=-1, label=None,
                description="The pN order of the amplitude. The default of -1 "
                            "indicates that all implemented orders are used.")
eccentricity_order = Parameter("eccentricity_order",
                dtype=int, default=-1, label=None,
                description="The pN order of the eccentricity corrections."
                        "The default of -1 indicates that all implemented orders are used.")
numrel_data = Parameter("numrel_data",
                dtype=str, default="", label=None,
                description="Sets the NR flags; only needed for NR waveforms.")
remnant_mass = Parameter("remnant_mass",
                         dtype=float, label=r"$m_{\mathrm{rem}}$",
                         description="Remnant mass of NS-BH merger. See "
                                     "conversions.remnant_mass_"
                                     "from_mass1_mass2_spin1x_spin1y_spin1z_eos")
#
# General location parameters
#
distance = Parameter("distance",
                dtype=float, default=1., label=r"$d_L$ (Mpc)",
                description="Luminosity distance to the binary (in Mpc).")
chirp_distance = Parameter("chirp_distance",
                dtype=float, default=1., label=r"$d_c$ (Mpc)",
                description="Chirp distance to the binary (in Mpc).")
coa_phase = Parameter("coa_phase",
                dtype=float, default=0., label=r"$\phi_c$",
                description="Coalesence phase of the binary (in rad).")
inclination = Parameter("inclination",
                dtype=float, default=0., label=r"$\iota$",
                description="Inclination (rad), defined as the angle between "
                            "the orbital angular momentum L and the "
                            "line-of-sight at the reference frequency.")
thetajn = Parameter("thetajn",
                dtype=float, default=0., label=r"$\theta_{JN}$",
                description="The angle between the total angular momentum "
                            "J and the line-of-sight.")
long_asc_nodes = Parameter("long_asc_nodes",
                dtype=float, default=0., label=r"$\Omega$",
                description="Longitude of ascending nodes axis (rad).")
# NOTE(review): this label r"$\delta$" duplicates the label used for dec
# below — confirm whether a distinct symbol was intended.
mean_per_ano = Parameter("mean_per_ano",
                dtype=float, default=0., label=r"$\delta$",
                description="Mean anomaly of the periastron (rad).")
tc = Parameter("tc",
                dtype=float, default=None, label=r"$t_c$ (s)",
                description="Coalescence time (s).")
delta_tc = Parameter("delta_tc", dtype=float,
                     label=r"$\Delta t_c~(\rm{s})$",
                     description="Coalesence time offset.")
ra = Parameter("ra",
                dtype=float, default=0., label=r"$\alpha$",
                description="Right ascension (rad).")
dec = Parameter("dec",
                dtype=float, default=0., label=r"$\delta$",
                description="Declination (rad).")
polarization = Parameter("polarization",
                dtype=float, default=0., label=r"$\psi$",
                description="Polarization (rad).")
redshift = Parameter("redshift",
                dtype=float, default=None, label=r"$z$",
                description="Redshift.")
comoving_volume = Parameter("comoving_volume", dtype=float,
                            label=r"$V_C~(\rm{Mpc}^3)$",
                            description="Comoving volume (in cubic Mpc).")
# sky location in solar-system-barycenter (SSB) ecliptic coordinates
eclipticlatitude = Parameter("eclipticlatitude",
                dtype=float, default=0., label=r"$\beta$",
                description="eclipticlatitude wrt SSB coords.")
eclipticlongitude = Parameter("eclipticlongitude",
                dtype=float, default=0., label=r"$\lambda$",
                description="eclipticlongitude wrt SSB coords.")
#
# Calibration parameters
#
# note: these intentionally carry a "calib_" prefix in their names so the
# generators can recognize them as calibration (not waveform) parameters
delta_fs = Parameter("calib_delta_fs",
                     dtype=float,
                     description="Change in optical spring freq (Hz).")
delta_fc = Parameter("calib_delta_fc",
                     dtype=float,
                     description="Change in cavity pole freq (Hz).")
delta_qinv = Parameter("calib_delta_qinv",
                       dtype=float,
                       description="Change in inverse quality factor.")
kappa_c = Parameter("calib_kappa_c",
                    dtype=float)
kappa_tst_re = Parameter("calib_kappa_tst_re",
                         dtype=float)
kappa_tst_im = Parameter("calib_kappa_tst_im",
                         dtype=float)
kappa_pu_re = Parameter("calib_kappa_pu_re",
                        dtype=float)
kappa_pu_im = Parameter("calib_kappa_pu_im",
                        dtype=float)
#
# Non mandatory flags with default values
#
frame_axis = Parameter("frame_axis",
                dtype=int, default=0,
                description="Allow to choose among orbital_l, view and total_j")
modes_choice = Parameter("modes_choice",
                dtype=int, default=0,
                description="Allow to turn on among orbital_l, view and total_j")
side_bands = Parameter("side_bands",
                dtype=int, default=0,
                description="Flag for generating sidebands")
mode_array = Parameter("mode_array",
                dtype=list, default=None,
                description="Choose which (l,m) modes to include when "
                            "generating a waveform. "
                            "Only if approximant supports this feature."
                            "By default pass None and let lalsimulation "
                            "use it's default behaviour."
                            "Example: mode_array = [ [2,2], [2,-2] ]")
#
# Parametrized testing general relativity parameters
#
# Fractional deviations from the GR post-Newtonian phase coefficients;
# all default to 0, i.e. general relativity.
dchi0 = Parameter("dchi0",
                dtype=float, default=0., label=r"$d\chi_0$",
                description="0PN testingGR parameter.")
dchi1 = Parameter("dchi1",
                dtype=float, default=0., label=r"$d\chi_1$",
                description="0.5PN testingGR parameter.")
dchi2 = Parameter("dchi2",
                dtype=float, default=0., label=r"$d\chi_2$",
                description="1PN testingGR parameter.")
dchi3 = Parameter("dchi3",
                dtype=float, default=0., label=r"$d\chi_3$",
                description="1.5PN testingGR parameter.")
dchi4 = Parameter("dchi4",
                dtype=float, default=0., label=r"$d\chi_4$",
                description="2PN testingGR parameter.")
dchi5 = Parameter("dchi5",
                dtype=float, default=0., label=r"$d\chi_5$",
                description="2.5PN testingGR parameter.")
# FIX: label was r"$d\chi_5{l}$" (renders the "l" outside the subscript);
# made consistent with dchi6l's r"$d\chi_{6l}$". Also fixed the
# "logrithm" typo in the descriptions of dchi5l and dchi6l.
dchi5l = Parameter("dchi5l",
                dtype=float, default=0., label=r"$d\chi_{5l}$",
                description="2.5PN logarithm testingGR parameter.")
dchi6 = Parameter("dchi6",
                dtype=float, default=0., label=r"$d\chi_6$",
                description="3PN testingGR parameter.")
dchi6l = Parameter("dchi6l",
                dtype=float, default=0., label=r"$d\chi_{6l}$",
                description="3PN logarithm testingGR parameter.")
dchi7 = Parameter("dchi7",
                dtype=float, default=0., label=r"$d\chi_7$",
                description="3.5PN testingGR parameter.")
dalpha1 = Parameter("dalpha1",
                dtype=float, default=0., label=r"$d\alpha_1$",
                description="Merger-ringdown testingGR parameter.")
dalpha2 = Parameter("dalpha2",
                dtype=float, default=0., label=r"$d\alpha_2$",
                description="Merger-ringdown testingGR parameter.")
dalpha3 = Parameter("dalpha3",
                dtype=float, default=0., label=r"$d\alpha_3$",
                description="Merger-ringdown testingGR parameter.")
dalpha4 = Parameter("dalpha4",
                dtype=float, default=0., label=r"$d\alpha_4$",
                description="Merger-ringdown testingGR parameter.")
dalpha5 = Parameter("dalpha5",
                dtype=float, default=0., label=r"$d\alpha_5$",
                description="Merger-ringdown testingGR parameter.")
dbeta1 = Parameter("dbeta1",
                dtype=float, default=0., label=r"$d\beta_1$",
                description="Intermediate testingGR parameter.")
dbeta2 = Parameter("dbeta2",
                dtype=float, default=0., label=r"$d\beta_2$",
                description="Intermediate testingGR parameter.")
dbeta3 = Parameter("dbeta3",
                dtype=float, default=0., label=r"$d\beta_3$",
                description="Intermediate testingGR parameter.")
#
# =============================================================================
#
# Parameter list definitions
#
# =============================================================================
#
# parameters describing the location of a binary w.r.t. the Earth. Note: we
# do not include distance here. This is because these parameters are not
# passed to the waveform generators in lalsimulation, but are instead applied
# after a waveform is generated. Distance, however, is a parameter used by
# the waveform generators.
location_params = ParameterList([tc, ra, dec, polarization,
                                 eclipticlatitude, eclipticlongitude])
# parameters describing the orientation of a binary w.r.t. the radiation
# frame. Note: we include distance here, as it is typically used for generating
# waveforms.
orientation_params = ParameterList\
    ([distance, coa_phase, inclination, long_asc_nodes, mean_per_ano])
# the extrinsic parameters of a waveform
extrinsic_params = orientation_params + location_params
# testing GR parameters
testingGR_params = ParameterList\
    ([dchi0, dchi1, dchi2, dchi3, dchi4, dchi5, dchi5l, dchi6, dchi6l,
      dchi7, dalpha1, dalpha2, dalpha3, dalpha4, dalpha5,
      dbeta1, dbeta2, dbeta3])
# intrinsic parameters of a CBC waveform. Some of these are not recognized
# by every waveform model
cbc_intrinsic_params = ParameterList\
    ([mass1, mass2, spin1x, spin1y, spin1z, spin2x, spin2y, spin2z,
      eccentricity, lambda1, lambda2, dquad_mon1, dquad_mon2, lambda_octu1,
      lambda_octu2, quadfmode1, quadfmode2, octufmode1, octufmode2]) + \
    testingGR_params
# the parameters of a cbc in the radiation frame
cbc_rframe_params = cbc_intrinsic_params + orientation_params
# calibration parameters
calibration_params = ParameterList([
    delta_fc, delta_fs, delta_qinv, kappa_c, kappa_tst_re, kappa_tst_im,
    kappa_pu_re, kappa_pu_im])
# common generation parameters are parameters needed to generate either
# a TD, FD, or frequency sequence waveform
common_generation_params = ParameterList([
    approximant, f_ref, phase_order, spin_order, tidal_order, amplitude_order, eccentricity_order])
# Flags having discrete values, optional to generate either
# a TD, FD, or frequency sequence waveform
flags_generation_params = ParameterList([frame_axis, modes_choice, side_bands, mode_array])
# the following are parameters needed to generate an FD or TD waveform that
# is equally sampled
common_gen_equal_sampled_params = ParameterList([f_lower]) + \
    common_generation_params + flags_generation_params
# the following are parameters that can be used to generate an FD waveform
fd_waveform_params = cbc_rframe_params + ParameterList([delta_f]) + \
    common_gen_equal_sampled_params + ParameterList([f_final, f_final_func])
# the following are parameters that can be used to generate a TD waveform
td_waveform_params = cbc_rframe_params + ParameterList([delta_t]) + \
    common_gen_equal_sampled_params + ParameterList([numrel_data]) + \
    flags_generation_params
# The following are the minimum set of parameters that are required to
# generate a FD or TD waveform. All other parameters have some default value as
# defined above. Defaults of None simply mean that the value is not passed into
# the lal_dict structure and the waveform generator will take whatever default
# behaviour
td_required = ParameterList([f_lower, delta_t, approximant])
fd_required = ParameterList([f_lower, delta_f, approximant])
####
cbc_td_required = ParameterList([mass1, mass2, f_lower, delta_t, approximant])
cbc_fd_required = ParameterList([mass1, mass2, f_lower, delta_f, approximant])
# the following are parameters that can be used to generate a
# frequency series waveform
fd_waveform_sequence_params = cbc_rframe_params + \
    ParameterList([sample_points]) + common_generation_params + \
    flags_generation_params
| 29,135
| 44.811321
| 99
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/utils_cuda.py
|
# Copyright (C) 2018 Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""This module contains the CUDA-specific code for
convenience utilities for manipulating waveforms
"""
from pycbc.types import FrequencySeries
from mako.template import Template
from pycuda.compiler import SourceModule
import numpy
time_shift_kernel = Template("""
__global__ void fseries_ts(float2 *out, float phi,
int kmin, int kmax){
/*
Input parameters:
=================
out: float2 pointer
The input frequency series to shift;
will be shifted in-place
phi: float
Equals -2*pi*delta_f*time_shift
kmin: int
minimum index to examine or write
kmax: int
maximum index to examine or write
*/
float x, y;
int i;
float2 tmp, htmp;
i = ${ntpb}*blockIdx.x + threadIdx.x;
if ((i >= kmin) && (i < kmax)){
htmp = out[i];
__sincosf(phi*i, &y, &x);
tmp.x = x*htmp.x-y*htmp.y;
tmp.y = x*htmp.y+y*htmp.x;
out[i] = tmp;
}
return;
}
""")
# Right now, hardcoding the number of threads per block
nt = 1024
nt_float = numpy.float32(nt)
# Compile the rendered kernel once at import time and prepare it for fast
# repeated invocation; "Pfii" declares the argument types as
# (pointer, float, int, int), matching fseries_ts above.
mod = SourceModule(time_shift_kernel.render(ntpb=nt))
fseries_ts_fn = mod.get_function("fseries_ts")
fseries_ts_fn.prepare("Pfii")
def apply_fseries_time_shift(htilde, dt, kmin=0, copy=True):
    """Shifts a frequency domain waveform in time. The waveform is assumed to
    be sampled at equal frequency intervals.

    Parameters
    ----------
    htilde : FrequencySeries
        The single-precision frequency series to shift.
    dt : float
        The amount of time (in seconds) by which to shift.
    kmin : int, optional
        First frequency-bin index the kernel acts on. Default is 0.
    copy : bool, optional
        If True (default), shift a copy and leave ``htilde`` untouched;
        otherwise shift ``htilde`` in place.

    Returns
    -------
    FrequencySeries
        The time-shifted series.
    """
    if htilde.precision != 'single':
        raise NotImplementedError("CUDA version of apply_fseries_time_shift only supports single precision")
    result = htilde.copy() if copy else htilde
    first = numpy.int32(kmin)
    last = numpy.int32(len(htilde))
    nblocks = int(numpy.ceil(last / nt_float))
    if nblocks > 1024:
        raise ValueError("More than 1024 blocks not supported yet")
    # phase slope per bin: bin k is multiplied by exp(i*phase*k)
    phase = numpy.float32(-2 * numpy.pi * dt * htilde.delta_f)
    fseries_ts_fn.prepared_call((nblocks, 1), (nt, 1, 1),
                                result.data.gpudata, phase, first, last)
    if copy:
        htilde = FrequencySeries(result, delta_f=htilde.delta_f,
                                 epoch=htilde.epoch, copy=False)
    return htilde
| 3,136
| 29.163462
| 108
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/__init__.py
|
from pycbc.waveform.waveform import *
from pycbc.waveform.utils import *
from pycbc.waveform.bank import *
from pycbc.waveform.ringdown import *
from pycbc.waveform.parameters import *
from pycbc.waveform.waveform_modes import (get_td_waveform_modes,
                                           get_fd_waveform_modes)
from pycbc.waveform.plugin import (retrieve_waveform_plugins,
                                   add_custom_waveform,
                                   add_length_estimator)
# Load any externally-registered waveform plugins at import time so their
# approximants are immediately available to the functions above.
retrieve_waveform_plugins()
| 520
| 42.416667
| 65
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/generator.py
|
# Copyright (C) 2016 Collin Capano, Alex Nitz, Christopher Biwer
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This modules provides classes for generating waveforms.
"""
import os
import logging
from abc import (ABCMeta, abstractmethod)
from . import waveform
from .waveform import (FailedWaveformError)
from . import ringdown
from . import supernovae
from . import waveform_modes
from pycbc import transforms
from pycbc.types import TimeSeries
from pycbc.waveform import parameters
from pycbc.waveform.utils import apply_fd_time_shift, taper_timeseries, \
ceilpow2
from pycbc.detector import Detector
from pycbc.pool import use_mpi
import lal as _lal
from pycbc import strain
# utility functions/class
# Module-level counter used to give each failed-waveform parameter dump a
# unique dataset name within the output HDF file (see BaseGenerator).
failed_counter = 0
class BaseGenerator(object):
    r"""A wrapper class to call a waveform generator with a set of frozen
    parameters and a set of variable parameters. The frozen parameters and
    values, along with a list of variable parameter names, are set at
    initialization. This way, repeated calls can be made to the underlying
    generator by simply passing a list of values for the variable parameters
    to this class's generate function.

    Parameters
    ----------
    generator : function
        The function that is called for waveform generation.
    variable_args : {(), list}
        A tuple or list of strings giving the names and order of variable
        parameters that will be passed to the waveform generator when the
        generate function is called.
    record_failures : boolean
        Store output files containing the parameters of failed waveform
        generation. Default is False.
    \**frozen_params :
        These keyword arguments are the ones that will be frozen in the
        waveform generator. For a list of possible parameters, see
        pycbc.waveform.cbc_parameters.

    Attributes
    ----------
    generator : function
        The function that is called for waveform generation.
    variable_args : tuple
        The list of names of variable arguments.
    frozen_params : dict
        A dictionary of the frozen keyword arguments that are always passed
        to the waveform generator function.
    current_params : dict
        A dictionary of the frozen keyword arguments and variable arguments
        that were last passed to the waveform generator.
    """
    def __init__(self, generator, variable_args=(), record_failures=False,
                 **frozen_params):
        self.generator = generator
        self.variable_args = tuple(variable_args)
        self.frozen_params = frozen_params
        # we'll keep a dictionary of the current parameters for fast
        # generation
        self.current_params = frozen_params.copy()
        # keep a list of functions to call before waveform generation
        self._pregenerate_functions = []
        # If we are under mpi, then failed waveforms will be stored by
        # mpi rank to avoid file writing conflicts. We'll check for this
        # upfront
        self.record_failures = (record_failures or
                                ('PYCBC_RECORD_FAILED_WAVEFORMS' in os.environ))
        self.mpi_enabled, _, self.mpi_rank = use_mpi()

    @property
    def static_args(self):
        """Returns a dictionary of the static arguments."""
        return self.frozen_params

    def generate(self, **kwargs):
        """Generates a waveform from the keyword args. The current params
        are updated with the given kwargs, then the generator is called.
        """
        self.current_params.update(kwargs)
        return self._generate_from_current()

    def _add_pregenerate(self, func):
        """ Adds a function that will be called by the generator function
        before waveform generation.
        """
        self._pregenerate_functions.append(func)

    def _postgenerate(self, res):
        """Allows the waveform returned by the generator function to be
        manipulated before returning.
        """
        return res

    def _gdecorator(generate_func):
        """A decorator that allows for seemless pre/post manipulation of
        the waveform generator function.
        """
        def dostuff(self):
            # apply any registered parameter transforms before generating
            for func in self._pregenerate_functions:
                self.current_params = func(self.current_params)
            res = generate_func(self) # pylint:disable=not-callable
            return self._postgenerate(res)
        return dostuff

    @_gdecorator
    def _generate_from_current(self):
        """Generates a waveform from the current parameters.

        Raises
        ------
        FailedWaveformError
            If the underlying generator raises a ``RuntimeError`` (which is
            what lalsimulation does on failure), it is re-raised as a
            ``FailedWaveformError`` carrying the offending parameters.
        """
        try:
            new_waveform = self.generator(**self.current_params)
            return new_waveform
        except RuntimeError as e:
            if self.record_failures:
                import h5py
                from pycbc.io.hdf import dump_state

                global failed_counter

                if self.mpi_enabled:
                    outname = 'failed/params_%s.hdf' % self.mpi_rank
                else:
                    outname = 'failed/params.hdf'

                # exist_ok avoids a race when multiple processes create
                # the directory at the same time
                os.makedirs('failed', exist_ok=True)
                # FIX: open in append mode explicitly; h5py >= 3 defaults
                # to read-only, which would fail when creating the dump
                with h5py.File(outname, 'a') as f:
                    dump_state(self.current_params, f,
                               dsetname=str(failed_counter))
                failed_counter += 1

            # we'll get a RuntimeError if lalsimulation failed to generate
            # the waveform for whatever reason
            strparams = ' | '.join(['{}: {}'.format(
                p, str(val)) for p, val in self.current_params.items()])
            raise FailedWaveformError("Failed to generate waveform with "
                                      "parameters:\n{}\nError was: {}"
                                      .format(strparams, e))
class BaseCBCGenerator(BaseGenerator):
    """Adds ability to convert from various derived parameters to parameters
    needed by the waveform generators.
    """

    possible_args = set(parameters.td_waveform_params +
                        parameters.fd_waveform_params +
                        ['taper'])
    """set: The set of names of arguments that may be used in the
    `variable_args` or `frozen_params`.
    """

    def __init__(self, generator, variable_args=(), **frozen_params):
        super(BaseCBCGenerator, self).__init__(
            generator, variable_args=variable_args, **frozen_params)
        # collect every argument name this generator was configured with
        known_args = set(self.frozen_params) | set(self.variable_args)
        # calibration arguments are handled elsewhere, so exclude them
        # from the sanity check below
        known_args -= {a for a in self.variable_args
                       if a.startswith('calib_')}
        leftover = known_args - self.possible_args
        if leftover:
            # warn (don't fail): a transform may map these to known
            # parameters, or a custom model may consume them directly
            logging.warning("WARNING: The following parameters are generally "
                            "not used by CBC waveform generators: %s. If you "
                            "have provided a transform that converted these "
                            "into known parameters (e.g., mchirp, q to "
                            "mass1, mass2) or you are using a custom model "
                            "that uses these parameters, you can safely "
                            "ignore this message.", ', '.join(leftover))
class FDomainCBCGenerator(BaseCBCGenerator):
    """Generates frequency-domain CBC waveforms in the radiation frame.

    Uses `waveform.get_fd_waveform` as a generator function to create
    frequency-domain CBC waveforms in the radiation frame; i.e., with no
    detector response function applied. For more details, see `BaseGenerator`.

    Examples
    --------
    Initialize a generator:

    >>> from pycbc.waveform.generator import FDomainCBCGenerator
    >>> generator = FDomainCBCGenerator(variable_args=['mass1', 'mass2'], delta_f=1./32, f_lower=30., approximant='TaylorF2')

    Create a waveform with the variable arguments (in this case, mass1, mass2):

    >>> generator.generate(mass1=1.4, mass2=1.4)
        (<pycbc.types.frequencyseries.FrequencySeries at 0x1110c1450>,
         <pycbc.types.frequencyseries.FrequencySeries at 0x1110c1510>)
    """
    def __init__(self, variable_args=(), **frozen_params):
        # freeze everything but the variable args; the actual generation
        # is delegated to waveform.get_fd_waveform
        super().__init__(waveform.get_fd_waveform,
                         variable_args=variable_args, **frozen_params)
class FDomainCBCModesGenerator(BaseCBCGenerator):
    """Generator for frequency-domain CBC waveform modes.

    Wraps :py:func:`waveform_modes.get_fd_waveform_modes` to produce
    frequency-domain CBC waveforms mode-by-mode, without applying spherical
    harmonics. For details on methods and arguments, see
    :py:class:`BaseGenerator`.
    """
    def __init__(self, variable_args=(), **frozen_params):
        # delegate to the base class with the FD modes function
        super(FDomainCBCModesGenerator, self).__init__(
            waveform_modes.get_fd_waveform_modes,
            variable_args=variable_args, **frozen_params)
class TDomainCBCGenerator(BaseCBCGenerator):
    """Generator for time-domain CBC waveforms in the radiation frame.

    Wraps ``waveform.get_td_waveform`` to produce time-domain CBC waveforms
    with no detector response function applied. For details on methods and
    arguments, see `BaseGenerator`.

    Examples
    --------
    Initialize a generator:

    >>> from pycbc.waveform.generator import TDomainCBCGenerator
    >>> generator = TDomainCBCGenerator(variable_args=['mass1', 'mass2'], delta_t=1./4096, f_lower=30., approximant='TaylorT4')

    Create a waveform with the variable arguments (in this case, mass1, mass2):

    >>> generator.generate(mass1=2., mass2=1.3)
        (<pycbc.types.timeseries.TimeSeries at 0x10e546710>,
         <pycbc.types.timeseries.TimeSeries at 0x115f37690>)
    """
    def __init__(self, variable_args=(), **frozen_params):
        # delegate to the base class with the TD waveform function
        super(TDomainCBCGenerator, self).__init__(
            waveform.get_td_waveform,
            variable_args=variable_args, **frozen_params)

    def _postgenerate(self, res):
        """Taper the polarizations if a ``taper`` parameter was provided.

        If no ``taper`` parameter is in the current params, the waveform is
        returned unchanged.
        """
        hp, hc = res
        try:
            tapermethod = self.current_params['taper']
        except KeyError:
            return hp, hc
        hp = taper_timeseries(hp, tapermethod=tapermethod)
        hc = taper_timeseries(hc, tapermethod=tapermethod)
        return hp, hc
class TDomainCBCModesGenerator(BaseCBCGenerator):
    """Generator for time-domain CBC waveform modes.

    Wraps :py:func:`waveform_modes.get_td_waveform_modes` to produce
    time-domain CBC waveforms mode-by-mode, without applying spherical
    harmonics. The ``generate`` function returns a dictionary of
    modes -> (real, imag) part of the complex time series.
    For details on methods and arguments, see :py:class:`BaseGenerator`.
    """
    def __init__(self, variable_args=(), **frozen_params):
        # delegate to the base class with the TD modes function
        super(TDomainCBCModesGenerator, self).__init__(
            waveform_modes.get_td_waveform_modes,
            variable_args=variable_args, **frozen_params)

    def _postgenerate(self, res):
        """Taper each mode if a ``taper`` parameter was provided."""
        if 'taper' in self.current_params:
            tapermethod = self.current_params['taper']
            # taper both the real and imaginary parts of every mode
            for mode, (ulm, vlm) in res.items():
                res[mode] = (taper_timeseries(ulm, tapermethod=tapermethod),
                             taper_timeseries(vlm, tapermethod=tapermethod))
        return res
class FDomainMassSpinRingdownGenerator(BaseGenerator):
    """Generator for frequency-domain ringdowns from final mass and spin.

    Wraps ``ringdown.get_fd_from_final_mass_spin`` to produce
    frequency-domain ringdown waveforms with higher modes in the radiation
    frame; i.e., with no detector response function applied. For details,
    see BaseGenerator.

    Examples
    --------
    Initialize a generator:

    >>> from pycbc.waveform.generator import FDomainMassSpinRingdownGenerator
    >>> generator = FDomainMassSpinRingdownGenerator(variable_args=['final_mass',
                    'final_spin','amp220','amp210','phi220','phi210'], lmns=['221','211'],
                    delta_f=1./32, f_lower=30., f_final=500)

    Create a ringdown with the variable arguments:

    >>> generator.generate(final_mass=65., final_spin=0.7,
                           amp220=1e-21, amp210=1./10, phi220=0., phi210=0.)
        (<pycbc.types.frequencyseries.FrequencySeries at 0x51614d0>,
         <pycbc.types.frequencyseries.FrequencySeries at 0x5161550>)
    """
    def __init__(self, variable_args=(), **frozen_params):
        # delegate to the base class with the mass/spin FD ringdown function
        super(FDomainMassSpinRingdownGenerator, self).__init__(
            ringdown.get_fd_from_final_mass_spin,
            variable_args=variable_args, **frozen_params)
class FDomainFreqTauRingdownGenerator(BaseGenerator):
    """Generator for frequency-domain ringdowns from frequency and damping time.

    Wraps ``ringdown.get_fd_from_freqtau`` to produce frequency-domain
    ringdown waveforms with higher modes in the radiation frame; i.e., with
    no detector response function applied. For details, see BaseGenerator.

    Examples
    --------
    Initialize a generator:

    >>> from pycbc.waveform.generator import FDomainFreqTauRingdownGenerator
    >>> generator = FDomainFreqTauRingdownGenerator(variable_args=['f_220',
                    'tau_220','f_210','tau_210','amp220','amp210','phi220','phi210'],
                    lmns=['221','211'], delta_f=1./32, f_lower=30., f_final=500)

    Create a ringdown with the variable arguments:

    >>> generator.generate(f_220=317., tau_220=0.003, f_210=274., tau_210=0.003,
                           amp220=1e-21, amp210=1./10, phi220=0., phi210=0.)
        (<pycbc.types.frequencyseries.FrequencySeries at 0x51614d0>,
         <pycbc.types.frequencyseries.FrequencySeries at 0x5161550>)
    """
    def __init__(self, variable_args=(), **frozen_params):
        # delegate to the base class with the freq/tau FD ringdown function
        super(FDomainFreqTauRingdownGenerator, self).__init__(
            ringdown.get_fd_from_freqtau,
            variable_args=variable_args, **frozen_params)
class TDomainMassSpinRingdownGenerator(BaseGenerator):
    """Generator for time-domain ringdowns from final mass and spin.

    Wraps ``ringdown.get_td_from_final_mass_spin`` to produce time-domain
    ringdown waveforms with higher modes in the radiation frame; i.e., with
    no detector response function applied. For details, see BaseGenerator.

    Examples
    --------
    Initialize a generator:

    >>> from pycbc.waveform.generator import TDomainMassSpinRingdownGenerator
    >>> generator = TDomainMassSpinRingdownGenerator(variable_args=['final_mass',
                    'final_spin','amp220','amp210','phi220','phi210'], lmns=['221','211'],
                    delta_t=1./2048)

    Create a ringdown with the variable arguments:

    >>> generator.generate(final_mass=65., final_spin=0.7,
                           amp220=1e-21, amp210=1./10, phi220=0., phi210=0.)
        (<pycbc.types.frequencyseries.FrequencySeries at 0x51614d0>,
         <pycbc.types.frequencyseries.FrequencySeries at 0x5161550>)
    """
    def __init__(self, variable_args=(), **frozen_params):
        # delegate to the base class with the mass/spin TD ringdown function
        super(TDomainMassSpinRingdownGenerator, self).__init__(
            ringdown.get_td_from_final_mass_spin,
            variable_args=variable_args, **frozen_params)
class TDomainFreqTauRingdownGenerator(BaseGenerator):
    """Generator for time-domain ringdowns from frequency and damping time.

    Wraps ``ringdown.get_td_from_freqtau`` to produce time-domain ringdown
    waveforms with higher modes in the radiation frame; i.e., with no
    detector response function applied. For details, see BaseGenerator.

    Examples
    --------
    Initialize a generator:

    >>> from pycbc.waveform.generator import FDomainFreqTauRingdownGenerator
    >>> generator = TDomainFreqTauRingdownGenerator(variable_args=['f_220',
                    'tau_220','f_210','tau_210','amp220','amp210','phi220','phi210'],
                    lmns=['221','211'], delta_t=1./2048)

    Create a ringdown with the variable arguments:

    >>> generator.generate(f_220=317., tau_220=0.003, f_210=274., tau_210=0.003,
                           amp220=1e-21, amp210=1./10, phi220=0., phi210=0.)
        (<pycbc.types.frequencyseries.FrequencySeries at 0x51614d0>,
         <pycbc.types.frequencyseries.FrequencySeries at 0x5161550>)
    """
    def __init__(self, variable_args=(), **frozen_params):
        # delegate to the base class with the freq/tau TD ringdown function
        super(TDomainFreqTauRingdownGenerator, self).__init__(
            ringdown.get_td_from_freqtau,
            variable_args=variable_args, **frozen_params)
class TDomainSupernovaeGenerator(BaseGenerator):
    """Generator for time-domain core-collapse supernova waveforms.

    Wraps ``supernovae.get_corecollapse_bounce``, which builds waveforms
    from a set of Principal Components provided in a .hdf file.
    """
    def __init__(self, variable_args=(), **frozen_params):
        # delegate to the base class with the core-collapse bounce function
        super(TDomainSupernovaeGenerator, self).__init__(
            supernovae.get_corecollapse_bounce,
            variable_args=variable_args, **frozen_params)
#
# =============================================================================
#
# Detector-frame generators
#
# =============================================================================
#
class BaseFDomainDetFrameGenerator(metaclass=ABCMeta):
    """Base generator for frequency-domain waveforms in a detector frame.

    Parameters
    ----------
    rFrameGeneratorClass : class
        The class to use for generating the waveform in the radiation frame,
        e.g., FDomainCBCGenerator. This should be the class, not an
        instance of the class (the class will be initialized with the
        appropriate arguments internally).
    detectors : {None, list of strings}
        The names of the detectors to use. If provided, all location parameters
        must be included in either the variable args or the frozen params. If
        None, the generate function will just return the plus polarization
        returned by the rFrameGeneratorClass shifted by any desired time shift.
    epoch : {float, lal.LIGOTimeGPS}
        The epoch start time to set the waveform to. A time shift = tc - epoch is
        applied to waveforms before returning.
    variable_args : {(), list or tuple}
        A list or tuple of strings giving the names and order of parameters
        that will be passed to the generate function.
    \**frozen_params
        Keyword arguments setting the parameters that will not be changed from
        call-to-call of the generate function.

    Attributes
    ----------
    detectors : dict
        The dictionary of detectors that antenna patterns are calculated for
        on each call of generate. If no detectors were provided, will be
        ``{'RF': None}``, where "RF" means "radiation frame".
    detector_names : list
        The list of detector names. If no detectors were provided, then this
        will be ['RF'] for "radiation frame".
    current_params : dict
        A dictionary of name, value pairs of the arguments that were last
        used by the generate function.
    rframe_generator : instance of rFrameGeneratorClass
        The instance of the radiation-frame generator that is used for waveform
        generation. All parameters in current_params except for the
        location params are passed to this class's generate function.
    frozen_location_args : dict
        Any location parameters that were included in the frozen_params.
    variable_args : tuple
        The list of names of arguments that are passed to the generate
        function.
    """
    location_args = set([])
    """Set: Should be overriden by children classes with a set of parameters
    that set the binary's location.
    """

    def __init__(self, rFrameGeneratorClass, epoch, detectors=None,
                 variable_args=(), recalib=None, gates=None, **frozen_params):
        # initialize frozen & current parameters:
        # note: current_params/static_args are copied BEFORE the location
        # parameters are popped below, so they retain the location params
        self.current_params = frozen_params.copy()
        self._static_args = frozen_params.copy()
        # we'll separate out frozen location parameters from the frozen
        # parameters that are sent to the rframe generator
        self.frozen_location_args = {}
        loc_params = set(frozen_params.keys()) & self.location_args
        for param in loc_params:
            self.frozen_location_args[param] = frozen_params.pop(param)
        # set the order of the variable parameters
        self.variable_args = tuple(variable_args)
        # variables that are sent to the rFrame generator
        rframe_variables = list(set(self.variable_args) - self.location_args)
        # initialize the radiation frame generator
        self.rframe_generator = rFrameGeneratorClass(
            variable_args=rframe_variables, **frozen_params)
        self.set_epoch(epoch)
        # set calibration model
        self.recalib = recalib
        # if detectors are provided, convert to detector type; also ensure that
        # location variables are specified
        if detectors is not None:
            self.detectors = {det: Detector(det) for det in detectors}
            missing_args = [arg for arg in self.location_args if not
                 (arg in self.current_params or arg in self.variable_args)]
            if any(missing_args):
                raise ValueError("detectors provided, but missing location "
                                 "parameters %s. " %(', '.join(missing_args)) +
                                 "These must be either in the frozen params or the "
                                 "variable args.")
        else:
            # no detectors: waveforms are returned in the radiation frame
            self.detectors = {'RF': None}
        self.detector_names = sorted(self.detectors.keys())
        self.gates = gates

    def set_epoch(self, epoch):
        """Sets the epoch; epoch should be a float or a LIGOTimeGPS."""
        self._epoch = float(epoch)

    @property
    def static_args(self):
        """Returns a dictionary of the static arguments."""
        return self._static_args

    @property
    def epoch(self):
        """The GPS start time of the frequency series returned by the generate
        function. A time shift is applied to the waveform equal to tc-epoch.
        Update by using ``set_epoch``
        """
        return _lal.LIGOTimeGPS(self._epoch)

    @abstractmethod
    def generate(self, **kwargs):
        """The function that generates the waveforms.
        """
        pass

    @abstractmethod
    def select_rframe_generator(self, approximant):
        """Method to select waveform generator based on an approximant."""
        pass
class FDomainDetFrameGenerator(BaseFDomainDetFrameGenerator):
    """Generates frequency-domain waveform in a specific frame.

    Generates a waveform using the given radiation frame generator class,
    and applies the detector response function and appropriate time offset.

    Parameters
    ----------
    rFrameGeneratorClass : class
        The class to use for generating the waveform in the radiation frame,
        e.g., FDomainCBCGenerator. This should be the class, not an
        instance of the class (the class will be initialized with the
        appropriate arguments internally).
    detectors : {None, list of strings}
        The names of the detectors to use. If provided, all location parameters
        must be included in either the variable args or the frozen params. If
        None, the generate function will just return the plus polarization
        returned by the rFrameGeneratorClass shifted by any desired time shift.
    epoch : {float, lal.LIGOTimeGPS}
        The epoch start time to set the waveform to. A time shift = tc - epoch is
        applied to waveforms before returning.
    variable_args : {(), list or tuple}
        A list or tuple of strings giving the names and order of parameters
        that will be passed to the generate function.
    \**frozen_params
        Keyword arguments setting the parameters that will not be changed from
        call-to-call of the generate function.

    Attributes
    ----------
    detectors : dict
        The dictionary of detectors that antenna patterns are calculated for
        on each call of generate. If no detectors were provided, will be
        ``{'RF': None}``, where "RF" means "radiation frame".
    detector_names : list
        The list of detector names. If no detectors were provided, then this
        will be ['RF'] for "radiation frame".
    epoch : lal.LIGOTimeGPS
        The GPS start time of the frequency series returned by the generate function.
        A time shift is applied to the waveform equal to tc-epoch. Update by using
        ``set_epoch``.
    current_params : dict
        A dictionary of name, value pairs of the arguments that were last
        used by the generate function.
    rframe_generator : instance of rFrameGeneratorClass
        The instance of the radiation-frame generator that is used for waveform
        generation. All parameters in current_params except for the
        location params are passed to this class's generate function.
    frozen_location_args : dict
        Any location parameters that were included in the frozen_params.
    variable_args : tuple
        The list of names of arguments that are passed to the generate
        function.

    Examples
    --------
    Initialize a generator:

    >>> from pycbc.waveform.generator import FDomainDetFrameGenerator
    >>> generator = FDomainDetFrameGenerator(waveform.FDomainCBCGenerator, 0., variable_args=['mass1', 'mass2', 'spin1z', 'spin2z', 'tc', 'ra', 'dec', 'polarization'], detectors=['H1', 'L1'], delta_f=1./64, f_lower=20., approximant='SEOBNRv2_ROM_DoubleSpin')

    Generate a waveform:

    >>> generator.generate(mass1=38.6, mass2=29.3, spin1z=0.33, spin2z=-0.94, tc=2.43, ra=1.37, dec=-1.26, polarization=2.76)
    {'H1': <pycbc.types.frequencyseries.FrequencySeries at 0x116637350>,
     'L1': <pycbc.types.frequencyseries.FrequencySeries at 0x116637a50>}
    """
    location_args = set(['tc', 'ra', 'dec', 'polarization'])
    """set(['tc', 'ra', 'dec', 'polarization']):
    The set of location parameters. These are not passed to the rFrame
    generator class; instead, they are used to apply the detector response
    function and/or shift the waveform in time. The parameters are:

    * tc: The GPS time of coalescence (should be geocentric time).
    * ra: Right ascension.
    * dec: declination
    * polarization: polarization.

    All of these must be provided in either the variable args or the
    frozen params if detectors is not None. If detectors
    is None, tc may optionally be provided.
    """

    def generate(self, **kwargs):
        """Generates a waveform, applies a time shift and the detector response
        function from the given kwargs.

        Returns
        -------
        dict :
            Dictionary of detector names -> FrequencySeries. If no detectors
            were provided, the key is 'RF' (radiation frame).
        """
        self.current_params.update(kwargs)
        # location parameters are handled here, not by the rframe generator
        rfparams = {param: self.current_params[param]
            for param in kwargs if param not in self.location_args}
        hp, hc = self.rframe_generator.generate(**rfparams)
        if isinstance(hp, TimeSeries):
            # convert a time-domain waveform to the frequency domain
            df = self.current_params['delta_f']
            hp = hp.to_frequencyseries(delta_f=df)
            hc = hc.to_frequencyseries(delta_f=df)
            # time-domain waveforms will not be shifted so that the peak amp
            # happens at the end of the time series (as they are for f-domain),
            # so we add an additional shift to account for it
            tshift = 1./df - abs(hp._epoch)
        else:
            tshift = 0.
        hp._epoch = hc._epoch = self._epoch
        h = {}
        if self.detector_names != ['RF']:
            for detname, det in self.detectors.items():
                # apply detector response function
                fp, fc = det.antenna_pattern(self.current_params['ra'],
                            self.current_params['dec'],
                            self.current_params['polarization'],
                            self.current_params['tc'])
                thish = fp*hp + fc*hc
                # apply the time shift; tc is the geocentric coalescence time
                # plus the light travel time to this detector
                tc = self.current_params['tc'] + \
                    det.time_delay_from_earth_center(self.current_params['ra'],
                         self.current_params['dec'], self.current_params['tc'])
                h[detname] = apply_fd_time_shift(thish, tc+tshift, copy=False)
                if self.recalib:
                    # recalibrate with given calibration model
                    h[detname] = \
                        self.recalib[detname].map_to_adjust(h[detname],
                            **self.current_params)
        else:
            # no detector response, just use the + polarization
            if 'tc' in self.current_params:
                hp = apply_fd_time_shift(hp, self.current_params['tc']+tshift,
                                         copy=False)
            h['RF'] = hp
        if self.gates is not None:
            # resize all to nearest power of 2 before gating
            for d in h.values():
                d.resize(ceilpow2(len(d)-1) + 1)
            h = strain.apply_gates_to_fd(h, self.gates)
        return h

    @staticmethod
    def select_rframe_generator(approximant):
        """Returns a radiation frame generator class based on the approximant
        string.
        """
        return select_waveform_generator(approximant)
class FDomainDetFrameTwoPolGenerator(BaseFDomainDetFrameGenerator):
    """Generates frequency-domain waveform in a specific frame.

    Generates both polarizations of a waveform using the given radiation frame
    generator class, and applies the time shift. Detector response functions
    are not applied.

    Parameters
    ----------
    rFrameGeneratorClass : class
        The class to use for generating the waveform in the radiation frame,
        e.g., FDomainCBCGenerator. This should be the class, not an
        instance of the class (the class will be initialized with the
        appropriate arguments internally).
    detectors : {None, list of strings}
        The names of the detectors to use. If provided, all location parameters
        must be included in either the variable args or the frozen params. If
        None, the generate function will just return the plus polarization
        returned by the rFrameGeneratorClass shifted by any desired time shift.
    epoch : {float, lal.LIGOTimeGPS}
        The epoch start time to set the waveform to. A time shift = tc - epoch is
        applied to waveforms before returning.
    variable_args : {(), list or tuple}
        A list or tuple of strings giving the names and order of parameters
        that will be passed to the generate function.
    \**frozen_params
        Keyword arguments setting the parameters that will not be changed from
        call-to-call of the generate function.

    Attributes
    ----------
    detectors : dict
        The dictionary of detectors that antenna patterns are calculated for
        on each call of generate. If no detectors were provided, will be
        ``{'RF': None}``, where "RF" means "radiation frame".
    detector_names : list
        The list of detector names. If no detectors were provided, then this
        will be ['RF'] for "radiation frame".
    epoch : lal.LIGOTimeGPS
        The GPS start time of the frequency series returned by the generate function.
        A time shift is applied to the waveform equal to tc-epoch. Update by using
        ``set_epoch``.
    current_params : dict
        A dictionary of name, value pairs of the arguments that were last
        used by the generate function.
    rframe_generator : instance of rFrameGeneratorClass
        The instance of the radiation-frame generator that is used for waveform
        generation. All parameters in current_params except for the
        location params are passed to this class's generate function.
    frozen_location_args : dict
        Any location parameters that were included in the frozen_params.
    variable_args : tuple
        The list of names of arguments that are passed to the generate
        function.
    """
    location_args = set(['tc', 'ra', 'dec'])
    """ set(['tc', 'ra', 'dec']):
    The set of location parameters. These are not passed to the rFrame
    generator class; instead, they are used to apply the detector response
    function and/or shift the waveform in time. The parameters are:

    * tc: The GPS time of coalescence (should be geocentric time).
    * ra: Right ascension.
    * dec: declination

    All of these must be provided in either the variable args or the
    frozen params if detectors is not None. If detectors
    is None, tc may optionally be provided.
    """

    def generate(self, **kwargs):
        """Generates a waveform polarizations and applies a time shift.

        Returns
        -------
        dict :
            Dictionary of ``detector names -> (hp, hc)``, where ``hp, hc`` are
            the plus and cross polarization, respectively.
        """
        self.current_params.update(kwargs)
        # location parameters are handled here, not by the rframe generator
        rfparams = {param: self.current_params[param]
            for param in kwargs if param not in self.location_args}
        hp, hc = self.rframe_generator.generate(**rfparams)
        if isinstance(hp, TimeSeries):
            df = self.current_params['delta_f']
            hp = hp.to_frequencyseries(delta_f=df)
            hc = hc.to_frequencyseries(delta_f=df)
            # time-domain waveforms will not be shifted so that the peak amp
            # happens at the end of the time series (as they are for f-domain),
            # so we add an additional shift to account for it
            tshift = 1./df - abs(hp._epoch)
        else:
            tshift = 0.
        hp._epoch = hc._epoch = self._epoch
        h = {}
        if self.detector_names != ['RF']:
            for detname, det in self.detectors.items():
                # apply the time shift: geocentric tc plus the light travel
                # time to this detector
                tc = self.current_params['tc'] + \
                    det.time_delay_from_earth_center(self.current_params['ra'],
                         self.current_params['dec'], self.current_params['tc'])
                dethp = apply_fd_time_shift(hp, tc+tshift, copy=True)
                dethc = apply_fd_time_shift(hc, tc+tshift, copy=True)
                if self.recalib:
                    # recalibrate with given calibration model
                    dethp = self.recalib[detname].map_to_adjust(
                        dethp, **self.current_params)
                    dethc = self.recalib[detname].map_to_adjust(
                        dethc, **self.current_params)
                h[detname] = (dethp, dethc)
        else:
            # no detector response, just use the + polarization
            if 'tc' in self.current_params:
                hp = apply_fd_time_shift(hp, self.current_params['tc']+tshift,
                                         copy=False)
                hc = apply_fd_time_shift(hc, self.current_params['tc']+tshift,
                                         copy=False)
            h['RF'] = (hp, hc)
        if self.gates is not None:
            # resize all to nearest power of 2, then gate each polarization
            hps = {}
            hcs = {}
            for det in h:
                # fix: unpack the (hp, hc) tuple; previously both hps and hcs
                # were set to the tuple itself, breaking resize/gating
                hp, hc = h[det]
                hp.resize(ceilpow2(len(hp)-1) + 1)
                hc.resize(ceilpow2(len(hc)-1) + 1)
                hps[det] = hp
                hcs[det] = hc
            hps = strain.apply_gates_to_fd(hps, self.gates)
            # fix: gate the cross polarizations; previously hps was gated a
            # second time, so the returned hc was actually the gated hp
            hcs = strain.apply_gates_to_fd(hcs, self.gates)
            h = {det: (hps[det], hcs[det]) for det in h}
        return h

    @staticmethod
    def select_rframe_generator(approximant):
        """Returns a radiation frame generator class based on the approximant
        string.
        """
        return select_waveform_generator(approximant)
class FDomainDetFrameTwoPolNoRespGenerator(BaseFDomainDetFrameGenerator):
    """Generates frequency-domain waveform in a specific frame.

    Generates both polarizations of a waveform using the given radiation frame
    generator class, and applies the time shift. Detector response functions
    are not applied.

    Parameters
    ----------
    rFrameGeneratorClass : class
        The class to use for generating the waveform in the radiation frame,
        e.g., FDomainCBCGenerator. This should be the class, not an
        instance of the class (the class will be initialized with the
        appropriate arguments internally).
    detectors : {None, list of strings}
        The names of the detectors to use. If provided, all location parameters
        must be included in either the variable args or the frozen params. If
        None, the generate function will just return the plus polarization
        returned by the rFrameGeneratorClass shifted by any desired time shift.
    epoch : {float, lal.LIGOTimeGPS}
        The epoch start time to set the waveform to. A time shift = tc - epoch is
        applied to waveforms before returning.
    variable_args : {(), list or tuple}
        A list or tuple of strings giving the names and order of parameters
        that will be passed to the generate function.
    \**frozen_params
        Keyword arguments setting the parameters that will not be changed from
        call-to-call of the generate function.

    Attributes
    ----------
    detectors : dict
        The dictionary of detectors that antenna patterns are calculated for
        on each call of generate. If no detectors were provided, will be
        ``{'RF': None}``, where "RF" means "radiation frame".
    detector_names : list
        The list of detector names. If no detectors were provided, then this
        will be ['RF'] for "radiation frame".
    epoch : lal.LIGOTimeGPS
        The GPS start time of the frequency series returned by the generate function.
        A time shift is applied to the waveform equal to tc-epoch. Update by using
        ``set_epoch``.
    current_params : dict
        A dictionary of name, value pairs of the arguments that were last
        used by the generate function.
    rframe_generator : instance of rFrameGeneratorClass
        The instance of the radiation-frame generator that is used for waveform
        generation. All parameters in current_params except for the
        location params are passed to this class's generate function.
    frozen_location_args : dict
        Any location parameters that were included in the frozen_params.
    variable_args : tuple
        The list of names of arguments that are passed to the generate
        function.
    """
    def generate(self, **kwargs):
        """Generates a waveform polarizations

        Returns
        -------
        dict :
            Dictionary of ``detector names -> (hp, hc)``, where ``hp, hc`` are
            the plus and cross polarization, respectively.
        """
        self.current_params.update(kwargs)
        hp, hc = self.rframe_generator.generate(**self.current_params)
        if isinstance(hp, TimeSeries):
            df = self.current_params['delta_f']
            hp = hp.to_frequencyseries(delta_f=df)
            hc = hc.to_frequencyseries(delta_f=df)
            # time-domain waveforms will not be shifted so that the peak amp
            # happens at the end of the time series (as they are for f-domain),
            # so we add an additional shift to account for it
            tshift = 1./df - abs(hp._epoch)
            hp = apply_fd_time_shift(hp, tshift, copy=True)
            hc = apply_fd_time_shift(hc, tshift, copy=True)
        hp._epoch = hc._epoch = self._epoch
        h = {}
        for detname in self.detectors:
            # work on per-detector copies so that each detector's calibration
            # is applied to the base waveform; previously hp/hc themselves
            # were reassigned, so later detectors were calibrated on top of
            # earlier detectors' adjustments
            dethp = hp.copy()
            dethc = hc.copy()
            if self.recalib:
                # recalibrate with given calibration model
                dethp = self.recalib[detname].map_to_adjust(
                    dethp, **self.current_params)
                dethc = self.recalib[detname].map_to_adjust(
                    dethc, **self.current_params)
            h[detname] = (dethp, dethc)
        return h

    @staticmethod
    def select_rframe_generator(approximant):
        """Returns a radiation frame generator class based on the approximant
        string.
        """
        return select_waveform_generator(approximant)
class FDomainDetFrameModesGenerator(BaseFDomainDetFrameGenerator):
    """Generates frequency-domain waveform modes in a specific frame.

    Generates both polarizations of every waveform mode using the given
    radiation frame generator class, and applies the time shift. Detector
    response functions are not applied.

    Parameters
    ----------
    rFrameGeneratorClass : class
        The class to use for generating the waveform modes in the radiation
        frame, e.g., :py:class:`FDomainCBCModesGenerator`. This should be the
        class, not an instance of the class (the class will be initialized with
        the appropriate arguments internally). The class should have a generate
        function that returns a dictionary of waveforms keyed by the modes.
    detectors : {None, list of strings}
        The names of the detectors to use. If provided, all location parameters
        must be included in either the variable args or the frozen params. If
        None, the generate function will just return the plus polarization
        returned by the rFrameGeneratorClass shifted by any desired time shift.
    epoch : {float, lal.LIGOTimeGPS}
        The epoch start time to set the waveform to. A time shift = tc - epoch is
        applied to waveforms before returning.
    variable_args : {(), list or tuple}
        A list or tuple of strings giving the names and order of parameters
        that will be passed to the generate function.
    \**frozen_params
        Keyword arguments setting the parameters that will not be changed from
        call-to-call of the generate function.

    Attributes
    ----------
    detectors : dict
        The dictionary of detectors that antenna patterns are calculated for
        on each call of generate. If no detectors were provided, will be
        ``{'RF': None}``, where "RF" means "radiation frame".
    detector_names : list
        The list of detector names. If no detectors were provided, then this
        will be ['RF'] for "radiation frame".
    epoch : lal.LIGOTimeGPS
        The GPS start time of the frequency series returned by the generate
        function. A time shift is applied to the waveform equal to tc-epoch.
        Update by using ``set_epoch``.
    current_params : dict
        A dictionary of name, value pairs of the arguments that were last
        used by the generate function.
    rframe_generator : instance of rFrameGeneratorClass
        The instance of the radiation-frame generator that is used for waveform
        generation. All parameters in current_params except for the
        location params are passed to this class's generate function.
    frozen_location_args : dict
        Any location parameters that were included in the frozen_params.
    variable_args : tuple
        The list of names of arguments that are passed to the generate
        function.
    """
    location_args = set(['tc', 'ra', 'dec'])
    """ set(['tc', 'ra', 'dec']):
    The set of location parameters. These are not passed to the rFrame
    generator class; instead, they are used to apply the detector response
    function and/or shift the waveform in time. The parameters are:

    * tc: The GPS time of coalescence (should be geocentric time).
    * ra: Right ascension.
    * dec: declination

    All of these must be provided in either the variable args or the
    frozen params if detectors is not None. If detectors
    is None, tc may optionally be provided.
    """

    def generate(self, **kwargs):
        """Generates and returns a waveform decompsed into separate modes.

        Returns
        -------
        dict :
            Dictionary of ``detector names -> modes -> (ulm, vlm)``, where
            ``ulm, vlm`` are the frequency-domain representations of the real
            and imaginary parts, respectively, of the complex time series
            representation of the ``hlm``.
        """
        self.current_params.update(kwargs)
        # location parameters are handled here, not by the rframe generator
        rfparams = {param: self.current_params[param]
            for param in kwargs if param not in self.location_args}
        hlms = self.rframe_generator.generate(**rfparams)
        h = {det: {} for det in self.detectors}
        for mode in hlms:
            ulm, vlm = hlms[mode]
            if isinstance(ulm, TimeSeries):
                df = self.current_params['delta_f']
                ulm = ulm.to_frequencyseries(delta_f=df)
                vlm = vlm.to_frequencyseries(delta_f=df)
                # time-domain waveforms will not be shifted so that the peak
                # amplitude happens at the end of the time series (as they are
                # for f-domain), so we add an additional shift to account for
                # it
                tshift = 1./df - abs(ulm._epoch)
            else:
                tshift = 0.
            ulm._epoch = vlm._epoch = self._epoch
            if self.detector_names != ['RF']:
                for detname, det in self.detectors.items():
                    # apply the time shift: geocentric tc plus the light
                    # travel time to this detector
                    tc = self.current_params['tc'] + \
                        det.time_delay_from_earth_center(
                            self.current_params['ra'],
                            self.current_params['dec'],
                            self.current_params['tc'])
                    detulm = apply_fd_time_shift(ulm, tc+tshift, copy=True)
                    detvlm = apply_fd_time_shift(vlm, tc+tshift, copy=True)
                    if self.recalib:
                        # recalibrate with given calibration model
                        detulm = self.recalib[detname].map_to_adjust(
                            detulm, **self.current_params)
                        detvlm = self.recalib[detname].map_to_adjust(
                            detvlm, **self.current_params)
                    h[detname][mode] = (detulm, detvlm)
            else:
                # no detector response, just apply time shift
                if 'tc' in self.current_params:
                    ulm = apply_fd_time_shift(ulm,
                                              self.current_params['tc']+tshift,
                                              copy=False)
                    vlm = apply_fd_time_shift(vlm,
                                              self.current_params['tc']+tshift,
                                              copy=False)
                h['RF'][mode] = (ulm, vlm)
            if self.gates is not None:
                # resize all to nearest power of 2, then gate each part of
                # this mode separately
                ulms = {}
                vlms = {}
                for det in h:
                    ulm, vlm = h[det][mode]
                    ulm.resize(ceilpow2(len(ulm)-1) + 1)
                    vlm.resize(ceilpow2(len(vlm)-1) + 1)
                    ulms[det] = ulm
                    vlms[det] = vlm
                ulms = strain.apply_gates_to_fd(ulms, self.gates)
                # fix: gate the vlms; previously ulms was gated a second time,
                # so the imaginary part was replaced by the gated real part
                vlms = strain.apply_gates_to_fd(vlms, self.gates)
                for det in ulms:
                    h[det][mode] = (ulms[det], vlms[det])
        return h

    @staticmethod
    def select_rframe_generator(approximant):
        """Returns a radiation frame generator class based on the approximant
        string.
        """
        return select_waveform_modes_generator(approximant)
#
# =============================================================================
#
# Helper functions
#
# =============================================================================
#
def select_waveform_generator(approximant):
    """Returns the single-IFO generator for the approximant.

    Parameters
    ----------
    approximant : str
        Name of waveform approximant. Valid names can be found using
        ``pycbc.waveform`` methods.

    Returns
    -------
    generator : (PyCBC generator instance)
        A waveform generator object.

    Raises
    ------
    ValueError
        If the approximant is not recognized at all, or is listed in one of
        the ringdown/supernovae approximant dictionaries but has no
        corresponding generator class.

    Examples
    --------
    Get a list of available approximants:
    >>> from pycbc import waveform
    >>> waveform.fd_approximants()
    >>> waveform.td_approximants()
    >>> from pycbc.waveform import ringdown
    >>> ringdown.ringdown_fd_approximants.keys()

    Get generator object:
    >>> from pycbc.waveform.generator import select_waveform_generator
    >>> select_waveform_generator(waveform.fd_approximants()[0])
    """
    # check if frequency-domain CBC waveform
    if approximant in waveform.fd_approximants():
        return FDomainCBCGenerator
    # check if time-domain CBC waveform
    elif approximant in waveform.td_approximants():
        return TDomainCBCGenerator
    # check if frequency-domain ringdown waveform
    elif approximant in ringdown.ringdown_fd_approximants:
        if approximant == 'FdQNMfromFinalMassSpin':
            return FDomainMassSpinRingdownGenerator
        elif approximant == 'FdQNMfromFreqTau':
            return FDomainFreqTauRingdownGenerator
    # check if time-domain ringdown waveform
    elif approximant in ringdown.ringdown_td_approximants:
        if approximant == 'TdQNMfromFinalMassSpin':
            return TDomainMassSpinRingdownGenerator
        elif approximant == 'TdQNMfromFreqTau':
            return TDomainFreqTauRingdownGenerator
    # check if supernovae waveform:
    elif approximant in supernovae.supernovae_td_approximants:
        if approximant == 'CoreCollapseBounce':
            return TDomainSupernovaeGenerator
    # Reaching here means either the approximant is completely unknown, or
    # it appeared in one of the dictionaries above without a matching
    # generator class. The original code only raised in the former case and
    # silently returned None in the latter; raise explicitly for both.
    raise ValueError("%s is not a valid approximant." % approximant)
def select_waveform_modes_generator(approximant):
    """Returns the single-IFO modes generator for the approximant.

    Parameters
    ----------
    approximant : str
        Name of waveform approximant. Valid names can be found using
        ``pycbc.waveform`` methods.

    Returns
    -------
    generator : (PyCBC generator instance)
        A waveform generator object.
    """
    # Frequency-domain CBC approximants are checked first, then
    # time-domain ones; anything else has no modes generator.
    if approximant in waveform.fd_approximants():
        return FDomainCBCModesGenerator
    if approximant in waveform.td_approximants():
        return TDomainCBCModesGenerator
    raise ValueError("%s is not a valid approximant." % approximant)
| 51,496
| 42.238455
| 258
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/decompress_cuda.py
|
# Copyright (C) 2016 Josh Willis
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
import numpy
import mako.template
from pycuda import gpuarray
from pycuda.compiler import SourceModule
import pycbc.scheme
from pycbc.types import zeros
# The interpolation is the result of the call of two kernels.
#
# The first, find_block_indices(), will find the correct upper
# and lower indices into the frequency texture for each thread
# block in the second kernel. These are placed into global memory,
# as that is the only way to communicate between kernels. The
# indices are found by binary search on the sample frequencies
# texture.
#
# The second kernel, linear_interp, takes these upper and lower
# bounds, the texture of freqency samples, and textures containing
# values of the amplitude and phase at those frequencies, and fills
# an array with the (complex) value of the interpolated waveform.
#
# The three interpolation arrays (node locations, amplitude values,
# and phase values) are stored as 1D textures on the GPU, because many
# threads will need to read them concurrently but never write them, and
# the access pattern of a binary search precludes guaranteeing that
# sequential threads will access sequential memory locations.
kernel_sources = mako.template.Template("""
texture<float, 1> freq_tex;
texture<float, 1> amp_tex;
texture<float, 1> phase_tex;
__device__ int binary_search(float freq, int lower, int upper){
/*
Input parameters:
=================
freq: The target frequency
lower: The index into the frequency texture at which
to start the search
upper: The index into the frequency texture at which
to end the search
Return value:
=============
The largest index into the frequency texture for
which the value of the texture at that index is less
than or equal to the target frequency 'freq'.
*/
int begin = lower;
int end = upper;
while (begin != end){
int mid = (begin + end)/2;
float fcomp = tex1Dfetch(freq_tex, (float) mid);
if (fcomp <= freq){
begin = mid+1;
} else {
end = mid;
}
}
return begin-1;
}
__global__ void find_block_indices(int *lower, int *upper, int texlen,
float df, float flow, float fmax){
/*
Input parameters:
=================
texlen: The length of the sample frequency texture
df: The difference between successive frequencies in the
output array
flow: The minimum frequency at which to generate an interpolated
waveform
Global variable:
===================
freq_tex: Texture of sample frequencies (its length is texlen)
Output parameters:
==================
lower: array of indices, one per thread block, of the lower
limit for each block within the frequency arrays.
upper: array of indices, one per thread block, of the upper
limit for each block within the frequency arrays.
*/
// This kernel is launched with only one block; the number of
// threads will equal the number of blocks in the next kernel.
int i = threadIdx.x;
// We want to find the index of the smallest freqency in our
// texture which is greater than the freqency fmatch below:
float ffirst = i*df*${ntpb};
float flast = (i+1)*df*${ntpb}-df;
if (ffirst < flow){
ffirst = flow;
}
lower[i] = binary_search(ffirst, 0, texlen);
upper[i] = binary_search(flast, 0, texlen) + 1;
return;
}
__global__ void linear_interp(float2 *h, float df, int hlen,
float flow, float fmax, int texlen,
int *lower, int *upper){
/*
Input parameters:
=================
df: The difference between successive frequencies in the
output array
hlen: The length of the output array
flow: The minimum frequency at which to generate an interpolated
waveform
fmax: The maximum frequency in the sample frequency texture; i.e.,
freq_tex[texlen-1]
texlen: The common length of the three sample textures
lower: Array that for each thread block stores the index into the
sample frequency array of the largest sample frequency that
is less than or equal to the smallest frequency considered
by that thread block.
upper: Array that for each thread block stores the index into the
sample frequency array of the smallest sample frequency that
is greater than the next frequency considered *after* that
thread block.
Global variables:
===================
freq_tex: Texture of sample frequencies (its length is texlen)
amp_tex: Texture of amplitudes corresponding to sample frequencies
phase_tex: Texture of phases corresponding to sample frequencies
Output parameters:
==================
h: array of complex
*/
__shared__ int low[1];
__shared__ int high[1];
int idx;
float2 tmp;
float amp, freq, phase, inv_df, x, y;
float a0, a1, f0, f1, p0, p1;
// Load values in global memory into shared memory that
// all threads in this block will use:
if (threadIdx.x == 0) {
low[0] = lower[blockIdx.x];
high[0] = upper[blockIdx.x];
}
__syncthreads();
int i = ${ntpb}*blockIdx.x + threadIdx.x;
if (i < hlen){
freq = df*i;
if ( (freq<flow) || (freq>fmax) ){
tmp.x = 0.0;
tmp.y = 0.0;
} else {
idx = binary_search(freq, low[0], high[0]);
if (idx < texlen-1) {
f0 = tex1Dfetch(freq_tex, idx);
f1 = tex1Dfetch(freq_tex, idx+1);
inv_df = 1.0/(f1-f0);
a0 = tex1Dfetch(amp_tex, idx);
a1 = tex1Dfetch(amp_tex, idx+1);
p0 = tex1Dfetch(phase_tex, idx);
p1 = tex1Dfetch(phase_tex, idx+1);
amp = a0*inv_df*(f1-freq) + a1*inv_df*(freq-f0);
phase = p0*inv_df*(f1-freq) + p1*inv_df*(freq-f0);
} else {
// We must have idx = texlen-1, so this frequency
// exactly equals fmax
amp = tex1Dfetch(amp_tex, idx);
phase = tex1Dfetch(phase_tex, idx);
}
__sincosf(phase, &y, &x);
tmp.x = amp*x;
tmp.y = amp*y;
}
h[i] = tmp;
}
return;
}
""")
# Cache of compiled kernels and texture references, keyed by the number of
# thread blocks needed for a given output length.
dckernel_cache = {}

def get_dckernel(slen):
    """Return the compiled decompression kernels for an output length.

    Results are cached per block count, so repeated calls with lengths
    that map to the same number of blocks reuse the compiled module.

    Parameters
    ----------
    slen : int
        Length of the output frequency series to be filled.

    Returns
    -------
    tuple
        ``(find_block_indices, linear_interp, freq_tex, amp_tex,
        phase_tex, nt, nb)`` where ``nt`` is the threads per block and
        ``nb`` the number of blocks.

    Raises
    ------
    ValueError
        If more than 1024 blocks would be required.
    """
    # Right now, hardcoding the number of threads per block
    nt = 1024
    # Derive the block count from nt rather than repeating the literal, so
    # the two values cannot silently drift apart if nt is ever changed.
    nb = int(numpy.ceil(slen / float(nt)))
    if nb > 1024:
        raise ValueError("More than 1024 blocks not supported yet")
    try:
        return dckernel_cache[nb]
    except KeyError:
        # First request for this block count: compile and cache.
        mod = SourceModule(kernel_sources.render(ntpb=nt, nblocks=nb))
        freq_tex = mod.get_texref("freq_tex")
        amp_tex = mod.get_texref("amp_tex")
        phase_tex = mod.get_texref("phase_tex")
        fn1 = mod.get_function("find_block_indices")
        fn1.prepare("PPifff", texrefs=[freq_tex])
        fn2 = mod.get_function("linear_interp")
        fn2.prepare("PfiffiPP", texrefs=[freq_tex, amp_tex, phase_tex])
        dckernel_cache[nb] = (fn1, fn2, freq_tex, amp_tex, phase_tex, nt, nb)
        return dckernel_cache[nb]
class CUDALinearInterpolate(object):
    """GPU linear interpolator onto a regularly-spaced output series.

    Compiles (or fetches from the module cache) the CUDA kernels sized for
    ``output`` and keeps device pointers to the output buffer and per-block
    index arrays so :meth:`interpolate` can be called repeatedly.
    """
    def __init__(self, output):
        # Raw device pointer to the output series' data buffer.
        self.output = output.data.gpudata
        self.df = numpy.float32(output.delta_f)
        self.hlen = numpy.int32(len(output))
        lookups = get_dckernel(self.hlen)
        # Prepared kernels, texture references, and launch geometry.
        self.fn1 = lookups[0]
        self.fn2 = lookups[1]
        self.freq_tex = lookups[2]
        self.amp_tex = lookups[3]
        self.phase_tex = lookups[4]
        self.nt = lookups[5]
        self.nb = lookups[6]
        # One (lower, upper) index pair per thread block, filled on the
        # device by find_block_indices.
        self.lower = zeros(self.nb, dtype=numpy.int32).data.gpudata
        self.upper = zeros(self.nb, dtype=numpy.int32).data.gpudata

    def interpolate(self, flow, freqs, amps, phases):
        """Fill the output series by interpolating (freqs, amps, phases).

        Frequencies below ``flow`` or above ``freqs[-1]`` are set to zero
        by the kernel. ``freqs``, ``amps`` and ``phases`` are host arrays;
        they are copied to the GPU and bound to the kernel textures here.
        """
        flow = numpy.float32(flow)
        texlen = numpy.int32(len(freqs))
        fmax = numpy.float32(freqs[texlen-1])
        # Upload the sample arrays and bind them to the textures the
        # compiled kernels read from.
        freqs_gpu = gpuarray.to_gpu(freqs)
        freqs_gpu.bind_to_texref_ext(self.freq_tex, allow_offset=False)
        amps_gpu = gpuarray.to_gpu(amps)
        amps_gpu.bind_to_texref_ext(self.amp_tex, allow_offset=False)
        phases_gpu = gpuarray.to_gpu(phases)
        phases_gpu.bind_to_texref_ext(self.phase_tex, allow_offset=False)
        fn1 = self.fn1.prepared_call
        fn2 = self.fn2.prepared_call
        # First kernel computes per-block search bounds; second performs
        # the interpolation proper.
        fn1((1, 1), (self.nb, 1, 1), self.lower, self.upper, texlen, self.df, flow, fmax)
        fn2((self.nb, 1), (self.nt, 1, 1), self.output, self.df, self.hlen, flow, fmax, texlen, self.lower, self.upper)
        # Block until the kernels have finished writing the output buffer.
        pycbc.scheme.mgr.state.context.synchronize()
        return
def inline_linear_interp(amps, phases, freqs, output, df, flow, imin, start_index):
    """One-shot GPU linear interpolation of a compressed waveform.

    Uploads the sample arrays, binds them to the kernel textures, launches
    the two interpolation kernels, and returns ``output`` filled in place.
    Only single precision is supported on the CUDA scheme.
    """
    # Note that imin and start_index are ignored in the GPU code; they are only
    # needed for CPU.
    if output.precision == 'double':
        raise NotImplementedError("Double precision linear interpolation not currently supported on CUDA scheme")
    flow = numpy.float32(flow)
    texlen = numpy.int32(len(freqs))
    fmax = numpy.float32(freqs[texlen-1])
    hlen = numpy.int32(len(output))
    (fn1, fn2, ftex, atex, ptex, nt, nb) = get_dckernel(hlen)
    # Copy the sample arrays to the device and bind to the textures.
    freqs_gpu = gpuarray.to_gpu(freqs)
    freqs_gpu.bind_to_texref_ext(ftex, allow_offset=False)
    amps_gpu = gpuarray.to_gpu(amps)
    amps_gpu.bind_to_texref_ext(atex, allow_offset=False)
    phases_gpu = gpuarray.to_gpu(phases)
    phases_gpu.bind_to_texref_ext(ptex, allow_offset=False)
    fn1 = fn1.prepared_call
    fn2 = fn2.prepared_call
    df = numpy.float32(df)
    g_out = output.data.gpudata
    # Scratch arrays for per-block search bounds.
    lower = zeros(nb, dtype=numpy.int32).data.gpudata
    upper = zeros(nb, dtype=numpy.int32).data.gpudata
    fn1((1, 1), (nb, 1, 1), lower, upper, texlen, df, flow, fmax)
    fn2((nb, 1), (nt, 1, 1), g_out, df, hlen, flow, fmax, texlen, lower, upper)
    # Ensure kernels have completed before handing the buffer back.
    pycbc.scheme.mgr.state.context.synchronize()
    return output
| 11,138
| 32.250746
| 119
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/nltides.py
|
""" Utilities for introducing nonlinear tidal effects into waveform approximants
"""
import pycbc.conversions
import numpy
import lal
def nltides_fourier_phase_difference(f, delta_f, f0, amplitude, n, m1, m2):
    """Calculate the change to the Fourier phase due to non-linear tides.

    Note that the Fourier phase Psi(f) is not the same as the
    gravitational-wave phase phi(f); it is computed as

        Delta Psi(f) = 2 \\pi f Delta t(f) - Delta phi(f)

    Parameters
    ----------
    f: numpy.array
        Array of frequency values to calculate the fourier phase difference
    delta_f: float
        Frequency resolution of f array
    f0: float
        Frequency that NL effects switch on
    amplitude: float
        Amplitude of effect
    n: float
        Growth dependence of effect
    m1: float
        Mass of component 1
    m2: float
        Mass of component 2

    Returns
    -------
    delta_psi: numpy.array
        Fourier phase as a function of frequency
    """
    # Index of the first bin at or above the switch-on frequency f0.
    k0 = int(f0/delta_f)
    nf = len(f)
    f_ref, t_fac, phi_fac = \
        pycbc.conversions.nltides_coefs(amplitude, n, m1, m2)
    # Below f0 the effect is frozen at its f0 value:
    # -Delta phi contribution ...
    psi_low = numpy.ones(k0)
    psi_low *= - phi_fac * (f0/f_ref)**(n-3.)
    # ... plus the 2 pi f Delta t(f) contribution.
    psi_low += 2.0 * lal.lal.PI * f[0:k0] * t_fac * (f0/f_ref)**(n-4.)
    # Above f0 both contributions track the running frequency.
    psi_high = - phi_fac * (f[k0:nf]/f_ref)**(n-3.)
    psi_high += 2.0 * lal.lal.PI * f[k0:nf] * t_fac * \
        (f[k0:nf]/f_ref)**(n-4.)
    # Stitch the two regimes together over the full frequency array.
    return numpy.concatenate((psi_low, psi_high), axis=0)
def nonlinear_tidal_spa(**kwds):
    """Generates a frequency-domain waveform that implements the
    TaylorF2+NL tide model described in https://arxiv.org/abs/1808.07013
    """
    from pycbc import waveform
    from pycbc.types import Array

    # Base waveform: the standard TaylorF2 approximant.
    kwds.pop('approximant')
    hp, hc = waveform.get_fd_waveform(approximant="TaylorF2", **kwds)

    # Phase correction from the nonlinear tides, applied to both
    # polarizations as a complex factor exp(-i * Delta Psi(f)).
    freqs = numpy.arange(len(hp)) * hp.delta_f
    dpsi = nltides_fourier_phase_difference(freqs,
                                            hp.delta_f,
                                            kwds['f0'], kwds['amplitude'],
                                            kwds['n'],
                                            kwds['mass1'], kwds['mass2'])
    rotation = Array(numpy.exp(-1.0j * dpsi), dtype=hp.dtype)
    hp *= rotation
    hc *= rotation
    return hp, hc
| 2,683
| 30.952381
| 80
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/decompress_cpu.py
|
# Copyright (C) 2016 Alex Nitz, Collin Capano
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
""" Utilities for handling frequency compressed an unequally spaced frequency
domain waveforms.
"""
import numpy
from ..types import real_same_precision_as
from ..types import complex_same_precision_as
from .decompress_cpu_cython import decomp_ccode_double, decomp_ccode_float
def inline_linear_interp(amp, phase, sample_frequencies, output,
                         df, f_lower, imin, start_index):
    """Fill ``output`` by linearly interpolating the compressed
    amplitude/phase representation onto its regular frequency grid.

    Dispatches to the cython kernel matching the precision of ``output``
    and returns ``output`` filled in place.
    """
    rprec = real_same_precision_as(output)
    cprec = complex_same_precision_as(output)
    # Zero-copy views with the dtypes the cython kernels expect.
    node_freqs = numpy.array(sample_frequencies, copy=False, dtype=rprec)
    node_amps = numpy.array(amp, copy=False, dtype=rprec)
    node_phases = numpy.array(phase, copy=False, dtype=rprec)
    out_view = numpy.array(output.data, copy=False, dtype=cprec)
    # Pick the precision-matched kernel.
    kernel = (decomp_ccode_float if output.precision == 'single'
              else decomp_ccode_double)
    kernel(out_view, float(df), len(output), start_index, node_freqs,
           node_amps, node_phases, len(node_freqs), imin)
    return output
| 2,155
| 40.461538
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/waveform/SpinTaylorF2.py
|
# Copyright (C) 2013 Haris K
# Ported from LALSimulation's LALSimInspiralSpinTaylorF2.c
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with with program; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import lal
import numpy
from numpy import sqrt, double, complex128
from math import pow, log, cos, sin, acos, atan2
from pycuda.elementwise import ElementwiseKernel
from pycbc.libutils import pkg_config_header_strings
from pycbc.types import FrequencySeries, zeros
from pycbc.waveform.utils import ceilpow2
preamble = """
#include <lal/LALConstants.h>
#include <cuComplex.h>
"""
spintaylorf2_text = """
const double f = (i + kmin ) * delta_f;
const double v0 = cbrt(piM * kmin * delta_f);
const double v = cbrt(piM*f);
const double v2 = v * v;
const double v3 = v * v2;
const double v4 = v * v3;
const double v5 = v * v4;
const double v6 = v * v5;
const double v7 = v * v6;
const double v8 = v * v7;
const double v9 = v * v8;
const double v10 = v * v9;
double phasing = 0.;
double dEnergy = 0.;
double flux = 0.;
double amp;
double shft = -LAL_TWOPI * tC;
double RE_prec_facP;
double IM_prec_facP;
double RE_prec_facC;
double IM_prec_facC;
switch (phase_order)
{
case -1:
case 7:
phasing += pfa7 * v7;
case 6:
phasing += (pfa6 + pfl6 * log(4.*v) ) * v6;
case 5:
phasing += (pfa5 + pfl5 * log(v/v0)) * v5;
case 4:
phasing += pfa4 * v4;
case 3:
phasing += pfa3 * v3;
case 2:
phasing += pfa2 * v2;
case 0:
phasing += 1.;
break;
default:
break;
}
switch (amplitude_order)
{
case -1:
case 7:
flux += FTa7 * v7;
case 6:
flux += ( FTa6 + FTl6*log(16.*v2)) * v6;
dEnergy += dETa3 * v6;
case 5:
flux += FTa5 * v5;
case 4:
flux += FTa4 * v4;
dEnergy += dETa2 * v4;
case 3:
flux += FTa3 * v3;
case 2:
flux += FTa2 * v2;
dEnergy += dETa1 * v2;
case 0:
flux += 1;
dEnergy += 1.;
break;
}
phasing *= pfaN / v5;
flux *= FTaN * v10;
dEnergy *= dETaN * v;
const double gam = gamma0*v;
const double sqrtfac = sqrt(1. + 2.*kappa*gam + gam*gam);
const double logv = log(v);
const double logfac1 = log(1. + kappa*gam + sqrtfac);
const double logfac2 = log(kappa + gam + sqrtfac);
const double kappa2 = kappa * kappa;
const double kappa3 = kappa2 * kappa;
const double gamma02 = gamma0 * gamma0;
const double gamma03 = gamma02 *gamma0;
const double alpha = prec_fac0*( logfac2 *( dtdv2*gamma0 + dtdv3*kappa -
dtdv5*kappa/(2.*gamma02) + dtdv4/(2.*gamma0) -
dtdv4*kappa2/(2.*gamma0) + (dtdv5*kappa3)/(2.*gamma02) ) +
logfac1*( - dtdv2*gamma0*kappa - dtdv3 + kappa*gamma03/2. -
gamma03*kappa3/2. ) + logv *( dtdv2*gamma0*kappa + dtdv3 -
kappa*gamma03/2. + gamma03*kappa3/2. ) + sqrtfac *( dtdv3 +
dtdv4*v/2. + dtdv5/gamma02/3. + dtdv4*kappa/(2.*gamma0) +
dtdv5*kappa*v/(6.*gamma0) - dtdv5*kappa2/(2.*gamma02) - 1/(3.*v3) -
gamma0*kappa/(6.*v2) - dtdv2/v - gamma02/(3.*v) +
gamma02*kappa2/(2.*v) + dtdv5*v2/3. )) - alpha_ref;
const double beta = acos((1. + kappa*gamma0*v)/sqrt(1. + 2.*kappa*gamma0*v + gamma0*gamma0*v*v));
const double zeta = prec_fac0*( dtdv3*gamma0*kappa*v + dtdv4*v +
logfac2 *(-dtdv2*gamma0 - dtdv3*kappa + dtdv5*kappa/(2.*gamma02) -
dtdv4/(2.*gamma0) + dtdv4*kappa2/(2.*gamma0) -
dtdv5*kappa3/(2.*gamma02) ) + logv *( kappa*gamma03/2. -
gamma03*kappa3/2. ) + logfac1 *( dtdv2*gamma0*kappa + dtdv3 -
kappa*gamma03/2. + gamma03*kappa3/2. ) - 1/(3.*v3) -
gamma0*kappa/(2.*v2) - dtdv2/v + dtdv4*gamma0*kappa*v2/2. +
dtdv5*v2/2. + sqrtfac *( -dtdv3 - dtdv4*v/2. - dtdv5/(3.*gamma02) -
dtdv4*kappa/(2.*gamma0) - dtdv5*kappa*v/(6.*gamma0) +
dtdv5*kappa2/(2.*gamma02) + 1/(3.*v3) + gamma0*kappa/(6.*v2) +
dtdv2/v + gamma02/(3.*v) - gamma02*kappa2/(2.*v) - dtdv5*v2/3. ) +
dtdv5*gamma0*kappa*v3/3. ) - zeta_ref;
double CBeta;
double SBeta;
double SAlpha1;
double SAlpha2;
double SAlpha3;
double SAlpha4;
double CAlpha1;
double CAlpha2;
double CAlpha3;
double CAlpha4;
sincos(beta/2.,&SBeta,&CBeta);
sincos(-alpha,&SAlpha1,&CAlpha1);
sincos(-2.*alpha,&SAlpha2,&CAlpha2);
sincos(-3.*alpha,&SAlpha3,&CAlpha3);
sincos(-4.*alpha,&SAlpha4,&CAlpha4);
const double CBeta2 = CBeta * CBeta;
const double CBeta3 = CBeta * CBeta2;
const double CBeta4 = CBeta * CBeta3;
const double SBeta2 = SBeta * SBeta;
const double SBeta3 = SBeta * SBeta2;
const double SBeta4 = SBeta * SBeta3;
RE_prec_facP = ( cos(2.*psiJ_P) *
( SBeta4 * RE_SBfac4 * CAlpha4
+ CBeta * SBeta3 * RE_SBfac3 * CAlpha3
+ CBeta2 * SBeta2 * RE_SBfac2 * CAlpha2
+ CBeta3 * SBeta * RE_SBfac1 * CAlpha1
+ CBeta4 * RE_SBfac0 )
- sin(2.*psiJ_P) *
( SBeta4 * IM_SBfac4 * SAlpha4
+ CBeta * SBeta3 * IM_SBfac3 * SAlpha3
+ CBeta2 * SBeta2 * IM_SBfac2 * SAlpha2
+ CBeta3 * SBeta * IM_SBfac1 * SAlpha1
+ CBeta4 * IM_SBfac0 * 0 ));
IM_prec_facP = ( cos(2.*psiJ_P) *
( SBeta4 * RE_SBfac4 * SAlpha4
+ CBeta * SBeta3 * RE_SBfac3 * SAlpha3
+ CBeta2 * SBeta2 * RE_SBfac2 * SAlpha2
+ CBeta3 * SBeta * RE_SBfac1 * SAlpha1
+ CBeta4 * RE_SBfac0 * 0 )
+ sin(2.*psiJ_P) *
( SBeta4 * IM_SBfac4 * CAlpha4
+ CBeta * SBeta3 * IM_SBfac3 * CAlpha3
+ CBeta2 * SBeta2 * IM_SBfac2 * CAlpha2
+ CBeta3 * SBeta * IM_SBfac1 * CAlpha1
+ CBeta4 * IM_SBfac0 ));
RE_prec_facC = ( cos(2.*psiJ_C) *
( SBeta4 * RE_SBfac4 * CAlpha4
+ CBeta * SBeta3 * RE_SBfac3 * CAlpha3
+ CBeta2 * SBeta2 * RE_SBfac2 * CAlpha2
+ CBeta3 * SBeta * RE_SBfac1 * CAlpha1
+ CBeta4 * RE_SBfac0 )
- sin(2.*psiJ_C) *
( SBeta4 * IM_SBfac4 * SAlpha4
+ CBeta * SBeta3 * IM_SBfac3 * SAlpha3
+ CBeta2 * SBeta2 * IM_SBfac2 * SAlpha2
+ CBeta3 * SBeta * IM_SBfac1 * SAlpha1
+ CBeta4 * IM_SBfac0 * 0 ));
IM_prec_facC = ( cos(2.*psiJ_C) *
( SBeta4 * RE_SBfac4 * SAlpha4
+ CBeta * SBeta3 * RE_SBfac3 * SAlpha3
+ CBeta2 * SBeta2 * RE_SBfac2 * SAlpha2
+ CBeta3 * SBeta * RE_SBfac1 * SAlpha1
+ CBeta4 * RE_SBfac0 * 0 )
+ sin(2.*psiJ_C) *
( SBeta4 * IM_SBfac4 * CAlpha4
+ CBeta * SBeta3 * IM_SBfac3 * CAlpha3
+ CBeta2 * SBeta2 * IM_SBfac2 * CAlpha2
+ CBeta3 * SBeta * IM_SBfac1 * CAlpha1
+ CBeta4 * IM_SBfac0 ));
phasing += shft * f - 2. * phi0; // FIXME:: Sign of phi0?
phasing += 2.*zeta;
amp = amp0 * sqrt(-dEnergy/flux) * v;
const double CPhasing = amp * cos(phasing - LAL_PI_4);
const double SPhasing = amp * sin(phasing - LAL_PI_4);
htildeP[i]._M_re = RE_prec_facP * CPhasing + IM_prec_facP * SPhasing ;
htildeP[i]._M_im = IM_prec_facP * CPhasing - RE_prec_facP * SPhasing ;
htildeC[i]._M_re = RE_prec_facC * CPhasing + IM_prec_facC * SPhasing ;
htildeC[i]._M_im = IM_prec_facC * CPhasing - RE_prec_facC * SPhasing ;
"""
spintaylorf2_kernel = ElementwiseKernel("""pycuda::complex<double> *htildeP,
pycuda::complex<double> *htildeC,
int kmin, int phase_order,
int amplitude_order, double delta_f,
double piM, double pfaN,
double pfa2, double pfa3,
double pfa4, double pfa5,
double pfl5, double pfa6,
double pfl6, double pfa7,
double FTaN, double FTa2,
double FTa3, double FTa4,
double FTa5, double FTa6,
double FTl6, double FTa7,
double dETaN, double dETa1,
double dETa2, double dETa3,
double amp0, double tC, double phi0,
double kappa, double prec_fac0,
double alpha_ref, double zeta_ref,
double dtdv2, double dtdv3,
double dtdv4, double dtdv5,
double RE_SBfac0, double RE_SBfac1,
double RE_SBfac2, double RE_SBfac3,
double RE_SBfac4, double IM_SBfac0,
double IM_SBfac1, double IM_SBfac2,
double IM_SBfac3, double IM_SBfac4,
double psiJ_P, double psiJ_C,
double gamma0""",
spintaylorf2_text, "spintaylorf2_kernel",
preamble=preamble, options=pkg_config_header_strings(['lal']))
def spintaylorf2(**kwds):
    """ Return a SpinTaylorF2 waveform using CUDA to generate the phase and amplitude

    Required keyword arguments (read from ``kwds``): ``f_lower``,
    ``delta_f``, ``distance``, ``mass1``, ``mass2``, ``spin1x``,
    ``spin1y``, ``spin1z``, ``coa_phase``, ``phase_order``,
    ``amplitude_order``, ``inclination``. Only the spin of body 1 is used;
    spin2 is implicitly zero (see the FIXME notes below).

    Returns
    -------
    htildeP, htildeC : FrequencySeries
        Complex frequency-domain plus and cross polarizations, filled on
        the GPU between kmin and kmax.
    """
    #####Pull out the input arguments#####
    f_lower = double(kwds['f_lower'])
    delta_f = double(kwds['delta_f'])
    distance = double(kwds['distance'])
    mass1 = double(kwds['mass1'])
    mass2 = double(kwds['mass2'])
    spin1x = double(kwds['spin1x'])
    spin1y = double(kwds['spin1y'])
    spin1z = double(kwds['spin1z'])
    phi0 = double(kwds['coa_phase'])    #Orbital Phase at coalescence
    phase_order = int(kwds['phase_order'])
    amplitude_order = int(kwds['amplitude_order'])
    inclination = double(kwds['inclination'])

    # Unit vector along the orbital angular momentum, in the plane
    # defined by the inclination.
    lnhatx = sin(inclination)
    lnhaty = 0.
    lnhatz = cos(inclination)
    psi = 0.

    # Coalescence-time shift placing the merger at the end of the segment.
    tC = -1.0 / delta_f
    M = mass1 + mass2
    eta = mass1 * mass2 / (M * M)
    m_sec = M * lal.MTSUN_SI
    piM = lal.PI * m_sec
    vISCO = 1. / sqrt(6.)
    fISCO = vISCO * vISCO * vISCO / piM
    f_max = ceilpow2(fISCO)
    n = int(f_max / delta_f + 1)
    kmax = int(fISCO / delta_f)
    kmin = int(numpy.ceil(f_lower / delta_f))
    kmax = kmax if (kmax<n) else n

    #####Calculate the Orientation#####
    v0 = pow(piM *  kmin * delta_f,1./3)
    chi = sqrt(spin1x**2+spin1y**2+spin1z**2)
    # cos of the angle between L and S1; defaults to 1 for zero spin.
    kappa = (lnhatx*spin1x+lnhaty*spin1y+lnhatz*spin1z)/chi if (chi > 0.) else 1.
    Jx0 = mass1*mass2*lnhatx/v0 + mass1*mass1*spin1x
    Jy0 = mass1*mass2*lnhaty/v0 + mass1*mass1*spin1y
    Jz0 = mass1*mass2*lnhatz/v0 + mass1*mass1*spin1z
    thetaJ = acos(Jz0 / sqrt(Jx0**2+Jy0**2+Jz0**2))
    psiJ = atan2(Jy0, -Jx0) # FIXME: check that Jy0 and Jx0 are not both 0
    # Rotate Lnhat back to frame where J is along z, to figure out initial alpha
    rotLx = lnhatx*cos(thetaJ)*cos(psiJ) - lnhaty*cos(thetaJ)*sin(psiJ) + lnhatz*sin(thetaJ)
    rotLy = lnhatx*sin(psiJ) + lnhaty*cos(psiJ)
    alpha0 = atan2(rotLy, rotLx) # FIXME: check that rotLy and rotLx are not both 0
    psiJ_P =psiJ + psi
    psiJ_C =psiJ + psi + lal.PI/4.

    #####Calculate the Coefficients#####
    #quadparam = 1.
    gamma0 = mass1*chi/mass2

    #Calculate the spin corrections
    # FIXME should use pycbc's function, but sigma has different expression
    # in Andy's code, double check
    # pn_beta, pn_sigma, pn_gamma = pycbc.pnutils.mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(
    #                    mass1, mass2, chi*kappa, 0)  # FIXME: spin2 is taken to be 0
    pn_beta = (113.*mass1/(12.*M) - 19.*eta/6.)*chi*kappa
    pn_sigma = ( (5.*(3.*kappa*kappa-1.)/2.) + (7. - kappa*kappa)/96. ) * (mass1*mass1*chi*chi/M/M)
    pn_gamma = (5.*(146597. + 7056.*eta)*mass1/(2268.*M) - 10.*eta*(1276. + 153.*eta)/81.)*chi*kappa
    prec_fac0 = 5.*(4. + 3.*mass2/mass1)/64.
    dtdv2 = 743./336. + 11.*eta/4.
    dtdv3 = -4.*lal.PI + pn_beta
    dtdv4 = 3058673./1016064. + 5429.*eta/1008. + 617.*eta*eta/144. - pn_sigma
    dtdv5 = (-7729./672.+13.*eta/8.)*lal.PI + 9.*pn_gamma/40.

    #####Calculate the Initial Euler Angles alpha_ref, beta_ref=0 and zeta_ref#####
    gam = gamma0*v0
    sqrtfac = sqrt(1. + 2.*kappa*gam + gam*gam)
    logv0 = log(v0)
    logfac1 = log(1. + kappa*gam + sqrtfac)
    logfac2 = log(kappa + gam + sqrtfac)
    v02 = v0 * v0
    v03 = v0 * v02
    kappa2 = kappa * kappa
    kappa3 = kappa2 * kappa
    gamma02 = gamma0 * gamma0
    gamma03 = gamma02 *gamma0
    # Closed-form reference Euler angles at v0; mirrors the per-bin CUDA
    # expressions so that alpha(v0) = 0 and zeta(v0) = 0 by construction.
    alpha_ref = prec_fac0*( logfac2 *( dtdv2*gamma0 + dtdv3*kappa - dtdv5*kappa/(2.*gamma02) + dtdv4/(2.*gamma0) - dtdv4*kappa2/(2.*gamma0) + (dtdv5*kappa3)/(2.*gamma02) ) + logfac1*( - dtdv2*gamma0*kappa - dtdv3 + kappa*gamma03/2. - gamma03*kappa3/2. ) + logv0 *( dtdv2*gamma0*kappa + dtdv3 - kappa*gamma03/2. + gamma03*kappa3/2. ) + sqrtfac *( dtdv3 + dtdv4*v0/2. + dtdv5/gamma02/3. + dtdv4*kappa/(2.*gamma0) + dtdv5*kappa*v0/(6.*gamma0) - dtdv5*kappa2/(2.*gamma02) - 1/(3.*v03) - gamma0*kappa/(6.*v02) - dtdv2/v0 - gamma02/(3.*v0) + gamma02*kappa2/(2.*v0) + dtdv5*v02/3. )) - alpha0
    zeta_ref = prec_fac0*( dtdv3*gamma0*kappa*v0 + dtdv4*v0 + logfac2 *(-dtdv2*gamma0 - dtdv3*kappa + dtdv5*kappa/(2.*gamma02) - dtdv4/(2.*gamma0) + dtdv4*kappa2/(2.*gamma0) - dtdv5*kappa3/(2.*gamma02) ) + logv0 *( kappa*gamma03/2. - gamma03*kappa3/2. ) + logfac1 *( dtdv2*gamma0*kappa + dtdv3 - kappa*gamma03/2. + gamma03*kappa3/2. ) - 1/(3.*v03) - gamma0*kappa/(2.*v02) - dtdv2/v0 + dtdv4*gamma0*kappa*v02/2. + dtdv5*v02/2. + sqrtfac *( -dtdv3 - dtdv4*v0/2. - dtdv5/(3.*gamma02) - dtdv4*kappa/(2.*gamma0) - dtdv5*kappa*v0/(6.*gamma0) + dtdv5*kappa2/(2.*gamma02) + 1/(3.*v03) + gamma0*kappa/(6.*v02) + dtdv2/v0 + gamma02/(3.*v0) - gamma02*kappa2/(2.*v0) - dtdv5*v02/3. ) + dtdv5*gamma0*kappa*v03/3. )

    #####Calculate the Complex sideband factors, mm=2 is first entry#####
    RE_SBfac0= (1.+cos(thetaJ)**2)/2.
    RE_SBfac1= sin(2.*thetaJ)
    RE_SBfac2= 3.*sin(thetaJ)**2
    RE_SBfac3= -sin(2.*thetaJ)
    RE_SBfac4= (1.+cos(thetaJ)**2)/2.
    IM_SBfac0= -cos(thetaJ)
    IM_SBfac1= -2.*sin(thetaJ)
    IM_SBfac2= 0.
    IM_SBfac3= -2.*sin(thetaJ)
    IM_SBfac4= cos(thetaJ)

    #####Calculate the PN terms # FIXME replace with functions in lalsimulation #####
    theta = -11831./9240.
    lambdaa = -1987./3080.0
    pfaN = 3.0/(128.0 * eta)
    pfa2 = 5.0*(743.0/84 + 11.0 * eta)/9.0
    pfa3 = -16.0*lal.PI + 4.0*pn_beta
    pfa4 = 5.0*(3058.673/7.056 + 5429.0/7.0 * eta + 617.0 * eta*eta)/72.0 - \
            10.0*pn_sigma
    pfa5 = 5.0/9.0 * (7729.0/84.0 - 13.0 * eta) * lal.PI - pn_gamma
    pfl5 = 5.0/3.0 * (7729.0/84.0 - 13.0 * eta) * lal.PI - pn_gamma * 3
    pfa6 = (11583.231236531/4.694215680 - 640.0/3.0 * lal.PI * lal.PI- \
            6848.0/21.0*lal.GAMMA) + \
            eta * (-15335.597827/3.048192 + 2255./12. * lal.PI * \
            lal.PI - 1760./3.*theta +12320./9.*lambdaa) + \
            eta*eta * 76055.0/1728.0 - \
            eta*eta*eta*  127825.0/1296.0
    pfl6 = -6848.0/21.0
    pfa7 = lal.PI * 5.0/756.0 * ( 15419335.0/336.0 + 75703.0/2.0 * eta - \
            14809.0 * eta*eta)
    FTaN = 32.0 * eta*eta / 5.0
    FTa2 = -(12.47/3.36 + 3.5/1.2 * eta)
    FTa3 = 4.0 * lal.PI
    FTa4 = -(44.711/9.072 - 92.71/5.04 * eta - 6.5/1.8 * eta*eta)
    FTa5 = -(81.91/6.72 + 58.3/2.4 * eta) * lal.PI
    FTa6 = (664.3739519/6.9854400 + 16.0/3.0 * lal.PI*lal.PI -
            17.12/1.05 * lal.GAMMA +
            (4.1/4.8 * lal.PI*lal.PI - 134.543/7.776) * eta -
            94.403/3.024 * eta*eta - 7.75/3.24 * eta*eta*eta)
    FTl6 = -8.56/1.05
    FTa7 = -(162.85/5.04 - 214.745/1.728 * eta - 193.385/3.024 * eta*eta) \
            * lal.PI
    dETaN = 2 * -eta/2.0
    dETa1 = 2 * -(3.0/4.0 + 1.0/12.0 * eta)
    dETa2 = 3 * -(27.0/8.0 - 19.0/8.0 * eta + 1./24.0 * eta*eta)
    dETa3 = 4 * -(67.5/6.4 - (344.45/5.76 - 20.5/9.6 * lal.PI*lal.PI) *
            eta + 15.5/9.6 * eta*eta + 3.5/518.4 * eta*eta*eta)

    # Overall amplitude scaling; distance is in Mpc (hence the 1e6 * PC_SI).
    amp0 = -4. * mass1 * mass2 / (1.0e+06 * distance * lal.PC_SI ) * \
            lal.MRSUN_SI * lal.MTSUN_SI * sqrt(lal.PI/12.0)

    htildeP = FrequencySeries(zeros(n,dtype=complex128), delta_f=delta_f, copy=False)
    htildeC = FrequencySeries(zeros(n,dtype=complex128), delta_f=delta_f, copy=False)
    # Launch the elementwise CUDA kernel over the in-band bins only.
    spintaylorf2_kernel(htildeP.data[kmin:kmax], htildeC.data[kmin:kmax],
                        kmin, phase_order, amplitude_order, delta_f, piM, pfaN,
                        pfa2, pfa3, pfa4, pfa5, pfl5,
                        pfa6, pfl6, pfa7, FTaN, FTa2,
                        FTa3, FTa4, FTa5, FTa6,
                        FTl6, FTa7, dETaN, dETa1, dETa2, dETa3,
                        amp0, tC, phi0,
                        kappa, prec_fac0, alpha_ref, zeta_ref,
                        dtdv2, dtdv3, dtdv4, dtdv5,
                        RE_SBfac0, RE_SBfac1, RE_SBfac2, RE_SBfac3, RE_SBfac4,
                        IM_SBfac0, IM_SBfac1, IM_SBfac2, IM_SBfac3, IM_SBfac4,
                        psiJ_P, psiJ_C, gamma0)
    return htildeP, htildeC
| 18,361
| 43.139423
| 701
|
py
|
pycbc
|
pycbc-master/pycbc/noise/gaussian.py
|
# Copyright (C) 2012 Alex Nitz
#
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""This module contains functions to generate gaussian noise colored with a
noise spectrum.
"""
from pycbc import libutils
from pycbc.types import TimeSeries, zeros
from pycbc.types import complex_same_precision_as, FrequencySeries
import lal
import numpy.random
lalsimulation = libutils.import_optional('lalsimulation')
def frequency_noise_from_psd(psd, seed=None):
    """Create frequency-domain Gaussian noise colored by a given PSD.

    The returned noise has the same length and frequency step as the
    given PSD. Note that if unique noise is desired a unique seed
    should be provided.

    Parameters
    ----------
    psd : FrequencySeries
        The noise weighting to color the noise.
    seed : {None, int}
        The seed to generate the noise. If None, the global numpy
        random state is used unchanged.

    Returns
    -------
    noise : FrequencySeries
        A FrequencySeries containing gaussian noise colored by the
        given psd.
    """
    # Per-bin standard deviation for each of the real and imaginary parts.
    amplitude = 0.5 * (psd / psd.delta_f) ** 0.5
    if seed is not None:
        numpy.random.seed(seed)
    amplitude = amplitude.numpy()
    out_dtype = complex_same_precision_as(psd)

    # Only draw samples where the PSD is nonzero; zero-PSD bins stay zero.
    nonzero = amplitude != 0
    sigma = amplitude[nonzero]
    samples = (numpy.random.normal(0, sigma)
               + 1j * numpy.random.normal(0, sigma))

    colored = numpy.zeros(len(amplitude), dtype=out_dtype)
    colored[nonzero] = samples
    return FrequencySeries(colored,
                           delta_f=psd.delta_f,
                           dtype=out_dtype)
def noise_from_psd(length, delta_t, psd, seed=None):
    """ Create time-domain noise colored by a given psd.

    Return noise with a given psd. Note that if unique noise is desired
    a unique seed should be provided.

    Parameters
    ----------
    length : int
        The length of noise to generate in samples.
    delta_t : float
        The time step of the noise.
    psd : FrequencySeries
        The noise weighting to color the noise.
    seed : {None, int}
        The seed to generate the noise. If None, a random seed is drawn
        from numpy's global random state.

    Returns
    --------
    noise : TimeSeries
        A TimeSeries containing gaussian noise colored by the given psd.

    Raises
    ------
    ValueError
        If the PSD is too short to support the requested delta_t.
    """
    noise_ts = TimeSeries(zeros(length), delta_t=delta_t)
    if seed is None:
        seed = numpy.random.randint(2**32)
    # GSL "ranlux" generator; this drives lalsimulation's noise generation.
    randomness = lal.gsl_rng("ranlux", seed)
    # N: samples per generated segment (spans the full PSD bandwidth).
    N = int (1.0 / delta_t / psd.delta_f)
    n = N//2+1
    stride = N//2
    if n > len(psd):
        raise ValueError("PSD not compatible with requested delta_t")
    psd = (psd[0:n]).lal()
    # Zero the Nyquist and DC bins so they contribute no noise power.
    psd.data.data[n-1] = 0
    psd.data.data[0] = 0
    segment = TimeSeries(zeros(N), delta_t=delta_t).lal()
    length_generated = 0
    # Prime the generator with a first full segment (offset 0), then advance
    # by half a segment each iteration so consecutive segments overlap 50%.
    lalsimulation.SimNoise(segment, 0, psd, randomness)
    while (length_generated < length):
        if (length_generated + stride) < length:
            noise_ts.data[length_generated:length_generated+stride] = segment.data.data[0:stride]
        else:
            # Final (possibly partial) chunk.
            noise_ts.data[length_generated:length] = segment.data.data[0:length-length_generated]
        length_generated += stride
        lalsimulation.SimNoise(segment, stride, psd, randomness)
    return noise_ts
def noise_from_string(psd_name, length, delta_t, seed=None, low_frequency_cutoff=10.0):
    """ Create noise from an analytic PSD.

    Return noise from the chosen PSD. Note that if unique noise is desired
    a unique seed should be provided.

    Parameters
    ----------
    psd_name : str
        Name of the analytic PSD to use.
    length : int
        The length of noise to generate in samples.
    delta_t : float
        The time step of the noise.
    seed : {None, int}
        The seed to generate the noise.
    low_frequency_cutoff : {10.0, float}
        The low frequency cutoff to pass to the PSD generation.

    Returns
    --------
    noise : TimeSeries
        A TimeSeries containing gaussian noise colored by the given psd.
    """
    import pycbc.psd
    # We just need enough frequency resolution to resolve spectral lines.
    delta_f = 1.0 / 8
    flen = int(.5 / delta_t / delta_f) + 1
    psd = pycbc.psd.from_string(psd_name, flen, delta_f, low_frequency_cutoff)
    return noise_from_psd(int(length), delta_t, psd, seed=seed)
| 5,192
| 30.472727
| 97
|
py
|
pycbc
|
pycbc-master/pycbc/noise/__init__.py
|
from .gaussian import noise_from_psd, noise_from_string, frequency_noise_from_psd # noqa
| 89
| 44
| 88
|
py
|
pycbc
|
pycbc-master/pycbc/noise/reproduceable.py
|
# Copyright (C) 2017 Alex Nitz
#
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
import numpy, pycbc.psd
from pycbc.types import TimeSeries, complex_same_precision_as
from numpy.random import RandomState
# This constant need to be constant to be able to recover identical results.
BLOCK_SAMPLES = 1638400
def block(seed, sample_rate):
    """Return one fixed-size block of white Gaussian random samples.

    Parameters
    ----------
    seed : int
        Seed for the generator; reduced modulo 2**32 to fit numpy's
        RandomState seed range.
    sample_rate : float
        Sets the variance of the white noise (variance = sample_rate / 2).

    Returns
    -------
    noise : numpy.ndarray
        Array of BLOCK_SAMPLES normally-distributed samples.
    """
    # Standard deviation corresponding to variance sample_rate / 2.
    std = (sample_rate / 2) ** 0.5
    generator = RandomState(seed % 2**32)
    return generator.normal(size=BLOCK_SAMPLES, scale=std)
def normal(start, end, sample_rate=16384, seed=0):
    """Generate data with a white Gaussian (normal) distribution.

    Parameters
    ----------
    start : int
        Start time in GPS seconds to generate noise.
    end : int
        End time in GPS seconds to generate noise.
    sample_rate : {16384, float}
        Sample rate to generate the data at. Keep constant if you want to
        ensure continuity between disjoint time spans.
    seed : {0, int}
        The seed to generate the noise.

    Returns
    -------
    noise : TimeSeries
        A TimeSeries containing white gaussian noise.
    """
    # Reproducible because each fixed-duration block is generated from a
    # deterministic seed derived from the user seed and the block index.
    block_dur = BLOCK_SAMPLES / sample_rate
    first = int(numpy.floor(start / block_dur))
    last = int(numpy.floor(end / block_dur))
    # If the span ends exactly on a block boundary, the last block would
    # contribute nothing, so drop it.
    if end % block_dur == 0:
        last -= 1
    seed_offset = RandomState(seed).randint(-2**50, 2**50)
    pieces = [block(idx + seed_offset, sample_rate)
              for idx in numpy.arange(first, last + 1, 1)]
    series = TimeSeries(numpy.concatenate(pieces),
                        delta_t=1.0 / sample_rate,
                        epoch=(first * block_dur))
    return series.time_slice(start, end)
def colored_noise(psd, start_time, end_time,
                  seed=0, sample_rate=16384,
                  low_frequency_cutoff=1.0,
                  filter_duration=128,
                  scale=1.0):
    """ Create reproducible noise colored by a given PSD.

    Return noise from the chosen PSD. Note that if unique noise is desired
    a unique seed should be provided.

    Parameters
    ----------
    psd : pycbc.types.FrequencySeries
        PSD to color the noise
    start_time : int
        Start time in GPS seconds to generate noise
    end_time : int
        End time in GPS seconds to generate noise
    seed : {0, int}
        The seed to generate the noise.
    sample_rate: {16384, float}
        The sample rate of the output data. Keep constant if you want to
        ensure continuity between disjoint time spans.
    low_frequency_cutoff : {1.0, float}
        The low frequency cutoff to pass to the PSD generation.
    filter_duration : {128, float}
        The duration in seconds of the coloring filter
    scale : {1.0, float}
        Overall amplitude scaling applied to the colored noise.

    Returns
    --------
    noise : TimeSeries
        A TimeSeries containing gaussian noise colored by the given psd.
    """
    psd = psd.copy()
    # Extend the PSD to cover the full Nyquist band at the output rate.
    flen = int(sample_rate / psd.delta_f) // 2 + 1
    oldlen = len(psd)
    psd.resize(flen)
    # Want to avoid zeroes in PSD.
    max_val = psd.max()
    for i in range(len(psd)):
        # Beyond the original band, extend with the last valid bin value.
        if i >= (oldlen-1):
            psd.data[i] = psd[oldlen - 2]
        if psd[i] == 0:
            psd.data[i] = max_val
    fil_len = int(filter_duration * sample_rate)
    # Generate extra white noise on each side so filter transients can be
    # sliced off at the end.
    wn_dur = int(end_time - start_time) + 2 * filter_duration
    if psd.delta_f >= 1. / (2.*filter_duration):
        # If the PSD is short enough, this method is less memory intensive than
        # resizing and then calling inverse_spectrum_truncation
        psd = pycbc.psd.interpolate(psd, 1.0 / (2. * filter_duration))
        # inverse_spectrum_truncation truncates the inverted PSD. To truncate
        # the non-inverted PSD we give it the inverted PSD to truncate and then
        # invert the output.
        psd = 1. / pycbc.psd.inverse_spectrum_truncation(
            1./psd,
            fil_len,
            low_frequency_cutoff=low_frequency_cutoff,
            trunc_method='hann')
        psd = psd.astype(complex_same_precision_as(psd))
        # Zero-pad the time-domain PSD to desired length. Zeroes must be added
        # in the middle, so some rolling between a resize is used.
        psd = psd.to_timeseries()
        psd.roll(fil_len)
        psd.resize(int(wn_dur * sample_rate))
        psd.roll(-fil_len)
        # As time series is still mirrored the complex frequency components are
        # 0. But convert to real by using abs as in inverse_spectrum_truncate
        psd = psd.to_frequencyseries()
    else:
        psd = pycbc.psd.interpolate(psd, 1.0 / wn_dur)
        psd = 1. / pycbc.psd.inverse_spectrum_truncation(
            1./psd,
            fil_len,
            low_frequency_cutoff=low_frequency_cutoff,
            trunc_method='hann')
    # Zero out everything below the low-frequency cutoff.
    kmin = int(low_frequency_cutoff / psd.delta_f)
    psd[:kmin].clear()
    # |PSD|^(1/2) as a real-valued series: squared_norm then fourth root.
    asd = (psd.squared_norm())**0.25
    del psd
    white_noise = normal(start_time - filter_duration,
                         end_time + filter_duration,
                         seed=seed,
                         sample_rate=sample_rate)
    white_noise = white_noise.to_frequencyseries()
    # Here we color. Do not want to duplicate memory here though so use '*='
    white_noise *= asd*scale
    del asd
    colored = white_noise.to_timeseries(delta_t=1.0/sample_rate)
    del white_noise
    # Slice off the filter-transient padding added on each side.
    return colored.time_slice(start_time, end_time)
def noise_from_string(psd_name, start_time, end_time,
                      seed=0,
                      sample_rate=16384,
                      low_frequency_cutoff=1.0,
                      filter_duration=128,
                      scale=1.0):
    """Create reproducible noise colored by a named analytic PSD.

    Return noise from the chosen PSD. Note that if unique noise is desired
    a unique seed should be provided.

    Parameters
    ----------
    psd_name : str
        Name of the analytic PSD to use.
    start_time : int
        Start time in GPS seconds to generate noise.
    end_time : int
        End time in GPS seconds to generate noise.
    seed : {0, int}
        The seed to generate the noise.
    sample_rate : {16384, float}
        The sample rate of the output data. Keep constant if you want to
        ensure continuity between disjoint time spans.
    low_frequency_cutoff : {1.0, float}
        The low frequency cutoff to pass to the PSD generation.
    filter_duration : {128, float}
        The duration in seconds of the coloring filter.
    scale : {1.0, float}
        Overall amplitude scaling applied to the colored noise.

    Returns
    -------
    noise : TimeSeries
        A TimeSeries containing gaussian noise colored by the given psd.
    """
    # Frequency resolution matching the coloring filter length.
    freq_step = 1.0 / filter_duration
    num_bins = int(sample_rate / freq_step) // 2 + 1
    psd = pycbc.psd.from_string(psd_name, num_bins, freq_step,
                                low_frequency_cutoff)
    return colored_noise(psd, start_time, end_time,
                         seed=seed,
                         sample_rate=sample_rate,
                         low_frequency_cutoff=low_frequency_cutoff,
                         filter_duration=filter_duration,
                         scale=scale)
| 8,316
| 35.964444
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/fft/backend_mkl.py
|
# Copyright (C) 2014 Josh Willis
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with with program; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
from .core import _list_available
_backend_dict = {'mkl' : 'mkl'}
_backend_list = ['mkl']
_alist, _adict = _list_available(_backend_list, _backend_dict)
mkl_backend = None
def set_backend(backend_list):
    """Select the first backend in ``backend_list`` that is available.

    Leaves ``mkl_backend`` unchanged if none of the requested backends
    are in the available list.
    """
    global mkl_backend
    chosen = next((name for name in backend_list if name in _alist), None)
    if chosen is not None:
        mkl_backend = chosen
def get_backend():
    # Return the module object for the currently selected MKL backend;
    # ``mkl_backend`` is chosen by set_backend().
    return _adict[mkl_backend]
# Select a default backend at import time.
set_backend(_backend_list)
| 1,197
| 30.526316
| 71
|
py
|
pycbc
|
pycbc-master/pycbc/fft/core.py
|
# Copyright (C) 2012 Josh Willis, Andrew Miller
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This package provides a front-end to various fast Fourier transform
implementations within PyCBC.
"""
from pycbc.types import Array as _Array
from pycbc.types import TimeSeries as _TimeSeries
from pycbc.types import FrequencySeries as _FrequencySeries
# The following helper function is in this top-level module because it
# is used by the scheme-dependent files to write their version of the
# _available_backends() function. It cannot go in backend_support as
# that woulc cause circular imports
def _list_available(possible_list, possible_dict):
# It possibly is strange that we have both a list and a dict.
# The reason for this is that the name the user specfies for a
# backend, e.g. 'numpy', may be something that the fft submodule
# cannot be called, so we need a dict mapping those names to the
# actual names of the modules. However when we iterate, it must
# be in the defined order, because that represents a preference
# for which backends are most likely to be preferrable. As a
# dict is unordered, we cannot simply use its keys for this purpose.
available_list = []
available_dict = {}
for backend in possible_list:
try:
mod = __import__('pycbc.fft.' + possible_dict[backend], fromlist = ['pycbc.fft'])
available_dict.update({backend:mod})
available_list.append(backend)
except (ImportError, OSError):
pass
return available_list, available_dict
# The main purpose of the top-level module is to present a
# uniform interface for a forward and reverse FFT, independent of
# the underlying backend. We perform sanity checking here, at the
# top-level, and then don't worry about it in submodules. To
# facilitate this checking, we define dicts mapping the numpy dtype
# to the corresponding precisions and types.
def _check_fft_args(invec, outvec):
    """Validate that invec/outvec form a legal FFT input/output pair.

    Checks that both are PyCBC Arrays, that a TimeSeries input pairs with
    a FrequencySeries output (and vice versa), and that precisions agree.

    Returns
    -------
    list
        ``[precision, input_kind, output_kind]``.

    Raises
    ------
    TypeError
        If the input/output types are not a valid pairing.
    ValueError
        If the input and output precisions differ.
    """
    if not isinstance(invec,_Array):
        raise TypeError("Input is not a PyCBC Array")
    if not isinstance(outvec,_Array):
        raise TypeError("Output is not a PyCBC Array")
    # Four checks cover both directions of the TimeSeries<->FrequencySeries
    # pairing requirement.
    if isinstance(invec,_TimeSeries) and not isinstance(
        outvec,_FrequencySeries):
        raise TypeError(
            "When input is TimeSeries output must be FrequencySeries")
    if isinstance(outvec,_TimeSeries) and not isinstance(
        invec,_FrequencySeries):
        raise TypeError(
            "When output is TimeSeries input must be FrequencySeries")
    if isinstance(invec,_FrequencySeries) and not isinstance(
        outvec,_TimeSeries):
        raise TypeError(
            "When input is FrequencySeries output must be TimeSeries")
    if isinstance(outvec,_FrequencySeries) and not isinstance(
        invec,_TimeSeries):
        raise TypeError(
            "When output is FrequencySeries input must be TimeSeries")
    iprec = invec.precision
    oprec = outvec.precision
    if iprec != oprec:
        raise ValueError("Input and output precisions must agree")
    itype = invec.kind
    otype = outvec.kind
    return [iprec,itype,otype]
def _check_fwd_args(invec, itype, outvec, otype, nbatch, size):
ilen = len(invec)
olen = len(outvec)
if nbatch < 1:
raise ValueError("nbatch must be >= 1")
if (nbatch > 1) and size is not None:
raise ValueError("When nbatch > 1, size cannot be 'None'")
if size is None:
size = ilen
inplace = (invec.ptr == outvec.ptr)
if (ilen % nbatch) != 0:
raise ValueError("Input length must be divisible by nbatch")
if (olen % nbatch) != 0:
raise ValueError("Output length must be divisible by nbatch")
if itype == 'complex' and otype == 'complex':
if (ilen/nbatch) != size:
raise ValueError("For C2C FFT, len(invec) must be nbatch*size")
if (olen/nbatch) != size:
raise ValueError("For C2C FFT, len(outvec) must be nbatch*size")
elif itype == 'real' and otype == 'complex':
if (olen/nbatch) != int(size/2 + 1):
raise ValueError("For R2C FFT, len(outvec) must be nbatch*(size/2 + 1)")
if inplace:
if (ilen/nbatch) != int(2*(size/2 + 1)):
raise ValueError("For R2C in-place FFT, len(invec) must be nbatch*2*(size/2+1)")
else:
if (ilen/nbatch) != size:
raise ValueError("For R2C out-of-place FFT, len(invec) must be nbatch*size")
else:
raise ValueError("Inconsistent dtypes for forward FFT")
def _check_inv_args(invec, itype, outvec, otype, nbatch, size):
ilen = len(invec)
olen = len(outvec)
if nbatch < 1:
raise ValueError("nbatch must be >= 1")
if (nbatch > 1) and size is None:
raise ValueError("When nbatch > 1, size cannot be 'None'")
if size is None:
size = olen
inplace = (invec.ptr == outvec.ptr)
if (ilen % nbatch) != 0:
raise ValueError("Input length must be divisible by nbatch")
if (olen % nbatch) != 0:
raise ValueError("Output length must be divisible by nbatch")
if itype == 'complex' and otype == 'complex':
if (ilen/nbatch) != size:
raise ValueError("For C2C IFFT, len(invec) must be nbatch*size")
if (olen/nbatch) != size:
raise ValueError("For C2C IFFT, len(outvec) must be nbatch*size")
elif itype == 'complex' and otype == 'real':
if (ilen/nbatch) != int(size/2 + 1):
raise ValueError("For C2R IFFT, len(invec) must be nbatch*(size/2 + 1)")
if inplace:
if (olen/nbatch) != 2*int(size/2 + 1):
raise ValueError("For C2R in-place IFFT, len(outvec) must be nbatch*2*(size/2+1)")
else:
if (olen/nbatch) != size:
raise ValueError("For C2R out-of-place IFFT, len(outvec) must be nbatch*size")
# The class-based approach requires the following:
# The classes below should serve as the parent for all schemed classes.
# In part, these classes should serve as the location for
# all documentation of the class and its methods, though that is not
# yet implemented. Perhaps something along the lines of:
#
# http://stackoverflow.com/questions/2025562/inherit-docstrings-in-python-class-inheritance
#
# will work? Is there a better way?
#
# Unlike some other places within PyCBC, however, the __init__ method of these classes do
# nontrivial work and hence should be called inside the __init__ method of all child classes,
# before anything else.
class _BaseFFT(object):
    """Base class for planned forward FFTs.

    Validates the input/output pair at construction time and records the
    transform geometry (batch count, per-transform size, input/output
    distances) that scheme-specific subclasses need to build their plans.
    Subclasses override :meth:`execute`.
    """
    def __init__(self, invec, outvec, nbatch, size):
        _, itype, otype = _check_fft_args(invec, outvec)
        _check_fwd_args(invec, itype, outvec, otype, nbatch, size)
        self.forward = True
        self.invec = invec
        self.outvec = outvec
        # In-place transform iff both vectors occupy the same memory.
        self.inplace = (self.invec.ptr == self.outvec.ptr)
        self.nbatch = nbatch
        if nbatch > 1:
            self.size = size
        else:
            self.size = len(invec)
        # Whether we are complex-to-complex or real-to-complex is determined
        # by itype:
        if itype == 'complex':
            # Complex-to-complex case:
            self.idist = self.size
            self.odist = self.size
        else:
            # Real-to-complex case:
            self.odist = int(self.size/2 + 1)
            if self.inplace:
                # In-place R2C requires padded real input.
                self.idist = 2*int(self.size/2 + 1)
            else:
                self.idist = self.size
        # For a forward FFT, the length of the *input* vector is the length
        # we should divide by, whether C2C or R2HC transform
        if isinstance(self.invec, _TimeSeries):
            self.outvec._epoch = self.invec._epoch
            self.outvec._delta_f = 1.0/(self.invec._delta_t * len(self.invec))
            self.scale = self.invec._delta_t
        elif isinstance(self.invec, _FrequencySeries):
            self.outvec._epoch = self.invec._epoch
            self.outvec._delta_t = 1.0/(self.invec._delta_f * len(self.invec))
            self.scale = self.invec._delta_f
    def execute(self):
        """
        Compute the (forward) FFT of the input vector specified at object
        instantiation, putting the output into the output vector specified
        at object instantiation. The intention is that this method should
        be called many times, with the contents of the input vector
        changing between invocations, but not the locations in memory or
        length of either input or output vector.
        *Unlike* the function based API, the class based API does NOT rescale
        its output by the input vector's delta_t (when input is a TimeSeries)
        or delta_f (when input is a FrequencySeries).
        """
        pass
class _BaseIFFT(object):
    """Base class for planned inverse FFTs.

    Mirror of :class:`_BaseFFT` for the backward direction: validates the
    input/output pair and records the transform geometry for subclasses.
    """
    def __init__(self, invec, outvec, nbatch, size):
        _, itype, otype = _check_fft_args(invec, outvec)
        _check_inv_args(invec, itype, outvec, otype, nbatch, size)
        self.forward = False
        self.invec = invec
        self.outvec = outvec
        # In-place transform iff both vectors occupy the same memory.
        self.inplace = (self.invec.ptr == self.outvec.ptr)
        self.nbatch = nbatch
        if nbatch > 1:
            self.size = size
        else:
            self.size = len(outvec)
        # Whether we are complex-to-complex or complex-to-real is determined
        # by otype:
        if otype == 'complex':
            # Complex-to-complex case:
            self.idist = self.size
            self.odist = self.size
        else:
            # Complex-to-real case:
            self.idist = int(self.size/2 + 1)
            if self.inplace:
                # In-place C2R writes into padded real output.
                self.odist = 2*int(self.size/2 + 1)
            else:
                self.odist = self.size
        # For an inverse FFT, the length of the *output* vector is the length
        # we should divide by, whether C2C or HC2R transform
        if isinstance(self.invec, _TimeSeries):
            self.outvec._epoch = self.invec._epoch
            self.outvec._delta_f = 1.0/(self.invec._delta_t * len(self.outvec))
            self.scale = self.invec._delta_t
        elif isinstance(self.invec, _FrequencySeries):
            self.outvec._epoch = self.invec._epoch
            self.outvec._delta_t = 1.0/(self.invec._delta_f * len(self.outvec))
            self.scale = self.invec._delta_f
    def execute(self):
        """
        Compute the (backward) FFT of the input vector specified at object
        instantiation, putting the output into the output vector specified
        at object instantiation. The intention is that this method should
        be called many times, with the contents of the input vector
        changing between invocations, but not the locations in memory or
        length of either input or output vector.
        *Unlike* the function based API, the class based API does NOT rescale
        its output by the input vector's delta_t (when input is a TimeSeries)
        or delta_f (when input is a FrequencySeries).
        """
        pass
| 11,855
| 41.647482
| 98
|
py
|
pycbc
|
pycbc-master/pycbc/fft/mkl.py
|
import ctypes, pycbc.libutils
from pycbc.types import zeros
from .core import _BaseFFT, _BaseIFFT
import pycbc.scheme as _scheme
lib = pycbc.libutils.get_ctypes_library('mkl_rt', [])
if lib is None:
raise ImportError
#MKL constants taken from mkl_df_defines.h
DFTI_FORWARD_DOMAIN = 0
DFTI_DIMENSION = 1
DFTI_LENGTHS = 2
DFTI_PRECISION = 3
DFTI_FORWARD_SCALE = 4
DFTI_BACKWARD_SCALE = 5
DFTI_NUMBER_OF_TRANSFORMS = 7
DFTI_COMPLEX_STORAGE = 8
DFTI_REAL_STORAGE = 9
DFTI_CONJUGATE_EVEN_STORAGE = 10
DFTI_PLACEMENT = 11
DFTI_INPUT_STRIDES = 12
DFTI_OUTPUT_STRIDES = 13
DFTI_INPUT_DISTANCE = 14
DFTI_OUTPUT_DISTANCE = 15
DFTI_WORKSPACE = 17
DFTI_ORDERING = 18
DFTI_TRANSPOSE = 19
DFTI_DESCRIPTOR_NAME = 20
DFTI_PACKED_FORMAT = 21
DFTI_COMMIT_STATUS = 22
DFTI_VERSION = 23
DFTI_NUMBER_OF_USER_THREADS = 26
DFTI_THREAD_LIMIT = 27
DFTI_COMMITTED = 30
DFTI_UNCOMMITTED = 31
DFTI_COMPLEX = 32
DFTI_REAL = 33
DFTI_SINGLE = 35
DFTI_DOUBLE = 36
DFTI_COMPLEX_COMPLEX = 39
DFTI_COMPLEX_REAL = 40
DFTI_REAL_COMPLEX = 41
DFTI_REAL_REAL = 42
DFTI_INPLACE = 43
DFTI_NOT_INPLACE = 44
DFTI_ORDERED = 48
DFTI_BACKWARD_SCRAMBLED = 49
DFTI_ALLOW = 51
DFTI_AVOID = 52
DFTI_NONE = 53
DFTI_CCS_FORMAT = 54
DFTI_PACK_FORMAT = 55
DFTI_PERM_FORMAT = 56
DFTI_CCE_FORMAT = 57
# Map PyCBC precision strings to MKL DFTI precision constants.
mkl_prec = {'single': DFTI_SINGLE,
            'double': DFTI_DOUBLE,
            }
# Map (input kind -> output kind) to the DFTI forward domain. Any transform
# involving a real vector uses DFTI_REAL; pure C2C uses DFTI_COMPLEX.
mkl_domain = {'real': {'complex': DFTI_REAL},
              'complex': {'real': DFTI_REAL,
                          'complex':DFTI_COMPLEX,
                         }
             }
def check_status(status):
    """Raise RuntimeError with MKL's error message if ``status`` is nonzero.

    A zero status means success and returns immediately.
    """
    if not status:
        return
    lib.DftiErrorMessage.restype = ctypes.c_char_p
    raise RuntimeError(lib.DftiErrorMessage(status))
def create_descriptor(size, idtype, odtype, inplace):
    """Create and commit an MKL DFTI descriptor for a 1-D transform.

    Parameters
    ----------
    size : int
        Length of the transform.
    idtype, odtype : numpy dtype
        Dtypes of the input and output vectors; used to derive the MKL
        precision and forward domain.
    inplace : bool
        Whether the transform operates in place.

    Returns
    -------
    ctypes.c_void_p
        Handle to the committed descriptor; the caller is responsible for
        freeing it with DftiFreeDescriptor.
    """
    # Length-1 dummy vectors just to query precision/kind for the dtypes.
    invec = zeros(1, dtype=idtype)
    outvec = zeros(1, dtype=odtype)
    desc = ctypes.c_void_p(1)
    f = lib.DftiCreateDescriptor
    f.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int]
    prec = mkl_prec[invec.precision]
    domain = mkl_domain[str(invec.kind)][str(outvec.kind)]
    status = f(ctypes.byref(desc), prec, domain, 1, size)
    # BUGFIX: previously most of these return codes were ignored and the
    # final check_status() re-checked a stale status; now every DFTI call
    # is checked individually.
    check_status(status)
    if inplace:
        status = lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_INPLACE)
    else:
        status = lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_NOT_INPLACE)
    check_status(status)
    nthreads = _scheme.mgr.state.num_threads
    status = lib.DftiSetValue(desc, DFTI_THREAD_LIMIT, nthreads)
    check_status(status)
    status = lib.DftiSetValue(desc, DFTI_CONJUGATE_EVEN_STORAGE,
                              DFTI_CCS_FORMAT)
    check_status(status)
    status = lib.DftiCommitDescriptor(desc)
    check_status(status)
    return desc
def fft(invec, outvec, prec, itype, otype):
    """One-shot forward transform of ``invec`` into ``outvec`` via MKL.

    A descriptor is created, used once, and freed; the ``prec``/``itype``/
    ``otype`` arguments are part of the common backend signature but the
    descriptor is derived from the vectors themselves.
    """
    in_place = (invec.ptr == outvec.ptr)
    descr = create_descriptor(max(len(invec), len(outvec)),
                              invec.dtype, outvec.dtype, in_place)
    compute = lib.DftiComputeForward
    compute.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
    status = compute(descr, invec.ptr, outvec.ptr)
    # Free before raising so the descriptor never leaks.
    lib.DftiFreeDescriptor(ctypes.byref(descr))
    check_status(status)
def ifft(invec, outvec, prec, itype, otype):
    """One-shot backward transform of ``invec`` into ``outvec`` via MKL.

    Mirror of :func:`fft` using DftiComputeBackward.
    """
    in_place = (invec.ptr == outvec.ptr)
    descr = create_descriptor(max(len(invec), len(outvec)),
                              invec.dtype, outvec.dtype, in_place)
    compute = lib.DftiComputeBackward
    compute.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
    status = compute(descr, invec.ptr, outvec.ptr)
    # Free before raising so the descriptor never leaks.
    lib.DftiFreeDescriptor(ctypes.byref(descr))
    check_status(status)
# Class based API
_create_descr = lib.DftiCreateDescriptor
_create_descr.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int]
def _get_desc(fftobj):
    """Build and commit an MKL descriptor from a _BaseFFT/_BaseIFFT object.

    Reads the geometry attributes (size, nbatch, idist, odist, inplace)
    set by the base-class constructor and returns the committed
    descriptor handle, checking every DFTI return code.
    """
    desc = ctypes.c_void_p(1)
    prec = mkl_prec[fftobj.invec.precision]
    domain = mkl_domain[str(fftobj.invec.kind)][str(fftobj.outvec.kind)]
    status = _create_descr(ctypes.byref(desc), prec, domain,
                           1, int(fftobj.size))
    check_status(status)
    # Now we set various things depending on exactly what kind of transform we're
    # performing.
    lib.DftiSetValue.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
    # The following only matters if the transform is C2R or R2C
    status = lib.DftiSetValue(desc, DFTI_CONJUGATE_EVEN_STORAGE,
                              DFTI_COMPLEX_COMPLEX)
    check_status(status)
    # In-place or out-of-place:
    if fftobj.inplace:
        status = lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_INPLACE)
    else:
        status = lib.DftiSetValue(desc, DFTI_PLACEMENT, DFTI_NOT_INPLACE)
    check_status(status)
    # If we are performing a batched transform:
    if fftobj.nbatch > 1:
        status = lib.DftiSetValue(desc, DFTI_NUMBER_OF_TRANSFORMS, fftobj.nbatch)
        check_status(status)
        status = lib.DftiSetValue(desc, DFTI_INPUT_DISTANCE, fftobj.idist)
        check_status(status)
        status = lib.DftiSetValue(desc, DFTI_OUTPUT_DISTANCE, fftobj.odist)
        check_status(status)
    # Knowing how many threads will be allowed may help select a better transform
    nthreads = _scheme.mgr.state.num_threads
    status = lib.DftiSetValue(desc, DFTI_THREAD_LIMIT, nthreads)
    check_status(status)
    # Now everything's ready, so commit
    status = lib.DftiCommitDescriptor(desc)
    check_status(status)
    return desc
class FFT(_BaseFFT):
    """Planned forward FFT using the MKL backend.

    Caches the data pointers and the (argtyped) compute function at
    construction so execute() does minimal per-call work.
    """
    def __init__(self, invec, outvec, nbatch=1, size=None):
        super(FFT, self).__init__(invec, outvec, nbatch, size)
        self.iptr = self.invec.ptr
        self.optr = self.outvec.ptr
        self._efunc = lib.DftiComputeForward
        # NOTE: argtypes is set on the shared library function object, so
        # this is a process-wide setting, not per-instance.
        self._efunc.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
        self.desc = _get_desc(self)
    def execute(self):
        # Run the committed plan; no rescaling applied (see _BaseFFT.execute).
        self._efunc(self.desc, self.iptr, self.optr)
class IFFT(_BaseIFFT):
    """Planned inverse FFT using the MKL backend; mirror of :class:`FFT`."""
    def __init__(self, invec, outvec, nbatch=1, size=None):
        super(IFFT, self).__init__(invec, outvec, nbatch, size)
        self.iptr = self.invec.ptr
        self.optr = self.outvec.ptr
        self._efunc = lib.DftiComputeBackward
        # NOTE: argtypes is set on the shared library function object, so
        # this is a process-wide setting, not per-instance.
        self._efunc.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
        self.desc = _get_desc(self)
    def execute(self):
        # Run the committed plan; no rescaling applied (see _BaseIFFT.execute).
        self._efunc(self.desc, self.iptr, self.optr)
| 6,206
| 31.160622
| 84
|
py
|
pycbc
|
pycbc-master/pycbc/fft/backend_support.py
|
# Copyright (C) 2012 Josh Willis, Andrew Miller
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This package provides a front-end to various fast Fourier transform
implementations within PyCBC.
"""
import pycbc
import pycbc.scheme
# These are global variables, that are modified by the various scheme-
# dependent submodules, to maintain a list of all possible backends
# for all possible schemes that are available at runtime. This list
# and dict are then used when parsing command-line options.
_all_backends_list = []
_all_backends_dict = {}
# The following is the function called by each scheme's setup to add whatever new
# backends may have been found to the global list. Since some backends may be
# shared, we must first check to make sure that the item in the list is not already
# in the global list, and we assume that the keys to the dict are in one-to-one
# correspondence with the items in the list.
def _update_global_available(new_list, new_dict, global_list, global_dict):
for item in new_list:
if item not in global_list:
global_list.append(item)
global_dict.update({item:new_dict[item]})
def get_backend_modules():
    # Return the imported backend module objects (dict values view).
    return _all_backends_dict.values()
def get_backend_names():
    # Return the user-facing backend names as a list (dict keys).
    return list(_all_backends_dict)
BACKEND_PREFIX="pycbc.fft.backend_"
@pycbc.scheme.schemed(BACKEND_PREFIX)
def set_backend(backend_list):
    # Stub only: pycbc.scheme.schemed dispatches to the scheme-specific
    # implementation, so this body should be unreachable.
    raise ValueError("This function is a stub that should be overridden "
                     "using the scheme. You shouldn't be seeing this error!")
@pycbc.scheme.schemed(BACKEND_PREFIX)
def get_backend():
    # Stub only: pycbc.scheme.schemed dispatches to the scheme-specific
    # implementation, so this body should be unreachable.
    raise ValueError("This function is a stub that should be overridden "
                     "using the scheme. You shouldn't be seeing this error!")
# Import all scheme-dependent backends, to get _all_backends accurate:
# each backend submodule publishes its available backends via _alist/_adict,
# which we merge into the global registries. Missing schemes (e.g. no CUDA
# installed) simply fail to import and are skipped.
for scheme_name in ["cpu", "mkl", "cuda"]:
    try:
        mod = __import__('pycbc.fft.backend_' + scheme_name, fromlist = ['_alist', '_adict'])
        _alist = getattr(mod, "_alist")
        _adict = getattr(mod, "_adict")
        _update_global_available(_alist, _adict, _all_backends_list,
                                 _all_backends_dict)
    except ImportError:
        pass
| 3,126
| 36.22619
| 93
|
py
|
pycbc
|
pycbc-master/pycbc/fft/fftw_pruned.py
|
"""This module provides a functions to perform a pruned FFT based on FFTW
This should be considered a test and example module, as the functionality
can and should be generalized to other FFT backends, and precisions.
These functions largely implemented the generic FFT decomposition as
described rather nicely by wikipedia.
http://en.wikipedia.org/wiki/Cooley%E2%80%93Tukey_FFT_algorithm
I use a similar naming convention here, with minor simplifications to the
twiddle factors.
"""
import numpy, ctypes, pycbc.types
from pycbc.libutils import get_ctypes_library
import logging
from .fftw_pruned_cython import second_phase_cython
warn_msg = ("The FFTW_pruned module can be used to speed up computing SNR "
"timeseries by computing first at a low sample rate and then "
"computing at full sample rate only at certain samples. This code "
"has not yet been used in production, and has no test case. "
"This was also ported to Cython in this state. "
"This code would need verification before trusting results. "
"Please do contribute test cases.")
logging.warning(warn_msg)
# FFTW constants
FFTW_FORWARD = -1
FFTW_BACKWARD = 1
FFTW_MEASURE = 0
FFTW_PATIENT = 1 << 5
FFTW_ESTIMATE = 1 << 6
float_lib = get_ctypes_library('fftw3f', ['fftw3f'],mode=ctypes.RTLD_GLOBAL)
fexecute = float_lib.fftwf_execute_dft
fexecute.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
ftexecute = float_lib.fftwf_execute_dft
ftexecute.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
def plan_transpose(N1, N2):
    """
    Create a plan for transposing internally to the pruned_FFT calculation.

    This uses FFTW's guru interface: a rank-0 transform with two 'howmany'
    (vector) dimensions whose strides describe the row/column layout.

    Parameters
    -----------
    N1 : int
        Number of rows.
    N2 : int
        Number of columns.
    Returns
    --------
    plan : FFTWF plan
        The plan for performing the FFTW transpose.
    """
    rows = N1
    cols = N2
    # Two packed fftw_iodim triples (n, input-stride, output-stride):
    # (rows, 1, cols) followed by (cols, rows, 1).
    iodim = numpy.zeros(6, dtype=numpy.int32)
    iodim[0] = rows
    iodim[1] = 1
    iodim[2] = cols
    iodim[3] = cols
    iodim[4] = rows
    iodim[5] = 1
    N = N1*N2
    # Scratch buffers; FFTW only probes these during planning.
    vin = pycbc.types.zeros(N, dtype=numpy.complex64)
    vout = pycbc.types.zeros(N, dtype=numpy.complex64)
    f = float_lib.fftwf_plan_guru_dft
    f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int,
                  ctypes.c_void_p, ctypes.c_void_p,
                  ctypes.c_void_p, ctypes.c_void_p,
                  ctypes.c_int]
    f.restype = ctypes.c_void_p
    # rank=0 (no transform dimensions), 2 howmany dims described by iodim.
    return f(0, None, 2, iodim.ctypes.data, vin.ptr, vout.ptr, None, FFTW_MEASURE)
def plan_first_phase(N1, N2):
    """
    Create a plan for the first stage of the pruned FFT operation.

    This is a batch of N1 backward complex FFTs, each of length N2, laid
    out contiguously (stride 1, distance N2 between successive transforms).

    Parameters
    -----------
    N1 : int
        Number of rows.
    N2 : int
        Number of columns.
    Returns
    --------
    plan : FFTWF plan
        The plan for performing the first phase FFT.
    """
    N = N1*N2
    # Scratch buffers; FFTW only probes these during planning.
    vin = pycbc.types.zeros(N, dtype=numpy.complex64)
    vout = pycbc.types.zeros(N, dtype=numpy.complex64)
    f = float_lib.fftwf_plan_many_dft
    f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int,
                  ctypes.c_void_p, ctypes.c_void_p,
                  ctypes.c_int, ctypes.c_int,
                  ctypes.c_void_p, ctypes.c_void_p,
                  ctypes.c_int, ctypes.c_int,
                  ctypes.c_int, ctypes.c_int]
    f.restype = ctypes.c_void_p
    # fftwf_plan_many_dft(rank=1, n=[N2], howmany=N1,
    #                     in,  NULL, istride=1, idist=N2,
    #                     out, NULL, ostride=1, odist=N2,
    #                     FFTW_BACKWARD, FFTW_MEASURE)
    return f(1, ctypes.byref(ctypes.c_int(N2)), N1,
             vin.ptr, None, 1, N2,
             vout.ptr, None, 1, N2, FFTW_BACKWARD, FFTW_MEASURE)
# Cached first-phase plan (created lazily on first use).
_theplan = None
def first_phase(invec, outvec, N1, N2):
    """
    This implements the first phase of the FFT decomposition, using
    the standard FFT many plans.

    Parameters
    -----------
    invec : array
        The input array.
    outvec : array
        The output array.
    N1 : int
        Number of rows.
    N2 : int
        Number of columns.
    """
    global _theplan
    # NOTE(review): the plan is cached once and reused for all later calls
    # regardless of N1/N2 -- callers must keep the decomposition fixed.
    # TODO confirm this is intended.
    if _theplan is None:
        _theplan = plan_first_phase(N1, N2)
    fexecute(_theplan, invec.ptr, outvec.ptr)
def second_phase(invec, indices, N1, N2):
    """
    Explicitly evaluate the pruned second stage of the FFT decomposition.

    Only the requested output samples are computed, directly from the
    first-phase result. Note that numerical accumulation issues have been
    observed at various values of N1 and N2.

    Parameters
    ----------
    invec :
        The result of the first phase FFT.
    indices : array of ints
        The index locations at which to calculate the FFT.
    N1 : int
        The length of the second phase "FFT".
    N2 : int
        The length of the first phase FFT.

    Returns
    -------
    out : numpy.ndarray of complex64
        The transform evaluated at ``indices``.
    """
    N1 = int(N1)
    N2 = int(N2)
    flat_in = numpy.array(invec.data, copy=False)
    idx = numpy.array(indices, dtype=numpy.uint32)
    npoints = len(idx)
    out = numpy.zeros(npoints, dtype=numpy.complex64)
    # The tight double loop lives in Cython; inverting the loops there is
    # the next optimization if this ever becomes a bottleneck.
    second_phase_cython(N1, N2, npoints, idx, out, flat_in)
    return out
# Cached transpose plan (created lazily on first use).
_thetransposeplan = None
def fft_transpose_fftw(vec):
    """
    Perform an FFT transpose from vec into a newly-allocated output array.

    Parameters
    -----------
    vec : array
        Input array.
    Returns
    --------
    outvec : array
        Transposed output array.
    """
    global _thetransposeplan
    outvec = pycbc.types.zeros(len(vec), dtype=vec.dtype)
    # BUGFIX: this previously tested the unrelated global ``_theplan`` (the
    # first-phase FFT plan cache), so the transpose plan was either never
    # created (passing None to ftexecute) or needlessly re-created on every
    # call, depending on which function ran first.
    if _thetransposeplan is None:
        N1, N2 = splay(vec)
        _thetransposeplan = plan_transpose(N1, N2)
    ftexecute(_thetransposeplan, vec.ptr, outvec.ptr)
    return outvec
fft_transpose = fft_transpose_fftw
def splay(vec):
    """Determine two lengths (N1, N2) to split-stride the input vector by.

    N2 is the largest power of two not exceeding sqrt(len(vec)); N1 is the
    complementary factor so that N1 * N2 == len(vec) for power-of-2 inputs.

    Parameters
    ----------
    vec : array
        Input vector; only its length is used.

    Returns
    -------
    N1 : int
        Number of rows of the decomposition.
    N2 : int
        Number of columns of the decomposition.
    """
    N2 = 2 ** int(numpy.log2(len(vec)) / 2)
    # BUGFIX: use floor division -- under Python 3 the true division '/'
    # returned a float, which breaks downstream ctypes int arguments.
    N1 = len(vec) // N2
    return N1, N2
def pruned_c2cifft(invec, outvec, indices, pretransposed=False):
    """
    Perform a pruned iFFT, only valid for power of 2 iffts as the
    decomposition is easier to choose. This is not a strict requirement of
    the functions, but it is unlikely to be optimal to use anything but
    powers of 2.

    Parameters
    -----------
    invec : array
        The input vector. This should be the correlation between the data and
        the template at full sample rate. Ideally this is pre-transposed, but
        if not this will be transposed in this function.
    outvec : array
        The output of the first phase of the pruned FFT.
    indices : array of ints
        The indexes at which to calculate the full sample-rate SNR.
    pretransposed : boolean, default=False
        Used to indicate whether or not invec is pretransposed.
    Returns
    --------
    SNRs : array
        The complex SNRs at the indexes given by indices.
    """
    N1, N2 = splay(invec)
    # The first phase expects the transposed layout; transpose now unless
    # the caller already did it.
    if not pretransposed:
        invec = fft_transpose(invec)
    first_phase(invec, outvec, N1=N1, N2=N2)
    # Explicitly evaluate only the requested output samples.
    out = second_phase(outvec, indices, N1=N1, N2=N2)
    return out
| 7,186
| 29.45339
| 82
|
py
|
pycbc
|
pycbc-master/pycbc/fft/fftw.py
|
import os
from pycbc.types import zeros
import numpy as _np
import ctypes
import pycbc.scheme as _scheme
from pycbc.libutils import get_ctypes_library
from .core import _BaseFFT, _BaseIFFT
from ..types import check_aligned
# IMPORTANT NOTE TO PYCBC DEVELOPERS:
# Because this module is loaded automatically when present, and because
# no FFTW function should be called until the user has had the chance
# to set the threading backend, it is ESSENTIAL that simply loading this
# module should not actually *call* ANY functions.
# NOTE:
# When loading FFTW we use os.RTLD_DEEPBIND to avoid potential segfaults due
# to conflicts with MKL if both are present.
if hasattr(os, 'RTLD_DEEPBIND'):
FFTW_RTLD_MODE = os.RTLD_DEEPBIND
else:
FFTW_RTLD_MODE = ctypes.DEFAULT_MODE
#FFTW constants, these are pulled from fftw3.h
FFTW_FORWARD = -1
FFTW_BACKWARD = 1
FFTW_MEASURE = 0
FFTW_DESTROY_INPUT = 1 << 0
FFTW_UNALIGNED = 1 << 1
FFTW_CONSERVE_MEMORY = 1 << 2
FFTW_EXHAUSTIVE = 1 << 3
FFTW_PRESERVE_INPUT = 1 << 4
FFTW_PATIENT = 1 << 5
FFTW_ESTIMATE = 1 << 6
FFTW_WISDOM_ONLY = 1 << 21
# Load the single and double precision libraries
# We need to construct them directly with CDLL so
# we can give the RTLD_GLOBAL mode, which we must do
# in order to use the threaded libraries as well.
double_lib = get_ctypes_library('fftw3', ['fftw3'], mode=FFTW_RTLD_MODE)
float_lib = get_ctypes_library('fftw3f', ['fftw3f'], mode=FFTW_RTLD_MODE)
if (double_lib is None) or (float_lib is None):
raise ImportError("Unable to find FFTW libraries")
# Support for FFTW's two different threading backends
_fftw_threaded_lib = None
_fftw_threaded_set = False
_double_threaded_lib = None
_float_threaded_lib = None
HAVE_FFTW_THREADED = False
# Although we set the number of threads based on the scheme,
# we need a private variable that records the last value used so
# we know whether we need to call plan_with_nthreads() again.
_fftw_current_nthreads = 0
# This function sets the number of threads used internally by FFTW
# in planning. It just takes a number of threads, rather than itself
# looking at scheme.mgr.num_threads, because it should not be called
# directly, but only by functions that get the value they use from
# scheme.mgr.num_threads
def _fftw_plan_with_nthreads(nthreads):
    """Set the number of threads FFTW uses internally when planning.

    This should not be called directly; callers obtain the value from
    scheme.mgr.num_threads and pass it in.

    Parameters
    ----------
    nthreads : int
        Number of threads for FFTW planning.

    Raises
    ------
    ValueError
        If more than one thread is requested but threading is not enabled.
    """
    global _fftw_current_nthreads
    if not HAVE_FFTW_THREADED:
        if (nthreads > 1):
            raise ValueError("Threading is NOT enabled, but {0} > 1 threads specified".format(nthreads))
        # BUGFIX: previously assigned to a dead local variable
        # (_pycbc_current_threads); record the value in the module-level
        # cache so callers can correctly skip redundant calls.
        _fftw_current_nthreads = nthreads
    else:
        dplanwthr = _double_threaded_lib.fftw_plan_with_nthreads
        fplanwthr = _float_threaded_lib.fftwf_plan_with_nthreads
        dplanwthr.restype = None
        fplanwthr.restype = None
        dplanwthr(nthreads)
        fplanwthr(nthreads)
        _fftw_current_nthreads = nthreads
# This is a global dict-of-dicts used when initializing threads and
# setting the threading library
_fftw_threading_libnames = { 'unthreaded' : {'double' : None, 'float' : None},
'openmp' : {'double' : 'fftw3_omp', 'float' : 'fftw3f_omp'},
'pthreads' : {'double' : 'fftw3_threads', 'float' : 'fftw3f_threads'}}
def _init_threads(backend):
    """Set the FFTW threading backend and initialize it.

    Returns 0 on success and 1 if given a valid backend that cannot be
    loaded or initialized.

    Raises
    ------
    RuntimeError
        If called after the threading backend has already been set.
    ValueError
        If given an unknown backend name.
    """
    global _fftw_threaded_set
    global _fftw_threaded_lib
    global HAVE_FFTW_THREADED
    global _double_threaded_lib
    global _float_threaded_lib
    if _fftw_threaded_set:
        raise RuntimeError(
            "Threading backend for FFTW already set to {0}; cannot be changed".format(_fftw_threaded_lib))
    try:
        double_threaded_libname = _fftw_threading_libnames[backend]['double']
        float_threaded_libname = _fftw_threading_libnames[backend]['float']
    except KeyError:
        raise ValueError("Backend {0} for FFTW threading does not exist!".format(backend))
    if double_threaded_libname is not None:
        try:
            # For reasons Ian doesn't understand we should not load libgomp
            # first using RTLD_DEEPBIND, so force loading it here if needed
            if backend == 'openmp':
                get_ctypes_library('gomp', [], mode=ctypes.DEFAULT_MODE)
            # Note that the threaded libraries don't have their own pkg-config
            # files; we must look for them wherever we look for double or
            # single FFTW itself.
            _double_threaded_lib = get_ctypes_library(
                double_threaded_libname,
                ['fftw3'],
                mode=FFTW_RTLD_MODE
            )
            _float_threaded_lib = get_ctypes_library(
                float_threaded_libname,
                ['fftw3f'],
                mode=FFTW_RTLD_MODE
            )
            if (_double_threaded_lib is None) or (_float_threaded_lib is None):
                err_str = 'Unable to load threaded libraries'
                err_str += f'{double_threaded_libname} or '
                err_str += f'{float_threaded_libname}'
                raise RuntimeError(err_str)
            dret = _double_threaded_lib.fftw_init_threads()
            fret = _float_threaded_lib.fftwf_init_threads()
            # FFTW for some reason uses *0* to indicate failure. In C.
            if (dret == 0) or (fret == 0):
                return 1
            HAVE_FFTW_THREADED = True
            _fftw_threaded_set = True
            _fftw_threaded_lib = backend
            return 0
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Any failure to load or
            # initialize the threaded libraries is reported via the
            # return code, as before.
            return 1
    else:
        # We get here when we were given the 'unthreaded' backend
        HAVE_FFTW_THREADED = False
        _fftw_threaded_set = True
        _fftw_threaded_lib = backend
        return 0
def set_threads_backend(backend=None):
    """User-facing entry point for choosing the FFTW threading backend.

    With an explicit *backend* the choice is mandatory and failure raises;
    with the default (None) each known backend is attempted in order of
    preference until one initializes.
    """
    if backend is not None:
        if _init_threads(backend) != 0:
            # The user explicitly requested this backend, so failing to
            # initialize it is an error rather than a fallback case.
            raise RuntimeError("Could not initialize FFTW threading backend {0}".format(backend))
        return
    # Preference order: pthreads, then openmp, then unthreaded.
    for candidate in ('pthreads', 'openmp', 'unthreaded'):
        if _fftw_threaded_set:
            break
        _init_threads(candidate)
# Function to import system-wide wisdom files.
def import_sys_wisdom():
    """Import system-wide FFTW wisdom files for both precisions.

    Ensures the threading backend is chosen first, since no FFTW function
    may be called before that decision has been made.
    """
    if not _fftw_threaded_set:
        set_threads_backend()
    double_lib.fftw_import_system_wisdom()
    float_lib.fftwf_import_system_wisdom()
# We provide an interface for changing the "measure level"
# By default this is 0, which does no planning,
# but we provide functions to read and set it
_default_measurelvl = 0
def get_measure_level():
    """
    Get the current 'measure level' used in deciding how much effort to put into
    creating FFTW plans. From least effort (and shortest planning time) to most
    they are 0 to 3. No arguments.

    Returns
    -------
    int
        The current measure level (0-3).
    """
    return _default_measurelvl
def set_measure_level(mlvl):
    """
    Set the current 'measure level' used in deciding how much effort to expend
    creating FFTW plans. Must be an integer from 0 (least effort, shortest time)
    to 3 (most effort and time).
    """
    global _default_measurelvl
    if mlvl in (0, 1, 2, 3):
        _default_measurelvl = mlvl
    else:
        raise ValueError("Measure level can only be one of 0, 1, 2, or 3")
_flag_dict = {0: FFTW_ESTIMATE,
1: FFTW_MEASURE,
2: FFTW_MEASURE|FFTW_PATIENT,
3: FFTW_MEASURE|FFTW_PATIENT|FFTW_EXHAUSTIVE}
def get_flag(mlvl, aligned):
    """Translate a measure level and alignment state into FFTW planner flags."""
    flags = _flag_dict[mlvl]
    if not aligned:
        # Unaligned buffers need FFTW_UNALIGNED so the plan stays valid.
        flags |= FFTW_UNALIGNED
    return flags
# Add the ability to read/store wisdom to filenames
def wisdom_io(filename, precision, action):
    """Import or export an FFTW plan for single or double precision.

    Parameters
    ----------
    filename : str
        Path of the wisdom file to read or write.
    precision : {'float', 'double'}
        Which precision's wisdom to handle.
    action : {'import', 'export'}
        Whether to read or write the wisdom file.

    Raises
    ------
    RuntimeError
        If the underlying FFTW call reports failure.
    """
    # No FFTW function may be called before the threading backend is set.
    if not _fftw_threaded_set:
        set_threads_backend()
    fmap = {('float', 'import'): float_lib.fftwf_import_wisdom_from_filename,
            ('float', 'export'): float_lib.fftwf_export_wisdom_to_filename,
            ('double', 'import'): double_lib.fftw_import_wisdom_from_filename,
            ('double', 'export'): double_lib.fftw_export_wisdom_to_filename}
    f = fmap[(precision, action)]
    f.argtypes = [ctypes.c_char_p]
    retval = f(filename.encode())
    # FFTW returns 0 to indicate failure.
    if retval == 0:
        raise RuntimeError(('Could not {0} wisdom '
                            'from file {1}').format(action, filename))
def import_single_wisdom_from_filename(filename):
    """Import single-precision FFTW wisdom from ``filename``."""
    wisdom_io(filename, 'float', 'import')
def import_double_wisdom_from_filename(filename):
    """Import double-precision FFTW wisdom from ``filename``."""
    wisdom_io(filename, 'double', 'import')
def export_single_wisdom_to_filename(filename):
    """Export single-precision FFTW wisdom to ``filename``."""
    wisdom_io(filename, 'float', 'export')
def export_double_wisdom_to_filename(filename):
    """Export double-precision FFTW wisdom to ``filename``."""
    wisdom_io(filename, 'double', 'export')
def set_planning_limit(time):
    """Limit (in seconds) how long FFTW may spend planning, both precisions."""
    if not _fftw_threaded_set:
        set_threads_backend()
    # Apply the same limit to the double- and single-precision libraries.
    for setter in (double_lib.fftw_set_timelimit,
                   float_lib.fftwf_set_timelimit):
        setter.argtypes = [ctypes.c_double]
        setter(time)
# Create function maps for the dtypes
plan_function = {'float32': {'complex64': float_lib.fftwf_plan_dft_r2c_1d},
'float64': {'complex128': double_lib.fftw_plan_dft_r2c_1d},
'complex64': {'float32': float_lib.fftwf_plan_dft_c2r_1d,
'complex64': float_lib.fftwf_plan_dft_1d},
'complex128': {'float64': double_lib.fftw_plan_dft_c2r_1d,
'complex128': double_lib.fftw_plan_dft_1d}
}
execute_function = {'float32': {'complex64': float_lib.fftwf_execute_dft_r2c},
'float64': {'complex128': double_lib.fftw_execute_dft_r2c},
'complex64': {'float32': float_lib.fftwf_execute_dft_c2r,
'complex64': float_lib.fftwf_execute_dft},
'complex128': {'float64': double_lib.fftw_execute_dft_c2r,
'complex128': double_lib.fftw_execute_dft}
}
def plan(size, idtype, odtype, direction, mlvl, aligned, nthreads, inplace):
    """Create an FFTW plan and matching destructor for a 1D transform.

    Parameters
    ----------
    size : int
        Logical length of the transform (length of the *real* array for
        R2C/C2R transforms).
    idtype, odtype : numpy dtype
        Input and output dtypes; their combination selects the planner.
    direction : int
        FFTW_FORWARD or FFTW_BACKWARD (only used for C2C transforms).
    mlvl : int
        Measure level (0-3) controlling planning effort.
    aligned : bool
        Whether both buffers are suitably aligned.
    nthreads : int
        Number of threads to plan with.
    inplace : bool
        Whether input and output share storage.

    Returns
    -------
    (theplan, destroy)
        The opaque plan pointer and the ctypes destructor to free it.
    """
    if not _fftw_threaded_set:
        set_threads_backend()
    if nthreads != _fftw_current_nthreads:
        _fftw_plan_with_nthreads(nthreads)
    # Convert a measure-level to flags
    flags = get_flag(mlvl, aligned)
    # We make scratch arrays of the necessary type and size for planning.
    # Things are tricky for in-place transforms with one of input or
    # output real.
    if (idtype == odtype):
        # Complex-to-complex: input and output lengths are the same.
        ip = zeros(size, dtype=idtype)
        if inplace:
            op = ip
        else:
            op = zeros(size, dtype=odtype)
    elif (idtype.kind == 'c') and (odtype.kind == 'f'):
        # Complex-to-real (reverse): `size` is the real-array length. The
        # complex array may be larger (in bytes) and should therefore be
        # allocated first and reused for an in-place transform.
        # BUGFIX: floor division -- true division would hand a float length
        # to zeros() under Python 3.
        ip = zeros(size // 2 + 1, dtype=idtype)
        if inplace:
            op = ip.view(dtype=odtype)[0:size]
        else:
            op = zeros(size, dtype=odtype)
    else:
        # Real-to-complex (forward): `size` is again that of the real
        # array, and the complex array is allocated first for reuse.
        op = zeros(size // 2 + 1, dtype=odtype)
        if inplace:
            ip = op.view(dtype=idtype)[0:size]
        else:
            ip = zeros(size, dtype=idtype)
    # Look up the planner for this dtype combination.
    idtype = _np.dtype(idtype)
    odtype = _np.dtype(odtype)
    f = plan_function[str(idtype)][str(odtype)]
    f.restype = ctypes.c_void_p
    # C2C planners (forward and reverse) take an explicit direction.
    if idtype.kind == odtype.kind:
        f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p,
                      ctypes.c_int, ctypes.c_int]
        theplan = f(size, ip.ptr, op.ptr, direction, flags)
    # R2C and C2R planners infer direction from the dtypes.
    else:
        f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p,
                      ctypes.c_int]
        theplan = f(size, ip.ptr, op.ptr, flags)
    # The scratch arrays are only needed during planning.
    del ip, op
    # Choose the destructor matching the plan's precision.
    if idtype.char in ['f', 'F']:
        destroy = float_lib.fftwf_destroy_plan
    else:
        destroy = double_lib.fftw_destroy_plan
    destroy.argtypes = [ctypes.c_void_p]
    return theplan, destroy
# Note that we don't need to check whether we've set the threading backend
# in the following functions, since execute is not called directly and
# the fft and ifft will call plan first.
def execute(plan, invec, outvec):
    """Execute a previously-created FFTW plan on the given vectors.

    No threading-backend check is needed here: ``fft``/``ifft`` always call
    ``plan`` first, which performs that check.
    """
    f = execute_function[str(invec.dtype)][str(outvec.dtype)]
    f.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
    f(plan, invec.ptr, outvec.ptr)
def fft(invec, outvec, prec, itype, otype):
    """One-shot forward FFT via the function API: plan, execute, destroy."""
    nthreads = _scheme.mgr.state.num_threads
    both_aligned = check_aligned(invec.data) and check_aligned(outvec.data)
    is_inplace = invec.ptr == outvec.ptr
    theplan, destroy = plan(len(invec), invec.dtype, outvec.dtype,
                            FFTW_FORWARD, get_measure_level(),
                            both_aligned, nthreads, is_inplace)
    execute(theplan, invec, outvec)
    destroy(theplan)
def ifft(invec, outvec, prec, itype, otype):
    """One-shot inverse FFT via the function API: plan, execute, destroy.

    Note the plan length is that of the *output* vector for the inverse
    (C2R) case.
    """
    nthreads = _scheme.mgr.state.num_threads
    both_aligned = check_aligned(invec.data) and check_aligned(outvec.data)
    is_inplace = invec.ptr == outvec.ptr
    theplan, destroy = plan(len(outvec), invec.dtype, outvec.dtype,
                            FFTW_BACKWARD, get_measure_level(),
                            both_aligned, nthreads, is_inplace)
    execute(theplan, invec, outvec)
    destroy(theplan)
# Class based API
# First, set up a lot of different ctypes functions:
plan_many_c2c_f = float_lib.fftwf_plan_many_dft
plan_many_c2c_f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_int, ctypes.c_uint]
plan_many_c2c_f.restype = ctypes.c_void_p
plan_many_c2c_d = double_lib.fftw_plan_many_dft
plan_many_c2c_d.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_int, ctypes.c_uint]
plan_many_c2c_d.restype = ctypes.c_void_p
plan_many_c2r_f = float_lib.fftwf_plan_many_dft_c2r
plan_many_c2r_f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_uint]
plan_many_c2r_f.restype = ctypes.c_void_p
plan_many_c2r_d = double_lib.fftw_plan_many_dft_c2r
plan_many_c2r_d.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_uint]
plan_many_c2r_d.restype = ctypes.c_void_p
plan_many_r2c_f = float_lib.fftwf_plan_many_dft_r2c
plan_many_r2c_f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_uint]
plan_many_r2c_f.restype = ctypes.c_void_p
plan_many_r2c_d = double_lib.fftw_plan_many_dft_r2c
plan_many_r2c_d.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_uint]
plan_many_r2c_d.restype = ctypes.c_void_p
# Now set up a dictionary indexed by (str(input_dtype), str(output_dtype)) to
# translate input and output dtypes into the correct planning function.
_plan_funcs_dict = { ('complex64', 'complex64') : plan_many_c2c_f,
('float32', 'complex64') : plan_many_r2c_f,
('complex64', 'float32') : plan_many_c2r_f,
('complex128', 'complex128') : plan_many_c2c_d,
('float64', 'complex128') : plan_many_r2c_d,
('complex128', 'float64') : plan_many_c2r_d }
# To avoid multiple-inheritance, we set up a function that returns much
# of the initialization that will need to be handled in __init__ of both
# classes.
def _fftw_setup(fftobj):
    """Shared plan-creation helper for the FFT and IFFT classes.

    Builds an FFTW 'many' plan matching ``fftobj``'s vectors, batch count
    and strides, and returns the opaque plan pointer.
    """
    n = _np.asarray([fftobj.size], dtype=_np.int32)
    inembed = _np.asarray([len(fftobj.invec)], dtype=_np.int32)
    onembed = _np.asarray([len(fftobj.outvec)], dtype=_np.int32)
    nthreads = _scheme.mgr.state.num_threads
    # No FFTW call may happen before the threading backend is chosen.
    if not _fftw_threaded_set:
        set_threads_backend()
    if nthreads != _fftw_current_nthreads:
        _fftw_plan_with_nthreads(nthreads)
    mlvl = get_measure_level()
    aligned = check_aligned(fftobj.invec.data) and check_aligned(fftobj.outvec.data)
    flags = get_flag(mlvl, aligned)
    plan_func = _plan_funcs_dict[ (str(fftobj.invec.dtype), str(fftobj.outvec.dtype)) ]
    # Temporary buffers exist only so planning has valid memory to probe.
    tmpin = zeros(len(fftobj.invec), dtype = fftobj.invec.dtype)
    tmpout = zeros(len(fftobj.outvec), dtype = fftobj.outvec.dtype)
    # C2C
    if fftobj.outvec.kind == 'complex' and fftobj.invec.kind == 'complex':
        if fftobj.forward:
            ffd = FFTW_FORWARD
        else:
            ffd = FFTW_BACKWARD
        plan = plan_func(1, n.ctypes.data, fftobj.nbatch,
                         tmpin.ptr, inembed.ctypes.data, 1, fftobj.idist,
                         tmpout.ptr, onembed.ctypes.data, 1, fftobj.odist,
                         ffd, flags)
    # R2C or C2R (hence no direction argument for plan creation)
    else:
        plan = plan_func(1, n.ctypes.data, fftobj.nbatch,
                         tmpin.ptr, inembed.ctypes.data, 1, fftobj.idist,
                         tmpout.ptr, onembed.ctypes.data, 1, fftobj.odist,
                         flags)
    del tmpin
    del tmpout
    return plan
class FFT(_BaseFFT):
    """Class-based FFTW forward transform; plans once, executes many times."""
    def __init__(self, invec, outvec, nbatch=1, size=None):
        super(FFT, self).__init__(invec, outvec, nbatch, size)
        self.iptr = self.invec.ptr
        self.optr = self.outvec.ptr
        dtype_key = (str(self.invec.dtype), str(self.outvec.dtype))
        self._efunc = execute_function[dtype_key[0]][dtype_key[1]]
        self._efunc.argtypes = [ctypes.c_void_p] * 3
        self.plan = _fftw_setup(self)

    def execute(self):
        """Run the planned forward transform on the bound buffers."""
        self._efunc(self.plan, self.iptr, self.optr)
class IFFT(_BaseIFFT):
    """Class-based FFTW inverse transform; plans once, executes many times."""
    def __init__(self, invec, outvec, nbatch=1, size=None):
        super(IFFT, self).__init__(invec, outvec, nbatch, size)
        self.iptr = self.invec.ptr
        self.optr = self.outvec.ptr
        dtype_key = (str(self.invec.dtype), str(self.outvec.dtype))
        self._efunc = execute_function[dtype_key[0]][dtype_key[1]]
        self._efunc.argtypes = [ctypes.c_void_p] * 3
        self.plan = _fftw_setup(self)

    def execute(self):
        """Run the planned inverse transform on the bound buffers."""
        self._efunc(self.plan, self.iptr, self.optr)
def insert_fft_options(optgroup):
    """
    Inserts the options that affect the behavior of this backend

    Parameters
    ----------
    optgroup: fft_option
       OptionParser argument group whose options are extended
    """
    optgroup.add_argument("--fftw-measure-level",
                      help="Determines the measure level used in planning "
                           "FFTW FFTs; allowed values are: " + str([0,1,2,3]),
                      type=int, default=_default_measurelvl)
    optgroup.add_argument("--fftw-threads-backend",
                      help="Give 'openmp', 'pthreads' or 'unthreaded' to specify which threaded FFTW to use",
                      default=None)
    optgroup.add_argument("--fftw-input-float-wisdom-file",
                      help="Filename from which to read single-precision wisdom",
                      default=None)
    optgroup.add_argument("--fftw-input-double-wisdom-file",
                      help="Filename from which to read double-precision wisdom",
                      default=None)
    optgroup.add_argument("--fftw-output-float-wisdom-file",
                      help="Filename to which to write single-precision wisdom",
                      default=None)
    optgroup.add_argument("--fftw-output-double-wisdom-file",
                      help="Filename to which to write double-precision wisdom",
                      default=None)
    # Flag (store_true) rather than a value: system wisdom is all-or-nothing.
    optgroup.add_argument("--fftw-import-system-wisdom",
                      help = "If given, call fftw[f]_import_system_wisdom()",
                      action = "store_true")
def verify_fft_options(opt, parser):
    """Sanity-check the FFTW-specific command line options.

    Parameters
    ----------
    opt : object
        Result of parsing the CLI with OptionParser, or any object with the
        required attributes.
    parser : object
        OptionParser instance; its ``error`` method is invoked on failure.
    """
    if opt.fftw_measure_level not in (0, 1, 2, 3):
        parser.error("{0} is not a valid FFTW measure level.".format(opt.fftw_measure_level))
    wisdom_file_given = (opt.fftw_input_float_wisdom_file is not None
                         or opt.fftw_input_double_wisdom_file is not None)
    # System wisdom and explicit wisdom files are mutually exclusive.
    if opt.fftw_import_system_wisdom and wisdom_file_given:
        parser.error("If --fftw-import-system-wisdom is given, then you cannot give"
                     " either of --fftw-input-float-wisdom-file or --fftw-input-double-wisdom-file")
    chosen = opt.fftw_threads_backend
    if chosen is not None and chosen not in ('openmp', 'pthreads', 'unthreaded'):
        parser.error("Invalid threads backend; must be 'openmp', 'pthreads' or 'unthreaded'")
def from_cli(opt):
    """Apply parsed FFTW command line options.

    Sets the threading backend (which must happen before any FFTW call)
    and then the planning measure level.
    """
    # Since opt.fftw_threads_backend defaults to None, the following is always
    # appropriate:
    set_threads_backend(opt.fftw_threads_backend)
    # Set the user-provided measure level
    set_measure_level(opt.fftw_measure_level)
| 22,788
| 41.201852
| 109
|
py
|
pycbc
|
pycbc-master/pycbc/fft/backend_cpu.py
|
# Copyright (C) 2014 Josh Willis
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with with program; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
from .core import _list_available
_backend_dict = {'fftw' : 'fftw',
'mkl' : 'mkl',
'numpy' : 'npfft'}
_backend_list = ['mkl', 'fftw', 'numpy']
_alist, _adict = _list_available(_backend_list, _backend_dict)
cpu_backend = None
def set_backend(backend_list):
    """Record the first backend in *backend_list* that is available here
    as the current CPU-scheme FFT backend; leave it unchanged if none is."""
    global cpu_backend
    chosen = next((name for name in backend_list if name in _alist), None)
    if chosen is not None:
        cpu_backend = chosen
def get_backend():
    """Return the FFT backend module currently selected for the CPU scheme."""
    return _adict[cpu_backend]
set_backend(_backend_list)
| 1,285
| 30.365854
| 71
|
py
|
pycbc
|
pycbc-master/pycbc/fft/cuda_pyfft.py
|
# Copyright (C) 2012 Andrew Miller
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides the pyfft backend of the fast Fourier transform
for the PyCBC package.
"""
import pycbc.scheme
from pyfft.cuda import Plan
_plans = {}
#These dicts need to be cleared before the cuda context is destroyed
def _clear_plan_dict():
    """Drop all cached pyfft plans; must run before the CUDA context dies."""
    _plans.clear()
pycbc.scheme.register_clean_cuda(_clear_plan_dict)
#itype and otype are actual dtypes here, not strings
def _get_plan(itype, otype, inlen):
    """Return a cached pyfft Plan for (itype, otype, inlen), building and
    caching one on first use.  itype/otype are actual dtypes, not strings."""
    key = (itype, otype, inlen)
    if key not in _plans:
        _plans[key] = Plan(inlen, dtype=itype, normalize=False, fast_math=True)
    return _plans[key]
def fft(invec, outvec, prec, itype, otype):
    """Forward FFT via pyfft/CUDA; only complex-to-complex is implemented."""
    if itype == 'complex' and otype == 'complex':
        cached_plan = _get_plan(invec.dtype, outvec.dtype, len(invec))
        cached_plan.execute(invec.data, outvec.data)
    elif itype == 'real' and otype == 'complex':
        raise NotImplementedError("Only Complex to Complex FFTs for pyfft currently.")
def ifft(invec, outvec, prec, itype, otype):
    """Inverse FFT via pyfft/CUDA; only complex-to-complex is implemented."""
    if itype == 'complex' and otype == 'complex':
        cached_plan = _get_plan(invec.dtype, outvec.dtype, len(invec))
        cached_plan.execute(invec.data, outvec.data, inverse=True)
    elif itype == 'complex' and otype == 'real':
        raise NotImplementedError("Only Complex to Complex IFFTs for pyfft currently.")
| 2,283
| 33.089552
| 87
|
py
|
pycbc
|
pycbc-master/pycbc/fft/parser_support.py
|
# Copyright (C) 2012 Josh Willis, Andrew Miller
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This package provides a front-end to various fast Fourier transform
implementations within PyCBC.
"""
from .backend_support import get_backend_modules, get_backend_names
from .backend_support import set_backend, get_backend
# Next we add all of the machinery to set backends and their options
# from the command line.
def insert_fft_option_group(parser):
    """
    Adds the options used to choose an FFT backend. This should be used
    if your program supports the ability to select the FFT backend; otherwise
    you may simply call the fft and ifft functions and rely on default
    choices.  This function will also attempt to add any options exported
    by available backends through a function called insert_fft_options.
    These submodule functions should take the fft_group object as argument.

    Parameters
    ----------
    parser : object
        OptionParser instance
    """
    fft_group = parser.add_argument_group("Options for selecting the"
                                          " FFT backend and controlling its performance"
                                          " in this program.")
    # We have one argument to specify the backends. This becomes the default list used
    # if none is specified for a particular call of fft() of ifft().  Note that this
    # argument expects a *list* of inputs, as indicated by the nargs='*'.
    fft_group.add_argument("--fft-backends",
                      help="Preference list of the FFT backends. "
                           "Choices are: \n" + str(get_backend_names()),
                      nargs='*', default=[])
    # Give each backend module a chance to add its own options; modules
    # without an insert_fft_options hook are simply skipped.
    for backend in get_backend_modules():
        try:
            backend.insert_fft_options(fft_group)
        except AttributeError:
            pass
def verify_fft_options(opt, parser):
    """Validate the chosen FFT backends and delegate any backend-specific
    option checks.

    Parameters
    ----------
    opt : object
        Result of parsing the CLI with OptionParser, or any object with the
        required attributes.
    parser : object
        OptionParser instance; its ``error`` method is invoked on failure.
    """
    if opt.fft_backends:
        known_backends = get_backend_names()
        for candidate in opt.fft_backends:
            if candidate not in known_backends:
                parser.error("Backend {0} is not available".format(candidate))
    # Let each backend module validate its own options, if it defines a
    # verify_fft_options hook.
    for module in get_backend_modules():
        try:
            module.verify_fft_options(opt, parser)
        except AttributeError:
            pass
# The following function is the only one that is designed
# only to work with the active scheme. We'd like to fix that,
# eventually, but it's non-trivial because of how poorly MKL
# and FFTW cooperate.
def from_cli(opt):
    """Parses the command line options and sets the FFT backend
    for each (available) scheme. Aside from setting the default
    backend for this context, this function will also call (if
    it exists) the from_cli function of the specified backends in
    the *current* scheme; typically one would only call this function
    once inside of a scheme context manager, but if it is desired
    to perform FFTs both inside and outside of a context, then
    this function would need to be called again.

    Parameters
    ----------
    opt: object
        Result of parsing the CLI with OptionParser, or any object with
        the required attributes.

    Returns
    -------
    None
    """
    set_backend(opt.fft_backends)
    # Eventually, we need to be able to parse command lines
    # from more than just the current scheme's preference.  But
    # the big problem is that calling from_cli for more than one
    # backend could cause interference; apparently, FFTW and MKL
    # don't play nice unless FFTW has been compiled and linked
    # with icc (and possibly numpy, scipy, and/or Python as well?)
    backend = get_backend()
    try:
        backend.from_cli(opt)
    except AttributeError:
        # Backend has no CLI hook; nothing more to configure.
        pass
| 4,879
| 36.829457
| 88
|
py
|
pycbc
|
pycbc-master/pycbc/fft/func_api.py
|
# Copyright (C) 2012 Josh Willis, Andrew Miller
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This package provides a front-end to various fast Fourier transform
implementations within PyCBC.
"""
from pycbc.types import TimeSeries as _TimeSeries
from pycbc.types import FrequencySeries as _FrequencySeries
from .core import _check_fft_args, _check_fwd_args, _check_inv_args
from .backend_support import get_backend
def fft(invec, outvec):
    """ Fourier transform from invec to outvec.

    Perform a fourier transform. The type of transform is determined
    by the dtype of invec and outvec.

    Parameters
    ----------
    invec : TimeSeries or FrequencySeries
        The input vector.
    outvec : TimeSeries or FrequencySeries
        The output.
    """
    prec, itype, otype = _check_fft_args(invec, outvec)
    _check_fwd_args(invec, itype, outvec, otype, 1, None)
    # Delegate the actual transform to the current scheme's backend.
    get_backend().fft(invec, outvec, prec, itype, otype)
    # A forward FFT is normalised by the length of the *input* vector,
    # whether C2C or R2HC; also propagate epoch/sample-spacing metadata.
    if isinstance(invec, _TimeSeries):
        outvec._epoch = invec._epoch
        outvec._delta_f = 1.0 / (invec._delta_t * len(invec))
        outvec *= invec._delta_t
    elif isinstance(invec, _FrequencySeries):
        outvec._epoch = invec._epoch
        outvec._delta_t = 1.0 / (invec._delta_f * len(invec))
        outvec *= invec._delta_f
def ifft(invec, outvec):
    """ Inverse fourier transform from invec to outvec.

    Perform an inverse fourier transform. The type of transform is
    determined by the dtype of invec and outvec.

    Parameters
    ----------
    invec : TimeSeries or FrequencySeries
        The input vector.
    outvec : TimeSeries or FrequencySeries
        The output.
    """
    prec, itype, otype = _check_fft_args(invec, outvec)
    _check_inv_args(invec, itype, outvec, otype, 1, None)
    # Delegate the actual transform to the current scheme's backend.
    get_backend().ifft(invec, outvec, prec, itype, otype)
    # An inverse FFT is normalised by the length of the *output* vector,
    # whether C2C or HC2R; also propagate epoch/sample-spacing metadata.
    if isinstance(invec, _TimeSeries):
        outvec._epoch = invec._epoch
        outvec._delta_f = 1.0 / (invec._delta_t * len(outvec))
        outvec *= invec._delta_t
    elif isinstance(invec, _FrequencySeries):
        outvec._epoch = invec._epoch
        outvec._delta_t = 1.0 / (invec._delta_f * len(outvec))
        outvec *= invec._delta_f
| 3,470
| 35.925532
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/fft/backend_cuda.py
|
# Copyright (C) 2014 Josh Willis
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with with program; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import pycbc
from .core import _list_available
# Map from user-facing backend name to the module implementing it.
_backend_dict = {'cuda' : 'cufft',
                 'pyfft' : 'cuda_pyfft'}
# Priority order in which backends are tried when none is selected.
_backend_list = ['cuda','pyfft']
# Names and modules of the backends that actually imported successfully;
# only populated when CUDA support is available.
_alist = []
_adict = {}
if pycbc.HAVE_CUDA:
    _alist, _adict = _list_available(_backend_list,_backend_dict)
# Name of the currently selected CUDA FFT backend.
cuda_backend = None
def set_backend(backend_list):
    """Select the first entry of ``backend_list`` that is available."""
    global cuda_backend
    for candidate in backend_list:
        if candidate in _alist:
            cuda_backend = candidate
            break
def get_backend():
    """Return the module implementing the currently selected backend."""
    return _adict[cuda_backend]
# Initialise with the default priority list at import time.
set_backend(_backend_list)
| 1,315
| 28.909091
| 71
|
py
|
pycbc
|
pycbc-master/pycbc/fft/fft_callback.py
|
#!/usr/bin/python
import os, subprocess, ctypes
from mako.template import Template
full_corr = """
__device__ cufftComplex in_call(void* input, size_t offset,
void* caller_info, void* shared) {
cufftComplex r;
cufftComplex* hp = ((cufftComplex*) callback_params.htilde);
cufftComplex h = hp[offset];
cufftComplex s = ((cufftComplex*) input)[offset];
r.x = h.x * s.x + h.y * s.y;
r.y = h.x * s.y - h.y * s.x;
return r;
}
"""
zero_corr = """
__device__ cufftComplex in_call(void* input, size_t offset,
void* caller_info, void* shared) {
if (offset >= callback_params.in_kmax)
return (cufftComplex){0, 0};
else{
cufftComplex r;
cufftComplex s = ((cufftComplex*) input)[offset];
cufftComplex* hp = ((cufftComplex*) callback_params.htilde);
cufftComplex h = hp[offset];
r.x = h.x * s.x + h.y * s.y;
r.y = h.x * s.y - h.y * s.x;
return r;
}
}
"""
zero_out = """
__device__ void out_call(void *out, size_t offset, cufftComplex element,
void *caller_info, void *shared){
if (offset > callback_params.out_kmin && offset < callback_params.out_kmax)
((cufftComplex*) out)[offset] = element;
}
"""
copy_callback = """
__device__ cufftComplex correlate(void* input, size_t offset,
void* caller_info, void* shared) {
return ((cufftComplex*)input)[offset];
}
"""
copy_out = """
__device__ void out_call(void *out, size_t offset, cufftComplex element,
void *caller_info, void *shared){
((cufftComplex*) out)[offset] = element;
}
"""
real_out = """
__device__ void out_call(void *out, size_t offset, cufftComplex element,
void *caller_info, void *shared){
((cufftReal*) out)[offset] = element.x * element.x + element.y * element.y;
}
"""
copy_out_fp16 = """
__device__ void out_call(void *out, size_t offset, cufftComplex element,
void *caller_info, void *shared){
((short*) out)[offset*2] = __float2half_rn(element.x);
((short*) out)[offset*2+1] = __float2half_rn(element.y);
}
"""
no_out = """
__device__ void out_call(void *out, size_t offset, cufftComplex element,
void *caller_info, void *shared){
}
"""
fftsrc = Template("""
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <cufft.h>
#include <cufftXt.h>
typedef struct {
%for t, n in parameters:
${t} ${n};
%endfor
} param_t;
__constant__ param_t callback_params;
#define checkCudaErrors(val) __checkCudaErrors__ ( (val), #val, __FILE__, __LINE__ )
template <typename T>
inline void __checkCudaErrors__(T code, const char *func, const char *file, int line)
{
if (code) {
fprintf(stderr, "CUDA error at %s:%d code=%d \\"%s\\" \\n",
file, line, (unsigned int)code, func);
cudaDeviceReset();
exit(EXIT_FAILURE);
}
}
% if input_callback:
${input_callback}
__device__ cufftCallbackLoadC input_callback = in_call;
% endif
% if output_callback:
${output_callback}
__device__ cufftCallbackStoreC output_callback = out_call;
% endif
extern "C" cufftHandle* create_plan(unsigned int size){
cufftHandle* plan = new cufftHandle;
size_t work_size;
cufftCreate(plan);
checkCudaErrors(cufftMakePlan1d(*plan, size, CUFFT_C2C, 1, &work_size));
% if input_callback:
cufftCallbackLoadC h_input_callback;
checkCudaErrors(cudaMemcpyFromSymbol(&h_input_callback, input_callback,
sizeof(h_input_callback)));
checkCudaErrors(cufftXtSetCallback(*plan, (void **) &h_input_callback,
CUFFT_CB_LD_COMPLEX, 0));
% endif
% if output_callback:
cufftCallbackStoreC h_output_callback;
checkCudaErrors(cudaMemcpyFromSymbol(&h_output_callback, output_callback,
sizeof(h_output_callback)));
checkCudaErrors(cufftXtSetCallback(*plan, (void **) &h_output_callback,
CUFFT_CB_ST_COMPLEX, 0));
% endif
return plan;
}
extern "C" void execute(cufftHandle* plan, cufftComplex* in, cufftComplex* out, param_t* p){
if (p != NULL)
checkCudaErrors(cudaMemcpyToSymbolAsync(callback_params, p, sizeof(param_t), 0, cudaMemcpyHostToDevice, 0));
checkCudaErrors(cufftExecC2C(*plan, in, out, CUFFT_INVERSE));
}
""")
def compile(source, name):
    """ Compile the string source code into a shared object linked against
    the static version of cufft for callback support.

    Parameters
    ----------
    source : str
        The CUDA C source code to compile.
    name : str
        Basename used for the cached .cu/.o/.so/.hash files.

    Returns
    -------
    str
        Path to the compiled shared object.
    """
    # NOTE: keeps the historical name even though it shadows the builtin
    # `compile`; callers in this module depend on it.
    import hashlib

    # If we start using this again, we should find a better place for the cache
    cache = os.path.join('/tmp', name)
    hash_file = cache + ".hash"
    lib_file = cache + ".so"
    obj_file = cache + ".o"

    # Use a stable content digest instead of the builtin hash(): str hashes
    # are randomized per process (PYTHONHASHSEED), so the previous scheme
    # could never validate a cache written by an earlier run.
    digest = hashlib.sha256(source.encode('utf-8')).hexdigest()
    try:
        with open(hash_file, "r") as fhash:
            if fhash.read().strip() == digest:
                return lib_file
    except (OSError, IOError, ValueError):
        # Missing or unreadable cache entry: fall through and rebuild.
        pass

    src_file = cache + ".cu"
    with open(src_file, "w") as fsrc:
        fsrc.write(source)

    # Two-stage build: device-code compile, then link against static cufft.
    cmd = ["nvcc", "-ccbin", "g++", "-dc", "-m64",
           "--compiler-options", "'-fPIC'",
           "-o", obj_file,
           "-c", src_file]
    print(" ".join(cmd))
    subprocess.check_call(cmd)

    cmd = ["nvcc", "-shared", "-ccbin", "g++", "-m64",
           "-o", lib_file, obj_file, "-lcufft_static", "-lculibos"]
    print(" ".join(cmd))
    subprocess.check_call(cmd)

    # Record the digest only after a successful build; the handle is now
    # closed deterministically (the original leaked it).
    with open(hash_file, "w") as fhash:
        fhash.write(digest)
    return lib_file
def get_fn_plan(callback=None, out_callback=None, name='pycbc_cufft', parameters=None):
    """ Get the IFFT execute and plan functions.

    Renders the CUDA template with the given load/store callbacks and
    param_t members, compiles it, and returns the ctypes entry points.

    Parameters
    ----------
    callback : str, optional
        CUDA source for the load callback (must define ``in_call``).
    out_callback : str, optional
        CUDA source for the store callback (must define ``out_call``).
    name : str
        Cache basename passed to compile().
    parameters : list of (str, str), optional
        (type, name) pairs that make up the CUDA-side param_t struct.

    Returns
    -------
    (fn, plan) : tuple of ctypes functions
        ``fn`` executes the transform; ``plan`` creates a cufft plan.
    """
    if parameters is None:
        parameters = []
    source = fftsrc.render(input_callback=callback, output_callback=out_callback, parameters=parameters)
    path = compile(source, name)
    lib = ctypes.cdll.LoadLibrary(path)
    fn = lib.execute
    fn.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
    plan = lib.create_plan
    plan.restype = ctypes.c_void_p
    # Bug fix: was misspelled ``argyptes``, so the declaration was silently
    # ignored and the argument was never type-checked/converted.
    plan.argtypes = [ctypes.c_uint]
    return fn, plan
# Cache of (execute_fn, plan, htilde_ptr) tuples keyed by a short string.
_plans = {}
class param(ctypes.Structure):
    # Mirrors the CUDA-side param_t with a single template pointer member.
    _fields_ = [("htilde", ctypes.c_void_p)]
# Module-level instance reused across calls to avoid reallocation.
hparam = param()
def c2c_correlate_ifft(htilde, stilde, outvec):
    """Correlate htilde against stilde and inverse-FFT into outvec, using a
    cached cuFFT plan whose load callback performs the correlation on the
    fly (the ``full_corr`` kernel above).
    """
    key = 'cnf'
    if key not in _plans:
        fn, pfn = get_fn_plan(callback=full_corr, parameters = [("void*", "htilde")])
        # NOTE(review): create_plan() takes only the size argument; the
        # second argument here looks like a leftover (harmlessly ignored
        # under the cdecl convention) -- confirm and remove.
        plan = pfn(len(outvec), int(htilde.data.gpudata))
        _plans[key] = (fn, plan, int(htilde.data.gpudata))
    fn, plan, _ = _plans[key]
    # The template pointer is passed per-call through the param struct.
    hparam.htilde = htilde.data.gpudata
    fn(plan, int(stilde.data.gpudata), int(outvec.data.gpudata), ctypes.pointer(hparam))
class param2(ctypes.Structure):
    # Mirrors the CUDA param_t used by the zeroing callbacks: template
    # pointer plus the input cutoff and the output write window indices.
    _fields_ = [("htilde", ctypes.c_void_p),
                ("in_kmax", ctypes.c_uint),
                ("out_kmin", ctypes.c_uint),
                ("out_kmax", ctypes.c_uint)]
# Module-level instance reused across calls to avoid reallocation.
hparam_zeros = param2()
def c2c_half_correlate_ifft(htilde, stilde, outvec):
    """Correlate htilde against stilde (zeroed above htilde.end_idx) and
    inverse-FFT into outvec, writing only samples inside stilde's analysis
    window (the ``zero_corr``/``zero_out`` kernels above).
    """
    key = 'cn'
    if key not in _plans:
        fn, pfn = get_fn_plan(callback=zero_corr,
                              parameters = [("void*", "htilde"),
                                            ("unsigned int", "in_kmax"),
                                            ("unsigned int", "out_kmin"),
                                            ("unsigned int", "out_kmax")],
                              out_callback=zero_out)
        # NOTE(review): create_plan() takes only the size argument; the
        # second argument here looks like a leftover -- confirm and remove.
        plan = pfn(len(outvec), int(htilde.data.gpudata))
        _plans[key] = (fn, plan, int(htilde.data.gpudata))
    fn, plan, _ = _plans[key]
    # Per-call parameters: template pointer, input cutoff, output window.
    hparam_zeros.htilde = htilde.data.gpudata
    hparam_zeros.in_kmax = htilde.end_idx
    hparam_zeros.out_kmin = stilde.analyze.start
    hparam_zeros.out_kmax = stilde.analyze.stop
    fn(plan, int(stilde.data.gpudata), int(outvec.data.gpudata), ctypes.pointer(hparam_zeros))
| 8,478
| 31.737452
| 121
|
py
|
pycbc
|
pycbc-master/pycbc/fft/class_api.py
|
# Copyright (C) 2012 Josh Willis, Andrew Miller
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This package provides a front-end to various fast Fourier transform
implementations within PyCBC.
"""
from .backend_support import get_backend
def _fft_factory(invec, outvec, nbatch=1, size=None):
    """Return the forward-FFT class exposed by the current scheme's backend.

    Arguments mirror FFT.__new__ for uniformity but are not inspected here;
    only the active backend determines the class.
    """
    return getattr(get_backend(), 'FFT')
def _ifft_factory(invec, outvec, nbatch=1, size=None):
    """Return the inverse-FFT class exposed by the current scheme's backend.

    Arguments mirror IFFT.__new__ for uniformity but are not inspected here;
    only the active backend determines the class.
    """
    return getattr(get_backend(), 'IFFT')
class FFT(object):
    """ Create a forward FFT engine

    Instantiating this class actually returns an instance of the active
    backend's FFT engine, selected via the current scheme.

    Parameters
    ----------
    invec : complex64 or float32
        Input pycbc.types.Array (or subclass); its FFT will be computed
    outvec : complex64
        Output pycbc.types.Array (or subclass); it will hold the FFT of invec
    nbatch : int (default 1)
        When not one, invec and outvec are each treated as nbatch distinct
        vectors; their total lengths are the single-vector length times
        nbatch.
    size : int (default None)
        Logical size of each transform when nbatch is not 1. May be None
        when nbatch is 1, in which case the logical size is len(invec).

    The addresses in memory of both vectors should be divisible by
    pycbc.PYCBC_ALIGNMENT.
    """
    def __new__(cls, *args, **kwargs):
        # Dispatch construction to the backend-specific engine class.
        impl_cls = _fft_factory(*args, **kwargs)
        return impl_cls(*args, **kwargs)
class IFFT(object):
    """ Create a reverse FFT engine

    Instantiating this class actually returns an instance of the active
    backend's IFFT engine, selected via the current scheme.

    Parameters
    ----------
    invec : complex64
        Input pycbc.types.Array (or subclass); its IFFT will be computed
    outvec : complex64 or float32
        Output pycbc.types.Array (or subclass); it will hold the IFFT of invec
    nbatch : int (default 1)
        When not one, invec and outvec are each treated as nbatch distinct
        vectors; their total lengths are the single-vector length times
        nbatch.
    size : int (default None)
        Logical size of each transform when nbatch is not 1. May be None
        when nbatch is 1, in which case the logical size is len(outvec).

    The addresses in memory of both vectors should be divisible by
    pycbc.PYCBC_ALIGNMENT.
    """
    def __new__(cls, *args, **kwargs):
        # Dispatch construction to the backend-specific engine class.
        impl_cls = _ifft_factory(*args, **kwargs)
        return impl_cls(*args, **kwargs)
| 3,391
| 36.274725
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/fft/__init__.py
|
# Copyright (C) 2012 Josh Willis, Andrew Miller
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from .parser_support import insert_fft_option_group, verify_fft_options, from_cli
from .func_api import fft, ifft
from .class_api import FFT, IFFT
from .backend_support import get_backend_names
| 952
| 44.380952
| 81
|
py
|
pycbc
|
pycbc-master/pycbc/fft/npfft.py
|
# Copyright (C) 2012 Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides the numpy backend of the fast Fourier transform
for the PyCBC package.
"""
import logging
import numpy.fft
from .core import _check_fft_args
from .core import _BaseFFT, _BaseIFFT
# Error template for unsupported input/output type combinations.
_INV_FFT_MSG = ("I cannot perform an {} between data with an input type of "
                "{} and an output type of {}")
def fft(invec, outvec, _, itype, otype):
    """Forward FFT of ``invec`` into ``outvec`` via numpy.fft.

    The unused third positional argument is the precision string,
    accepted only for signature parity with the other backends.
    Supports complex->complex and real->complex transforms.
    """
    if invec.ptr == outvec.ptr:
        raise NotImplementedError("numpy backend of pycbc.fft does not "
                                  "support in-place transforms")
    if itype == 'complex' and otype == 'complex':
        result = numpy.fft.fft(invec.data)
    elif itype == 'real' and otype == 'complex':
        result = numpy.fft.rfft(invec.data)
    else:
        raise ValueError(_INV_FFT_MSG.format("FFT", itype, otype))
    # Cast to the output dtype and write into the pre-allocated buffer.
    outvec.data[:] = numpy.asarray(result, dtype=outvec.dtype)
def ifft(invec, outvec, _, itype, otype):
    """Inverse FFT of ``invec`` into ``outvec`` via numpy.fft.

    The unused third positional argument is the precision string,
    accepted only for signature parity with the other backends.
    The result is unnormalised (multiplied back by len(outvec)), matching
    the convention of the other PyCBC backends.
    """
    if invec.ptr == outvec.ptr:
        raise NotImplementedError("numpy backend of pycbc.fft does not "
                                  "support in-place transforms")
    if itype == 'complex' and otype == 'complex':
        result = numpy.fft.ifft(invec.data)
    elif itype == 'complex' and otype == 'real':
        result = numpy.fft.irfft(invec.data, len(outvec))
    else:
        raise ValueError(_INV_FFT_MSG.format("IFFT", itype, otype))
    outvec.data[:] = numpy.asarray(result, dtype=outvec.dtype)
    # Undo numpy's 1/N normalisation of the inverse transform.
    outvec *= len(outvec)
# Warning emitted when the (slow, convenience-only) numpy backend is used
# through the class-based API. Typo fix: "backed" -> "backend".
WARN_MSG = ("You are using the class-based PyCBC FFT API, with the numpy "
            "backend. This is provided for convenience only. If performance is "
            "important use the class-based API with one of the other backends "
            "(for e.g. MKL or FFTW)")
class FFT(_BaseFFT):
    """
    Class for performing FFTs via the numpy interface.

    Parameters follow _BaseFFT: pre-allocated input/output arrays plus the
    batch count and logical transform size. A warning is logged because
    this backend exists for convenience, not performance.
    """
    def __init__(self, invec, outvec, nbatch=1, size=None):
        # Modernised to zero-argument super() (Python 3 idiom).
        super().__init__(invec, outvec, nbatch, size)
        logging.warning(WARN_MSG)
        # Cache the precision/type strings so execute() avoids re-validating.
        self.prec, self.itype, self.otype = _check_fft_args(invec, outvec)
    def execute(self):
        """Run the forward transform from self.invec into self.outvec."""
        fft(self.invec, self.outvec, self.prec, self.itype, self.otype)
class IFFT(_BaseIFFT):
    """
    Class for performing IFFTs via the numpy interface.

    Parameters follow _BaseIFFT: pre-allocated input/output arrays plus the
    batch count and logical transform size. A warning is logged because
    this backend exists for convenience, not performance.
    """
    def __init__(self, invec, outvec, nbatch=1, size=None):
        # Modernised to zero-argument super() (Python 3 idiom).
        super().__init__(invec, outvec, nbatch, size)
        logging.warning(WARN_MSG)
        # Cache the precision/type strings so execute() avoids re-validating.
        self.prec, self.itype, self.otype = _check_fft_args(invec, outvec)
    def execute(self):
        """Run the inverse transform from self.invec into self.outvec."""
        ifft(self.invec, self.outvec, self.prec, self.itype, self.otype)
| 3,765
| 37.824742
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/fft/cufft.py
|
# Copyright (C) 2012 Josh Willis, Andrew Miller
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides the cufft backend of the fast Fourier transform
for the PyCBC package.
"""
import pycbc.scheme
# The following is a hack, to ensure that any error in importing
# cufft is treated as the module being unavailable at runtime.
# Ideally, the real error and its traceback would be appended to
# the ImportError we raise here. But the method to do that is very
# different between python 2 and python 3.
try:
import skcuda.fft as cu_fft
except:
raise ImportError("Unable to import skcuda.fft; try direct import"
" to get full traceback")
from .core import _BaseFFT, _BaseIFFT
# Plan caches keyed by (itype, otype, length[, batch]); see the getters below.
_forward_plans = {}
_reverse_plans = {}
#These dicts need to be cleared before the cuda context is destroyed
def _clear_plan_dicts():
    """Drop all cached cuFFT plans; registered as a CUDA cleanup hook."""
    _forward_plans.clear()
    _reverse_plans.clear()
pycbc.scheme.register_clean_cuda(_clear_plan_dicts)
#itype and otype are actual dtypes here, not strings
def _get_fwd_plan(itype, otype, inlen, batch=1):
    """Return a cached cuFFT forward plan for these dtypes/length/batch.

    Plans are cached at module level so repeated transforms of the same
    shape reuse a single plan instead of rebuilding it every call.
    """
    key = (itype, otype, inlen, batch)
    try:
        theplan = _forward_plans[key]
    except KeyError:
        theplan = cu_fft.Plan((inlen,), itype, otype, batch=batch)
        # Bug fix: store under the same 4-tuple key used for lookup; the
        # old code stored a 3-tuple (dropping `batch`), so the cache could
        # never hit and a new plan was created on every call.
        _forward_plans[key] = theplan
    return theplan
#The complex to real plan wants the actual size, not the N/2+1
#That's why the inverse plans use the outvec length, instead of the invec
def _get_inv_plan(itype, otype, outlen, batch=1):
    """Return a cached cuFFT inverse plan for these dtypes/length/batch.

    The plan is sized by the *output* length because C2R transforms want
    the full real size, not the N/2+1 complex length.
    """
    key = (itype, otype, outlen, batch)
    try:
        theplan = _reverse_plans[key]
    except KeyError:
        theplan = cu_fft.Plan((outlen,), itype, otype, batch=batch)
        # Bug fix: store under the same 4-tuple key used for lookup; the
        # old code stored a 3-tuple (dropping `batch`), so the cache could
        # never hit and a new plan was created on every call.
        _reverse_plans[key] = theplan
    return theplan
def fft(invec, outvec, prec, itype, otype):
    """One-shot forward FFT via cuFFT; prec/itype/otype are accepted only
    for signature parity with the other backends."""
    plan = _get_fwd_plan(invec.dtype, outvec.dtype, len(invec))
    cu_fft.fft(invec.data, outvec.data, plan)
def ifft(invec, outvec, prec, itype, otype):
    """One-shot inverse FFT via cuFFT; prec/itype/otype are accepted only
    for signature parity with the other backends."""
    plan = _get_inv_plan(invec.dtype, outvec.dtype, len(outvec))
    cu_fft.ifft(invec.data, outvec.data, plan)
class FFT(_BaseFFT):
    """Class-based forward FFT engine backed by a cached cuFFT plan."""
    def __init__(self, invec, outvec, nbatch=1, size=None):
        super(FFT, self).__init__(invec, outvec, nbatch, size)
        # The plan is cached at module level, keyed on dtypes/length/batch.
        self.plan = _get_fwd_plan(invec.dtype, outvec.dtype, len(invec), batch=nbatch)
        # Keep direct references to the device arrays for fast execute().
        self.invec = invec.data
        self.outvec = outvec.data
    def execute(self):
        # Transforms the arrays captured at construction time.
        cu_fft.fft(self.invec, self.outvec, self.plan)
class IFFT(_BaseIFFT):
    """Class-based inverse FFT engine backed by a cached cuFFT plan."""
    def __init__(self, invec, outvec, nbatch=1, size=None):
        super(IFFT, self).__init__(invec, outvec, nbatch, size)
        # Sized by the output length (C2R plans want the full real size).
        self.plan = _get_inv_plan(invec.dtype, outvec.dtype, len(outvec), batch=nbatch)
        # Keep direct references to the device arrays for fast execute().
        self.invec = invec.data
        self.outvec = outvec.data
    def execute(self):
        # Transforms the arrays captured at construction time.
        cu_fft.ifft(self.invec, self.outvec, self.plan)
| 3,710
| 35.029126
| 87
|
py
|
pycbc
|
pycbc-master/pycbc/inject/__init__.py
|
from pycbc.inject.injfilterrejector import *
from pycbc.inject.inject import *
| 79
| 25.666667
| 44
|
py
|
pycbc
|
pycbc-master/pycbc/inject/injfilterrejector.py
|
# Copyright (C) 2013 Ian Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module contains functions to filter injections with only useful templates.
This module implements a set of checks to test for each segment and template
combination whether injections contained within the segment are sufficiently
"similar" to the template to require a matched-filter. There are a few ways of
testing the "similarity" of templates and injections.
* A chirp time threshold rejects templates if chirp time difference is large
* A coarse match threshold rejects templates if a coarse overlap is small
"""
import numpy as np
from pycbc import DYN_RANGE_FAC
from pycbc.filter import match
from pycbc.pnutils import nearest_larger_binary_number
from pycbc.pnutils import mass1_mass2_to_tau0_tau3
from pycbc.types import FrequencySeries, zeros
from pycbc.types import MultiDetOptionAction
_injfilterrejector_group_help = \
("Options that, if injections are present in "
"this run, are responsible for performing pre-checks between injections "
"in the data being filtered and the current search template to determine "
"if the template has any chance of actually detecting the injection. "
"The parameters of this test are given by the various options below. "
"The --injection-filter-rejector-chirp-time-window and "
"--injection-filter-rejector-match-threshold options need to be provided "
"if those tests are desired. Other options will take default values "
"unless overriden. More details on these options follow.")
_injfilterer_cthresh_help = \
("If this value is not None and an "
"injection file is given then we will calculate the difference in "
"chirp time (tau_0) between the template and each injection in the "
"analysis segment. If the difference is greate than this threshold for "
"all injections then filtering is not performed. By default this will "
"be None.")
_injfilterer_mthresh_help = \
("If this value is not None and an "
"injection file is provided then we will calculate a 'coarse match' "
"between the template and each injection in the analysis segment. If the "
"match is less than this threshold for all injections then filtering is "
"not performed. Parameters for the 'coarse match' follow. By default "
"this value will be None.")
_injfilterer_deltaf_help = \
("If injections are present and a match threshold is "
"provided, this option specifies the frequency spacing that will be used "
"for injections, templates and PSD when computing the 'coarse match'. "
"Templates will be generated directly with this spacing. The PSD and "
"injections will be resampled.")
_injfilterer_fmax_help = \
("If injections are present and a match threshold is "
"provided, this option specifies the maximum frequency that will be used "
"for injections, templates and PSD when computing the 'coarse match'. "
"Templates will be generated directly with this max frequency. The PSD "
"and injections' frequency series will be truncated.")
_injfilterer_buffer_help = \
("If injections are present and either a match "
"threshold or a chirp-time window is given, we will determine if "
"injections are 'in' the specified analysis chunk by using the end "
"times. If this value is non-zero the analysis chunk is extended on both "
"sides by this amount before determining if injections are within the "
"given window.")
_injfilterer_flower_help = \
("If injections are present and either a match "
"threshold or a chirp-time window is given, this value is used to set "
"the lower frequency for determine chirp times or for calculating "
"matches. If this value is None the lower frequency used for the full "
"matched-filter is used. Otherwise this value is used.")
def insert_injfilterrejector_option_group(parser):
    """Add options for injfilterrejector to executable."""
    group = parser.add_argument_group(_injfilterrejector_group_help)
    # (argument name, value type, default, help text) for each option.
    option_specs = (
        ("--injection-filter-rejector-chirp-time-window", float, None,
         _injfilterer_cthresh_help),
        ("--injection-filter-rejector-match-threshold", float, None,
         _injfilterer_mthresh_help),
        ("--injection-filter-rejector-coarsematch-deltaf", float, 1.,
         _injfilterer_deltaf_help),
        ("--injection-filter-rejector-coarsematch-fmax", float, 256.,
         _injfilterer_fmax_help),
        ("--injection-filter-rejector-seg-buffer", int, 10,
         _injfilterer_buffer_help),
        ("--injection-filter-rejector-f-lower", int, None,
         _injfilterer_flower_help),
    )
    for opt_name, opt_type, opt_default, opt_help in option_specs:
        group.add_argument(opt_name, type=opt_type, default=opt_default,
                           help=opt_help)
def insert_injfilterrejector_option_group_multi_ifo(parser):
    """Add options for injfilterrejector to executable."""
    group = parser.add_argument_group(_injfilterrejector_group_help)
    # (argument name, value type, default, help text) for each option;
    # every option is declared per-detector via MultiDetOptionAction.
    option_specs = (
        ("--injection-filter-rejector-chirp-time-window", float, None,
         _injfilterer_cthresh_help),
        ("--injection-filter-rejector-match-threshold", float, None,
         _injfilterer_mthresh_help),
        ("--injection-filter-rejector-coarsematch-deltaf", float, 1.,
         _injfilterer_deltaf_help),
        ("--injection-filter-rejector-coarsematch-fmax", float, 256.,
         _injfilterer_fmax_help),
        ("--injection-filter-rejector-seg-buffer", int, 10,
         _injfilterer_buffer_help),
        ("--injection-filter-rejector-f-lower", int, None,
         _injfilterer_flower_help),
    )
    for opt_name, opt_type, opt_default, opt_help in option_specs:
        group.add_argument(opt_name, type=opt_type, default=opt_default,
                           nargs='+', metavar='IFO:VALUE',
                           action=MultiDetOptionAction, help=opt_help)
class InjFilterRejector(object):
"""Class for holding parameters for using injection/template pre-filtering.
This class is responsible for identifying where a matched-filter operation
between templates and data is unncessary because the injections contained
in the data will not match well with the given template.
"""
def __init__(self, injection_file, chirp_time_window,
match_threshold, f_lower, coarsematch_deltaf=1.,
coarsematch_fmax=256, seg_buffer=10):
"""Initialise InjFilterRejector instance."""
# Determine if InjFilterRejector is to be enabled
if injection_file is None or injection_file == 'False' or\
(chirp_time_window is None and match_threshold is None):
self.enabled = False
return
else:
self.enabled = True
# Store parameters
self.chirp_time_window = chirp_time_window
self.match_threshold = match_threshold
self.coarsematch_deltaf = coarsematch_deltaf
self.coarsematch_fmax = coarsematch_fmax
self.seg_buffer = seg_buffer
self.f_lower = f_lower
assert(self.f_lower is not None)
# Variables for storing arrays (reduced injections, memory
# for templates, reduced PSDs ...)
self.short_injections = {}
self._short_template_mem = None
self._short_psd_storage = {}
self._short_template_id = None
@classmethod
def from_cli(cls, opt):
"""Create an InjFilterRejector instance from command-line options."""
injection_file = opt.injection_file
chirp_time_window = \
opt.injection_filter_rejector_chirp_time_window
match_threshold = opt.injection_filter_rejector_match_threshold
coarsematch_deltaf = opt.injection_filter_rejector_coarsematch_deltaf
coarsematch_fmax = opt.injection_filter_rejector_coarsematch_fmax
seg_buffer = opt.injection_filter_rejector_seg_buffer
if opt.injection_filter_rejector_f_lower is not None:
f_lower = opt.injection_filter_rejector_f_lower
else:
# NOTE: Uses main low-frequency cutoff as default option. This may
# need some editing if using this in multi_inspiral, which I
# leave for future work, or if this is being used in another
# code which doesn't have --low-frequency-cutoff
f_lower = opt.low_frequency_cutoff
return cls(injection_file, chirp_time_window, match_threshold,
f_lower, coarsematch_deltaf=coarsematch_deltaf,
coarsematch_fmax=coarsematch_fmax,
seg_buffer=seg_buffer)
    @classmethod
    def from_cli_single_ifo(cls, opt, ifo):
        """Create an InjFilterRejector instance from command-line options.

        Like from_cli, but every option is a per-detector mapping and the
        values for `ifo` are selected.
        """
        injection_file = opt.injection_file[ifo]
        chirp_time_window = \
            opt.injection_filter_rejector_chirp_time_window[ifo]
        match_threshold = opt.injection_filter_rejector_match_threshold[ifo]
        coarsematch_deltaf = \
            opt.injection_filter_rejector_coarsematch_deltaf[ifo]
        coarsematch_fmax = opt.injection_filter_rejector_coarsematch_fmax[ifo]
        seg_buffer = opt.injection_filter_rejector_seg_buffer[ifo]
        if opt.injection_filter_rejector_f_lower[ifo] is not None:
            f_lower = opt.injection_filter_rejector_f_lower[ifo]
        else:
            # NOTE: Uses main low-frequency cutoff as default option. This may
            #       need some editing if using this in multi_inspiral, which I
            #       leave for future work, or if this is being used in another
            #       code which doesn't have --low-frequency-cutoff
            f_lower = opt.low_frequency_cutoff
        return cls(injection_file, chirp_time_window,
                   match_threshold, f_lower,
                   coarsematch_deltaf, coarsematch_fmax,
                   seg_buffer=seg_buffer)
@classmethod
def from_cli_multi_ifos(cls, opt, ifos):
"""Create an InjFilterRejector instance from command-line options."""
inj_filter_rejectors = {}
for ifo in ifos:
inj_filter_rejectors[ifo] = cls.from_cli_single_ifo(opt, ifo)
return inj_filter_rejectors
def generate_short_inj_from_inj(self, inj_waveform, simulation_id):
"""Generate and a store a truncated representation of inj_waveform."""
if not self.enabled or not self.match_threshold:
# Do nothing!
return
if simulation_id in self.short_injections:
err_msg = "An injection with simulation id "
err_msg += str(simulation_id)
err_msg += " has already been added. This suggests "
err_msg += "that your injection file contains injections with "
err_msg += "duplicate simulation_ids. This is not allowed."
raise ValueError(err_msg)
curr_length = len(inj_waveform)
new_length = int(nearest_larger_binary_number(curr_length))
# Don't want length less than 1/delta_f
while new_length * inj_waveform.delta_t < 1./self.coarsematch_deltaf:
new_length = new_length * 2
inj_waveform.resize(new_length)
inj_tilde = inj_waveform.to_frequencyseries()
# Dynamic range is important here!
inj_tilde_np = inj_tilde.numpy() * DYN_RANGE_FAC
delta_f = inj_tilde.get_delta_f()
new_freq_len = int(self.coarsematch_fmax / delta_f + 1)
# This shouldn't be a problem if injections are generated at
# 16384 Hz ... It is only a problem of injection sample rate
# gives a lower Nyquist than the trunc_f_max. If this error is
# ever raised one could consider zero-padding the injection.
assert(new_freq_len <= len(inj_tilde))
df_ratio = int(self.coarsematch_deltaf/delta_f)
inj_tilde_np = inj_tilde_np[:new_freq_len:df_ratio]
new_inj = FrequencySeries(inj_tilde_np, dtype=np.complex64,
delta_f=self.coarsematch_deltaf)
self.short_injections[simulation_id] = new_inj
def template_segment_checker(self, bank, t_num, segment):
"""Test if injections in segment are worth filtering with template.
Using the current template, current segment, and injections within that
segment. Test if the injections and sufficiently "similar" to any of
the injections to justify actually performing a matched-filter call.
Ther are two parts to this test: First we check if the chirp time of
the template is within a provided window of any of the injections. If
not then stop here, it is not worth filtering this template, segment
combination for this injection set. If this check passes we compute a
match between a coarse representation of the template and a coarse
representation of each of the injections. If that match is above a
user-provided value for any of the injections then filtering can
proceed. This is currently only available if using frequency-domain
templates.
Parameters
-----------
FIXME
Returns
--------
FIXME
"""
if not self.enabled:
# If disabled, always filter (ie. return True)
return True
# Get times covered by segment analyze and add buffer
seg_start_time = segment.start_time - self.seg_buffer
seg_end_time = segment.end_time + self.seg_buffer
# Chirp time test
if self.chirp_time_window is not None:
m1 = bank.table[t_num]['mass1']
m2 = bank.table[t_num]['mass2']
tau0_temp, _ = mass1_mass2_to_tau0_tau3(m1, m2, self.f_lower)
for inj in self.injection_params.table:
if isinstance(inj, np.record):
# hdf format file
end_time = inj['tc']
else:
# must be an xml file originally
end_time = inj.geocent_end_time + \
1E-9 * inj.geocent_end_time_ns
if not(seg_start_time <= end_time <= seg_end_time):
continue
tau0_inj, _ = \
mass1_mass2_to_tau0_tau3(inj.mass1, inj.mass2,
self.f_lower)
tau_diff = abs(tau0_temp - tau0_inj)
if tau_diff <= self.chirp_time_window:
break
else:
# Get's here if all injections are outside chirp-time window
return False
# Coarse match test
if self.match_threshold:
if self._short_template_mem is None:
# Set the memory for the short templates
wav_len = 1 + int(self.coarsematch_fmax /
self.coarsematch_deltaf)
self._short_template_mem = zeros(wav_len, dtype=np.complex64)
# Set the current short PSD to red_psd
try:
red_psd = self._short_psd_storage[id(segment.psd)]
except KeyError:
# PSD doesn't exist yet, so make it!
curr_psd = segment.psd.numpy()
step_size = int(self.coarsematch_deltaf / segment.psd.delta_f)
max_idx = int(self.coarsematch_fmax / segment.psd.delta_f) + 1
red_psd_data = curr_psd[:max_idx:step_size]
red_psd = FrequencySeries(red_psd_data, #copy=False,
delta_f=self.coarsematch_deltaf)
self._short_psd_storage[id(curr_psd)] = red_psd
# Set htilde to be the current short template
if not t_num == self._short_template_id:
# Set the memory for the short templates if unset
if self._short_template_mem is None:
wav_len = 1 + int(self.coarsematch_fmax /
self.coarsematch_deltaf)
self._short_template_mem = zeros(wav_len,
dtype=np.complex64)
# Generate short waveform
htilde = bank.generate_with_delta_f_and_max_freq(
t_num, self.coarsematch_fmax, self.coarsematch_deltaf,
low_frequency_cutoff=bank.table[t_num].f_lower,
cached_mem=self._short_template_mem)
self._short_template_id = t_num
self._short_template_wav = htilde
else:
htilde = self._short_template_wav
for ii, inj in enumerate(self.injection_params.table):
if isinstance(inj, np.record):
# hdf format file
end_time = inj['tc']
sim_id = self.injection_ids[ii]
else:
# must be an xml file originally
end_time = inj.geocent_end_time + \
1E-9 * inj.geocent_end_time_ns
sim_id = inj.simulation_id
if not(seg_start_time < end_time < seg_end_time):
continue
curr_inj = self.short_injections[sim_id]
o, _ = match(htilde, curr_inj, psd=red_psd,
low_frequency_cutoff=self.f_lower)
if o > self.match_threshold:
break
else:
# Get's here if all injections are outside match threshold
return False
return True
| 19,409
| 49.025773
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inject/inject.py
|
# Copyright (C) 2012 Alex Nitz, Tito Dal Canton
#
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""This module provides utilities for injecting signals into data"""
import os
import numpy as np
import lal
import copy
import logging
from abc import ABCMeta, abstractmethod
import h5py
from pycbc import waveform, frame, libutils
from pycbc.opt import LimitedSizeDict
from pycbc.waveform import (get_td_waveform, fd_det,
get_td_det_waveform_from_fd_det)
from pycbc.waveform import utils as wfutils
from pycbc.waveform import ringdown_td_approximants
from pycbc.types import float64, float32, TimeSeries, load_timeseries
from pycbc.detector import Detector
from pycbc.conversions import tau0_from_mass1_mass2
from pycbc.filter import resample_to_delta_t
import pycbc.io
from pycbc.io.ligolw import LIGOLWContentHandler
from ligo.lw import utils as ligolw_utils, ligolw, lsctables
sim = libutils.import_optional('lalsimulation')
# Dispatch table mapping strain dtype to the matching lalsimulation add-
# injection routine.  The lambdas defer attribute access on ``sim`` (an
# optional import, see ``libutils.import_optional`` above), presumably so
# that importing this module does not require lalsimulation -- the lookup
# only happens when an injection is actually performed.
injection_func_map = {
    np.dtype(float32): lambda *args: sim.SimAddInjectionREAL4TimeSeries(*args),
    np.dtype(float64): lambda *args: sim.SimAddInjectionREAL8TimeSeries(*args),
}
# Map parameter names used in pycbc to names used in the sim_inspiral
# table, if they are different
sim_inspiral_map = {
    'ra': 'longitude',
    'dec': 'latitude',
    'approximant': 'waveform',
}
def set_sim_data(inj, field, data):
    """Sets data of a SimInspiral instance.

    Parameters
    ----------
    inj : SimInspiral row
        The row to modify in place.
    field : str
        Parameter name in pycbc convention; translated to the sim_inspiral
        column name via ``sim_inspiral_map`` when the names differ.
    data : object
        The value to store.
    """
    try:
        sim_field = sim_inspiral_map[field]
    except KeyError:
        sim_field = field
    # for tc, map to geocentric times
    if sim_field == 'tc':
        inj.geocent_end_time = int(data)
        inj.geocent_end_time_ns = int(1e9*(data % 1))
    # for spin1 and spin2 we need data to be an array
    # BUGFIX: these branches must be mutually exclusive.  Previously 'tc'
    # fell through to the final else, so setattr(inj, 'tc', data) was also
    # attempted, which fails on slot-based SimInspiral rows (no 'tc' slot).
    elif sim_field in ['spin1', 'spin2']:
        setattr(inj, sim_field, [0, 0, data])
    else:
        setattr(inj, sim_field, data)
def projector(detector_name, inj, hp, hc, distance_scale=1):
    """ Use the injection row to project the polarizations into the
    detector frame.

    Parameters
    ----------
    detector_name : string
        Name of the detector the signal is projected into.
    inj : injection row
        Row providing coalescence time, sky location and polarization;
        either as ``tc``/``ra``/``dec`` attributes (hdf style) or
        ``time_geocent``/``longitude``/``latitude`` (sim_inspiral style).
    hp : TimeSeries
        Plus polarization (modified in place: scaled and time shifted).
    hc : TimeSeries
        Cross polarization (modified in place: scaled and time shifted).
    distance_scale : {1, float}, optional
        Factor to scale the distance of an injection with. The default is
        no scaling.

    Returns
    -------
    signal : TimeSeries
        h(t) as seen in the given detector.
    """
    detector = Detector(detector_name)
    hp /= distance_scale
    hc /= distance_scale
    try:
        tc = inj.tc
        ra = inj.ra
        dec = inj.dec
    except AttributeError:
        # sim_inspiral-style rows use different field names.
        # (Narrowed from a bare ``except:`` which would also have swallowed
        # KeyboardInterrupt/SystemExit.)
        tc = inj.time_geocent
        ra = inj.longitude
        dec = inj.latitude
    hp.start_time += tc
    hc.start_time += tc
    # taper the polarizations; rows without a 'taper' field are left as-is
    try:
        hp_tapered = wfutils.taper_timeseries(hp, inj.taper)
        hc_tapered = wfutils.taper_timeseries(hc, inj.taper)
    except AttributeError:
        hp_tapered = hp
        hc_tapered = hc
    projection_method = 'lal'
    if hasattr(inj, 'detector_projection_method'):
        projection_method = inj.detector_projection_method
    logging.info('Injecting at %s, method is %s', tc, projection_method)
    # compute the detector response and add it to the strain
    signal = detector.project_wave(hp_tapered, hc_tapered,
                                   ra, dec, inj.polarization,
                                   method=projection_method,
                                   reference_time=tc,)
    return signal
def legacy_approximant_name(apx):
    """Convert the old style xml approximant name to a name
    and phase_order. Alex: I hate this function. Please delete this when we
    use Collin's new tables.

    Parameters
    ----------
    apx : str
        Legacy approximant string, possibly with an embedded PN order.

    Returns
    -------
    name : str
        The canonical approximant name.
    order : int
        The phase order parsed from the string, or -1 if none was found.
    """
    apx = str(apx)
    try:
        order = sim.GetOrderFromString(apx)
    except Exception:
        # Narrowed from a bare ``except:`` which would also have caught
        # KeyboardInterrupt/SystemExit.
        print("Warning: Could not read phase order from string, using default")
        order = -1
    name = sim.GetStringFromApproximant(sim.GetApproximantFromString(apx))
    return name, order
class _XMLInjectionSet(object):
    """Manages sets of injections: reads injections from LIGOLW XML files
    and injects them into time series.

    Parameters
    ----------
    sim_file : string
        Path to a LIGOLW XML file containing a SimInspiralTable
        with injection definitions.
    \**kwds :
        Extra keyword arguments are forwarded to the waveform generation
        function when generating injections.

    Attributes
    ----------
    indoc
    table
    """
    def __init__(self, sim_file, **kwds):
        self.indoc = ligolw_utils.load_filename(
            sim_file, False, contenthandler=LIGOLWContentHandler)
        self.table = lsctables.SimInspiralTable.get_table(self.indoc)
        # forwarded to get_td_waveform for every injection
        self.extra_args = kwds
    def apply(self, strain, detector_name, f_lower=None, distance_scale=1,
              simulation_ids=None,
              inj_filter_rejector=None,
              injection_sample_rate=None,):
        """Add injections (as seen by a particular detector) to a time series.

        Parameters
        ----------
        strain : TimeSeries
            Time series to inject signals into, of type float32 or float64.
        detector_name : string
            Name of the detector used for projecting injections.
        f_lower : {None, float}, optional
            Low-frequency cutoff for injected signals. If None, use value
            provided by each injection.
        distance_scale: {1, float}, optional
            Factor to scale the distance of an injection with. The default is
            no scaling.
        simulation_ids: iterable, optional
            If given, only inject signals with the given simulation IDs.
        inj_filter_rejector: InjFilterRejector instance; optional, default=None
            If given send each injected waveform to the InjFilterRejector
            instance so that it can store a reduced representation of that
            injection if necessary.
        injection_sample_rate: float, optional
            The sample rate to generate the signal before injection

        Returns
        -------
        injected : copy of self
            A copy of this injection set whose ``table`` contains only the
            injections that were actually added to ``strain``.

        Raises
        ------
        TypeError
            For invalid types of `strain`.
        """
        if strain.dtype not in (float32, float64):
            raise TypeError("Strain dtype must be float32 or float64, not " \
                    + str(strain.dtype))
        lalstrain = strain.lal()
        # pad the segment by the light-travel time across the Earth so
        # injections arriving from any sky position are caught
        earth_travel_time = lal.REARTH_SI / lal.C_SI
        t0 = float(strain.start_time) - earth_travel_time
        t1 = float(strain.end_time) + earth_travel_time
        # pick lalsimulation injection function
        add_injection = injection_func_map[strain.dtype]
        delta_t = strain.delta_t
        if injection_sample_rate is not None:
            delta_t = 1.0 / injection_sample_rate
        injections = self.table
        if simulation_ids:
            injections = [inj for inj in injections \
                          if inj.simulation_id in simulation_ids]
        injection_parameters = []
        for inj in injections:
            f_l = inj.f_lower if f_lower is None else f_lower
            # roughly estimate if the injection may overlap with the segment
            # Add 2s to end_time to account for ringdown and light-travel delay
            end_time = inj.time_geocent + 2
            inj_length = tau0_from_mass1_mass2(inj.mass1, inj.mass2, f_l)
            # Start time is taken as twice approx waveform length with a 1s
            # safety buffer
            start_time = inj.time_geocent - 2 * (inj_length + 1)
            if end_time < t0 or start_time > t1:
                continue
            signal = self.make_strain_from_inj_object(inj, delta_t,
                    detector_name, f_lower=f_l, distance_scale=distance_scale)
            signal = resample_to_delta_t(signal, strain.delta_t, method='ldas')
            if float(signal.start_time) > t1:
                continue
            signal = signal.astype(strain.dtype)
            signal_lal = signal.lal()
            add_injection(lalstrain, signal_lal, None)
            injection_parameters.append(inj)
            if inj_filter_rejector is not None:
                sid = inj.simulation_id
                inj_filter_rejector.generate_short_inj_from_inj(signal, sid)
        strain.data[:] = lalstrain.data.data[:]
        # return a copy of self restricted to the injections actually made
        injected = copy.copy(self)
        injected.table = lsctables.SimInspiralTable()
        injected.table += injection_parameters
        if inj_filter_rejector is not None:
            inj_filter_rejector.injection_params = injected
        return injected
    def make_strain_from_inj_object(self, inj, delta_t, detector_name,
                                    f_lower=None, distance_scale=1):
        """Make a h(t) strain time-series from an injection object as read from
        a sim_inspiral table, for example.

        Parameters
        -----------
        inj : injection object
            The injection object to turn into a strain h(t).
        delta_t : float
            Sample rate to make injection at.
        detector_name : string
            Name of the detector used for projecting injections.
        f_lower : {None, float}, optional
            Low-frequency cutoff for injected signals. If None, use value
            provided by each injection.
        distance_scale: {1, float}, optional
            Factor to scale the distance of an injection with. The default is
            no scaling.

        Returns
        --------
        signal : TimeSeries
            h(t) corresponding to the injection.
        """
        f_l = inj.f_lower if f_lower is None else f_lower
        name, phase_order = legacy_approximant_name(inj.waveform)
        # compute the waveform time series
        hp, hc = get_td_waveform(
            inj, approximant=name, delta_t=delta_t,
            phase_order=phase_order,
            f_lower=f_l, distance=inj.distance,
            **self.extra_args)
        return projector(detector_name,
                         inj, hp, hc, distance_scale=distance_scale)
    def end_times(self):
        """Return the end times of all injections"""
        return [inj.time_geocent for inj in self.table]
    @staticmethod
    def write(filename, samples, write_params=None, static_args=None):
        """Writes the injection samples to the given xml.

        Parameters
        ----------
        filename : str
            The name of the file to write to.
        samples : io.FieldArray
            FieldArray of parameters.
        write_params : list, optional
            Only write the given parameter names. All given names must be keys
            in ``samples``. Default is to write all parameters in ``samples``.
        static_args : dict, optional
            Dictionary mapping static parameter names to values. These are
            written to the ``attrs``.
        """
        xmldoc = ligolw.Document()
        xmldoc.appendChild(ligolw.LIGO_LW())
        simtable = lsctables.New(lsctables.SimInspiralTable)
        xmldoc.childNodes[0].appendChild(simtable)
        if static_args is None:
            static_args = {}
        if write_params is None:
            write_params = samples.fieldnames
        for ii in range(samples.size):
            # NOTE: local name shadows the module-level ``sim``
            # (lalsimulation) within this method
            sim = lsctables.SimInspiral()
            # initialize all elements to None
            for col in sim.__slots__:
                setattr(sim, col, None)
            for field in write_params:
                data = samples[ii][field]
                set_sim_data(sim, field, data)
            # set any static args
            for (field, value) in static_args.items():
                set_sim_data(sim, field, value)
            simtable.append(sim)
        ligolw_utils.write_filename(xmldoc, filename, compress='auto')
# -----------------------------------------------------------------------------
class _HDFInjectionSet(metaclass=ABCMeta):
    """Manages sets of injections: reads injections from hdf files
    and injects them into time series.

    Parameters
    ----------
    sim_file : string
        Path to an hdf file containing injections.
    \**kwds :
        The rest of the keyword arguments are passed to the waveform generation
        function when generating injections.

    Attributes
    ----------
    filehandler
    table
    static_args
    extra_args
    required_params : tuple
        Parameter names that must exist in the injection HDF file in order to
        create an injection of that type.
    """
    # subclasses override these: the array type backing ``table``, the
    # string stored in the file's 'injtype' attr, and required parameters
    _tableclass = pycbc.io.FieldArray
    injtype = None
    required_params = ()
    def __init__(self, sim_file, hdf_group=None, **kwds):
        # open the file
        fp = h5py.File(sim_file, 'r')
        group = fp if hdf_group is None else fp[hdf_group]
        # keep the handle open: datasets are read eagerly below, but the
        # handle is stored for callers that need file-level metadata
        self.filehandler = fp
        # get parameters
        parameters = list(group.keys())
        # get all injection parameter values
        injvals = {param: group[param][()] for param in parameters}
        # make sure Numpy S strings are loaded as strings and not bytestrings
        # (which could mess with approximant names, for example)
        for k in injvals:
            if injvals[k].dtype.kind == 'S':
                injvals[k] = injvals[k].astype('U')
        # if there were no variable args, then we only have a single injection
        if len(parameters) == 0:
            numinj = 1
        else:
            numinj = tuple(injvals.values())[0].size
        # add any static args in the file
        try:
            # ensure parameter names are string types
            self.static_args = group.attrs['static_args'].astype('U')
        except KeyError:
            self.static_args = []
        parameters.extend(self.static_args)
        # we'll expand the static args to be arrays with the same size as
        # the other values
        for param in self.static_args:
            val = group.attrs[param]
            # if val is a list or numpy array, we need to store it as an
            # object; otherwise, we'll get a shape mismatch between fields
            if isinstance(val, (np.ndarray, list, tuple)):
                arr = np.empty(numinj, dtype=object)
                for ii in range(numinj):
                    arr[ii] = val
            else:
                # otherwise, we can just repeat the value the needed number of
                # times
                arr = np.repeat(val, numinj)
            # make sure any byte strings are stored as strings instead
            if arr.dtype.char == 'S':
                arr = arr.astype('U')
            injvals[param] = arr
        # make sure required parameters are provided
        missing = set(self.required_params) - set(injvals.keys())
        if missing:
            raise ValueError("required parameter(s) {} not found in the given "
                             "injection file".format(', '.join(missing)))
        # initialize the table
        self.table = self._tableclass.from_kwargs(**injvals)
        # save the extra arguments
        self.extra_args = kwds
    @abstractmethod
    def apply(self, strain, detector_name, distance_scale=1,
              simulation_ids=None, inj_filter_rejector=None,
              **kwargs):
        """Adds injections to a detector's time series."""
        pass
    @abstractmethod
    def make_strain_from_inj_object(self, inj, delta_t, detector_name,
                                    distance_scale=1, **kwargs):
        """Make a h(t) strain time-series from an injection object.
        """
        pass
    @abstractmethod
    def end_times(self):
        """Return the end times of all injections"""
        pass
    @abstractmethod
    def supported_approximants(self):
        """Return a list of the supported approximants."""
        pass
    @classmethod
    def write(cls, filename, samples, write_params=None, static_args=None,
              **metadata):
        """Writes the injection samples to the given hdf file.

        Parameters
        ----------
        filename : str
            The name of the file to write to.
        samples : io.FieldArray
            FieldArray of parameters.
        write_params : list, optional
            Only write the given parameter names. All given names must be keys
            in ``samples``. Default is to write all parameters in ``samples``.
        static_args : dict, optional
            Dictionary mapping static parameter names to values. These are
            written to the ``attrs``.
        \**metadata :
            All other keyword arguments will be written to the file's attrs.
        """
        with h5py.File(filename, 'w') as fp:
            # write metadata
            if static_args is None:
                static_args = {}
            fp.attrs["static_args"] = list(map(str, static_args.keys()))
            fp.attrs['injtype'] = cls.injtype
            for key, val in metadata.items():
                fp.attrs[key] = val
            if write_params is None:
                write_params = samples.fieldnames
            for arg, val in static_args.items():
                try:
                    fp.attrs[arg] = val
                except TypeError:
                    # can get this in python 3 if the val was numpy.str_ type
                    # try decoding it and writing
                    fp.attrs[arg] = str(val)
            for field in write_params:
                try:
                    fp[field] = samples[field]
                except TypeError as e:
                    # can get this in python 3 if the val was a numpy.str_ type
                    # we'll try again as a string type
                    if samples[field].dtype.char == 'U':
                        fp[field] = samples[field].astype('S')
                    else:
                        raise e
class CBCHDFInjectionSet(_HDFInjectionSet):
    """Manages CBC injections.
    """
    _tableclass = pycbc.io.WaveformArray
    injtype = 'cbc'
    required_params = ('tc',)
    def apply(self, strain, detector_name, f_lower=None, distance_scale=1,
              simulation_ids=None,
              inj_filter_rejector=None,
              injection_sample_rate=None,):
        """Add injections (as seen by a particular detector) to a time series.

        Parameters
        ----------
        strain : TimeSeries
            Time series to inject signals into, of type float32 or float64.
        detector_name : string
            Name of the detector used for projecting injections.
        f_lower : {None, float}, optional
            Low-frequency cutoff for injected signals. If None, use value
            provided by each injection.
        distance_scale: {1, float}, optional
            Factor to scale the distance of an injection with. The default is
            no scaling.
        simulation_ids: iterable, optional
            If given, only inject signals with the given simulation IDs.
        inj_filter_rejector: InjFilterRejector instance; optional, default=None
            If given send each injected waveform to the InjFilterRejector
            instance so that it can store a reduced representation of that
            injection if necessary.
        injection_sample_rate: float, optional
            The sample rate to generate the signal before injection

        Returns
        -------
        injected : copy of self
            A copy of this injection set whose ``table`` contains only the
            injections that were actually added to ``strain``.

        Raises
        ------
        TypeError
            For invalid types of `strain`.
        """
        if strain.dtype not in (float32, float64):
            raise TypeError("Strain dtype must be float32 or float64, not " \
                    + str(strain.dtype))
        lalstrain = strain.lal()
        if self.table[0]['approximant'] in fd_det:
            # detector-frame approximants already include the light-travel
            # delay, so no extra padding of the segment is needed
            t0 = float(strain.start_time)
            t1 = float(strain.end_time)
        else:
            earth_travel_time = lal.REARTH_SI / lal.C_SI
            t0 = float(strain.start_time) - earth_travel_time
            t1 = float(strain.end_time) + earth_travel_time
        # pick lalsimulation injection function
        add_injection = injection_func_map[strain.dtype]
        delta_t = strain.delta_t
        if injection_sample_rate is not None:
            delta_t = 1.0 / injection_sample_rate
        injections = self.table
        if simulation_ids:
            injections = injections[list(simulation_ids)]
        injected_ids = []
        for ii, inj in enumerate(injections):
            f_l = inj.f_lower if f_lower is None else f_lower
            # roughly estimate if the injection may overlap with the segment
            # Add 2s to end_time to account for ringdown and light-travel delay
            end_time = inj.tc + 2
            inj_length = tau0_from_mass1_mass2(inj.mass1, inj.mass2, f_l)
            # Start time is taken as twice approx waveform length with a 1s
            # safety buffer
            start_time = inj.tc - 2 * (inj_length + 1)
            if end_time < t0 or start_time > t1:
                continue
            signal = self.make_strain_from_inj_object(inj, delta_t,
                     detector_name, f_lower=f_l,
                     distance_scale=distance_scale)
            signal = resample_to_delta_t(signal, strain.delta_t, method='ldas')
            if float(signal.start_time) > t1:
                continue
            signal = signal.astype(strain.dtype)
            signal_lal = signal.lal()
            add_injection(lalstrain, signal_lal, None)
            injected_ids.append(ii)
            if inj_filter_rejector is not None:
                inj_filter_rejector.generate_short_inj_from_inj(signal, ii)
        strain.data[:] = lalstrain.data.data[:]
        # return a copy of self restricted to the injections actually made
        injected = copy.copy(self)
        injected.table = injections[np.array(injected_ids).astype(int)]
        if inj_filter_rejector is not None:
            # NOTE(review): nothing in this class ever sets an 'injected'
            # attribute on the rejector, so this branch appears unreachable;
            # np.concatenate over injection-set objects also looks suspect.
            # Confirm intent before relying on this accumulation path.
            if hasattr(inj_filter_rejector, 'injected'):
                prev_p = inj_filter_rejector.injection_params
                prev_id = inj_filter_rejector.injection_ids
                injected = np.concatenate([prev_p, injected])
                injected_ids = np.concatenate([prev_id, injected_ids])
            inj_filter_rejector.injection_params = injected
            inj_filter_rejector.injection_ids = injected_ids
        return injected
    def make_strain_from_inj_object(self, inj, delta_t, detector_name,
                                    f_lower=None, distance_scale=1):
        """Make a h(t) strain time-series from an injection object.

        Parameters
        -----------
        inj : injection object
            The injection object to turn into a strain h(t). Can be any
            object which has waveform parameters as attributes, such as an
            element in a ``WaveformArray``.
        delta_t : float
            Sample rate to make injection at.
        detector_name : string
            Name of the detector used for projecting injections.
        f_lower : {None, float}, optional
            Low-frequency cutoff for injected signals. If None, use value
            provided by each injection.
        distance_scale: {1, float}, optional
            Factor to scale the distance of an injection with. The default is
            no scaling.

        Returns
        --------
        signal : TimeSeries
            h(t) corresponding to the injection.
        """
        if f_lower is None:
            f_l = inj.f_lower
        else:
            f_l = f_lower
        if inj['approximant'] in fd_det:
            # detector-frame approximant: generated directly per-detector,
            # so no polarization projection step is needed
            strain = get_td_det_waveform_from_fd_det(
                        inj, delta_t=delta_t, f_lower=f_l,
                        ifos=detector_name, **self.extra_args)[detector_name]
            strain /= distance_scale
        else:
            # compute the waveform time series
            hp, hc = get_td_waveform(inj, delta_t=delta_t, f_lower=f_l,
                                     **self.extra_args)
            strain = projector(detector_name,
                               inj, hp, hc, distance_scale=distance_scale)
        return strain
    def end_times(self):
        """Return the end times of all injections"""
        return self.table.tc
    @staticmethod
    def supported_approximants():
        # union of all time- and frequency-domain approximants, plus the
        # detector-frame (fd_det) ones
        all_apprxs = []
        for d in [waveform.waveform.td_wav, waveform.waveform.fd_wav]:
            for key in d:
                all_apprxs.extend(d[key])
        all_apprxs.extend(waveform.waveform.fd_det)
        return list(set(all_apprxs))
class RingdownHDFInjectionSet(_HDFInjectionSet):
    """Manages a ringdown injection: reads injection from hdf file
    and injects it into time series.
    """
    injtype = 'ringdown'
    required_params = ('tc',)
    def apply(self, strain, detector_name, distance_scale=1,
              simulation_ids=None, inj_filter_rejector=None,
              injection_sample_rate=None):
        """Add injection (as seen by a particular detector) to a time series.

        Parameters
        ----------
        strain : TimeSeries
            Time series to inject signals into, of type float32 or float64.
        detector_name : string
            Name of the detector used for projecting injections.
        distance_scale: float, optional
            Factor to scale the distance of an injection with. The default (=1)
            is no scaling.
        simulation_ids: iterable, optional
            If given, only inject signals with the given simulation IDs.
        inj_filter_rejector: InjFilterRejector instance, optional
            Not implemented. If not ``None``, a ``NotImplementedError`` will
            be raised.
        injection_sample_rate: float, optional
            The sample rate to generate the signal before injection

        Returns
        -------
        None

        Raises
        ------
        NotImplementedError
            If an ``inj_filter_rejector`` is provided.
        TypeError
            For invalid types of `strain`.
        """
        if inj_filter_rejector is not None:
            raise NotImplementedError("Ringdown injections do not support "
                                      "inj_filter_rejector")
        if strain.dtype not in (float32, float64):
            raise TypeError("Strain dtype must be float32 or float64, not " \
                    + str(strain.dtype))
        lalstrain = strain.lal()
        # pick lalsimulation injection function
        add_injection = injection_func_map[strain.dtype]
        delta_t = strain.delta_t
        if injection_sample_rate is not None:
            delta_t = 1.0 / injection_sample_rate
        injections = self.table
        if simulation_ids:
            injections = injections[list(simulation_ids)]
        # NOTE: unlike the CBC set, every injection is generated and added;
        # no segment-overlap pre-test is applied here
        for ii in range(injections.size):
            injection = injections[ii]
            signal = self.make_strain_from_inj_object(
                injection, delta_t, detector_name,
                distance_scale=distance_scale)
            signal = resample_to_delta_t(signal, strain.delta_t, method='ldas')
            signal = signal.astype(strain.dtype)
            signal_lal = signal.lal()
            add_injection(lalstrain, signal_lal, None)
        strain.data[:] = lalstrain.data.data[:]
    def make_strain_from_inj_object(self, inj, delta_t, detector_name,
                                    distance_scale=1):
        """Make a h(t) strain time-series from an injection object as read from
        an hdf file.

        Parameters
        -----------
        inj : injection object
            The injection object to turn into a strain h(t).
        delta_t : float
            Sample rate to make injection at.
        detector_name : string
            Name of the detector used for projecting injections.
        distance_scale: float, optional
            Factor to scale the distance of an injection with. The default (=1)
            is no scaling.

        Returns
        --------
        signal : TimeSeries
            h(t) corresponding to the injection.
        """
        # compute the waveform time series
        hp, hc = ringdown_td_approximants[inj['approximant']](
            inj, delta_t=delta_t, **self.extra_args)
        return projector(detector_name,
                         inj, hp, hc, distance_scale=distance_scale)
    def end_times(self):
        """Return the approximate end times of all injections.

        Currently, this just assumes all ringdowns are 2 seconds long.
        """
        # the start times are the tcs
        tcs = self.table.tc
        # FIXME: this could be figured out using the ringdown module
        return tcs + 2
    @staticmethod
    def supported_approximants():
        return list(waveform.ringdown_td_approximants.keys())
class IncoherentFromFileHDFInjectionSet(_HDFInjectionSet):
"""Manages injecting an arbitrary time series loaded from a file.
The injections must have the following attributes set:
* ``filename``: (str) the name of the file to load containing the time
series. The file type and format can be a frame file or anything
understood by :py:func:`pycbc.types.timeseries.load_timeseries`. If a
frame file (ends in ``.gwf``) is specified, a ``channel`` attribute must
also be set.
* ``DETECTOR_gps_time``: (float) The GPS time at which the time series
should be added to the ``DETECTOR`` data, where ``DETECTOR`` is the name
of the instrument to inject into (e.g., ``h1_gps_time``). **The time
series will only be injected into a detector if a GPS time is given for
that detector.** Set to -inf, nan, or do not provide a GPS time for a
particular detector if you do not want to inject into that detector.
* ``ref_point``: (str or float) What to use as the reference time of the
injected time series. The time series will be injected into the detector
such that the ``ref_point`` in the time series occurs at the specifed
``DETECTOR_gps_time``. Options are: ``'start'``, ``'end'``, ``'center'``,
``'absmax'``, or a float giving the number of seconds into the time
series.
In addition, the following attributes may optionally be provided:
* ``channel``: (str): If the filename points to a frame file, the channel
to load in that file. Must be provided for frame files.
* ``DETECTOR_phase_shift``: (float) Apply a phase shift to the time series
before adding it to detector ``DETECTOR``.
* ``DETECTOR_amp_scale``: (float) Scale the amplitude by the given amount
before adding it to detector ``DETECTOR``.
* ``slice_start``: (float) Slice the time series starting at
``ref_point + slice_start`` before injecting into the data. Measured in
seconds.
* ``slice_end``: (float) Slice the time series ending at
``ref_point + slice_end`` before injecting into the data. Measured in
seconds.
* ``left_taper_width``: (float) Taper the start of the time series (after
slicing) using half a kaiser window over the given number of seconds.
See `:py:func:waveform.utils.td_taper` for more details.
* ``right_taper_width``: (float) Taper the end of the time series (after
slicing) using half a kaiser window over the given number of seconds.
See `:py:func:waveform.utils.td_taper` for more details.
The signal will be resampled to the same sample rate as the data it is
being injected into.
In order to use with ``pycbc_create_injections``, set the ``approximant``
name to ``'incoherent_from_file'``.
"""
injtype = 'incoherent_from_file'
required_params = ('filename', 'ref_point')
_buffersize = 10
_buffer = None
_rtbuffer = None
    def end_times(self):
        # The injected series would have to be read from disk to know its
        # span, so this injection type cannot report end times cheaply.
        raise NotImplementedError("IncoherentFromFile times cannot be "
                                  "determined without loading time series")
@staticmethod
def supported_approximants():
return ['incoherent_from_file']
def loadts(self, inj):
"""Loads an injection time series.
After the first time a time series is loaded it will be added to an
internal buffer for faster in case another injection uses the same
series.
"""
if self._buffer is None:
# create the buffer
self._buffer = LimitedSizeDict(size_limit=self._buffersize)
try:
return self._buffer[inj.filename]
except KeyError:
pass
# not in buffer, so load
if inj.filename.endswith('.gwf'):
try:
channel = inj.channel
except AttributeError as _err:
# Py3.XX: uncomment the "from _err" when we drop 2.7
raise ValueError("Must provide a channel for "
"frame files") #from _err
ts = frame.read_frame(inj.filename, channel)
else:
ts = load_timeseries(inj.filename)
# cache
self._buffer[inj.filename] = ts
return ts
    def set_ref_time(self, inj, ts):
        """Sets t=0 of the given time series based on what the given
        injection's ``ref_point`` is.

        The series is modified in place (its ``_epoch`` is overridden).
        Computed epochs are cached per ``(filename, ref_point)`` pair.

        Raises
        ------
        ValueError
            If the injection has no ``ref_point`` or the ``ref_point``
            value is not recognized.
        """
        try:
            ref_point = inj.ref_point
        except AttributeError as _err:
            # Py3.XX: uncomment the "from _err" when we drop 2.7
            raise ValueError("Must provide a ref_point for {} injections"
                             .format(self.injtype)) #from _err
        # try to get from buffer
        if self._rtbuffer is None:
            self._rtbuffer = LimitedSizeDict(size_limit=self._buffersize)
        try:
            reftime = self._rtbuffer[inj.filename, ref_point]
        except KeyError:
            # compute the epoch offset for the requested reference point
            if ref_point == "start":
                reftime = 0.
            elif ref_point == "end":
                reftime = -len(ts)*ts.delta_t
            elif ref_point == "center":
                reftime = -len(ts)*ts.delta_t/2.
            elif ref_point == "absmax":
                # t=0 is placed at the sample with the largest |amplitude|
                reftime = -ts.abs_arg_max()*ts.delta_t
            elif isinstance(ref_point, (float, int)):
                # a numeric ref_point is an offset (in seconds) from the
                # start of the series
                reftime = -float(ref_point)
            else:
                raise ValueError("Unrecognized ref_point {} provided"
                                 .format(ref_point))
            self._rtbuffer[inj.filename, ref_point] = reftime
        ts._epoch = reftime
@staticmethod
def slice_and_taper(inj, ts):
"""Slices and tapers a timeseries based on the injection settings.
This assumes that ``set_ref_time`` has been applied to the timeseries
first. A copy of the time series will be returned even if no slicing
or tapering is done.
"""
try:
tstart = inj.slice_start
except AttributeError:
tstart = ts.start_time
try:
tend = inj.slice_end
except AttributeError:
tend = ts.end_time
ts = ts.time_slice(tstart, tend).copy()
# now taper
try:
twidth = inj.left_taper_width
except AttributeError:
twidth = 0
if twidth:
ts = wfutils.td_taper(ts, ts.start_time, ts.start_time+twidth,
side='left')
try:
twidth = inj.right_taper_width
except AttributeError:
twidth = 0
if twidth:
ts = wfutils.td_taper(ts, ts.end_time-twidth, ts.end_time,
side='right')
return ts
    def apply(self, strain, detector_name, distance_scale=1,
              injection_sample_rate=None, inj_filter_rejector=None):
        """Adds injections from the table to the given strain data.

        Each injection is loaded, its reference time is set, and it is
        injected into ``strain`` if its (GPS-shifted) time span overlaps
        the data. Injections with a missing or nan ``{detector}_gps_time``
        are skipped.
        """
        if inj_filter_rejector is not None:
            raise NotImplementedError("IncoherentFromFile injections do not "
                                      "support inj_filter_rejector")
        if injection_sample_rate is not None:
            delta_t = 1./injection_sample_rate
        else:
            delta_t = strain.delta_t
        injections = self.table
        for inj in injections:
            # Check if we should inject or not...
            # loading the time series like this is a bit brute-force, since
            # we really only need to know the delta_t and length of the
            # timeseries if the ref_point is anything but absmax, but that
            # would require adding logic to figure out how to get that metadata
            # based on the filetype and ref_point
            ts = self.loadts(inj)
            # set the ref time
            self.set_ref_time(inj, ts)
            # determine if we inject or not based on the times
            try:
                injtime = inj['{}_gps_time'.format(detector_name).lower()]
            except ValueError:
                # no gps time field for this detector: never overlaps
                injtime = -np.inf
            if np.isnan(injtime):
                # nan means don't inject
                injtime = -np.inf
            start_time = injtime + ts.start_time
            end_time = injtime + ts.end_time
            # inject only when the shifted series overlaps the data segment
            inject = (start_time < strain.end_time and
                      end_time > strain.start_time)
            if inject:
                ts = self.make_strain_from_inj_object(
                    inj, delta_t, detector_name,
                    distance_scale=distance_scale, ts=ts)
                if ts.delta_t != strain.delta_t:
                    # match the data's sample rate before injecting
                    ts = resample_to_delta_t(ts, strain.delta_t, method='ldas')
                strain.inject(ts, copy=False)
    def make_strain_from_inj_object(self, inj, delta_t, detector_name,
                                    distance_scale=1, ts=None):
        """Creates the detector-frame strain for a single injection.

        If ``ts`` is not provided, the time series is loaded from the
        injection's file and its reference time set. The series is then
        sliced/tapered, shifted to the injection's detector GPS time,
        resampled to ``delta_t``, and any per-detector phase shift and
        amplitude scale are applied.
        """
        if ts is None:
            ts = load_timeseries(inj.filename)
            self.set_ref_time(inj, ts)
        # slice and taper
        ts = self.slice_and_taper(inj, ts)
        # shift reference to the detector time
        ts._epoch += inj['{}_gps_time'.format(detector_name).lower()]
        # resample
        ts = resample_to_delta_t(ts, delta_t, method='ldas')
        # apply any phase shift
        try:
            phase_shift = inj[
                '{}_phase_shift'.format(detector_name).lower()]
        except ValueError:
            # field not provided: no phase shift
            phase_shift = 0
        if phase_shift:
            # apply as a constant phase rotation in the frequency domain
            fs = ts.to_frequencyseries()
            fs *= np.exp(1j*phase_shift)
            ts = fs.to_timeseries()
        # apply any scaling
        try:
            amp_scale = inj[
                '{}_amp_scale'.format(detector_name).lower()]
        except ValueError:
            # field not provided: no extra amplitude scaling
            amp_scale = 1.
        amp_scale /= distance_scale
        ts *= amp_scale
        return ts
# Map from the ``injtype`` attribute stored in an injection file to the
# HDFInjectionSet subclass that handles it.
hdfinjtypes = {
    CBCHDFInjectionSet.injtype: CBCHDFInjectionSet,
    RingdownHDFInjectionSet.injtype: RingdownHDFInjectionSet,
    IncoherentFromFileHDFInjectionSet.injtype:
        IncoherentFromFileHDFInjectionSet,
}
def get_hdf_injtype(sim_file):
    """Gets the HDFInjectionSet class to use with the given file.

    This looks for the ``injtype`` in the given file's top level ``attrs``.
    If that attribute isn't set, will default to
    :py:class:`CBCHDFInjectionSet`.

    Parameters
    ----------
    sim_file : str
        Name of the file. The file must already exist.

    Returns
    -------
    HDFInjectionSet :
        The type of HDFInjectionSet to use.
    """
    with h5py.File(sim_file, 'r') as fp:
        ftype = fp.attrs.get('injtype', CBCHDFInjectionSet.injtype)
    if ftype not in hdfinjtypes:
        # the attribute may have been stored as bytes instead of str;
        # decode and retry (the lookup below re-raises KeyError if the
        # type still isn't recognized)
        try:
            ftype = str(ftype.decode())
        except AttributeError:
            pass
    return hdfinjtypes[ftype]
def hdf_injtype_from_approximant(approximant):
    """Gets the HDFInjectionSet class to use with the given approximant.

    Parameters
    ----------
    approximant : str
        Name of the approximant.

    Returns
    -------
    HDFInjectionSet :
        The type of HDFInjectionSet to use.
    """
    matches = [cls for cls in hdfinjtypes.values()
               if approximant in cls.supported_approximants()]
    if not matches:
        raise ValueError("Injection file type unknown for approximant {}"
                         .format(approximant))
    # should several classes claim the same approximant, the last one
    # registered wins (same behavior as before)
    return matches[-1]
class InjectionSet(object):
    """Manages sets of injections and injects them into time series.

    Injections are read from either LIGOLW XML files or HDF files. This
    class simply dispatches to the appropriate handler based on the file
    extension and exposes that handler's interface.

    Parameters
    ----------
    sim_file : string
        Path to an hdf file or a LIGOLW XML file that contains a
        SimInspiralTable.
    \**kwds :
        The rest of the keyword arguments are passed to the waveform generation
        function when generating injections.

    Attributes
    ----------
    table
    """

    def __init__(self, sim_file, **kwds):
        # dispatch on the file extension: XML uses the legacy LIGOLW
        # handler, everything else is assumed to be HDF
        ext = os.path.basename(sim_file)
        if ext.endswith(('.xml', '.xml.gz', '.xmlgz')):
            self._injhandler = _XMLInjectionSet(sim_file, **kwds)
            self.indoc = self._injhandler.indoc
        else:
            # assume hdf file
            self._injhandler = get_hdf_injtype(sim_file)(sim_file, **kwds)
        # expose the handler's interface directly on this object
        self.table = self._injhandler.table
        self.extra_args = self._injhandler.extra_args
        self.apply = self._injhandler.apply
        self.make_strain_from_inj_object = \
            self._injhandler.make_strain_from_inj_object
        self.end_times = self._injhandler.end_times

    @staticmethod
    def write(filename, samples, write_params=None, static_args=None,
              injtype=None, **metadata):
        """Writes the injection samples to the given hdf file.

        Parameters
        ----------
        filename : str
            The name of the file to write to.
        samples : io.FieldArray
            FieldArray of parameters.
        write_params : list, optional
            Only write the given parameter names. All given names must be keys
            in ``samples``. Default is to write all parameters in ``samples``.
        static_args : dict, optional
            Dictionary mapping static parameter names to values. These are
            written to the ``attrs``.
        injtype : str, optional
            Specify which `HDFInjectionSet` class to use for writing. If not
            provided, will try to determine it by looking for an approximant in
            the ``static_args``, followed by the ``samples``.
        \**metadata :
            All other keyword arguments will be written to the file's attrs.
        """
        # DELETE the following "if" once xml is dropped
        ext = os.path.basename(filename)
        if ext.endswith(('.xml', '.xml.gz', '.xmlgz')):
            _XMLInjectionSet.write(filename, samples, write_params,
                                   static_args)
        else:
            # try determine the injtype if it isn't given
            if injtype is None:
                if static_args is not None and 'approximant' in static_args:
                    injcls = hdf_injtype_from_approximant(
                        static_args['approximant'])
                elif 'approximant' in samples.fieldnames:
                    apprxs = np.unique(samples['approximant'])
                    # make sure they all correspond to the same injection type
                    injcls = [hdf_injtype_from_approximant(a) for a in apprxs]
                    if not all(c == injcls[0] for c in injcls):
                        raise ValueError("injections must all be of the same "
                                         "type")
                    injcls = injcls[0]
                else:
                    raise ValueError("Could not find an approximant in the "
                                     "static args or samples to determine the "
                                     "injection type. Please specify an "
                                     "injtype instead.")
            else:
                injcls = hdfinjtypes[injtype]
            injcls.write(filename, samples, write_params, static_args,
                         **metadata)

    @staticmethod
    def from_cli(opt):
        """Return an instance of InjectionSet configured as specified
        on the command line.

        Returns None if ``opt.injection_file`` is not set.
        """
        if opt.injection_file is None:
            return None
        kwa = {}
        # only forward the reference/final frequencies if they were given
        if opt.injection_f_ref is not None:
            kwa['f_ref'] = opt.injection_f_ref
        if opt.injection_f_final is not None:
            kwa['f_final'] = opt.injection_f_final
        return InjectionSet(opt.injection_file, **kwa)
class SGBurstInjectionSet(object):
    """Manages sets of sine-Gaussian burst injections: reads injections
    from LIGOLW XML files and injects them into time series.

    Parameters
    ----------
    sim_file : string
        Path to a LIGOLW XML file containing a SimBurstTable
        with injection definitions.

    Attributes
    ----------
    indoc
    table
    """

    def __init__(self, sim_file, **kwds):
        self.indoc = ligolw_utils.load_filename(
            sim_file, False, contenthandler=LIGOLWContentHandler)
        self.table = lsctables.SimBurstTable.get_table(self.indoc)
        self.extra_args = kwds

    def apply(self, strain, detector_name, f_lower=None, distance_scale=1):
        """Add injections (as seen by a particular detector) to a time series.

        Parameters
        ----------
        strain : TimeSeries
            Time series to inject signals into, of type float32 or float64.
        detector_name : string
            Name of the detector used for projecting injections.
        f_lower : {None, float}, optional
            Low-frequency cutoff for injected signals. If None, use value
            provided by each injection.
        distance_scale: {1, foat}, optional
            Factor to scale the distance of an injection with. The default is
            no scaling.

        Returns
        -------
        None

        Raises
        ------
        TypeError
            For invalid types of `strain`.
        """
        if strain.dtype not in (float32, float64):
            raise TypeError("Strain dtype must be float32 or float64, not " \
                    + str(strain.dtype))
        lalstrain = strain.lal()
        #detector = Detector(detector_name)
        # pad the data segment by the earth light-crossing time so that
        # injections just outside the segment are still considered
        earth_travel_time = lal.REARTH_SI / lal.C_SI
        t0 = float(strain.start_time) - earth_travel_time
        t1 = float(strain.end_time) + earth_travel_time
        # pick lalsimulation injection function
        add_injection = injection_func_map[strain.dtype]
        for inj in self.table:
            # roughly estimate if the injection may overlap with the segment
            end_time = inj.time_geocent
            #CHECK: This is a hack (10.0s); replace with an accurate estimate
            inj_length = 10.0
            # NOTE(review): eccentricity and polarization are hard-coded to
            # zero rather than read from the injection row — confirm intent
            eccentricity = 0.0
            polarization = 0.0
            start_time = end_time - 2 * inj_length
            if end_time < t0 or start_time > t1:
                continue
            # compute the waveform time series
            hp, hc = sim.SimBurstSineGaussian(float(inj.q),
                float(inj.frequency),float(inj.hrss),float(eccentricity),
                float(polarization),float(strain.delta_t))
            hp = TimeSeries(hp.data.data[:], delta_t=hp.deltaT, epoch=hp.epoch)
            hc = TimeSeries(hc.data.data[:], delta_t=hc.deltaT, epoch=hc.epoch)
            hp._epoch += float(end_time)
            hc._epoch += float(end_time)
            if float(hp.start_time) > t1:
                continue
            # compute the detector response, taper it if requested
            # and add it to the strain
            # NOTE(review): the taper is applied to the full data `strain`
            # rather than the injection waveform, and only hp is injected
            # (hc is computed but unused) — verify against upstream intent
            strain = wfutils.taper_timeseries(strain, inj.taper)
            signal_lal = hp.astype(strain.dtype).lal()
            add_injection(lalstrain, signal_lal, None)
            strain.data[:] = lalstrain.data.data[:]
| 48,285
| 37.080442
| 79
|
py
|
pycbc
|
pycbc-master/examples/overlap.py
|
from pycbc.waveform import get_td_waveform
from pycbc.filter import match, overlap
from pycbc.psd import aLIGOZeroDetHighPower

# Example: compute the match and overlap between two waveform models.

# Buffer size in seconds. This is presumed to be
# longer than the longest waveform.
time_buffer = 4
f_low = 30
sample_rate = 4096

# Length of corresponding time series and frequency series
tlen = sample_rate * time_buffer
flen = tlen // 2 + 1
delta_t = 1.0 / sample_rate
delta_f = 1.0 / time_buffer

print("Generating waveform 1")
# use the delta_t defined above rather than repeating the literal
hp, hc = get_td_waveform(approximant="EOBNRv2",
                         mass1=10,
                         mass2=10,
                         f_lower=f_low,
                         delta_t=delta_t)
print("waveform is %s seconds long" % hp.duration)

print("Generating waveform 2")
sp, sc = get_td_waveform(approximant="TaylorT4",
                         mass1=10,
                         mass2=10,
                         f_lower=f_low,
                         delta_t=delta_t)
print("waveform is %s seconds long" % sp.duration)

# Ensure that the waveforms are resized to the same length
sp.resize(tlen)
hp.resize(tlen)

print("Calculating analytic PSD")
psd = aLIGOZeroDetHighPower(flen, delta_f, f_low)

print("Calculating match and overlap")
# Note: This takes a while the first time as an FFT plan is generated
# subsequent calls within the same program will be faster
m, i = match(hp, sp, psd=psd, low_frequency_cutoff=f_low)
o = overlap(hp, sp, psd=psd, low_frequency_cutoff=f_low)
print("Overlap %s" % o)
print("Maximized Overlap %s" % m)
| 1,520
| 28.823529
| 69
|
py
|
pycbc
|
pycbc-master/examples/detector/delay.py
|
from pycbc.detector import Detector
from astropy.utils import iers

# Disable IERS downloads so the example runs offline (e.g. for doc builds)
iers.conf.auto_download = False

# Sky location of the gravitational-wave source, in radians
right_ascension = 0.7
declination = -0.5

# Use Hanford as the reference detector; see the
# `time_delay_from_earth_center` method to reference geocentric time instead
dref = Detector("H1")

# GPS time (seconds) at which the GW passes
time = 100000000

# Report when the signal passes each detector relative to Hanford
msg = "GW passed through {} {} seconds relative to passing by Hanford"
for ifo in ["H1", "L1", "V1"]:
    delay = Detector(ifo).time_delay_from_detector(dref, right_ascension,
                                                   declination, time)
    print(msg.format(ifo, delay))
| 785
| 30.44
| 77
|
py
|
pycbc
|
pycbc-master/examples/detector/ant.py
|
from pycbc.detector import Detector
from pycbc.waveform import get_td_waveform

# Time, orientation and location of the source in the sky
ra = 1.7
dec = 1.7
pol = 0.2
inc = 0
time = 1000000000

# Calculate the antenna pattern for Hanford at the given sky location
det = Detector("H1")

# fp and fc are the plus and cross antenna-pattern weights
fp, fc = det.antenna_pattern(ra, dec, pol, time)
print("fp={}, fc={}".format(fp, fc))

# These factors allow a signal to be projected into what the detector
# would observe

## Generate a waveform
hp, hc = get_td_waveform(approximant="IMRPhenomD", mass1=10, mass2=10,
                         f_lower=30, delta_t=1.0/4096, inclination=inc,
                         distance=400)

## Apply the factors to get the detector frame strain
ht = fp * hp + fc * hc

# The projection process can also take the rotation of the earth into
# account using the project_wave function
hp.start_time = hc.start_time = time
ht = det.project_wave(hp, hc, ra, dec, pol)
| 997
| 27.514286
| 73
|
py
|
pycbc
|
pycbc-master/examples/detector/loc.py
|
from pycbc.detector import Detector, get_available_detectors

# List the available detectors: abbreviation plus location. Note that some
# entries are not physical detectors but may be useful for testing or
# study purposes.
for abv in get_available_detectors():
    det = Detector(abv)
    # units are radians
    print("{} Latitude {} Longitude {}".format(abv,
                                               det.latitude,
                                               det.longitude))
| 550
| 38.357143
| 78
|
py
|
pycbc
|
pycbc-master/examples/detector/travel.py
|
from pycbc.detector import Detector

# Light travel time between every pair of detectors
ifos = ['H1', 'L1', 'V1']
for ifo1 in ifos:
    for ifo2 in ifos:
        dt = Detector(ifo1).light_travel_time_to_detector(Detector(ifo2))
        print("Direct Time from {} to {} is {} seconds".format(ifo1, ifo2, dt))
| 259
| 36.142857
| 79
|
py
|
pycbc
|
pycbc-master/examples/psd/estimate.py
|
import matplotlib.pyplot as pp
import pycbc.noise
import pycbc.psd

# Model PSD: advanced LIGO, zero-detuning, high-power
flow = 30.0
delta_f = 1.0 / 16
flen = int(2048 / delta_f) + 1
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow)

### Generate 128 seconds of noise at 4096 Hz
delta_t = 1.0 / 4096
tsamples = int(128 / delta_t)
ts = pycbc.noise.noise_from_psd(tsamples, delta_t, psd, seed=127)

# Estimate the PSD using Welch's method: 4 s segments, 50% overlap
seg_len = int(4 / delta_t)
seg_stride = seg_len // 2
estimated_psd = pycbc.psd.welch(ts, seg_len=seg_len, seg_stride=seg_stride)

# Compare the estimate against the known model
pp.loglog(estimated_psd.sample_frequencies, estimated_psd, label='estimate')
pp.loglog(psd.sample_frequencies, psd, linewidth=3, label='known psd')
pp.xlim(xmin=flow, xmax=2000)
pp.ylim(1e-48, 1e-45)
pp.legend()
pp.grid()
pp.show()
| 884
| 26.65625
| 76
|
py
|
pycbc
|
pycbc-master/examples/psd/analytic.py
|
import matplotlib.pyplot as pp
import pycbc.psd

# Show which analytic PSD models are available
print(pycbc.psd.get_lalsim_psd_list())

delta_f = 1.0 / 4
flen = int(1024 / delta_f)
low_frequency_cutoff = 30.0

# A PSD can be generated by calling the model function directly...
p1 = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, low_frequency_cutoff)

# ...or by giving the model name as a string
p2 = pycbc.psd.from_string('aLIGOZeroDetLowPower', flen, delta_f,
                           low_frequency_cutoff)

pp.plot(p1.sample_frequencies, p1, label='HighPower')
pp.plot(p2.sample_frequencies, p2, label='LowPower')
pp.legend()
pp.show()
| 576
| 25.227273
| 87
|
py
|
pycbc
|
pycbc-master/examples/psd/read.py
|
import matplotlib.pyplot as pp
import pycbc.psd
import pycbc.types

filename = 'example_psd.txt'

# Reading with from_txt interpolates the PSD onto the requested frequency
# spacing
delta_f = 1.0 / 4
length = int(1024 / delta_f)
low_frequency_cutoff = 30.0
psd = pycbc.psd.from_txt(filename, length, delta_f,
                         low_frequency_cutoff, is_asd_file=False)
pp.loglog(psd.sample_frequencies, psd, label='interpolated')

# load_frequencyseries reads the file in without modification
psd = pycbc.types.load_frequencyseries('./example_psd.txt')
pp.loglog(psd.sample_frequencies, psd, label='raw')

pp.xlim(xmin=30, xmax=1000)
pp.legend()
pp.xlabel('Hz')
pp.show()

# Save a psd to file; several formats are supported (.txt, .hdf, .npy)
psd.save('tmp_psd.txt')
| 742
| 26.518519
| 70
|
py
|
pycbc
|
pycbc-master/examples/distributions/spin_spatial_distr_example.py
|
import numpy
import matplotlib.pyplot as plt
import pycbc.coordinates as co
from pycbc import distributions

# Number of random samples to draw (the original comments said 50000 but
# the code actually drew 10000; the count is now defined in one place)
nsamples = 10000

# We can choose any bounds between 0 and pi for this distribution but in units
# of pi so we use between 0 and 1.
theta_low = 0.
theta_high = 1.

# Units of pi for the bounds of the azimuthal angle which goes from 0 to 2 pi.
phi_low = 0.
phi_high = 2.

# Create a distribution object from distributions.py
# Here we are using the Uniform Solid Angle function which takes
# theta = polar_bounds(theta_lower_bound to a theta_upper_bound), and then
# phi = azimuthal_bound(phi_lower_bound to a phi_upper_bound).
uniform_solid_angle_distribution = distributions.UniformSolidAngle(
    polar_bounds=(theta_low, theta_high),
    azimuthal_bounds=(phi_low, phi_high))

# Take random variable samples from that distribution.
solid_angle_samples = uniform_solid_angle_distribution.rvs(size=nsamples)

# Unit spin magnitudes: the solid angle only covers 2 dimensions and a 3rd
# is needed for the 3D plot below. numpy.ones replaces the original
# element-by-element fill loop.
spin_mag = numpy.ones(nsamples, dtype=float)

# Use pycbc.coordinates as co. Use spherical_to_cartesian function to
# convert from spherical polar coordinates to cartesian coordinates.
spinx, spiny, spinz = co.spherical_to_cartesian(spin_mag,
                                                solid_angle_samples['phi'],
                                                solid_angle_samples['theta'])

# Plot the spherical distribution of spins to make sure that we
# distributed across the surface of a sphere.
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(spinx, spiny, spinz, s=1)
ax.set_xlabel('Spin X Axis')
ax.set_ylabel('Spin Y Axis')
ax.set_zlabel('Spin Z Axis')
plt.show()
| 1,933
| 36.921569
| 78
|
py
|
pycbc
|
pycbc-master/examples/distributions/mchirp_q_from_uniform_m1m2_example.py
|
import matplotlib.pyplot as plt
from pycbc import distributions
from pycbc import conversions
import numpy as np

# Chirp-mass distribution corresponding to uniform component masses
minmc = 5
maxmc = 60
mc_distribution = distributions.MchirpfromUniformMass1Mass2(mc=(minmc, maxmc))

# Mass ratio in a symmetric range [min, 1/min] so that mass1 and mass2
# are symmetric
minq = 1/4
maxq = 1/minq
q_distribution = distributions.QfromUniformMass1Mass2(q=(minq, maxq))

# Draw random samples from both distributions
n_size = 100000
mc_samples = mc_distribution.rvs(size=n_size)
q_samples = q_distribution.rvs(size=n_size)

# Convert (chirp mass, mass ratio) back to component masses
m1 = conversions.mass1_from_mchirp_q(mc_samples['mc'], q_samples['q'])
m2 = conversions.mass2_from_mchirp_q(mc_samples['mc'], q_samples['q'])

# Analytic 1D marginal PDFs to compare the histograms against
n_bins = 200
xq = np.linspace(minq, maxq, 100)
yq = ((1 + xq) / (xq**3))**(2/5)
xmc = np.linspace(minmc, maxmc, 100)
ymc = xmc

plt.figure(figsize=(10, 10))

# 2D histogram in (chirp mass, mass ratio)
plt.subplot(221)
plt.hist2d(mc_samples['mc'], q_samples['q'], bins=n_bins, cmap='Blues')
plt.xlabel('chirp mass')
plt.ylabel('mass ratio')
plt.colorbar(fraction=.05, pad=0.05, label='number of samples')

# 2D histogram in component masses
plt.subplot(222)
plt.hist2d(m1, m2, bins=n_bins, cmap='Blues')
plt.xlabel('mass1')
plt.ylabel('mass2')
plt.colorbar(fraction=.05, pad=0.05, label='number of samples')

# 1D chirp-mass marginal vs the analytic expectation
plt.subplot(223)
plt.hist(mc_samples['mc'], density=True, bins=100, label='samples')
plt.plot(xmc, ymc*mc_distribution.norm, label='$P(M_c)\propto M_c$')
plt.xlabel('chirp mass')
plt.ylabel('PDF')
plt.legend()

# 1D mass-ratio marginal vs the analytic expectation
plt.subplot(224)
plt.hist(q_samples['q'], density=True, bins=n_bins, label='samples')
plt.plot(xq, yq*q_distribution.norm, label='$P(q)\propto((1+q)/q^3)^{2/5}$')
plt.xlabel('mass ratio')
plt.ylabel('PDF')
plt.legend()

plt.tight_layout()
plt.show()
| 1,986
| 30.046875
| 77
|
py
|
pycbc
|
pycbc-master/examples/distributions/sampling_from_config_example.py
|
import numpy as np
import matplotlib.pyplot as plt
from pycbc.distributions.utils import draw_samples_from_config

# A path to the .ini file.
CONFIG_PATH = "./pycbc_bbh_prior.ini"
random_seed = np.random.randint(low=0, high=2**32-1)

# Draw a single sample and inspect it.
sample = draw_samples_from_config(
    path=CONFIG_PATH, num=1, seed=random_seed)

# All parameters.
print(sample.fieldnames)
print(sample)
# A single parameter, for example 'mass1'.
print(sample[0]['mass1'])

# Draw 1000000 samples and histogram a selection of parameters.
n_bins = 50
samples = draw_samples_from_config(
    path=CONFIG_PATH, num=1000000, seed=random_seed)

fig, axes = plt.subplots(nrows=3, ncols=2)
params = ['srcmass1', 'srcmass2', 'comoving_volume',
          'redshift', 'distance', 'mass1']
titles = ['srcmass1', 'srcmass2', 'comoving_volume',
          'redshift', 'distance', 'mass1 or mass2']
for ax, param, title in zip(axes.flat, params, titles):
    ax.hist(samples[:][param], bins=n_bins)
    ax.set_title(title)
plt.tight_layout()
plt.show()
| 1,236
| 27.113636
| 69
|
py
|
pycbc
|
pycbc-master/examples/distributions/mass_examples.py
|
import matplotlib.pyplot as plt
from pycbc import distributions

# A mass distribution uniform between 0.5 and 1.5 solar masses,
# sampled 1000000 times.
mass1_distribution = distributions.Uniform(mass1=(0.5, 1.5))
mass1_samples = mass1_distribution.rvs(size=1000000)

# A Gaussian distribution truncated to [0.5, 1.5] solar masses with a mean
# of 1.2 solar masses and a standard deviation of 0.15 solar masses.
# Gaussian takes the variance as an input so square the standard deviation.
variance = 0.15*0.15
mass2_gaussian = distributions.Gaussian(mass2=(0.5, 1.5), mass2_mean=1.2,
                                        mass2_var=variance)
mass2_samples = mass2_gaussian.rvs(size=1000000)

# Distributions over several parameters can also be made jointly,
# instead of one at a time.
two_mass_distributions = distributions.Uniform(mass3=(1.6, 3.0),
                                               mass4=(1.6, 3.0))
two_mass_samples = two_mass_distributions.rvs(size=1000000)

# Histogram each sample set in its own subplot, 50 bins each.
n_bins = 50
fig, axes = plt.subplots(nrows=2, ncols=2)
ax0, ax1, ax2, ax3, = axes.flat
ax0.hist(mass1_samples['mass1'], bins=n_bins)
ax1.hist(mass2_samples['mass2'], bins=n_bins)
ax2.hist(two_mass_samples['mass3'], bins=n_bins)
ax3.hist(two_mass_samples['mass4'], bins=n_bins)
ax0.set_title('Mass 1 samples')
ax1.set_title('Mass 2 samples')
ax2.set_title('Mass 3 samples')
ax3.set_title('Mass 4 samples')
plt.tight_layout()
plt.show()
| 1,651
| 35.711111
| 77
|
py
|
pycbc
|
pycbc-master/examples/distributions/list_distributions.py
|
from pycbc import distributions

# Print the names of all registered distributions
print(distributions.distribs.keys())
| 100
| 24.25
| 36
|
py
|
pycbc
|
pycbc-master/examples/distributions/spin_examples.py
|
import matplotlib.pyplot as plt
import numpy
import pycbc.coordinates as co
from pycbc import distributions

# Number of random samples to draw (defined once instead of being
# repeated as a literal in three places)
nsamples = 500000

# We can choose any bounds between 0 and pi for this distribution but in
# units of pi so we use between 0 and 1
theta_low = 0.
theta_high = 1.

# Units of pi for the bounds of the azimuthal angle which goes from 0 to 2 pi
phi_low = 0.
phi_high = 2.

# Create a distribution object from distributions.py. Here we are using the
# Uniform Solid Angle function which takes
# theta = polar_bounds(theta_lower_bound to a theta_upper_bound), and then
# phi = azimuthal_ bound(phi_lower_bound to a phi_upper_bound).
uniform_solid_angle_distribution = distributions.UniformSolidAngle(
    polar_bounds=(theta_low, theta_high),
    azimuthal_bounds=(phi_low, phi_high))

# Now we can take random variable samples from that distribution.
solid_angle_samples = uniform_solid_angle_distribution.rvs(size=nsamples)

# Spins with unit length for the coordinate transformation below.
# numpy.ones replaces the original element-by-element fill loop.
spin_mag = numpy.ones(nsamples, dtype=float)

# Use the pycbc.coordinates as co spherical_to_cartesian function to convert
# from spherical polar coordinates to cartesian coordinates
spinx, spiny, spinz = co.spherical_to_cartesian(spin_mag,
                                                solid_angle_samples['phi'],
                                                solid_angle_samples['theta'])

# Histogram each cartesian spin component, 50 bins each.
n_bins = 50
plt.figure(figsize=(10, 10))
plt.subplot(2, 2, 1)
plt.hist(spinx, bins=n_bins)
plt.title('Spin x samples')
plt.subplot(2, 2, 2)
plt.hist(spiny, bins=n_bins)
plt.title('Spin y samples')
plt.subplot(2, 2, 3)
plt.hist(spinz, bins=n_bins)
plt.title('Spin z samples')
plt.tight_layout()
plt.show()
| 1,894
| 32.245614
| 78
|
py
|
pycbc
|
pycbc-master/examples/catalog/data.py
|
import matplotlib.pyplot as pp
import pycbc.catalog

merger = pycbc.catalog.Merger("GW170817", source='gwtc-1')

fig, axs = pp.subplots(2, 1, sharex=True, sharey=True)
for ifo, ax in zip(["L1", "H1"], axs):
    pp.sca(ax)
    pp.title(ifo)
    # Retrieve data around the BNS merger
    data = merger.strain(ifo).time_slice(merger.time - 15, merger.time + 6)

    # Whiten the data with a 4s filter
    whitened = data.whiten(4, 4)

    # Q-transform spectrogram of the whitened data
    times, freqs, power = whitened.qtransform(.01, logfsteps=200,
                                              qrange=(110, 110),
                                              frange=(20, 512))
    pp.pcolormesh(times, freqs, power**0.5, vmax=5)
    pp.yscale('log')

pp.ylabel("Frequency (Hz)")
pp.xlabel("Time (s)")
pp.show()
| 716
| 26.576923
| 62
|
py
|
pycbc
|
pycbc-master/examples/catalog/stat.py
|
import matplotlib.pyplot as pp
import pycbc.catalog

catalog = pycbc.catalog.Catalog(source='gwtc-2')

# Median chirp mass (with errors) and effective spin for each event
mchirp, elow, ehigh = catalog.median1d('mchirp', return_errors=True)
spin = catalog.median1d('chi_eff')

pp.errorbar(mchirp, spin, xerr=[-elow, ehigh], fmt='o', markersize=7)
pp.xlabel('Chirp Mass')
pp.xscale('log')
pp.ylabel('Effective Spin')
pp.show()
| 339
| 23.285714
| 69
|
py
|
pycbc
|
pycbc-master/examples/catalog/what.py
|
import pycbc.catalog

catalog = pycbc.catalog.Catalog(source='gwtc-2')

# Names of mergers in the catalog
print(catalog.names)

# Approximate GPS time of each merger
print([catalog[name].time for name in catalog])
| 184
| 17.5
| 42
|
py
|
pycbc
|
pycbc-master/examples/multi_inspiral/check_faceon_faceaway_trigs.py
|
#!/usr/bin/env python
# Read a pycbc_multi_inspiral HDF5 trigger file and check that it contains
# triggers compatible with mock GW170817-like injections
# 2022 Andrew Williamson, Tito Dal Canton
import sys
import logging
import h5py
import numpy as np
from pycbc import init_logging

init_logging(True)
gw170817_time = 1187008882
end_times = (np.arange(3) - 1) * 300 + gw170817_time
pols = ['standard', 'left', 'right', 'left+right']
refs = {
    'standard': np.array([38.8, 18.4, 39.4]),
    'left': np.array([23.5, 17.0, 38.9]),
    'right': np.array([38.1, 17.1, 24.3]),
    'left+right': np.array([38.1, 17.1, 38.9])
}
status = 0
for pol in pols:
    with h5py.File(pol + '.hdf', 'r') as f:
        snrs = [f['network/end_time_gc'][:], f['network/coherent_snr'][:]]
        # search for compatible trigs.
        # BUGFIX: the original called np.logical_and(a, b, c) with three
        # conditions; the third positional argument of np.logical_and is
        # the `out` array, so the SNR upper-bound condition was silently
        # dropped. Chain the conditions with & instead.
        mask = ((abs(end_times - snrs[0]) < 0.1)
                & (snrs[1] > 0.9 * refs[pol])
                & (snrs[1] < 1.1 * refs[pol]))
        n = mask.sum()
        result = 'PASS' if n == 3 else 'FAIL'
        if result == 'FAIL':
            status = 1
    logging.info('"%s" polarization: %s (%d/3 triggers found)', pol, result, n)
sys.exit(status)
| 1,158
| 29.5
| 79
|
py
|
pycbc
|
pycbc-master/examples/multi_inspiral/check_gw170817_trigs.py
|
#!/usr/bin/env python
# Read a pycbc_multi_inspiral HDF5 trigger file and check that it contains
# triggers compatible with mock GW170817-like injections
# 2022 Andrew Williamson, Tito Dal Canton
import sys
import logging
import h5py
import numpy as np
from pycbc import init_logging

init_logging(True)
gw170817_time = 1187008882.43
status = 0
with h5py.File('GW170817_test_output.hdf', 'r') as f:
    snrs = [
        f['network/end_time_gc'][:],
        f['network/coherent_snr'][:],
        f['network/reweighted_snr'][:],
        f['network/slide_id'][:]]
    # A compatible trigger must be close to the expected time, loud in
    # both coherent and reweighted SNR, and in the zerolag (slide_id == 0)
    mask = (
        (abs(gw170817_time - snrs[0]) < 0.1)
        & (snrs[1] > 25)
        & (snrs[2] > 25)
        & (snrs[3] == 0)
    )
    n = mask.sum()
    if n > 0:
        result, status = 'PASS', 0
    else:
        result, status = 'FAIL', 1
    logging.info(
        '%s: GW170817 found with coherent SNR = %.2f; reweighted SNR %.2f', result,
        snrs[1][mask], snrs[2][mask])
sys.exit(status)
| 957
| 23.564103
| 79
|
py
|
pycbc
|
pycbc-master/examples/filter/fir.py
|
# Apply an FIR filter. The algorithm is written for high performance so if you
# have a large number of taps, it will resort to a FFT based implementation
# under the hood.
import pycbc.types
import pycbc.filter.resample

# Reference time series
ts = pycbc.types.TimeSeries([-1, 1, -1, 1, -1], delta_t=1.0)

# The filter coefficients; a plain numpy array also works
coeff = pycbc.types.Array([1.0, 0, 1.0])

# Standard (causal) FIR filtering
ts_filtered = pycbc.filter.resample.lfilter(coeff, ts)

# For a zero-phase filter, provide a symmetric set of coefficients;
# the time delay will be compensated for.
ts_filtered2 = pycbc.filter.resample.fir_zero_filter(coeff, ts)
| 624
| 30.25
| 81
|
py
|
pycbc
|
pycbc-master/examples/filter/snr.py
|
import matplotlib.pyplot as pp
import pycbc.noise
import pycbc.psd
import pycbc.filter
import pycbc.waveform

# Model PSD: advanced LIGO, zero-detuning, high-power
flow = 30.0
delta_f = 1.0 / 16
flen = int(2048 / delta_f) + 1
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow)

# 16 seconds of simulated noise at 4096 Hz
delta_t = 1.0 / 4096
tsamples = int(16 / delta_t)
strain = pycbc.noise.noise_from_psd(tsamples, delta_t, psd, seed=127)
stilde = strain.to_frequencyseries()

# Template waveform to use as the matched filter
hp, hc = pycbc.waveform.get_fd_waveform(approximant='IMRPhenomD',
                                        mass1=25, mass2=25,
                                        f_lower=flow, delta_f=stilde.delta_f)
hp.resize(len(stilde))

# Matched-filter SNR time series
snr = pycbc.filter.matched_filter(hp, stilde, psd=psd,
                                  low_frequency_cutoff=flow)

pp.plot(snr.sample_times, abs(snr))
pp.ylabel('signal-to-noise ratio')
pp.xlabel('time (s)')
pp.show()
| 949
| 26.941176
| 69
|
py
|
pycbc
|
pycbc-master/examples/filter/chisq.py
|
"""This example shows how to calculate the chi^2 discriminator described in
https://arxiv.org/abs/gr-qc/0405045, also known as the "power chi^2" or "Allen
chi^2" discriminator.
"""
import matplotlib.pyplot as pp
import pycbc.noise
import pycbc.psd
import pycbc.waveform
import pycbc.vetoes
# Generate some noise with an advanced ligo psd
flow = 30.0
delta_f = 1.0 / 16
flen = int(2048 / delta_f) + 1
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow)
# Generate 16 seconds of noise at 4096 Hz
delta_t = 1.0 / 4096
tsamples = int(16 / delta_t)
strain = pycbc.noise.noise_from_psd(tsamples, delta_t, psd, seed=127)
stilde = strain.to_frequencyseries()
# Calculate the power chisq time series
hp, hc = pycbc.waveform.get_fd_waveform(approximant='IMRPhenomD',
mass1=25, mass2=25,
f_lower=flow, delta_f=stilde.delta_f)
hp.resize(len(stilde))
num_bins = 16
chisq = pycbc.vetoes.power_chisq(hp, stilde, num_bins, psd,
low_frequency_cutoff=flow)
# convert to a reduced chisq
chisq /= (num_bins * 2) - 2
pp.plot(chisq.sample_times, chisq)
pp.ylabel('$\chi^2_r$')
pp.xlabel('time (s)')
pp.show()
| 1,198
| 27.547619
| 78
|
py
|
pycbc
|
pycbc-master/examples/filter/pass.py
|
import matplotlib.pyplot as pp
import pycbc.noise
import pycbc.psd
import pycbc.filter
# Generate some noise with an advanced ligo psd
flow = 5.0
delta_f = 1.0 / 16
flen = int(2048 / delta_f) + 1
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow)
# Generate 1 seconds of noise at 4096 Hz
delta_t = 1.0 / 4096
tsamples = int(1 / delta_t)
ts = pycbc.noise.noise_from_psd(tsamples, delta_t, psd, seed=127)
pp.plot(ts.sample_times, ts, label='Original')
# Suppress the low frequencies below 30 Hz
ts = pycbc.filter.highpass(ts, 30.0)
pp.plot(ts.sample_times, ts, label='Highpassed')
# Suppress the high frequencies
ts = pycbc.filter.lowpass_fir(ts, 1000.0, 8)
pp.plot(ts.sample_times, ts, label='Highpassed + Lowpassed')
pp.legend()
pp.ylabel('Strain')
pp.xlabel('Time (s)')
pp.show()
| 795
| 24.677419
| 65
|
py
|
pycbc
|
pycbc-master/examples/gw150914/audio.py
|
from pycbc.frame import read_frame
from pycbc.filter import highpass_fir, lowpass_fir
from pycbc.psd import welch, interpolate
from pycbc.types import TimeSeries
try:
from urllib.request import urlretrieve
except ImportError: # python < 3
from urllib import urlretrieve
# Read data and remove low frequency content
fname = 'H-H1_LOSC_4_V2-1126259446-32.gwf'
url = "https://www.gwosc.org/GW150914data/" + fname
urlretrieve(url, filename=fname)
h1 = highpass_fir(read_frame(fname, 'H1:LOSC-STRAIN'), 15.0, 8)
# Calculate the noise spectrum and whiten
psd = interpolate(welch(h1), 1.0 / 32)
white_strain = (h1.to_frequencyseries() / psd ** 0.5 * psd.delta_f).to_timeseries()
# remove some of the high and low frequencies
smooth = highpass_fir(white_strain, 25, 8)
smooth = lowpass_fir(white_strain, 250, 8)
#strech out and shift the frequency upwards to aid human hearing
fdata = smooth.to_frequencyseries()
fdata.roll(int(1200 / fdata.delta_f))
smooth = TimeSeries(fdata.to_timeseries(), delta_t=1.0/1024)
#Take slice around signal
smooth = smooth[len(smooth)//2 - 1500:len(smooth)//2 + 3000]
smooth.save_to_wav('gw150914_h1_chirp.wav')
| 1,150
| 33.878788
| 83
|
py
|
pycbc
|
pycbc-master/examples/gw150914/gw150914_h1_snr.py
|
import matplotlib.pyplot as pp
from urllib.request import urlretrieve
from pycbc.frame import read_frame
from pycbc.filter import highpass_fir, matched_filter
from pycbc.waveform import get_fd_waveform
from pycbc.psd import welch, interpolate
# Read data and remove low frequency content
fname = 'H-H1_LOSC_4_V2-1126259446-32.gwf'
url = "https://www.gwosc.org/GW150914data/" + fname
urlretrieve(url, filename=fname)
h1 = read_frame('H-H1_LOSC_4_V2-1126259446-32.gwf', 'H1:LOSC-STRAIN')
h1 = highpass_fir(h1, 15, 8)
# Calculate the noise spectrum
psd = interpolate(welch(h1), 1.0 / h1.duration)
# Generate a template to filter with
hp, hc = get_fd_waveform(approximant="IMRPhenomD", mass1=40, mass2=32,
f_lower=20, delta_f=1.0/h1.duration)
hp.resize(len(h1) // 2 + 1)
# Calculate the complex (two-phase SNR)
snr = matched_filter(hp, h1, psd=psd, low_frequency_cutoff=20.0)
# Remove regions corrupted by filter wraparound
snr = snr[len(snr) // 4: len(snr) * 3 // 4]
pp.plot(snr.sample_times, abs(snr))
pp.ylabel('signal-to-noise')
pp.xlabel('GPS Time (s)')
pp.show()
| 1,097
| 31.294118
| 70
|
py
|
pycbc
|
pycbc-master/examples/gw150914/gw150914_shape.py
|
import matplotlib.pyplot as pp
from pycbc.filter import highpass_fir, lowpass_fir
from pycbc.psd import welch, interpolate
from pycbc.catalog import Merger
for ifo in ['H1', 'L1']:
# Read data and remove low frequency content
h1 = Merger("GW150914").strain(ifo)
h1 = highpass_fir(h1, 15, 8)
# Calculate the noise spectrum
psd = interpolate(welch(h1), 1.0 / h1.duration)
# whiten
white_strain = (h1.to_frequencyseries() / psd ** 0.5).to_timeseries()
# remove some of the high and low
smooth = highpass_fir(white_strain, 35, 8)
smooth = lowpass_fir(smooth, 300, 8)
# time shift and flip L1
if ifo == 'L1':
smooth *= -1
smooth.roll(int(.007 / smooth.delta_t))
pp.plot(smooth.sample_times, smooth, label=ifo)
pp.legend()
pp.xlim(1126259462.21, 1126259462.45)
pp.ylim(-150, 150)
pp.ylabel('Smoothed-Whitened Strain')
pp.grid()
pp.xlabel('GPS Time (s)')
pp.show()
| 934
| 24.972222
| 73
|
py
|
pycbc
|
pycbc-master/examples/dataquality/hwinj.py
|
"""This example shows how to determine when a CBC hardware injection is present
in the data from a detector.
"""
import matplotlib.pyplot as pp
from pycbc import dq
start_time = 1126051217
end_time = start_time + 10000000
# Get times that the Livingston detector has CBC injections into the data
segs = dq.query_flag('L1', 'CBC_HW_INJ', start_time, end_time)
pp.figure(figsize=[10, 2])
for seg in segs:
start, end = seg
pp.axvspan(start, end, color='blue')
pp.xlabel('Time (s)')
pp.show()
| 504
| 20.956522
| 79
|
py
|
pycbc
|
pycbc-master/examples/dataquality/on.py
|
"""This example shows how to determine when a detector is active."""
import matplotlib.pyplot as pp
from pycbc import dq
from pycbc.results import ifo_color
start_time = 1126051217
end_time = start_time + 100000
# Get times that the Hanford detector has data
hsegs = dq.query_flag('H1', 'DATA', start_time, end_time)
# Get times that the Livingston detector has data
lsegs = dq.query_flag('L1', 'DATA', start_time, end_time)
pp.figure(figsize=[10,2])
for seg in lsegs:
start, end = seg
pp.axvspan(start, end, color=ifo_color('L1'), ymin=0.1, ymax=0.4)
for seg in hsegs:
start, end = seg
pp.axvspan(start, end, color=ifo_color('H1'), ymin=0.6, ymax=0.9)
pp.xlabel('Time (s)')
pp.show()
| 709
| 24.357143
| 69
|
py
|
pycbc
|
pycbc-master/examples/live/make_singles_fits_file.py
|
"""
This script will make a valid singles-fits file for use in the
pycbc_live CI tests. It doesn't have much physical meaning,
but will give broadly representative numbers for singles.
"""
import h5py
import numpy as np
f = h5py.File('single_trigger_fits.hdf','w')
# Some numbers to design the output
# These are loosely based on the O3a trigger fits file
n_days = 30
n_bins = 5
max_duration = 150.
min_duration = 6.
duty_cycle = 0.7
alpha = 4.
daily_counts_per_bin = 500.
# Set attributes to be something appropriate
f.attrs['start_date'] = 0
f.attrs['end_date'] = n_days
f.attrs['fit_threshold'] = 6.5
f['bins_edges'] = np.logspace(np.log10(min_duration),
np.log10(max_duration),
n_bins + 1)
# Create the fits groups for each ifo
for ifo in ['H1', 'L1', 'V1']:
f.create_group(ifo)
ifo_group = f[ifo]
# Group attributes are set sensibly
ifo_group.attrs['live_time'] = np.round(n_days * 0.7 * 86400.)
ifo_group.attrs['mean_alpha'] = alpha
ifo_group.attrs['total_counts'] = daily_counts_per_bin * n_days * n_bins
# Make daily fits datasets, this doesn't atually matter for the test, but
# means that the file format matches in case of future requirements
ifo_group.create_group('daily_fits')
daily_group = ifo_group['daily_fits']
for bin_no in range(n_bins):
key = "bin_%d" % bin_no
daily_group.create_group(key)
bin_group = daily_group[key]
bin_group['date'] = np.arange(n_days)
bin_group['counts'] = np.array([daily_counts_per_bin] * n_days)
bin_group['fit_coeff'] = np.array([alpha] * n_days)
# Again, not used for current tests, but keeps file format the same
ifo_group.create_group('fixed')
fixed_group = ifo_group['fixed']
fixed_group['fit_coeff'] = np.array([0.] * n_bins)
fixed_group['counts'] = np.array([1.] * n_bins)
# As the daily fit are all the same, the mean and 95th %ile values
# are the same
for k in ['conservative', 'mean']:
ifo_group.create_group(k)
k_group = ifo_group[k]
k_group['fit_coeff'] = np.array([alpha] * n_bins)
k_group['counts'] = np.array([daily_counts_per_bin * n_days] * n_bins)
| 2,238
| 33.984375
| 78
|
py
|
pycbc
|
pycbc-master/examples/live/check_results.py
|
#!/usr/bin/env python
import sys
import argparse
import glob
import logging as log
import numpy as np
import h5py
import pycbc
from pycbc.io import FieldArray
from pycbc.io.ligolw import LIGOLWContentHandler
from ligo.lw.utils import load_filename as load_xml_doc
from ligo.lw import lsctables
from pycbc import conversions as conv
def close(a, b, c):
return abs(a - b) <= c
def check_single_results(args):
single_fail = False
with h5py.File(args.bank, 'r') as bankf:
temp_mass1 = bankf['mass1'][:]
temp_mass2 = bankf['mass2'][:]
temp_s1z = bankf['spin1z'][:]
temp_s2z = bankf['spin2z'][:]
detectors = set(args.detectors)
detectors_with_trigs = set()
trig_paths = sorted(glob.glob('output/????_??_??/*.hdf'))
for trigfp in trig_paths:
with h5py.File(trigfp, 'r') as trigf:
for detector in detectors:
if detector not in trigf:
continue
group = trigf[detector]
if 'psd' not in group:
continue
# check that PSD is sane
psd = group['psd'][:] / pycbc.DYN_RANGE_FAC ** 2
psd_df = group['psd'].attrs['delta_f']
psd_f = np.arange(len(psd)) * psd_df
psd_epoch = group['psd'].attrs['epoch']
in_band_asd = psd[psd_f > args.f_min] ** 0.5
if len(in_band_asd) == 0 or (in_band_asd < 1e-24).any() \
or (in_band_asd > 1e-20).any() \
or not np.isfinite(in_band_asd).all() \
or psd_epoch < args.gps_start or psd_epoch > args.gps_end:
log.info('Invalid PSD in %s %s', trigfp, detector)
single_fail = True
if 'snr' not in group or len(group['snr']) == 0:
continue
detectors_with_trigs.add(detector)
# check that SNR is non-negative and finite
snr = group['snr'][:]
if (snr < 0).any() or not np.isfinite(snr).all():
log.error('Invalid SNR in %s %s', trigfp, detector)
single_fail = True
# check that Allen chi^2 is non-negative and finite
chisq = group['chisq'][:]
chisq_dof = group['chisq_dof'][:]
if (chisq < 0).any() or not np.isfinite(chisq).all() \
or (chisq_dof < 0).any() or not np.isfinite(chisq_dof).all():
log.error('Invalid Allen chi^2 in %s %s', trigfp, detector)
single_fail = True
# check that merger time is within the simulated time range
end_time = group['end_time'][:]
if (end_time < args.gps_start).any() or (end_time > args.gps_end).any():
log.error('Invalid merger time in %s %s', trigfp, detector)
single_fail = True
# check that template parameters are consistent with the bank
trig_mass1 = group['mass1'][:]
trig_mass2 = group['mass2'][:]
trig_s1z = group['spin1z'][:]
trig_s2z = group['spin2z'][:]
trig_temp_id = group['template_id'][:]
test_mass1 = close(temp_mass1[trig_temp_id], trig_mass1, 1e-7)
test_mass2 = close(temp_mass2[trig_temp_id], trig_mass2, 1e-7)
test_s1z = close(temp_s1z[trig_temp_id], trig_s1z, 1e-7)
test_s2z = close(temp_s2z[trig_temp_id], trig_s2z, 1e-7)
test_all = np.logical_and.reduce((test_mass1, test_mass2,
test_s1z, test_s2z))
if not test_all.all():
log.error('Invalid template parameters in %s %s',
trigfp, detector)
single_fail = True
# check that triggers were produced in all detectors
missing = sorted(detectors - detectors_with_trigs)
if missing:
log.error('No triggers found in %s', ', '.join(missing))
single_fail = True
if single_fail:
log.error('Single Trigger Test Failed')
return single_fail
def check_found_events(args):
found_fail = False
# read injections
with h5py.File(args.injections, 'r') as injfile:
inj_mass1 = injfile['mass1'][:]
inj_mass2 = injfile['mass2'][:]
inj_spin1z = injfile['spin1z'][:]
inj_spin2z = injfile['spin2z'][:]
inj_time = injfile['tc'][:]
# gather found triggers
found_trig_paths = sorted(glob.glob('output/????_??_??/candidate_*/*.xml.gz'))
n_found = len(found_trig_paths)
if n_found == 0:
log.error('No triggers detected')
found_fail = True
elif n_found >= 10:
log.error('Too many triggers detected')
found_fail = True
else:
log.info('%d found trigger(s) detected', n_found)
# create field array to store properties of triggers
dtype = [('mass1', float), ('mass2', float),
('spin1z', float), ('spin2z', float),
('tc', float), ('net_snr', float),
('ifar', float)]
trig_props = FieldArray(n_found, dtype=dtype)
# store properties of found triggers
for x, ctrigfp in enumerate(found_trig_paths):
log.info('Checking trigger %s', ctrigfp)
xmldoc = load_xml_doc(
ctrigfp, False, contenthandler=LIGOLWContentHandler)
si_table = lsctables.SnglInspiralTable.get_table(xmldoc)
ci_table = lsctables.CoincInspiralTable.get_table(xmldoc)
trig_props['tc'][x] = si_table[0].end
trig_props['mass1'][x] = si_table[0].mass1
trig_props['mass2'][x] = si_table[0].mass2
trig_props['spin1z'][x] = si_table[0].spin1z
trig_props['spin2z'][x] = si_table[0].spin2z
trig_props['ifar'][x] = conv.sec_to_year(1 / ci_table[0].combined_far)
snr_list = si_table.getColumnByName('snr').asarray()
trig_props['net_snr'][x] = sum(snr_list ** 2) ** 0.5
log.info('Single-detector SNRs: %s', snr_list)
log.info('Network SNR: %f', trig_props['net_snr'][x])
log.info('IFAR: %f', trig_props['ifar'][x])
log.info('Merger time: %f', trig_props['tc'][x])
log.info('Mass 1: %f', trig_props['mass1'][x])
log.info('Mass 2: %f', trig_props['mass2'][x])
log.info('Spin1z: %f', trig_props['spin1z'][x])
log.info('Spin2z: %f', trig_props['spin2z'][x])
# check if injections match trigger params
for i in range(len(inj_mass1)):
has_match = False
for j in range(n_found):
# FIXME should calculate the optimal SNRs of the injections
# and use those for checking net_snr
if (close(inj_time[i], trig_props['tc'][j], 1.0)
and close(inj_mass1[i], trig_props['mass1'][j], 1e-5)
and close(inj_mass2[i], trig_props['mass2'][j], 1e-5)
and close(inj_spin1z[i], trig_props['spin1z'][j], 1e-5)
and close(inj_spin2z[i], trig_props['spin2z'][j], 1e-5)
and close(15.0, trig_props['net_snr'][j], 2.0)):
has_match = True
break
if not has_match:
found_fail = True
log.error('Injection %i was missed', i)
if found_fail:
log.error('Found Trigger Test Failed')
return found_fail
parser = argparse.ArgumentParser()
parser.add_argument('--gps-start', type=float, required=True)
parser.add_argument('--gps-end', type=float, required=True)
parser.add_argument('--f-min', type=float, required=True)
parser.add_argument('--bank', type=str, required=True)
parser.add_argument('--injections', type=str, required=True)
parser.add_argument('--detectors', type=str, required=True, nargs='+')
args = parser.parse_args()
log.basicConfig(level=log.INFO, format='%(asctime)s %(message)s')
single_fail = check_single_results(args)
found_fail = check_found_events(args)
fail = single_fail or found_fail
if fail:
log.error('Test Failed')
else:
log.info('Test Passed')
sys.exit(1 if fail else 0)
| 8,211
| 38.104762
| 88
|
py
|
pycbc
|
pycbc-master/examples/live/generate_injections.py
|
#!/usr/bin/env python
import sys
import numpy as np
from pycbc.io import FieldArray
from pycbc.inject import InjectionSet
dtype = [('mass1', float), ('mass2', float),
('spin1z', float), ('spin2z', float),
('tc', float), ('distance', float),
('ra', float), ('dec', float),
('approximant', 'S32')]
static_params = {'f_lower': 17.,
'f_ref': 17.,
'taper': 'start',
'inclination': 0.,
'coa_phase': 0.,
'polarization': 0.}
samples = FieldArray(3, dtype=dtype)
# masses and spins are intended to match the highest
# and lowest mass templates in the template bank
# Last injection is designed to be found as an EM-bright single
samples['mass1'] = [290.929321, 1.1331687, 2.2756491]
samples['mass2'] = [3.6755455, 1.010624, 1.1077247]
samples['spin1z'] = [0.9934847, 0.029544285, -0.59105825]
samples['spin2z'] = [0.92713535, 0.020993788, 0.047548451]
# distance and sky locations for coincs to have network SNRs ~15
# and for single to pass SNR cuts
samples['tc'] = [1272790100.1, 1272790260.1, 1272790490.2]
samples['distance'] = [178., 79., 47.]
samples['ra'] = [np.deg2rad(45), np.deg2rad(10), np.deg2rad(10)]
samples['dec'] = [np.deg2rad(45), np.deg2rad(-45), np.deg2rad(-45)]
samples['approximant'] = ['SEOBNRv4_opt', 'SpinTaylorT4', 'SpinTaylorT4']
InjectionSet.write('injections.hdf', samples, static_args=static_params,
injtype='cbc', cmd=" ".join(sys.argv))
| 1,508
| 34.093023
| 73
|
py
|
pycbc
|
pycbc-master/examples/search/check_job.py
|
import subprocess
import time
time.sleep(30)
while 1:
time.sleep(60)
subprocess.run(["condor_q", "-bet"])
subprocess.run(["pegasus-status", "submitdir/work/"])
out = subprocess.check_output(["pegasus-status", "submitdir/work/"])
out = str(out)
lines = out.split('\\n')
for i in range(len(lines)):
if 'UNREADY' in lines[i]:
status_line = i + 1
break
stats = lines[status_line].split(' ')
stats = [s for s in stats if s != '']
unready = int(stats[0])
ready = int(stats[1])
pre = int(stats[2])
queued = int(stats[3])
post = int(stats[4])
done = int(stats[5])
failed = int(stats[6])
finished = (unready == 0 and ready == 0 and queued == 0 and post == 0)
passed = finished and failed == 0
if passed:
print("workflow has completed successfully")
time.sleep(30)
exit(0)
if failed != 0:
print("workflow has a failed job, ending now")
subprocess.run(["bash", "./stop"])
# Need to wait here to make sure it fully exits before uploading logs!
time.sleep(30)
exit(1)
| 1,136
| 26.731707
| 78
|
py
|
pycbc
|
pycbc-master/examples/workflow/generic/multilevel_subworkflow_data/simple.py
|
../simple_subworkflow_data/simple.py
| 36
| 36
| 36
|
py
|
pycbc
|
pycbc-master/examples/workflow/generic/simple_subworkflow_data/simple.py
|
""" A minimal pycbc workflow example """
import argparse
import pycbc
import pycbc.workflow as wf
import os
pycbc.init_logging(True)
parser = argparse.ArgumentParser(description=__doc__[1:])
parser.add_argument("--multilevel", action='store_true', default=False)
wf.add_workflow_command_line_group(parser)
wf.add_workflow_settings_cli(parser)
args = parser.parse_args()
input_file = wf.resolve_url_to_file("test_input.txt")
input_file.add_pfn(os.path.abspath('./test_input.txt'), 'local')
cont = wf.Workflow(args, 'cont')
sub1 = wf.Workflow(args, 'sub1')
sub1_1 = wf.Workflow(args, 'sub1_1')
sub2 = wf.Workflow(args, 'sub2')
exe1 = wf.Executable(cont.cp, 'exe1')
SUBSUB = args.multilevel
# Subworkflow 1: generate file that will be needed later
# PATH1: generate that input in a sub-sub workflow
if SUBSUB:
# Subworkflow 1: sub-subworkflow
node = exe1.create_node()
wf1_out_file = wf.File.from_path(os.path.abspath("test_output.txt.2"))
node.add_input_arg(input_file)
node.add_output_arg(wf1_out_file)
sub1_1 += node
sub1 += sub1_1
#PATH2: generate that input within a sub-workflow
else:
node = exe1.create_node()
wf1_out_file = wf.File.from_path(os.path.abspath("test_output.txt.2"))
node.add_input_arg(input_file)
node.add_output_arg(wf1_out_file)
sub1 += node
# Subworkflow 2
# TEST GOAL: Job within subworkflow gets the input file produce in
# some external workflow
node2 = exe1.create_node()
node2.add_input_arg(wf1_out_file)
node2.add_output_arg(wf.File.from_path(os.path.abspath("test_output.txt.3")))
sub2 += node2
# Regular job in top-level workflow
# Test GOAL: job *directly* in workflow gets file produced by subworkflow in
# the same workflow
node2 = exe1.create_node()
node2.add_input_arg(wf1_out_file)
node2.add_output_arg(wf.File.from_path(os.path.abspath("test_output.txt.4")))
cont += node2
cont += sub1
cont += sub2
cont.save()
| 1,914
| 28.015152
| 77
|
py
|
pycbc
|
pycbc-master/examples/workflow/dayhopecheck/dayhopecheck.py
|
#!/usr/bin/env python
# Copyright (C) 2013 Ian W. Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Program for determining the following segment lists and dumping these to a
single summary XML file:
1) Science time, according to the segment database (X1:CBC_DAYHOPE_SCIENCE)
2) Science time - CAT_1, using a supplied veto-definer file to define CAT_1.
If you define a minimum segment length, this is also removed here.
(X1:CBC_DAYHOPE_SCIENCE_OK)
3) SCIENCE_OK - times not available for datafind. Depending on how you set the
.ini file options this can include SCIENCE_OK times that are not found on
the local LDR server, and files that the local LDR server returns but are
not actually visible (using os.path.isfile)
(X1:CBC_DAYHOPE_SCIENCE_AVAILABLE)
4) Time analysable by daily [i,a]hope. This will compute the times that the
daily CBC analysis can analyse, which must be a subset of SCIENCE_AVAILABLE.
The options given in the config file tell the code what data can be
analysed by the daily analysis. (X1:CBC_DAYHOPE_ANALYSABLE)
"""
import pycbc
import pycbc.version
__author__ = "Ian Harry <ian.harry@astro.cf.ac.uk>"
__version__ = pycbc.version.git_verbose_msg
__date__ = pycbc.version.date
__program__ = "dayhopecheck"
import os
import logging
import argparse
from ligo import segments
import pycbc.workflow as _workflow
from ligo.lw import ligolw
from ligo.lw import utils as ligolw_utils
from ligo.lw.utils import process as ligolw_process
from glue.segmentdb import segmentdb_utils
logging.basicConfig(format='%(asctime)s:%(levelname)s : %(message)s', \
level=logging.INFO,datefmt='%I:%M:%S')
# command line options
_desc = __doc__[1:]
parser = argparse.ArgumentParser(description=_desc)
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument("-d", "--output-dir", required=True,\
help="Path to output directory.")
_workflow.add_workflow_command_line_group(parser)
args = parser.parse_args()
workflow = _workflow.Workflow(args)
currDir = os.path.abspath(args.output_dir)
segDir = os.path.join(currDir,"segments")
dfDir = os.path.join(currDir,"datafind")
print("BEGIN BY GENERATING SCIENCE AND CAT_X VETOES")
def segment_report(sSegs):
fullLen = 0
fullNum = 0
shortLen = 0
shortNum = 0
longLen = 0
longNum = 0
for ifo in sSegs.keys():
for seg in sSegs[ifo]:
fullLen += abs(seg)
fullNum += 1
if abs(seg) > 500:
shortLen+=abs(seg)
shortNum+=1
if abs(seg) > 2000:
longLen+=abs(seg)
longNum+=1
print("For ifo %s there is %d seconds of data in %d segments, %d seconds (%d unique segments) in segments longer than 500s and %d seconds (%d unique segments) longer than 2000s." %(ifo, fullLen, fullNum, shortLen, shortNum, longLen, longNum))
scienceSegs, segsList = _workflow.setup_segment_generation(workflow, segDir)
segment_report(scienceSegs)
print()
print()
print("RUNNING DATAFIND")
datafinds, scienceSegs = _workflow.setup_datafind_workflow(workflow, scienceSegs,
dfDir, segsList)
# This is needed to know what times will be analysed by daily ahope
# Template bank stuff
banks = _workflow.setup_tmpltbank_workflow(workflow, scienceSegs, datafinds,
dfDir)
# Do matched-filtering
insps = _workflow.setup_matchedfltr_workflow(workflow, scienceSegs, datafinds,
banks, dfDir)
# Now construct the summary XML file
outdoc = ligolw.Document()
outdoc.appendChild(ligolw.LIGO_LW())
# FIXME: PROGRAM NAME and dictionary of opts should be variables defined up above
proc_id = ligolw_process.register_to_xmldoc(outdoc, 'dayhopetest',
vars(args) ).process_id
for ifo in workflow.ifos:
# Lets get the segment lists we need
segIfoFiles = segsList.find_output_with_ifo(ifo)
# SCIENCE
sciSegFile = segIfoFiles.find_output_with_tag('SCIENCE')
assert(len(sciSegFile) == 1)
sciSegFile = sciSegFile[0]
sciSegs = sciSegFile.segmentList
# SCIENCE_OK
sciokSegFile = segIfoFiles.find_output_with_tag('SCIENCE_OK')
assert(len(sciokSegFile) == 1)
sciokSegFile = sciokSegFile[0]
sciokSegs = sciokSegFile.segmentList
# SCIENCE_AVAILABLE
sciavailableSegFile = segIfoFiles.find_output_with_tag('SCIENCE_AVAILABLE')
assert(len(sciavailableSegFile) == 1)
sciavailableSegFile = sciavailableSegFile[0]
sciavailableSegs = sciavailableSegFile.segmentList
# ANALYSABLE - This one needs to come from inspiral outs
analysableSegs = insps.get_times_covered_by_files()
# And add these to the output file
# Start with the segment summary
summSegs = segments.segmentlist([workflow.analysis_time])
sci_def_id = segmentdb_utils.add_to_segment_definer(outdoc, proc_id, ifo,
"CBC_DAYHOPE_SCIENCE", 0)
sciok_def_id = segmentdb_utils.add_to_segment_definer(outdoc, proc_id, ifo,
"CBC_DAYHOPE_SCIENCE_OK", 0)
sciavailable_def_id = segmentdb_utils.add_to_segment_definer(outdoc,
proc_id, ifo, "CBC_DAYHOPE_SCIENCE_AVAILABLE", 0)
analysable_def_id = segmentdb_utils.add_to_segment_definer(outdoc, proc_id,
ifo, "CBC_DAYHOPE_ANALYSABLE", 0)
segmentdb_utils.add_to_segment(outdoc, proc_id, sci_def_id, sciSegs)
segmentdb_utils.add_to_segment(outdoc, proc_id, sciok_def_id, sciokSegs)
segmentdb_utils.add_to_segment(outdoc, proc_id, sciavailable_def_id,
sciavailableSegs)
segmentdb_utils.add_to_segment(outdoc, proc_id, analysable_def_id,
analysableSegs)
segmentdb_utils.add_to_segment_summary(outdoc, proc_id, sci_def_id,
summSegs, comment='')
segmentdb_utils.add_to_segment_summary(outdoc, proc_id, sciok_def_id,
summSegs, comment='')
segmentdb_utils.add_to_segment_summary(outdoc, proc_id, sciavailable_def_id,
summSegs, comment='')
segmentdb_utils.add_to_segment_summary(outdoc, proc_id, analysable_def_id,
summSegs, comment='')
ligolw_utils.write_filename(outdoc, "SUMMARY.xml")
| 7,198
| 40.373563
| 250
|
py
|
pycbc
|
pycbc-master/examples/workflow/data_checker/daily_test.py
|
#!/usr/bin/env python
# Copyright (C) 2013 Ian W. Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Description of daily_test
"""
import pycbc
import pycbc.version
__author__ = "Ian Harry <ian.harry@astro.cf.ac.uk>"
__version__ = pycbc.version.git_verbose_msg
__date__ = pycbc.version.date
__program__ = "daily_test"
import os
import logging
import argparse
import pycbc.workflow as _workflow
logging.basicConfig(format='%(asctime)s:%(levelname)s : %(message)s', \
level=logging.INFO,datefmt='%I:%M:%S')
logger = logging.getLogger()
# command line options
_desc = __doc__[1:]
parser = argparse.ArgumentParser(description=_desc)
parser.add_argument('--version', action='version', version=__version__)
_workflow.add_workflow_command_line_group(parser)
args = parser.parse_args()
workflow = _workflow.Workflow(args, 'daily_check')
currDir = os.getcwd()
segDir = os.path.join(currDir,"segments")
dfDirSYR = os.path.join(currDir,"datafindSYR")
dfDirCIT = os.path.join(currDir,"datafindCIT")
dfDirLHO = os.path.join(currDir,"datafindLHO")
dfDirLLO = os.path.join(currDir,"datafindLLO")
dfDirUWM = os.path.join(currDir,"datafindUWM")
print("BEGIN BY GENERATING SCIENCE AND CAT_X VETOES")
def segment_report(sSegs):
fullLen = 0
fullNum = 0
shortLen = 0
shortNum = 0
longLen = 0
longNum = 0
for ifo in sSegs.keys():
for seg in sSegs[ifo]:
fullLen += abs(seg)
fullNum += 1
if abs(seg) > 500:
shortLen+=abs(seg)
shortNum+=1
if abs(seg) > 2000:
longLen+=abs(seg)
longNum+=1
print("For ifo %s there is %d seconds of data in %d segments, %d seconds (%d unique segments) in segments longer than 500s and %d seconds (%d unique segments) longer than 2000s." %(ifo, fullLen, fullNum, shortLen, shortNum, longLen, longNum))
scienceSegs, segsList = _workflow.setup_segment_generation(workflow, segDir)
segment_report(scienceSegs)
print("STARTING DF")
print()
# Start with SYR comparison
# FIXME: Used to use deecopy here, but now that seems to fail so repeating
# segment query calls with logging off. This may be slow!
logger.disabled = True
scienceSegsS, _ = _workflow.setup_segment_generation(workflow, segDir)
logger.disabled = False
print("RUNNING DATAFIND FOR SYR")
datafinds, scienceSegsS = _workflow.setup_datafind_workflow(workflow, scienceSegsS,
dfDirSYR, segsList, tag="SYR")
segment_report(scienceSegsS)
print()
print()
print("RUNNING DATAFIND FOR CIT")
logger.disabled = True
scienceSegsC, _ = _workflow.setup_segment_generation(workflow, segDir)
logger.disabled = False
datafinds, scienceSegsC = _workflow.setup_datafind_workflow(workflow, scienceSegsC,
dfDirCIT, segsList, tag="CIT")
segment_report(scienceSegsC)
print("Frames present a SYR and not at CIT:")
for ifo in scienceSegsS.keys():
print("For ifo", ifo)
if ifo in scienceSegsC.keys():
print(scienceSegsS[ifo] - scienceSegsC[ifo])
else:
print("No science segments for ifo %s at CIT" %(ifo))
print()
print("Frames present at CIT and not at SYR:")
for ifo in scienceSegsC.keys():
print("For ifo", ifo)
if ifo in scienceSegsS.keys():
print(scienceSegsC[ifo] - scienceSegsS[ifo])
else:
print("No science segments for ifo %s at SYR" %(ifo))
print()
# Next do LHO comparison
print()
print("RUNNING DATAFIND FOR LHO")
logger.disabled = True
scienceSegsS, _ = _workflow.setup_segment_generation(workflow, segDir)
logger.disabled = False
datafinds, scienceSegsS = _workflow.setup_datafind_workflow(workflow, scienceSegsS,
dfDirLHO, segsList, tag="LHO")
segment_report(scienceSegsS)
print("Frames present at LHO and not at CIT:")
for ifo in scienceSegsS.keys():
print("For ifo", ifo)
if ifo in scienceSegsC.keys():
print(scienceSegsS[ifo] - scienceSegsC[ifo])
else:
print("No science segments for ifo %s at CIT" %(ifo))
print()
print("Frames present at CIT and not at LHO:")
for ifo in scienceSegsC.keys():
print("For ifo", ifo)
if ifo in scienceSegsS.keys():
print(scienceSegsC[ifo] - scienceSegsS[ifo])
else:
print("No science segments for ifo %s at LHO" %(ifo))
print()
# Next do LLO comparison
print()
print("RUNNING DATAFIND FOR LLO")
logger.disabled = True
scienceSegsS, _ = _workflow.setup_segment_generation(workflow, segDir)
logger.disabled = False
datafinds, scienceSegsS = _workflow.setup_datafind_workflow(workflow, scienceSegsS,
dfDirLLO, segsList, tag="LLO")
segment_report(scienceSegsS)
print("Frames present at LLO and not at CIT:")
for ifo in scienceSegsS.keys():
print("For ifo", ifo)
if ifo in scienceSegsC.keys():
print(scienceSegsS[ifo] - scienceSegsC[ifo])
else:
print("No science segments for ifo %s at CIT" %(ifo))
print()
print("Frames present at CIT and not at LLO:")
for ifo in scienceSegsC.keys():
print("For ifo", ifo)
if ifo in scienceSegsS.keys():
print(scienceSegsC[ifo] - scienceSegsS[ifo])
else:
print("No science segments for ifo %s at LLO" %(ifo))
print()
# Next do UWM comparison
print()
print("RUNNING DATAFIND FOR UWM")
logger.disabled = True
scienceSegsS, _ = _workflow.setup_segment_generation(workflow, segDir)
logger.disabled = False
datafinds, scienceSegsS = _workflow.setup_datafind_workflow(workflow, scienceSegsS,
dfDirUWM, segsList, tag="UWM")
segment_report(scienceSegsS)
print("Frames present at UWM and not at CIT:")
for ifo in scienceSegsS.keys():
print("For ifo", ifo)
if ifo in scienceSegsC.keys():
print(scienceSegsS[ifo] - scienceSegsC[ifo])
else:
print("No science segments for ifo %s at CIT" %(ifo))
print()
print("Frames present at CIT and not at UWM:")
for ifo in scienceSegsC.keys():
print("For ifo", ifo)
if ifo in scienceSegsS.keys():
print(scienceSegsC[ifo] - scienceSegsS[ifo])
else:
print("No science segments for ifo %s at UWM" %(ifo))
print()
| 6,801
| 30.059361
| 250
|
py
|
pycbc
|
pycbc-master/examples/workflow/data_checker/get_data_example.py
|
#!/usr/bin/env python
# Copyright (C) 2013 Ian W. Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
An example of how to query science + CAT_X segments and do datafind with pycbc.workflow.
"""
import pycbc
import pycbc.version
__author__ = "Ian Harry <ian.harry@astro.cf.ac.uk>"
__version__ = pycbc.version.git_verbose_msg
__date__ = pycbc.version.date
__program__ = "get_data_example"
import os
import argparse
import logging
import pycbc.workflow as _workflow
logging.basicConfig(format='%(asctime)s:%(levelname)s : %(message)s',
                    level=logging.INFO, datefmt='%I:%M:%S')
# command line options
# Skip the leading newline of the module docstring for the parser blurb.
_desc = __doc__[1:]
parser = argparse.ArgumentParser(description=_desc)
parser.add_argument('--version', action='version', version=__version__)
# Adds the standard pycbc.workflow options (e.g. configuration files).
_workflow.add_workflow_command_line_group(parser)
args = parser.parse_args()
workflow = _workflow.Workflow(args)
# Output directories for the segment and datafind steps; each needs a
# logs/ subdirectory for job output.
currDir = os.getcwd()
segDir = os.path.join(currDir,"segments")
if not os.path.exists(segDir+'/logs'):
    os.makedirs(segDir+'/logs')
dfDir = os.path.join(currDir,"datafind")
if not os.path.exists(dfDir+'/logs'):
    os.makedirs(dfDir+'/logs')
# Query the science segments, then locate the frame files covering them.
scienceSegs, segsList = _workflow.setup_segment_generation(workflow, segDir)
datafinds, scienceSegs = _workflow.setup_datafind_workflow(workflow, scienceSegs,
                             dfDir, segsList)
# scienceSegs is a ligo.segment.segmentlist of the times you should analyse.
# It contains science times, that are present on disk, with CAT_1 times
# removed.
| 2,162
| 33.887097
| 88
|
py
|
pycbc
|
pycbc-master/examples/inference/list_parameters.py
|
from pycbc.waveform import parameters

# Base parameters for a CBC waveform: frequency-domain generation
# parameters, plus source-location and calibration parameters.
base_params = (parameters.fd_waveform_params
               + parameters.location_params
               + parameters.calibration_params)
print(base_params)
| 183
| 35.8
| 68
|
py
|
pycbc
|
pycbc-master/examples/inference/lisa_smbhb_ldc/advanced_plot.py
|
import subprocess
import pickle
import numpy as np
from pycbc.conversions import q_from_mass1_mass2, mchirp_from_mass1_mass2
def spin_ldc2pycbc(mag, pol):
    """Convert an LDC spin (magnitude, polar angle) to the aligned z component."""
    return mag * np.cos(pol)
def plt(index):
    """Return a shell command that plots the posterior for LDC source *index*.

    Loads the true injection parameters from MBHB_params_v2_LISA_frame.pkl,
    converts them to pycbc conventions, and builds a
    ``pycbc_inference_plot_posterior`` command with the true values supplied
    via --expected-parameters.
    """
    with open('./MBHB_params_v2_LISA_frame.pkl', 'rb') as f:
        params_true_all = pickle.load(f)
    p_index = index
    params_true = params_true_all[p_index]
    print(params_true)
    modes = [(2,2)]
    # Mass ratio and chirp mass for the sampler's (mchirp, q) parametrization.
    q = q_from_mass1_mass2(params_true['Mass1'], params_true['Mass2'])
    mchirp = mchirp_from_mass1_mass2(params_true['Mass1'],params_true['Mass2'])
    # Map LDC parameter names onto pycbc waveform arguments. LDC stores spins
    # as (magnitude, polar angle); spin_ldc2pycbc projects them onto z.
    params = {'approximant': 'BBHX_PhenomD',
              'mass1': params_true['Mass1'],
              'mass2': params_true['Mass2'],
              'inclination': params_true['Inclination'],
              'tc_lisa': params_true['CoalescenceTime_LISA'],
              'polarization_lisa': params_true['Polarization_LISA'],
              'spin1z': spin_ldc2pycbc(params_true['Spin1'], params_true['PolarAngleOfSpin1']),
              'spin2z': spin_ldc2pycbc(params_true['Spin2'], params_true['PolarAngleOfSpin2']),
              'coa_phase': params_true['PhaseAtCoalescence'],
              'distance': params_true['Distance'],
              'eclipticlatitude_lisa': params_true['EclipticLatitude_LISA'],
              'eclipticlongitude_lisa': params_true['EclipticLongitude_LISA'],
              'mchirp': mchirp,
              'q': q,
              'mode_array': modes
              }
    # The backslashes inside the f-string are shell line continuations, not
    # Python ones; the whole string is one command line.
    plot_code = f"""
pycbc_inference_plot_posterior \
--input-file lisa_smbhb_ldc_pe.hdf \
--output-file lisa_smbhb_mass_tc_{p_index}.png \
--z-arg snr --plot-scatter --plot-marginal \
--plot-contours --contour-color black \
--parameters \
mass1_from_mchirp_q(mchirp,q):mass1 \
mass2_from_mchirp_q(mchirp,q):mass2 \
tc \
--expected-parameters \
mass1_from_mchirp_q(mchirp,q):{params['mass1']} \
mass2_from_mchirp_q(mchirp,q):{params['mass2']} \
tc:{params['tc_lisa']} \
"""
    return plot_code
# The index of first SMBHB in LDC Sangria (0-14) is 0.
p = [0]
for i in p:
    # Run the generated plotting command as a subprocess and wait for it.
    process = subprocess.Popen(plt(i).split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    print('rel{} image created'.format(i))
| 2,428
| 36.369231
| 95
|
py
|
pycbc
|
pycbc-master/examples/waveform/what_waveform.py
|
from pycbc.waveform import td_approximants, fd_approximants

# Print the time-domain approximants first, then the frequency-domain ones.
for available_approximants in (td_approximants, fd_approximants):
    print(available_approximants())

# These helpers only report what the *current* processing context supports:
# a waveform implemented in CUDA or OpenCL is listed only when running
# under the corresponding scheme.
| 426
| 34.583333
| 77
|
py
|
pycbc
|
pycbc-master/examples/waveform/plot_waveform.py
|
import matplotlib.pyplot as pp
from pycbc.waveform import get_td_waveform

# Overlay the plus polarization of a 10+10 Msun, spin1z=0.9 system as
# generated by two different approximants.
for approximant in ('SEOBNRv4', 'IMRPhenomD'):
    source_args = dict(approximant=approximant,
                       mass1=10,
                       mass2=10,
                       spin1z=0.9,
                       delta_t=1.0 / 4096,
                       f_lower=40)
    hplus, hcross = get_td_waveform(**source_args)
    pp.plot(hplus.sample_times, hplus, label=approximant)

pp.ylabel('Strain')
pp.xlabel('Time (s)')
pp.legend()
pp.show()
| 498
| 25.263158
| 50
|
py
|
pycbc
|
pycbc-master/examples/waveform/plot_freq.py
|
import matplotlib.pyplot as pp
from pycbc import waveform

# Plot GW frequency versus time for SpinTaylorT4 at several post-Newtonian
# phase orders to show how the orbital evolution changes with order.
for phase_order in [2, 3, 4, 5, 6, 7]:
    hp, hc = waveform.get_td_waveform(approximant='SpinTaylorT4',
                                      mass1=10, mass2=10,
                                      phase_order=phase_order,
                                      delta_t=1.0/4096,
                                      f_lower=100)
    # Strip the zero padding before deriving amplitude/frequency.
    hp, hc = hp.trim_zeros(), hc.trim_zeros()
    # NOTE(review): amp is computed but never used below.
    amp = waveform.utils.amplitude_from_polarizations(hp, hc)
    f = waveform.utils.frequency_from_polarizations(hp, hc)
    pp.plot(f.sample_times, f, label="PN Order = %s" % phase_order)
pp.ylabel('Frequency (Hz)')
pp.xlabel('Time (s)')
pp.legend(loc='upper left')
pp.show()
| 700
| 30.863636
| 67
|
py
|
pycbc
|
pycbc-master/examples/waveform/add_waveform.py
|
import numpy
import matplotlib.pyplot as pp
import pycbc.waveform
from pycbc.types import TimeSeries
def test_waveform(**args):
    """Toy time-domain model: a 10 s linear chirp from f_lower to fpeak
    whose amplitude grows linearly in time.

    Returns the two GW polarizations (hp, hc) as pycbc TimeSeries.
    """
    flow = args['f_lower']  # Required parameter
    dt = args['delta_t']    # Required parameter
    fpeak = args['fpeak']   # A new parameter for my model

    t = numpy.arange(0, 10, dt)
    # Frequency sweeps linearly from flow to fpeak over the duration.
    f = t/t.max() * (fpeak - flow) + flow
    # Complex strain with linearly growing amplitude.
    h = t * numpy.exp(2.0j * numpy.pi * f * t)

    # By convention the time at 0 is a fiducial reference (the merger for
    # CBC waveforms), so place the epoch so the signal ends at t=0.
    epoch = -len(t) * dt
    h = TimeSeries(h, delta_t=dt, epoch=epoch)
    return h.real(), h.imag()
# This tells pycbc about our new waveform so we can call it from standard
# pycbc functions. If this were a frequency-domain model, select 'frequency'
# instead of 'time' to this function call.
pycbc.waveform.add_custom_waveform('test', test_waveform, 'time', force=True)
# Let's plot what our new waveform looks like
hp, hc = pycbc.waveform.get_td_waveform(approximant="test",
                                        f_lower=20, fpeak=50,
                                        delta_t=1.0/4096)
# Time-domain view
pp.figure(0)
pp.plot(hp.sample_times, hp)
pp.xlabel('Time (s)')
# Frequency-domain view (real part), restricted to the swept band 20-100 Hz
pp.figure(1)
hf = hp.to_frequencyseries()
pp.plot(hf.sample_frequencies, hf.real())
pp.xlabel('Frequency (Hz)')
pp.xscale('log')
pp.xlim(20, 100)
pp.show()
| 1,497
| 29.571429
| 77
|
py
|
pycbc
|
pycbc-master/examples/waveform/match_waveform.py
|
from pycbc.waveform import get_td_waveform
from pycbc.filter import match
from pycbc.psd import aLIGOZeroDetHighPower

# Compute the noise-weighted match between two approximants for the same
# 10+10 Msun source.
f_low = 30
sample_rate = 4096

# Generate the two waveforms to compare
hp, hc = get_td_waveform(approximant="EOBNRv2",
                         mass1=10,
                         mass2=10,
                         f_lower=f_low,
                         delta_t=1.0/sample_rate)
sp, sc = get_td_waveform(approximant="TaylorT4",
                         mass1=10,
                         mass2=10,
                         f_lower=f_low,
                         delta_t=1.0/sample_rate)

# Resize the waveforms to the same length (zero-pads the shorter one)
tlen = max(len(sp), len(hp))
sp.resize(tlen)
hp.resize(tlen)

# Generate the aLIGO ZDHP PSD on the matching frequency grid
delta_f = 1.0 / sp.duration
flen = tlen//2 + 1
psd = aLIGOZeroDetHighPower(flen, delta_f, f_low)

# Note: This takes a while the first time as an FFT plan is generated
# subsequent calls are much faster.
m, i = match(hp, sp, psd=psd, low_frequency_cutoff=f_low)
print('The match is: {:.4f}'.format(m))
| 1,045
| 28.885714
| 69
|
py
|
pycbc
|
pycbc-master/examples/waveform/plot_detwaveform.py
|
import matplotlib.pyplot as pp
from pycbc.waveform import get_td_waveform
from pycbc.detector import Detector

apx = 'SEOBNRv4'
# NOTE: Inclination runs from 0 to pi, with poles at 0 and pi
# coa_phase runs from 0 to 2 pi.
hp, hc = get_td_waveform(approximant=apx,
                         mass1=10,
                         mass2=10,
                         spin1z=0.9,
                         spin2z=0.4,
                         inclination=1.23,
                         coa_phase=2.45,
                         delta_t=1.0/4096,
                         f_lower=40)

det_h1 = Detector('H1')
det_l1 = Detector('L1')
det_v1 = Detector('V1')

# Choose a GPS end time, sky location, and polarization phase for the merger
# NOTE: Right ascension and polarization phase runs from 0 to 2pi
# Declination runs from pi/2. to -pi/2 with the poles at pi/2. and -pi/2.
end_time = 1192529720
declination = 0.65
right_ascension = 4.67
polarization = 2.34

# Shift the waveform epoch so the signal ends at the chosen GPS time.
hp.start_time += end_time
hc.start_time += end_time

# Project the polarizations onto each detector's antenna response.
signal_h1 = det_h1.project_wave(hp, hc, right_ascension, declination, polarization)
signal_l1 = det_l1.project_wave(hp, hc, right_ascension, declination, polarization)
signal_v1 = det_v1.project_wave(hp, hc, right_ascension, declination, polarization)

pp.plot(signal_h1.sample_times, signal_h1, label='H1')
pp.plot(signal_l1.sample_times, signal_l1, label='L1')
pp.plot(signal_v1.sample_times, signal_v1, label='V1')
pp.ylabel('Strain')
pp.xlabel('Time (s)')
pp.legend()
pp.show()
| 1,500
| 32.355556
| 84
|
py
|
pycbc
|
pycbc-master/examples/waveform/plot_phase.py
|
import matplotlib.pyplot as pp
from pycbc import waveform

# Plot GW strain amplitude against GW phase for several approximants.
for approx in ('SEOBNRv4', 'TaylorT4', 'IMRPhenomB'):
    hplus, hcross = waveform.get_td_waveform(approximant=approx,
                                             mass1=10,
                                             mass2=10,
                                             delta_t=1.0/4096,
                                             f_lower=40)
    # Drop the zero padding before converting polarizations.
    hplus = hplus.trim_zeros()
    hcross = hcross.trim_zeros()
    amplitude = waveform.utils.amplitude_from_polarizations(hplus, hcross)
    gw_phase = waveform.utils.phase_from_polarizations(hplus, hcross)
    pp.plot(gw_phase, amplitude, label=approx)

pp.ylabel('GW Strain Amplitude')
pp.xlabel('GW Phase (radians)')
pp.legend(loc='upper left')
pp.show()
| 657
| 28.909091
| 61
|
py
|
pycbc
|
pycbc-master/examples/waveform/plot_fd_td.py
|
"""Plot a time domain and Fourier domain waveform together in the time domain.
Note that without special cleanup the Fourier domain waveform will exhibit
the Gibb's phenomenon. (http://en.wikipedia.org/wiki/Gibbs_phenomenon)
"""
import matplotlib.pyplot as pp
from pycbc import types, fft, waveform

# Get a time domain waveform
hp, hc = waveform.get_td_waveform(approximant='SEOBNRv4',
                                  mass1=6, mass2=6, delta_t=1.0/4096, f_lower=40)

# Get a frequency domain waveform
sptilde, sctilde = waveform.get_fd_waveform(approximant="TaylorF2",
                                            mass1=6, mass2=6, delta_f=1.0/4, f_lower=40)

# FFT it to the time-domain
tlen = int(1.0 / hp.delta_t / sptilde.delta_f)
# Fix: use floor division -- under Python 3 ``tlen/2 + 1`` is a float,
# which is not a valid length to resize to (cf. tlen//2 + 1 in the
# match_waveform example).
sptilde.resize(tlen//2 + 1)
sp = types.TimeSeries(types.zeros(tlen), delta_t=hp.delta_t)
fft.ifft(sptilde, sp)

pp.plot(sp.sample_times, sp, label="TaylorF2 (IFFT)")
pp.plot(hp.sample_times, hp, label='SEOBNRv4')
pp.ylabel('Strain')
pp.xlabel('Time (s)')
pp.legend()
pp.show()
| 997
| 31.193548
| 78
|
py
|
pycbc
|
pycbc-master/examples/inspiral/check_GW150914_detection.py
|
#!/usr/bin/env python
# Read a pycbc_inspiral HDF5 trigger file and check that it contains triggers
# compatible with GW150914
# 2016 Tito Dal Canton
import sys
import h5py
import numpy as np

# Reference end time, SNR and reduced chi^2 of GW150914 per detector.
gw150914_time = 1126259462.4
gw150914_snr = {'H1': 19.71, 'L1': 13.28}
gw150914_chi2r = {'H1': 1.05, 'L1': 0.45}

# Trigger file path comes from the first command-line argument; the file is
# assumed to contain a single detector group at the top level.
f = h5py.File(sys.argv[1], 'r')
detector = tuple(f.keys())[0]
end_times = f[detector]['end_time'][:]
snrs = f[detector]['snr'][:]
# Reduced chi-squared: chisq / (2 * chisq_dof - 2).
chi2rs = f[detector]['chisq'][:] / (2 * f[detector]['chisq_dof'][:] - 2)
# search for trigs compatible with GW150914: end time within 0.1 s, SNR and
# reduced chi^2 within +-20% of the reference values.
mask = np.logical_and.reduce([abs(end_times - gw150914_time) < 0.1,
                              snrs > 0.8 * gw150914_snr[detector],
                              snrs < 1.2 * gw150914_snr[detector],
                              chi2rs > 0.8 * gw150914_chi2r[detector],
                              chi2rs < 1.2 * gw150914_chi2r[detector]])
if mask.any():
    print('Pass: %d GW150914-like triggers' % sum(mask))
    print('end_time snr reduced_chi2')
    for t, s, c in zip(end_times[mask], snrs[mask], chi2rs[mask]):
        print('%.3f %.3f %.3f' % (t, s, c))
    # Exit status signals pass (0) / fail (1) to calling scripts.
    sys.exit(0)
else:
    print('Fail: no GW150914-like triggers')
    sys.exit(1)
| 1,207
| 32.555556
| 77
|
py
|
pycbc
|
pycbc-master/examples/noise/frequency.py
|
import matplotlib.pyplot as pp
import pycbc.noise
import pycbc.psd

# The color of the noise matches a PSD which you provide
flow = 30.0
delta_f = 1.0 / 64
flen = int(2048 / delta_f) + 1
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow)

# Here the noise is generated directly in the frequency domain so it matches
# the parameters of the PSD you give.
fs = pycbc.noise.frequency_noise_from_psd(psd, seed=127)

# Overlay the realized noise power on the PSD it was drawn from.
pp.loglog(fs.sample_frequencies, abs(fs**2.0), label='Magnitude')
# Fix: this curve was also labelled 'Magnitude', producing two identical
# legend entries; label it for what it shows, the PSD.
pp.loglog(fs.sample_frequencies, abs(psd), label='PSD')
pp.xlim(flow, psd.sample_frequencies[-1])
pp.legend()
pp.ylabel('Strain^2 / Hz')
pp.xlabel('Frequency (Hz)')
pp.show()
| 668
| 28.086957
| 76
|
py
|
pycbc
|
pycbc-master/examples/noise/timeseries.py
|
import matplotlib.pyplot as pp
import pycbc.noise
import pycbc.psd

# Colour the noise with an analytic aLIGO design PSD that you provide.
low_freq_cutoff = 30.0
df = 1.0 / 16
n_freq_samples = int(2048 / df) + 1
design_psd = pycbc.psd.aLIGOZeroDetHighPower(n_freq_samples, df, low_freq_cutoff)

# Generate 32 seconds of noise at 4096 Hz
dt = 1.0 / 4096
n_time_samples = int(32 / dt)
noise = pycbc.noise.noise_from_psd(n_time_samples, dt, design_psd, seed=127)

pp.plot(noise.sample_times, noise)
pp.ylabel('Strain')
pp.xlabel('Time (s)')
pp.show()
| 488
| 22.285714
| 65
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.