repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
gwpy/gwpy | gwpy/timeseries/statevector.py | StateVector.read | def read(cls, source, *args, **kwargs):
"""Read data into a `StateVector`
Parameters
----------
source : `str`, `list`
Source of data, any of the following:
- `str` path of single data file,
- `str` path of LAL-format cache file,
- `list` of paths.
channel : `str`, `~gwpy.detector.Channel`
the name of the channel to read, or a `Channel` object.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS end time of required data, defaults to end of data found;
any input parseable by `~gwpy.time.to_gps` is fine
bits : `list`, optional
list of bits names for this `StateVector`, give `None` at
any point in the list to mask that bit
format : `str`, optional
source format identifier. If not given, the format will be
detected if possible. See below for list of acceptable
formats.
nproc : `int`, optional, default: `1`
number of parallel processes to use, serial process by
default.
gap : `str`, optional
how to handle gaps in the cache, one of
- 'ignore': do nothing, let the undelying reader method handle it
- 'warn': do nothing except print a warning to the screen
- 'raise': raise an exception upon finding a gap (default)
- 'pad': insert a value to fill the gaps
pad : `float`, optional
value with which to fill gaps in the source data, only used if
gap is not given, or `gap='pad'` is given
Examples
--------
To read the S6 state vector, with names for all the bits::
>>> sv = StateVector.read(
'H-H1_LDAS_C02_L2-968654592-128.gwf', 'H1:IFO-SV_STATE_VECTOR',
bits=['Science mode', 'Conlog OK', 'Locked',
'No injections', 'No Excitations'],
dtype='uint32')
then you can convert these to segments
>>> segments = sv.to_dqflags()
or to read just the interferometer operations bits::
>>> sv = StateVector.read(
'H-H1_LDAS_C02_L2-968654592-128.gwf', 'H1:IFO-SV_STATE_VECTOR',
bits=['Science mode', None, 'Locked'], dtype='uint32')
Running `to_dqflags` on this example would only give 2 flags, rather
than all five.
Alternatively the `bits` attribute can be reset after reading, but
before any further operations.
Notes
-----"""
return super(StateVector, cls).read(source, *args, **kwargs) | python | def read(cls, source, *args, **kwargs):
"""Read data into a `StateVector`
Parameters
----------
source : `str`, `list`
Source of data, any of the following:
- `str` path of single data file,
- `str` path of LAL-format cache file,
- `list` of paths.
channel : `str`, `~gwpy.detector.Channel`
the name of the channel to read, or a `Channel` object.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS end time of required data, defaults to end of data found;
any input parseable by `~gwpy.time.to_gps` is fine
bits : `list`, optional
list of bits names for this `StateVector`, give `None` at
any point in the list to mask that bit
format : `str`, optional
source format identifier. If not given, the format will be
detected if possible. See below for list of acceptable
formats.
nproc : `int`, optional, default: `1`
number of parallel processes to use, serial process by
default.
gap : `str`, optional
how to handle gaps in the cache, one of
- 'ignore': do nothing, let the undelying reader method handle it
- 'warn': do nothing except print a warning to the screen
- 'raise': raise an exception upon finding a gap (default)
- 'pad': insert a value to fill the gaps
pad : `float`, optional
value with which to fill gaps in the source data, only used if
gap is not given, or `gap='pad'` is given
Examples
--------
To read the S6 state vector, with names for all the bits::
>>> sv = StateVector.read(
'H-H1_LDAS_C02_L2-968654592-128.gwf', 'H1:IFO-SV_STATE_VECTOR',
bits=['Science mode', 'Conlog OK', 'Locked',
'No injections', 'No Excitations'],
dtype='uint32')
then you can convert these to segments
>>> segments = sv.to_dqflags()
or to read just the interferometer operations bits::
>>> sv = StateVector.read(
'H-H1_LDAS_C02_L2-968654592-128.gwf', 'H1:IFO-SV_STATE_VECTOR',
bits=['Science mode', None, 'Locked'], dtype='uint32')
Running `to_dqflags` on this example would only give 2 flags, rather
than all five.
Alternatively the `bits` attribute can be reset after reading, but
before any further operations.
Notes
-----"""
return super(StateVector, cls).read(source, *args, **kwargs) | [
"def",
"read",
"(",
"cls",
",",
"source",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"super",
"(",
"StateVector",
",",
"cls",
")",
".",
"read",
"(",
"source",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Read data into a `StateVector`
Parameters
----------
source : `str`, `list`
Source of data, any of the following:
- `str` path of single data file,
- `str` path of LAL-format cache file,
- `list` of paths.
channel : `str`, `~gwpy.detector.Channel`
the name of the channel to read, or a `Channel` object.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS end time of required data, defaults to end of data found;
any input parseable by `~gwpy.time.to_gps` is fine
bits : `list`, optional
list of bits names for this `StateVector`, give `None` at
any point in the list to mask that bit
format : `str`, optional
source format identifier. If not given, the format will be
detected if possible. See below for list of acceptable
formats.
nproc : `int`, optional, default: `1`
number of parallel processes to use, serial process by
default.
gap : `str`, optional
how to handle gaps in the cache, one of
- 'ignore': do nothing, let the undelying reader method handle it
- 'warn': do nothing except print a warning to the screen
- 'raise': raise an exception upon finding a gap (default)
- 'pad': insert a value to fill the gaps
pad : `float`, optional
value with which to fill gaps in the source data, only used if
gap is not given, or `gap='pad'` is given
Examples
--------
To read the S6 state vector, with names for all the bits::
>>> sv = StateVector.read(
'H-H1_LDAS_C02_L2-968654592-128.gwf', 'H1:IFO-SV_STATE_VECTOR',
bits=['Science mode', 'Conlog OK', 'Locked',
'No injections', 'No Excitations'],
dtype='uint32')
then you can convert these to segments
>>> segments = sv.to_dqflags()
or to read just the interferometer operations bits::
>>> sv = StateVector.read(
'H-H1_LDAS_C02_L2-968654592-128.gwf', 'H1:IFO-SV_STATE_VECTOR',
bits=['Science mode', None, 'Locked'], dtype='uint32')
Running `to_dqflags` on this example would only give 2 flags, rather
than all five.
Alternatively the `bits` attribute can be reset after reading, but
before any further operations.
Notes
----- | [
"Read",
"data",
"into",
"a",
"StateVector"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/statevector.py#L612-L688 | train | 211,200 |
gwpy/gwpy | gwpy/timeseries/statevector.py | StateVector.to_dqflags | def to_dqflags(self, bits=None, minlen=1, dtype=float, round=False):
"""Convert this `StateVector` into a `~gwpy.segments.DataQualityDict`
The `StateTimeSeries` for each bit is converted into a
`~gwpy.segments.DataQualityFlag` with the bits combined into a dict.
Parameters
----------
minlen : `int`, optional, default: 1
minimum number of consecutive `True` values to identify as a
`Segment`. This is useful to ignore single bit flips,
for example.
bits : `list`, optional
a list of bit indices or bit names to select, defaults to
`~StateVector.bits`
Returns
-------
DataQualityFlag list : `list`
a list of `~gwpy.segments.flag.DataQualityFlag`
reprensentations for each bit in this `StateVector`
See Also
--------
:meth:`StateTimeSeries.to_dqflag`
for details on the segment representation method for
`StateVector` bits
"""
from ..segments import DataQualityDict
out = DataQualityDict()
bitseries = self.get_bit_series(bits=bits)
for bit, sts in bitseries.items():
out[bit] = sts.to_dqflag(name=bit, minlen=minlen, round=round,
dtype=dtype,
description=self.bits.description[bit])
return out | python | def to_dqflags(self, bits=None, minlen=1, dtype=float, round=False):
"""Convert this `StateVector` into a `~gwpy.segments.DataQualityDict`
The `StateTimeSeries` for each bit is converted into a
`~gwpy.segments.DataQualityFlag` with the bits combined into a dict.
Parameters
----------
minlen : `int`, optional, default: 1
minimum number of consecutive `True` values to identify as a
`Segment`. This is useful to ignore single bit flips,
for example.
bits : `list`, optional
a list of bit indices or bit names to select, defaults to
`~StateVector.bits`
Returns
-------
DataQualityFlag list : `list`
a list of `~gwpy.segments.flag.DataQualityFlag`
reprensentations for each bit in this `StateVector`
See Also
--------
:meth:`StateTimeSeries.to_dqflag`
for details on the segment representation method for
`StateVector` bits
"""
from ..segments import DataQualityDict
out = DataQualityDict()
bitseries = self.get_bit_series(bits=bits)
for bit, sts in bitseries.items():
out[bit] = sts.to_dqflag(name=bit, minlen=minlen, round=round,
dtype=dtype,
description=self.bits.description[bit])
return out | [
"def",
"to_dqflags",
"(",
"self",
",",
"bits",
"=",
"None",
",",
"minlen",
"=",
"1",
",",
"dtype",
"=",
"float",
",",
"round",
"=",
"False",
")",
":",
"from",
".",
".",
"segments",
"import",
"DataQualityDict",
"out",
"=",
"DataQualityDict",
"(",
")",
... | Convert this `StateVector` into a `~gwpy.segments.DataQualityDict`
The `StateTimeSeries` for each bit is converted into a
`~gwpy.segments.DataQualityFlag` with the bits combined into a dict.
Parameters
----------
minlen : `int`, optional, default: 1
minimum number of consecutive `True` values to identify as a
`Segment`. This is useful to ignore single bit flips,
for example.
bits : `list`, optional
a list of bit indices or bit names to select, defaults to
`~StateVector.bits`
Returns
-------
DataQualityFlag list : `list`
a list of `~gwpy.segments.flag.DataQualityFlag`
reprensentations for each bit in this `StateVector`
See Also
--------
:meth:`StateTimeSeries.to_dqflag`
for details on the segment representation method for
`StateVector` bits | [
"Convert",
"this",
"StateVector",
"into",
"a",
"~gwpy",
".",
"segments",
".",
"DataQualityDict"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/statevector.py#L690-L726 | train | 211,201 |
gwpy/gwpy | gwpy/timeseries/statevector.py | StateVector.fetch | def fetch(cls, channel, start, end, bits=None, host=None, port=None,
verbose=False, connection=None, type=Nds2ChannelType.any()):
"""Fetch data from NDS into a `StateVector`.
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the name of the channel to read, or a `Channel` object.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS end time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
bits : `Bits`, `list`, optional
definition of bits for this `StateVector`
host : `str`, optional
URL of NDS server to use, defaults to observatory site host
port : `int`, optional
port number for NDS server query, must be given with `host`
verify : `bool`, optional, default: `True`
check channels exist in database before asking for data
connection : `nds2.connection`
open NDS connection to use
verbose : `bool`, optional
print verbose output about NDS progress
type : `int`, optional
NDS2 channel type integer
dtype : `type`, `numpy.dtype`, `str`, optional
identifier for desired output data type
"""
new = cls.DictClass.fetch(
[channel], start, end, host=host, port=port,
verbose=verbose, connection=connection)[channel]
if bits:
new.bits = bits
return new | python | def fetch(cls, channel, start, end, bits=None, host=None, port=None,
verbose=False, connection=None, type=Nds2ChannelType.any()):
"""Fetch data from NDS into a `StateVector`.
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the name of the channel to read, or a `Channel` object.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS end time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
bits : `Bits`, `list`, optional
definition of bits for this `StateVector`
host : `str`, optional
URL of NDS server to use, defaults to observatory site host
port : `int`, optional
port number for NDS server query, must be given with `host`
verify : `bool`, optional, default: `True`
check channels exist in database before asking for data
connection : `nds2.connection`
open NDS connection to use
verbose : `bool`, optional
print verbose output about NDS progress
type : `int`, optional
NDS2 channel type integer
dtype : `type`, `numpy.dtype`, `str`, optional
identifier for desired output data type
"""
new = cls.DictClass.fetch(
[channel], start, end, host=host, port=port,
verbose=verbose, connection=connection)[channel]
if bits:
new.bits = bits
return new | [
"def",
"fetch",
"(",
"cls",
",",
"channel",
",",
"start",
",",
"end",
",",
"bits",
"=",
"None",
",",
"host",
"=",
"None",
",",
"port",
"=",
"None",
",",
"verbose",
"=",
"False",
",",
"connection",
"=",
"None",
",",
"type",
"=",
"Nds2ChannelType",
"... | Fetch data from NDS into a `StateVector`.
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the name of the channel to read, or a `Channel` object.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS end time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
bits : `Bits`, `list`, optional
definition of bits for this `StateVector`
host : `str`, optional
URL of NDS server to use, defaults to observatory site host
port : `int`, optional
port number for NDS server query, must be given with `host`
verify : `bool`, optional, default: `True`
check channels exist in database before asking for data
connection : `nds2.connection`
open NDS connection to use
verbose : `bool`, optional
print verbose output about NDS progress
type : `int`, optional
NDS2 channel type integer
dtype : `type`, `numpy.dtype`, `str`, optional
identifier for desired output data type | [
"Fetch",
"data",
"from",
"NDS",
"into",
"a",
"StateVector",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/statevector.py#L729-L775 | train | 211,202 |
gwpy/gwpy | gwpy/timeseries/statevector.py | StateVector.plot | def plot(self, format='segments', bits=None, **kwargs):
"""Plot the data for this `StateVector`
Parameters
----------
format : `str`, optional, default: ``'segments'``
The type of plot to make, either 'segments' to plot the
SegmentList for each bit, or 'timeseries' to plot the raw
data for this `StateVector`
bits : `list`, optional
A list of bit indices or bit names, defaults to
`~StateVector.bits`. This argument is ignored if ``format`` is
not ``'segments'``
**kwargs
Other keyword arguments to be passed to either
`~gwpy.plot.SegmentAxes.plot` or
`~gwpy.plot.Axes.plot`, depending
on ``format``.
Returns
-------
plot : `~gwpy.plot.Plot`
output plot object
See Also
--------
matplotlib.pyplot.figure
for documentation of keyword arguments used to create the
figure
matplotlib.figure.Figure.add_subplot
for documentation of keyword arguments used to create the
axes
gwpy.plot.SegmentAxes.plot_flag
for documentation of keyword arguments used in rendering each
statevector flag.
"""
if format == 'timeseries':
return super(StateVector, self).plot(**kwargs)
if format == 'segments':
from ..plot import Plot
kwargs.setdefault('xscale', 'auto-gps')
return Plot(*self.to_dqflags(bits=bits).values(),
projection='segments', **kwargs)
raise ValueError("'format' argument must be one of: 'timeseries' or "
"'segments'") | python | def plot(self, format='segments', bits=None, **kwargs):
"""Plot the data for this `StateVector`
Parameters
----------
format : `str`, optional, default: ``'segments'``
The type of plot to make, either 'segments' to plot the
SegmentList for each bit, or 'timeseries' to plot the raw
data for this `StateVector`
bits : `list`, optional
A list of bit indices or bit names, defaults to
`~StateVector.bits`. This argument is ignored if ``format`` is
not ``'segments'``
**kwargs
Other keyword arguments to be passed to either
`~gwpy.plot.SegmentAxes.plot` or
`~gwpy.plot.Axes.plot`, depending
on ``format``.
Returns
-------
plot : `~gwpy.plot.Plot`
output plot object
See Also
--------
matplotlib.pyplot.figure
for documentation of keyword arguments used to create the
figure
matplotlib.figure.Figure.add_subplot
for documentation of keyword arguments used to create the
axes
gwpy.plot.SegmentAxes.plot_flag
for documentation of keyword arguments used in rendering each
statevector flag.
"""
if format == 'timeseries':
return super(StateVector, self).plot(**kwargs)
if format == 'segments':
from ..plot import Plot
kwargs.setdefault('xscale', 'auto-gps')
return Plot(*self.to_dqflags(bits=bits).values(),
projection='segments', **kwargs)
raise ValueError("'format' argument must be one of: 'timeseries' or "
"'segments'") | [
"def",
"plot",
"(",
"self",
",",
"format",
"=",
"'segments'",
",",
"bits",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"format",
"==",
"'timeseries'",
":",
"return",
"super",
"(",
"StateVector",
",",
"self",
")",
".",
"plot",
"(",
"*",
"*... | Plot the data for this `StateVector`
Parameters
----------
format : `str`, optional, default: ``'segments'``
The type of plot to make, either 'segments' to plot the
SegmentList for each bit, or 'timeseries' to plot the raw
data for this `StateVector`
bits : `list`, optional
A list of bit indices or bit names, defaults to
`~StateVector.bits`. This argument is ignored if ``format`` is
not ``'segments'``
**kwargs
Other keyword arguments to be passed to either
`~gwpy.plot.SegmentAxes.plot` or
`~gwpy.plot.Axes.plot`, depending
on ``format``.
Returns
-------
plot : `~gwpy.plot.Plot`
output plot object
See Also
--------
matplotlib.pyplot.figure
for documentation of keyword arguments used to create the
figure
matplotlib.figure.Figure.add_subplot
for documentation of keyword arguments used to create the
axes
gwpy.plot.SegmentAxes.plot_flag
for documentation of keyword arguments used in rendering each
statevector flag. | [
"Plot",
"the",
"data",
"for",
"this",
"StateVector"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/statevector.py#L829-L875 | train | 211,203 |
gwpy/gwpy | gwpy/timeseries/statevector.py | StateVector.resample | def resample(self, rate):
"""Resample this `StateVector` to a new rate
Because of the nature of a state-vector, downsampling is done
by taking the logical 'and' of all original samples in each new
sampling interval, while upsampling is achieved by repeating
samples.
Parameters
----------
rate : `float`
rate to which to resample this `StateVector`, must be a
divisor of the original sample rate (when downsampling)
or a multiple of the original (when upsampling).
Returns
-------
vector : `StateVector`
resampled version of the input `StateVector`
"""
rate1 = self.sample_rate.value
if isinstance(rate, units.Quantity):
rate2 = rate.value
else:
rate2 = float(rate)
# upsample
if (rate2 / rate1).is_integer():
raise NotImplementedError("StateVector upsampling has not "
"been implemented yet, sorry.")
# downsample
elif (rate1 / rate2).is_integer():
factor = int(rate1 / rate2)
# reshape incoming data to one column per new sample
newsize = int(self.size / factor)
old = self.value.reshape((newsize, self.size // newsize))
# work out number of bits
if self.bits:
nbits = len(self.bits)
else:
max_ = self.value.max()
nbits = int(ceil(log(max_, 2))) if max_ else 1
bits = range(nbits)
# construct an iterator over the columns of the old array
itr = numpy.nditer(
[old, None],
flags=['external_loop', 'reduce_ok'],
op_axes=[None, [0, -1]],
op_flags=[['readonly'], ['readwrite', 'allocate']])
dtype = self.dtype
type_ = self.dtype.type
# for each new sample, each bit is logical AND of old samples
# bit is ON,
for x, y in itr:
y[...] = numpy.sum([type_((x >> bit & 1).all() * (2 ** bit))
for bit in bits], dtype=self.dtype)
new = StateVector(itr.operands[1], dtype=dtype)
new.__metadata_finalize__(self)
new._unit = self.unit
new.sample_rate = rate2
return new
# error for non-integer resampling factors
elif rate1 < rate2:
raise ValueError("New sample rate must be multiple of input "
"series rate if upsampling a StateVector")
else:
raise ValueError("New sample rate must be divisor of input "
"series rate if downsampling a StateVector") | python | def resample(self, rate):
"""Resample this `StateVector` to a new rate
Because of the nature of a state-vector, downsampling is done
by taking the logical 'and' of all original samples in each new
sampling interval, while upsampling is achieved by repeating
samples.
Parameters
----------
rate : `float`
rate to which to resample this `StateVector`, must be a
divisor of the original sample rate (when downsampling)
or a multiple of the original (when upsampling).
Returns
-------
vector : `StateVector`
resampled version of the input `StateVector`
"""
rate1 = self.sample_rate.value
if isinstance(rate, units.Quantity):
rate2 = rate.value
else:
rate2 = float(rate)
# upsample
if (rate2 / rate1).is_integer():
raise NotImplementedError("StateVector upsampling has not "
"been implemented yet, sorry.")
# downsample
elif (rate1 / rate2).is_integer():
factor = int(rate1 / rate2)
# reshape incoming data to one column per new sample
newsize = int(self.size / factor)
old = self.value.reshape((newsize, self.size // newsize))
# work out number of bits
if self.bits:
nbits = len(self.bits)
else:
max_ = self.value.max()
nbits = int(ceil(log(max_, 2))) if max_ else 1
bits = range(nbits)
# construct an iterator over the columns of the old array
itr = numpy.nditer(
[old, None],
flags=['external_loop', 'reduce_ok'],
op_axes=[None, [0, -1]],
op_flags=[['readonly'], ['readwrite', 'allocate']])
dtype = self.dtype
type_ = self.dtype.type
# for each new sample, each bit is logical AND of old samples
# bit is ON,
for x, y in itr:
y[...] = numpy.sum([type_((x >> bit & 1).all() * (2 ** bit))
for bit in bits], dtype=self.dtype)
new = StateVector(itr.operands[1], dtype=dtype)
new.__metadata_finalize__(self)
new._unit = self.unit
new.sample_rate = rate2
return new
# error for non-integer resampling factors
elif rate1 < rate2:
raise ValueError("New sample rate must be multiple of input "
"series rate if upsampling a StateVector")
else:
raise ValueError("New sample rate must be divisor of input "
"series rate if downsampling a StateVector") | [
"def",
"resample",
"(",
"self",
",",
"rate",
")",
":",
"rate1",
"=",
"self",
".",
"sample_rate",
".",
"value",
"if",
"isinstance",
"(",
"rate",
",",
"units",
".",
"Quantity",
")",
":",
"rate2",
"=",
"rate",
".",
"value",
"else",
":",
"rate2",
"=",
... | Resample this `StateVector` to a new rate
Because of the nature of a state-vector, downsampling is done
by taking the logical 'and' of all original samples in each new
sampling interval, while upsampling is achieved by repeating
samples.
Parameters
----------
rate : `float`
rate to which to resample this `StateVector`, must be a
divisor of the original sample rate (when downsampling)
or a multiple of the original (when upsampling).
Returns
-------
vector : `StateVector`
resampled version of the input `StateVector` | [
"Resample",
"this",
"StateVector",
"to",
"a",
"new",
"rate"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/statevector.py#L877-L943 | train | 211,204 |
gwpy/gwpy | gwpy/segments/segments.py | SegmentList.read | def read(cls, source, format=None, coalesce=False, **kwargs):
# pylint: disable=redefined-builtin
"""Read segments from file into a `SegmentList`
Parameters
----------
filename : `str`
path of file to read
format : `str`, optional
source format identifier. If not given, the format will be
detected if possible. See below for list of acceptable
formats.
coalesce : `bool`, optional
if `True` coalesce the segment list before returning,
otherwise return exactly as contained in file(s).
**kwargs
other keyword arguments depend on the format, see the online
documentation for details (:ref:`gwpy-segments-io`)
Returns
-------
segmentlist : `SegmentList`
`SegmentList` active and known segments read from file.
Notes
-----"""
def combiner(listofseglists):
"""Combine `SegmentList` from each file into a single object
"""
out = cls(seg for seglist in listofseglists for seg in seglist)
if coalesce:
return out.coalesce()
return out
return io_read_multi(combiner, cls, source, format=format, **kwargs) | python | def read(cls, source, format=None, coalesce=False, **kwargs):
# pylint: disable=redefined-builtin
"""Read segments from file into a `SegmentList`
Parameters
----------
filename : `str`
path of file to read
format : `str`, optional
source format identifier. If not given, the format will be
detected if possible. See below for list of acceptable
formats.
coalesce : `bool`, optional
if `True` coalesce the segment list before returning,
otherwise return exactly as contained in file(s).
**kwargs
other keyword arguments depend on the format, see the online
documentation for details (:ref:`gwpy-segments-io`)
Returns
-------
segmentlist : `SegmentList`
`SegmentList` active and known segments read from file.
Notes
-----"""
def combiner(listofseglists):
"""Combine `SegmentList` from each file into a single object
"""
out = cls(seg for seglist in listofseglists for seg in seglist)
if coalesce:
return out.coalesce()
return out
return io_read_multi(combiner, cls, source, format=format, **kwargs) | [
"def",
"read",
"(",
"cls",
",",
"source",
",",
"format",
"=",
"None",
",",
"coalesce",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=redefined-builtin",
"def",
"combiner",
"(",
"listofseglists",
")",
":",
"\"\"\"Combine `SegmentList` from e... | Read segments from file into a `SegmentList`
Parameters
----------
filename : `str`
path of file to read
format : `str`, optional
source format identifier. If not given, the format will be
detected if possible. See below for list of acceptable
formats.
coalesce : `bool`, optional
if `True` coalesce the segment list before returning,
otherwise return exactly as contained in file(s).
**kwargs
other keyword arguments depend on the format, see the online
documentation for details (:ref:`gwpy-segments-io`)
Returns
-------
segmentlist : `SegmentList`
`SegmentList` active and known segments read from file.
Notes
----- | [
"Read",
"segments",
"from",
"file",
"into",
"a",
"SegmentList"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/segments.py#L150-L187 | train | 211,205 |
gwpy/gwpy | gwpy/segments/segments.py | SegmentList.write | def write(self, target, *args, **kwargs):
"""Write this `SegmentList` to a file
Arguments and keywords depend on the output format, see the
online documentation for full details for each format.
Parameters
----------
target : `str`
output filename
Notes
-----"""
return io_registry.write(self, target, *args, **kwargs) | python | def write(self, target, *args, **kwargs):
"""Write this `SegmentList` to a file
Arguments and keywords depend on the output format, see the
online documentation for full details for each format.
Parameters
----------
target : `str`
output filename
Notes
-----"""
return io_registry.write(self, target, *args, **kwargs) | [
"def",
"write",
"(",
"self",
",",
"target",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"io_registry",
".",
"write",
"(",
"self",
",",
"target",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Write this `SegmentList` to a file
Arguments and keywords depend on the output format, see the
online documentation for full details for each format.
Parameters
----------
target : `str`
output filename
Notes
----- | [
"Write",
"this",
"SegmentList",
"to",
"a",
"file"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/segments.py#L189-L202 | train | 211,206 |
gwpy/gwpy | gwpy/table/io/cwb.py | table_from_cwb | def table_from_cwb(source, *args, **kwargs):
"""Read an `EventTable` from a Coherent WaveBurst ROOT file
This function just redirects to the format='root' reader with appropriate
defaults.
"""
return EventTable.read(source, 'waveburst', *args, format='root', **kwargs) | python | def table_from_cwb(source, *args, **kwargs):
"""Read an `EventTable` from a Coherent WaveBurst ROOT file
This function just redirects to the format='root' reader with appropriate
defaults.
"""
return EventTable.read(source, 'waveburst', *args, format='root', **kwargs) | [
"def",
"table_from_cwb",
"(",
"source",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"EventTable",
".",
"read",
"(",
"source",
",",
"'waveburst'",
",",
"*",
"args",
",",
"format",
"=",
"'root'",
",",
"*",
"*",
"kwargs",
")"
] | Read an `EventTable` from a Coherent WaveBurst ROOT file
This function just redirects to the format='root' reader with appropriate
defaults. | [
"Read",
"an",
"EventTable",
"from",
"a",
"Coherent",
"WaveBurst",
"ROOT",
"file"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/cwb.py#L35-L41 | train | 211,207 |
gwpy/gwpy | gwpy/plot/plot.py | get_backend_mod | def get_backend_mod(name=None):
"""Returns the imported module for the given backend name
Parameters
----------
name : `str`, optional
the name of the backend, defaults to the current backend.
Returns
-------
backend_mod: `module`
the module as returned by :func:`importlib.import_module`
Examples
--------
>>> from gwpy.plot.plot import get_backend_mod
>>> print(get_backend_mod('agg'))
<module 'matplotlib.backends.backend_agg' from ... >
"""
if name is None:
name = get_backend()
backend_name = (name[9:] if name.startswith("module://") else
"matplotlib.backends.backend_{}".format(name.lower()))
return importlib.import_module(backend_name) | python | def get_backend_mod(name=None):
"""Returns the imported module for the given backend name
Parameters
----------
name : `str`, optional
the name of the backend, defaults to the current backend.
Returns
-------
backend_mod: `module`
the module as returned by :func:`importlib.import_module`
Examples
--------
>>> from gwpy.plot.plot import get_backend_mod
>>> print(get_backend_mod('agg'))
<module 'matplotlib.backends.backend_agg' from ... >
"""
if name is None:
name = get_backend()
backend_name = (name[9:] if name.startswith("module://") else
"matplotlib.backends.backend_{}".format(name.lower()))
return importlib.import_module(backend_name) | [
"def",
"get_backend_mod",
"(",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"get_backend",
"(",
")",
"backend_name",
"=",
"(",
"name",
"[",
"9",
":",
"]",
"if",
"name",
".",
"startswith",
"(",
"\"module://\"",
")",
"el... | Returns the imported module for the given backend name
Parameters
----------
name : `str`, optional
the name of the backend, defaults to the current backend.
Returns
-------
backend_mod: `module`
the module as returned by :func:`importlib.import_module`
Examples
--------
>>> from gwpy.plot.plot import get_backend_mod
>>> print(get_backend_mod('agg'))
<module 'matplotlib.backends.backend_agg' from ... > | [
"Returns",
"the",
"imported",
"module",
"for",
"the",
"given",
"backend",
"name"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/plot.py#L62-L85 | train | 211,208 |
gwpy/gwpy | gwpy/plot/plot.py | _group_axes_data | def _group_axes_data(inputs, separate=None, flat=False):
"""Determine the number of axes from the input args to this `Plot`
Parameters
----------
inputs : `list` of array-like data sets
A list of data arrays, or a list of lists of data sets
sep : `bool`, optional
Plot each set of data on a separate `Axes`
flat : `bool`, optional
Return a flattened list of data objects
Returns
-------
axesdata : `list` of lists of array-like data
A `list` with one element per required `Axes` containing the
array-like data sets for those `Axes`, unless ``flat=True``
is given.
Notes
-----
The logic for this method is as follows:
- if a `list` of data arrays are given, and `separate=False`, use 1 `Axes`
- if a `list` of data arrays are given, and `separate=True`, use N `Axes,
one for each data array
- if a nested `list` of data arrays are given, ignore `sep` and
use one `Axes` for each group of arrays.
Examples
--------
>>> from gwpy.plot import Plot
>>> Plot._group_axes_data([1, 2], separate=False)
[[1, 2]]
>>> Plot._group_axes_data([1, 2], separate=True)
[[1], [2]]
>>> Plot._group_axes_data([[1, 2], 3])
[[1, 2], [3]]
"""
# determine auto-separation
if separate is None and inputs:
# if given a nested list of data, multiple axes are required
if any(isinstance(x, iterable_types + (dict,)) for x in inputs):
separate = True
# if data are of different types, default to separate
elif not all(type(x) is type(inputs[0]) for x in inputs): # noqa: E721
separate = True
# build list of lists
out = []
for x in inputs:
if isinstance(x, dict): # unwrap dict
x = list(x.values())
# new group from iterable, notes:
# the iterable is presumed to be a list of independent data
# structures, unless its a list of scalars in which case we
# should plot them all as one
if (
isinstance(x, (KeysView, ValuesView)) or
isinstance(x, (list, tuple)) and (
not x or not numpy.isscalar(x[0]))
):
out.append(x)
# dataset starts a new group
elif separate or not out:
out.append([x])
# dataset joins current group
else: # append input to most recent group
out[-1].append(x)
if flat:
return [s for group in out for s in group]
return out | python | def _group_axes_data(inputs, separate=None, flat=False):
"""Determine the number of axes from the input args to this `Plot`
Parameters
----------
inputs : `list` of array-like data sets
A list of data arrays, or a list of lists of data sets
sep : `bool`, optional
Plot each set of data on a separate `Axes`
flat : `bool`, optional
Return a flattened list of data objects
Returns
-------
axesdata : `list` of lists of array-like data
A `list` with one element per required `Axes` containing the
array-like data sets for those `Axes`, unless ``flat=True``
is given.
Notes
-----
The logic for this method is as follows:
- if a `list` of data arrays are given, and `separate=False`, use 1 `Axes`
- if a `list` of data arrays are given, and `separate=True`, use N `Axes,
one for each data array
- if a nested `list` of data arrays are given, ignore `sep` and
use one `Axes` for each group of arrays.
Examples
--------
>>> from gwpy.plot import Plot
>>> Plot._group_axes_data([1, 2], separate=False)
[[1, 2]]
>>> Plot._group_axes_data([1, 2], separate=True)
[[1], [2]]
>>> Plot._group_axes_data([[1, 2], 3])
[[1, 2], [3]]
"""
# determine auto-separation
if separate is None and inputs:
# if given a nested list of data, multiple axes are required
if any(isinstance(x, iterable_types + (dict,)) for x in inputs):
separate = True
# if data are of different types, default to separate
elif not all(type(x) is type(inputs[0]) for x in inputs): # noqa: E721
separate = True
# build list of lists
out = []
for x in inputs:
if isinstance(x, dict): # unwrap dict
x = list(x.values())
# new group from iterable, notes:
# the iterable is presumed to be a list of independent data
# structures, unless its a list of scalars in which case we
# should plot them all as one
if (
isinstance(x, (KeysView, ValuesView)) or
isinstance(x, (list, tuple)) and (
not x or not numpy.isscalar(x[0]))
):
out.append(x)
# dataset starts a new group
elif separate or not out:
out.append([x])
# dataset joins current group
else: # append input to most recent group
out[-1].append(x)
if flat:
return [s for group in out for s in group]
return out | [
"def",
"_group_axes_data",
"(",
"inputs",
",",
"separate",
"=",
"None",
",",
"flat",
"=",
"False",
")",
":",
"# determine auto-separation",
"if",
"separate",
"is",
"None",
"and",
"inputs",
":",
"# if given a nested list of data, multiple axes are required",
"if",
"any... | Determine the number of axes from the input args to this `Plot`
Parameters
----------
inputs : `list` of array-like data sets
A list of data arrays, or a list of lists of data sets
sep : `bool`, optional
Plot each set of data on a separate `Axes`
flat : `bool`, optional
Return a flattened list of data objects
Returns
-------
axesdata : `list` of lists of array-like data
A `list` with one element per required `Axes` containing the
array-like data sets for those `Axes`, unless ``flat=True``
is given.
Notes
-----
The logic for this method is as follows:
- if a `list` of data arrays are given, and `separate=False`, use 1 `Axes`
- if a `list` of data arrays are given, and `separate=True`, use N `Axes,
one for each data array
- if a nested `list` of data arrays are given, ignore `sep` and
use one `Axes` for each group of arrays.
Examples
--------
>>> from gwpy.plot import Plot
>>> Plot._group_axes_data([1, 2], separate=False)
[[1, 2]]
>>> Plot._group_axes_data([1, 2], separate=True)
[[1], [2]]
>>> Plot._group_axes_data([[1, 2], 3])
[[1, 2], [3]] | [
"Determine",
"the",
"number",
"of",
"axes",
"from",
"the",
"input",
"args",
"to",
"this",
"Plot"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/plot.py#L527-L605 | train | 211,209 |
gwpy/gwpy | gwpy/plot/plot.py | Plot._init_axes | def _init_axes(self, data, method='plot',
xscale=None, sharex=False, sharey=False,
geometry=None, separate=None, **kwargs):
"""Populate this figure with data, creating `Axes` as necessary
"""
if isinstance(sharex, bool):
sharex = "all" if sharex else "none"
if isinstance(sharey, bool):
sharey = "all" if sharey else "none"
# parse keywords
axes_kw = {key: kwargs.pop(key) for key in utils.AXES_PARAMS if
key in kwargs}
# handle geometry and group axes
if geometry is not None and geometry[0] * geometry[1] == len(data):
separate = True
axes_groups = _group_axes_data(data, separate=separate)
if geometry is None:
geometry = (len(axes_groups), 1)
nrows, ncols = geometry
if axes_groups and nrows * ncols != len(axes_groups):
# mismatching data and geometry
raise ValueError("cannot group data into {0} axes with a "
"{1}x{2} grid".format(len(axes_groups), nrows,
ncols))
# create grid spec
gs = GridSpec(nrows, ncols)
axarr = numpy.empty((nrows, ncols), dtype=object)
# set default labels
defxlabel = 'xlabel' not in axes_kw
defylabel = 'ylabel' not in axes_kw
flatdata = [s for group in axes_groups for s in group]
for axis in ('x', 'y'):
unit = _common_axis_unit(flatdata, axis=axis)
if unit:
axes_kw.setdefault('{}label'.format(axis),
unit.to_string('latex_inline_dimensional'))
# create axes for each group and draw each data object
for group, (row, col) in zip_longest(
axes_groups, itertools.product(range(nrows), range(ncols)),
fillvalue=[]):
# create Axes
shared_with = {"none": None, "all": axarr[0, 0],
"row": axarr[row, 0], "col": axarr[0, col]}
axes_kw["sharex"] = shared_with[sharex]
axes_kw["sharey"] = shared_with[sharey]
axes_kw['xscale'] = xscale if xscale else _parse_xscale(group)
ax = axarr[row, col] = self.add_subplot(gs[row, col], **axes_kw)
# plot data
plot_func = getattr(ax, method)
if method in ('imshow', 'pcolormesh'):
for obj in group:
plot_func(obj, **kwargs)
elif group:
plot_func(*group, **kwargs)
# set default axis labels
for axis, share, pos, n, def_ in (
(ax.xaxis, sharex, row, nrows, defxlabel),
(ax.yaxis, sharey, col, ncols, defylabel),
):
# hide label if shared axis and not bottom left panel
if share == 'all' and pos < n - 1:
axis.set_label_text('')
# otherwise set default status
else:
axis.isDefault_label = def_
return self.axes | python | def _init_axes(self, data, method='plot',
xscale=None, sharex=False, sharey=False,
geometry=None, separate=None, **kwargs):
"""Populate this figure with data, creating `Axes` as necessary
"""
if isinstance(sharex, bool):
sharex = "all" if sharex else "none"
if isinstance(sharey, bool):
sharey = "all" if sharey else "none"
# parse keywords
axes_kw = {key: kwargs.pop(key) for key in utils.AXES_PARAMS if
key in kwargs}
# handle geometry and group axes
if geometry is not None and geometry[0] * geometry[1] == len(data):
separate = True
axes_groups = _group_axes_data(data, separate=separate)
if geometry is None:
geometry = (len(axes_groups), 1)
nrows, ncols = geometry
if axes_groups and nrows * ncols != len(axes_groups):
# mismatching data and geometry
raise ValueError("cannot group data into {0} axes with a "
"{1}x{2} grid".format(len(axes_groups), nrows,
ncols))
# create grid spec
gs = GridSpec(nrows, ncols)
axarr = numpy.empty((nrows, ncols), dtype=object)
# set default labels
defxlabel = 'xlabel' not in axes_kw
defylabel = 'ylabel' not in axes_kw
flatdata = [s for group in axes_groups for s in group]
for axis in ('x', 'y'):
unit = _common_axis_unit(flatdata, axis=axis)
if unit:
axes_kw.setdefault('{}label'.format(axis),
unit.to_string('latex_inline_dimensional'))
# create axes for each group and draw each data object
for group, (row, col) in zip_longest(
axes_groups, itertools.product(range(nrows), range(ncols)),
fillvalue=[]):
# create Axes
shared_with = {"none": None, "all": axarr[0, 0],
"row": axarr[row, 0], "col": axarr[0, col]}
axes_kw["sharex"] = shared_with[sharex]
axes_kw["sharey"] = shared_with[sharey]
axes_kw['xscale'] = xscale if xscale else _parse_xscale(group)
ax = axarr[row, col] = self.add_subplot(gs[row, col], **axes_kw)
# plot data
plot_func = getattr(ax, method)
if method in ('imshow', 'pcolormesh'):
for obj in group:
plot_func(obj, **kwargs)
elif group:
plot_func(*group, **kwargs)
# set default axis labels
for axis, share, pos, n, def_ in (
(ax.xaxis, sharex, row, nrows, defxlabel),
(ax.yaxis, sharey, col, ncols, defylabel),
):
# hide label if shared axis and not bottom left panel
if share == 'all' and pos < n - 1:
axis.set_label_text('')
# otherwise set default status
else:
axis.isDefault_label = def_
return self.axes | [
"def",
"_init_axes",
"(",
"self",
",",
"data",
",",
"method",
"=",
"'plot'",
",",
"xscale",
"=",
"None",
",",
"sharex",
"=",
"False",
",",
"sharey",
"=",
"False",
",",
"geometry",
"=",
"None",
",",
"separate",
"=",
"None",
",",
"*",
"*",
"kwargs",
... | Populate this figure with data, creating `Axes` as necessary | [
"Populate",
"this",
"figure",
"with",
"data",
"creating",
"Axes",
"as",
"necessary"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/plot.py#L143-L216 | train | 211,210 |
gwpy/gwpy | gwpy/plot/plot.py | Plot.refresh | def refresh(self):
"""Refresh the current figure
"""
for cbar in self.colorbars:
cbar.draw_all()
self.canvas.draw() | python | def refresh(self):
"""Refresh the current figure
"""
for cbar in self.colorbars:
cbar.draw_all()
self.canvas.draw() | [
"def",
"refresh",
"(",
"self",
")",
":",
"for",
"cbar",
"in",
"self",
".",
"colorbars",
":",
"cbar",
".",
"draw_all",
"(",
")",
"self",
".",
"canvas",
".",
"draw",
"(",
")"
] | Refresh the current figure | [
"Refresh",
"the",
"current",
"figure"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/plot.py#L233-L238 | train | 211,211 |
gwpy/gwpy | gwpy/plot/plot.py | Plot.close | def close(self):
"""Close the plot and release its memory.
"""
from matplotlib.pyplot import close
for ax in self.axes[::-1]:
# avoid matplotlib/matplotlib#9970
ax.set_xscale('linear')
ax.set_yscale('linear')
# clear the axes
ax.cla()
# close the figure
close(self) | python | def close(self):
"""Close the plot and release its memory.
"""
from matplotlib.pyplot import close
for ax in self.axes[::-1]:
# avoid matplotlib/matplotlib#9970
ax.set_xscale('linear')
ax.set_yscale('linear')
# clear the axes
ax.cla()
# close the figure
close(self) | [
"def",
"close",
"(",
"self",
")",
":",
"from",
"matplotlib",
".",
"pyplot",
"import",
"close",
"for",
"ax",
"in",
"self",
".",
"axes",
"[",
":",
":",
"-",
"1",
"]",
":",
"# avoid matplotlib/matplotlib#9970",
"ax",
".",
"set_xscale",
"(",
"'linear'",
")",... | Close the plot and release its memory. | [
"Close",
"the",
"plot",
"and",
"release",
"its",
"memory",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/plot.py#L303-L314 | train | 211,212 |
gwpy/gwpy | gwpy/plot/plot.py | Plot.get_axes | def get_axes(self, projection=None):
"""Find all `Axes`, optionally matching the given projection
Parameters
----------
projection : `str`
name of axes types to return
Returns
-------
axlist : `list` of `~matplotlib.axes.Axes`
"""
if projection is None:
return self.axes
return [ax for ax in self.axes if ax.name == projection.lower()] | python | def get_axes(self, projection=None):
"""Find all `Axes`, optionally matching the given projection
Parameters
----------
projection : `str`
name of axes types to return
Returns
-------
axlist : `list` of `~matplotlib.axes.Axes`
"""
if projection is None:
return self.axes
return [ax for ax in self.axes if ax.name == projection.lower()] | [
"def",
"get_axes",
"(",
"self",
",",
"projection",
"=",
"None",
")",
":",
"if",
"projection",
"is",
"None",
":",
"return",
"self",
".",
"axes",
"return",
"[",
"ax",
"for",
"ax",
"in",
"self",
".",
"axes",
"if",
"ax",
".",
"name",
"==",
"projection",
... | Find all `Axes`, optionally matching the given projection
Parameters
----------
projection : `str`
name of axes types to return
Returns
-------
axlist : `list` of `~matplotlib.axes.Axes` | [
"Find",
"all",
"Axes",
"optionally",
"matching",
"the",
"given",
"projection"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/plot.py#L318-L332 | train | 211,213 |
gwpy/gwpy | gwpy/plot/plot.py | Plot.colorbar | def colorbar(self, mappable=None, cax=None, ax=None, fraction=0.,
label=None, emit=True, **kwargs):
"""Add a colorbar to the current `Plot`
A colorbar must be associated with an `Axes` on this `Plot`,
and an existing mappable element (e.g. an image).
Parameters
----------
mappable : matplotlib data collection
Collection against which to map the colouring
cax : `~matplotlib.axes.Axes`
Axes on which to draw colorbar
ax : `~matplotlib.axes.Axes`
Axes relative to which to position colorbar
fraction : `float`, optional
Fraction of original axes to use for colorbar, give `fraction=0`
to not resize the original axes at all.
emit : `bool`, optional
If `True` update all mappables on `Axes` to match the same
colouring as the colorbar.
**kwargs
other keyword arguments to be passed to the
:meth:`~matplotlib.figure.Figure.colorbar`
Returns
-------
cbar : `~matplotlib.colorbar.Colorbar`
the newly added `Colorbar`
See Also
--------
matplotlib.figure.Figure.colorbar
matplotlib.colorbar.Colorbar
Examples
--------
>>> import numpy
>>> from gwpy.plot import Plot
To plot a simple image and add a colorbar:
>>> plot = Plot()
>>> ax = plot.gca()
>>> ax.imshow(numpy.random.randn(120).reshape((10, 12)))
>>> plot.colorbar(label='Value')
>>> plot.show()
Colorbars can also be generated by directly referencing the parent
axes:
>>> Plot = Plot()
>>> ax = plot.gca()
>>> ax.imshow(numpy.random.randn(120).reshape((10, 12)))
>>> ax.colorbar(label='Value')
>>> plot.show()
"""
# pre-process kwargs
mappable, kwargs = gcbar.process_colorbar_kwargs(
self, mappable, ax, cax=cax, fraction=fraction, **kwargs)
# generate colour bar
cbar = super(Plot, self).colorbar(mappable, **kwargs)
self.colorbars.append(cbar)
if label: # mpl<1.3 doesn't accept label in Colorbar constructor
cbar.set_label(label)
# update mappables for this axis
if emit:
ax = kwargs.pop('ax')
norm = mappable.norm
cmap = mappable.get_cmap()
for map_ in ax.collections + ax.images:
map_.set_norm(norm)
map_.set_cmap(cmap)
return cbar | python | def colorbar(self, mappable=None, cax=None, ax=None, fraction=0.,
label=None, emit=True, **kwargs):
"""Add a colorbar to the current `Plot`
A colorbar must be associated with an `Axes` on this `Plot`,
and an existing mappable element (e.g. an image).
Parameters
----------
mappable : matplotlib data collection
Collection against which to map the colouring
cax : `~matplotlib.axes.Axes`
Axes on which to draw colorbar
ax : `~matplotlib.axes.Axes`
Axes relative to which to position colorbar
fraction : `float`, optional
Fraction of original axes to use for colorbar, give `fraction=0`
to not resize the original axes at all.
emit : `bool`, optional
If `True` update all mappables on `Axes` to match the same
colouring as the colorbar.
**kwargs
other keyword arguments to be passed to the
:meth:`~matplotlib.figure.Figure.colorbar`
Returns
-------
cbar : `~matplotlib.colorbar.Colorbar`
the newly added `Colorbar`
See Also
--------
matplotlib.figure.Figure.colorbar
matplotlib.colorbar.Colorbar
Examples
--------
>>> import numpy
>>> from gwpy.plot import Plot
To plot a simple image and add a colorbar:
>>> plot = Plot()
>>> ax = plot.gca()
>>> ax.imshow(numpy.random.randn(120).reshape((10, 12)))
>>> plot.colorbar(label='Value')
>>> plot.show()
Colorbars can also be generated by directly referencing the parent
axes:
>>> Plot = Plot()
>>> ax = plot.gca()
>>> ax.imshow(numpy.random.randn(120).reshape((10, 12)))
>>> ax.colorbar(label='Value')
>>> plot.show()
"""
# pre-process kwargs
mappable, kwargs = gcbar.process_colorbar_kwargs(
self, mappable, ax, cax=cax, fraction=fraction, **kwargs)
# generate colour bar
cbar = super(Plot, self).colorbar(mappable, **kwargs)
self.colorbars.append(cbar)
if label: # mpl<1.3 doesn't accept label in Colorbar constructor
cbar.set_label(label)
# update mappables for this axis
if emit:
ax = kwargs.pop('ax')
norm = mappable.norm
cmap = mappable.get_cmap()
for map_ in ax.collections + ax.images:
map_.set_norm(norm)
map_.set_cmap(cmap)
return cbar | [
"def",
"colorbar",
"(",
"self",
",",
"mappable",
"=",
"None",
",",
"cax",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"fraction",
"=",
"0.",
",",
"label",
"=",
"None",
",",
"emit",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"# pre-process kwargs... | Add a colorbar to the current `Plot`
A colorbar must be associated with an `Axes` on this `Plot`,
and an existing mappable element (e.g. an image).
Parameters
----------
mappable : matplotlib data collection
Collection against which to map the colouring
cax : `~matplotlib.axes.Axes`
Axes on which to draw colorbar
ax : `~matplotlib.axes.Axes`
Axes relative to which to position colorbar
fraction : `float`, optional
Fraction of original axes to use for colorbar, give `fraction=0`
to not resize the original axes at all.
emit : `bool`, optional
If `True` update all mappables on `Axes` to match the same
colouring as the colorbar.
**kwargs
other keyword arguments to be passed to the
:meth:`~matplotlib.figure.Figure.colorbar`
Returns
-------
cbar : `~matplotlib.colorbar.Colorbar`
the newly added `Colorbar`
See Also
--------
matplotlib.figure.Figure.colorbar
matplotlib.colorbar.Colorbar
Examples
--------
>>> import numpy
>>> from gwpy.plot import Plot
To plot a simple image and add a colorbar:
>>> plot = Plot()
>>> ax = plot.gca()
>>> ax.imshow(numpy.random.randn(120).reshape((10, 12)))
>>> plot.colorbar(label='Value')
>>> plot.show()
Colorbars can also be generated by directly referencing the parent
axes:
>>> Plot = Plot()
>>> ax = plot.gca()
>>> ax.imshow(numpy.random.randn(120).reshape((10, 12)))
>>> ax.colorbar(label='Value')
>>> plot.show() | [
"Add",
"a",
"colorbar",
"to",
"the",
"current",
"Plot"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/plot.py#L336-L417 | train | 211,214 |
gwpy/gwpy | gwpy/plot/plot.py | Plot.add_colorbar | def add_colorbar(self, *args, **kwargs):
"""DEPRECATED, use `Plot.colorbar` instead
"""
warnings.warn(
"{0}.add_colorbar was renamed {0}.colorbar, this warnings will "
"result in an error in the future".format(type(self).__name__),
DeprecationWarning)
return self.colorbar(*args, **kwargs) | python | def add_colorbar(self, *args, **kwargs):
"""DEPRECATED, use `Plot.colorbar` instead
"""
warnings.warn(
"{0}.add_colorbar was renamed {0}.colorbar, this warnings will "
"result in an error in the future".format(type(self).__name__),
DeprecationWarning)
return self.colorbar(*args, **kwargs) | [
"def",
"add_colorbar",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"\"{0}.add_colorbar was renamed {0}.colorbar, this warnings will \"",
"\"result in an error in the future\"",
".",
"format",
"(",
"type",
"(",
"self"... | DEPRECATED, use `Plot.colorbar` instead | [
"DEPRECATED",
"use",
"Plot",
".",
"colorbar",
"instead"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/plot.py#L419-L426 | train | 211,215 |
gwpy/gwpy | gwpy/plot/plot.py | Plot.add_segments_bar | def add_segments_bar(self, segments, ax=None, height=0.14, pad=0.1,
sharex=True, location='bottom', **plotargs):
"""Add a segment bar `Plot` indicating state information.
By default, segments are displayed in a thin horizontal set of Axes
sitting immediately below the x-axis of the main,
similarly to a colorbar.
Parameters
----------
segments : `~gwpy.segments.DataQualityFlag`
A data-quality flag, or `SegmentList` denoting state segments
about this Plot
ax : `Axes`, optional
Specific `Axes` relative to which to position new `Axes`,
defaults to :func:`~matplotlib.pyplot.gca()`
height : `float, `optional
Height of the new axes, as a fraction of the anchor axes
pad : `float`, optional
Padding between the new axes and the anchor, as a fraction of
the anchor axes dimension
sharex : `True`, `~matplotlib.axes.Axes`, optional
Either `True` to set ``sharex=ax`` for the new segment axes,
or an `Axes` to use directly
location : `str`, optional
Location for new segment axes, defaults to ``'bottom'``,
acceptable values are ``'top'`` or ``'bottom'``.
**plotargs
extra keyword arguments are passed to
:meth:`~gwpy.plot.SegmentAxes.plot`
"""
# get axes to anchor against
if not ax:
ax = self.gca()
# set options for new axes
axes_kw = {
'pad': pad,
'add_to_figure': True,
'sharex': ax if sharex is True else sharex or None,
'axes_class': get_projection_class('segments'),
}
# map X-axis limit from old axes
if axes_kw['sharex'] is ax and not ax.get_autoscalex_on():
axes_kw['xlim'] = ax.get_xlim()
# if axes uses GPS scaling, copy the epoch as well
try:
axes_kw['epoch'] = ax.get_epoch()
except AttributeError:
pass
# add new axes
if ax.get_axes_locator():
divider = ax.get_axes_locator()._axes_divider
else:
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax)
if location not in {'top', 'bottom'}:
raise ValueError("Segments can only be positoned at 'top' or "
"'bottom'.")
segax = divider.append_axes(location, height, **axes_kw)
# update anchor axes
if axes_kw['sharex'] is ax and location == 'bottom':
# map label
segax.set_xlabel(ax.get_xlabel())
segax.xaxis.isDefault_label = ax.xaxis.isDefault_label
ax.set_xlabel("")
# hide ticks on original axes
setp(ax.get_xticklabels(), visible=False)
# plot segments
segax.plot(segments, **plotargs)
segax.grid(b=False, which='both', axis='y')
segax.autoscale(axis='y', tight=True)
return segax | python | def add_segments_bar(self, segments, ax=None, height=0.14, pad=0.1,
sharex=True, location='bottom', **plotargs):
"""Add a segment bar `Plot` indicating state information.
By default, segments are displayed in a thin horizontal set of Axes
sitting immediately below the x-axis of the main,
similarly to a colorbar.
Parameters
----------
segments : `~gwpy.segments.DataQualityFlag`
A data-quality flag, or `SegmentList` denoting state segments
about this Plot
ax : `Axes`, optional
Specific `Axes` relative to which to position new `Axes`,
defaults to :func:`~matplotlib.pyplot.gca()`
height : `float, `optional
Height of the new axes, as a fraction of the anchor axes
pad : `float`, optional
Padding between the new axes and the anchor, as a fraction of
the anchor axes dimension
sharex : `True`, `~matplotlib.axes.Axes`, optional
Either `True` to set ``sharex=ax`` for the new segment axes,
or an `Axes` to use directly
location : `str`, optional
Location for new segment axes, defaults to ``'bottom'``,
acceptable values are ``'top'`` or ``'bottom'``.
**plotargs
extra keyword arguments are passed to
:meth:`~gwpy.plot.SegmentAxes.plot`
"""
# get axes to anchor against
if not ax:
ax = self.gca()
# set options for new axes
axes_kw = {
'pad': pad,
'add_to_figure': True,
'sharex': ax if sharex is True else sharex or None,
'axes_class': get_projection_class('segments'),
}
# map X-axis limit from old axes
if axes_kw['sharex'] is ax and not ax.get_autoscalex_on():
axes_kw['xlim'] = ax.get_xlim()
# if axes uses GPS scaling, copy the epoch as well
try:
axes_kw['epoch'] = ax.get_epoch()
except AttributeError:
pass
# add new axes
if ax.get_axes_locator():
divider = ax.get_axes_locator()._axes_divider
else:
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax)
if location not in {'top', 'bottom'}:
raise ValueError("Segments can only be positoned at 'top' or "
"'bottom'.")
segax = divider.append_axes(location, height, **axes_kw)
# update anchor axes
if axes_kw['sharex'] is ax and location == 'bottom':
# map label
segax.set_xlabel(ax.get_xlabel())
segax.xaxis.isDefault_label = ax.xaxis.isDefault_label
ax.set_xlabel("")
# hide ticks on original axes
setp(ax.get_xticklabels(), visible=False)
# plot segments
segax.plot(segments, **plotargs)
segax.grid(b=False, which='both', axis='y')
segax.autoscale(axis='y', tight=True)
return segax | [
"def",
"add_segments_bar",
"(",
"self",
",",
"segments",
",",
"ax",
"=",
"None",
",",
"height",
"=",
"0.14",
",",
"pad",
"=",
"0.1",
",",
"sharex",
"=",
"True",
",",
"location",
"=",
"'bottom'",
",",
"*",
"*",
"plotargs",
")",
":",
"# get axes to ancho... | Add a segment bar `Plot` indicating state information.
By default, segments are displayed in a thin horizontal set of Axes
sitting immediately below the x-axis of the main,
similarly to a colorbar.
Parameters
----------
segments : `~gwpy.segments.DataQualityFlag`
A data-quality flag, or `SegmentList` denoting state segments
about this Plot
ax : `Axes`, optional
Specific `Axes` relative to which to position new `Axes`,
defaults to :func:`~matplotlib.pyplot.gca()`
height : `float, `optional
Height of the new axes, as a fraction of the anchor axes
pad : `float`, optional
Padding between the new axes and the anchor, as a fraction of
the anchor axes dimension
sharex : `True`, `~matplotlib.axes.Axes`, optional
Either `True` to set ``sharex=ax`` for the new segment axes,
or an `Axes` to use directly
location : `str`, optional
Location for new segment axes, defaults to ``'bottom'``,
acceptable values are ``'top'`` or ``'bottom'``.
**plotargs
extra keyword arguments are passed to
:meth:`~gwpy.plot.SegmentAxes.plot` | [
"Add",
"a",
"segment",
"bar",
"Plot",
"indicating",
"state",
"information",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/plot.py#L430-L514 | train | 211,216 |
gwpy/gwpy | gwpy/table/io/hacr.py | get_hacr_channels | def get_hacr_channels(db=None, gps=None, connection=None,
**conectkwargs):
"""Return the names of all channels present in the given HACR database
"""
# connect if needed
if connection is None:
if gps is None:
gps = from_gps('now')
if db is None:
db = get_database_names(gps, gps)[0]
connection = connect(db=db, **conectkwargs)
# query
out = query("select channel from job where monitorName = 'chacr'")
return [r[0] for r in out] | python | def get_hacr_channels(db=None, gps=None, connection=None,
**conectkwargs):
"""Return the names of all channels present in the given HACR database
"""
# connect if needed
if connection is None:
if gps is None:
gps = from_gps('now')
if db is None:
db = get_database_names(gps, gps)[0]
connection = connect(db=db, **conectkwargs)
# query
out = query("select channel from job where monitorName = 'chacr'")
return [r[0] for r in out] | [
"def",
"get_hacr_channels",
"(",
"db",
"=",
"None",
",",
"gps",
"=",
"None",
",",
"connection",
"=",
"None",
",",
"*",
"*",
"conectkwargs",
")",
":",
"# connect if needed",
"if",
"connection",
"is",
"None",
":",
"if",
"gps",
"is",
"None",
":",
"gps",
"... | Return the names of all channels present in the given HACR database | [
"Return",
"the",
"names",
"of",
"all",
"channels",
"present",
"in",
"the",
"given",
"HACR",
"database"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/hacr.py#L75-L88 | train | 211,217 |
gwpy/gwpy | gwpy/table/io/hacr.py | get_hacr_triggers | def get_hacr_triggers(channel, start, end, columns=HACR_COLUMNS, pid=None,
monitor='chacr', selection=None, **connectkwargs):
"""Fetch a table of HACR triggers in the given interval
"""
if columns is None:
columns = HACR_COLUMNS
columns = list(columns)
span = Segment(*map(to_gps, (start, end)))
# parse selection for SQL query (removing leading 'where ')
selectionstr = 'and %s' % format_db_selection(selection, engine=None)[6:]
# get database names and loop over each on
databases = get_database_names(start, end)
rows = []
for db in databases:
conn = connect(db, **connectkwargs)
cursor = conn.cursor()
# find process ID(s) for this channel
pids = query("select process_id, gps_start, gps_stop "
"from job where monitorName = %r and channel = %r"
% (monitor, str(channel)), connection=conn)
for p, s, e in pids:
# validate this process id
if pid is not None and int(p) != int(pid):
continue
tspan = Segment(float(s), float(e))
if not tspan.intersects(span):
continue
# execute trigger query
q = ('select %s from mhacr where process_id = %d and '
'gps_start > %s and gps_start < %d %s order by gps_start asc'
% (', '.join(columns), int(p), span[0], span[1],
selectionstr))
n = cursor.execute(q)
if n == 0:
continue
# get new events, convert to recarray, and append to table
rows.extend(cursor.fetchall())
return EventTable(rows=rows, names=columns) | python | def get_hacr_triggers(channel, start, end, columns=HACR_COLUMNS, pid=None,
monitor='chacr', selection=None, **connectkwargs):
"""Fetch a table of HACR triggers in the given interval
"""
if columns is None:
columns = HACR_COLUMNS
columns = list(columns)
span = Segment(*map(to_gps, (start, end)))
# parse selection for SQL query (removing leading 'where ')
selectionstr = 'and %s' % format_db_selection(selection, engine=None)[6:]
# get database names and loop over each on
databases = get_database_names(start, end)
rows = []
for db in databases:
conn = connect(db, **connectkwargs)
cursor = conn.cursor()
# find process ID(s) for this channel
pids = query("select process_id, gps_start, gps_stop "
"from job where monitorName = %r and channel = %r"
% (monitor, str(channel)), connection=conn)
for p, s, e in pids:
# validate this process id
if pid is not None and int(p) != int(pid):
continue
tspan = Segment(float(s), float(e))
if not tspan.intersects(span):
continue
# execute trigger query
q = ('select %s from mhacr where process_id = %d and '
'gps_start > %s and gps_start < %d %s order by gps_start asc'
% (', '.join(columns), int(p), span[0], span[1],
selectionstr))
n = cursor.execute(q)
if n == 0:
continue
# get new events, convert to recarray, and append to table
rows.extend(cursor.fetchall())
return EventTable(rows=rows, names=columns) | [
"def",
"get_hacr_triggers",
"(",
"channel",
",",
"start",
",",
"end",
",",
"columns",
"=",
"HACR_COLUMNS",
",",
"pid",
"=",
"None",
",",
"monitor",
"=",
"'chacr'",
",",
"selection",
"=",
"None",
",",
"*",
"*",
"connectkwargs",
")",
":",
"if",
"columns",
... | Fetch a table of HACR triggers in the given interval | [
"Fetch",
"a",
"table",
"of",
"HACR",
"triggers",
"in",
"the",
"given",
"interval"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/hacr.py#L91-L130 | train | 211,218 |
def connect(db, host=HACR_DATABASE_SERVER, user=HACR_DATABASE_USER,
            passwd=HACR_DATABASE_PASSWD):
    """Open a connection to the given HACR SQL database.

    Parameters
    ----------
    db : `str`
        name of the database to connect to

    host, user, passwd : `str`, optional
        connection credentials, defaulting to the module-level
        HACR database settings

    Returns
    -------
    connection : `pymysql.Connection`
        a new open connection to the requested database
    """
    # pymysql is an optional dependency: import lazily and re-raise
    # with a more helpful message when it is missing
    try:
        import pymysql
    except ImportError as exc:
        exc.args = ('pymysql is required to fetch HACR triggers',)
        raise
    return pymysql.connect(host=host, user=user, passwd=passwd, db=db)
"def",
"connect",
"(",
"db",
",",
"host",
"=",
"HACR_DATABASE_SERVER",
",",
"user",
"=",
"HACR_DATABASE_USER",
",",
"passwd",
"=",
"HACR_DATABASE_PASSWD",
")",
":",
"try",
":",
"import",
"pymysql",
"except",
"ImportError",
"as",
"e",
":",
"e",
".",
"args",
... | Connect to the given SQL database | [
"Connect",
"to",
"the",
"given",
"SQL",
"database"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/hacr.py#L139-L148 | train | 211,219 |
def query(querystr, connection=None, **connectkwargs):
    """Run an SQL query and return all resulting rows.

    Parameters
    ----------
    querystr : `str`
        the SQL statement to execute

    connection : optional
        an open database connection; if not given, a new one is
        opened using ``**connectkwargs`` (see :func:`connect`)

    Returns
    -------
    rows : `list` of `tuple`
        every row returned by the query
    """
    if connection is None:
        connection = connect(**connectkwargs)
    cur = connection.cursor()
    cur.execute(querystr)
    return cur.fetchall()
"def",
"query",
"(",
"querystr",
",",
"connection",
"=",
"None",
",",
"*",
"*",
"connectkwargs",
")",
":",
"if",
"connection",
"is",
"None",
":",
"connection",
"=",
"connect",
"(",
"*",
"*",
"connectkwargs",
")",
"cursor",
"=",
"connection",
".",
"cursor... | Execute a query of the given SQL database | [
"Execute",
"a",
"query",
"of",
"the",
"given",
"SQL",
"database"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/hacr.py#L151-L158 | train | 211,220 |
def add_filter(self, filter_, frequencies=None, dB=True,
               analog=False, sample_rate=None, **kwargs):
    """Add a linear time-invariant filter to this BodePlot

    Parameters
    ----------
    filter_ : `~scipy.signal.lti`, `tuple`
        the filter to plot, either as a `~scipy.signal.lti`, or a
        `tuple` with the following number and meaning of elements

        - 2: (numerator, denominator)
        - 3: (zeros, poles, gain)
        - 4: (A, B, C, D)

    frequencies : `numpy.ndarray`, optional
        list of frequencies (in Hertz) at which to plot

    dB : `bool`, optional
        if `True`, display magnitude in decibels, otherwise display
        amplitude, default: `True`

    analog : `bool`, optional
        if `True`, treat ``filter_`` as an analogue (continuous-time)
        filter, otherwise as digital (requires ``sample_rate``)

    sample_rate : `float`, `~astropy.units.Quantity`, optional
        sampling frequency (Hertz) of a digital filter

    **kwargs
        any other keyword arguments accepted by
        :meth:`~matplotlib.axes.Axes.plot`

    Returns
    -------
    mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>`
        the lines drawn for the magnitude and phase of the filter.
    """
    if not analog:
        if not sample_rate:
            raise ValueError("Must give sample_rate frequency to display "
                             "digital (analog=False) filter")
        sample_rate = Quantity(sample_rate, 'Hz').value
        # sample spacing handed to scipy's dlti
        dt = 2 * pi / sample_rate
        if not isinstance(frequencies, (type(None), int)):
            # copy first so the caller's array is left untouched,
            # then rescale into scipy's normalised units
            frequencies = numpy.atleast_1d(frequencies).copy()
            frequencies *= dt
    # parse filter (without digital conversions)
    fcomp = parse_filter(filter_, analog=False)[1]
    lti = signal.lti(*fcomp) if analog else signal.dlti(*fcomp, dt=dt)
    # evaluate the frequency response
    w, mag, phase = lti.bode(w=frequencies)
    if not dB:  # convert the dB magnitude back to a linear scale
        mag = 10 ** (mag / 10.)
    # draw onto the magnitude and phase axes
    mline = self.maxes.plot(w, mag, **kwargs)[0]
    pline = self.paxes.plot(w, phase, **kwargs)[0]
    return mline, pline
"def",
"add_filter",
"(",
"self",
",",
"filter_",
",",
"frequencies",
"=",
"None",
",",
"dB",
"=",
"True",
",",
"analog",
"=",
"False",
",",
"sample_rate",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"analog",
":",
"if",
"not",
"sa... | Add a linear time-invariant filter to this BodePlot
Parameters
----------
filter_ : `~scipy.signal.lti`, `tuple`
the filter to plot, either as a `~scipy.signal.lti`, or a
`tuple` with the following number and meaning of elements
- 2: (numerator, denominator)
- 3: (zeros, poles, gain)
- 4: (A, B, C, D)
frequencies : `numpy.ndarray`, optional
list of frequencies (in Hertz) at which to plot
dB : `bool`, optional
if `True`, display magnitude in decibels, otherwise display
amplitude, default: `True`
**kwargs
any other keyword arguments accepted by
:meth:`~matplotlib.axes.Axes.plot`
Returns
-------
mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>`
the lines drawn for the magnitude and phase of the filter. | [
"Add",
"a",
"linear",
"time",
"-",
"invariant",
"filter",
"to",
"this",
"BodePlot"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/bode.py#L169-L226 | train | 211,221 |
def add_frequencyseries(self, spectrum, dB=True, power=False, **kwargs):
    """Plot the magnitude and phase of a complex-valued `FrequencySeries`

    Parameters
    ----------
    spectrum : `~gwpy.frequencyseries.FrequencySeries`
        the (complex-valued) `FrequencySeries` to display

    dB : `bool`, optional, default: `True`
        if `True`, display magnitude in decibels, otherwise display
        amplitude.

    power : `bool`, optional, default: `False`
        give `True` to indicate that ``spectrum`` holds power values,
        so ``dB = 10 * log(abs(spectrum))``, otherwise
        ``dB = 20 * log(abs(spectrum))``. This argument is ignored if
        ``dB=False``.

    **kwargs
        any other keyword arguments accepted by
        :meth:`~matplotlib.axes.Axes.plot`

    Returns
    -------
    mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>`
        the lines drawn for the magnitude and phase of the filter.
    """
    kwargs.setdefault('label', spectrum.name)
    data = spectrum.value
    # magnitude, optionally converted to decibels
    mag = numpy.absolute(data)
    if dB:
        mag = to_db(mag)
        if not power:
            # amplitude dB is 20*log10, i.e. twice the power dB
            mag *= 2.
    # phase in degrees
    phase = numpy.angle(data, deg=True)
    # draw both components against frequency
    freqs = spectrum.frequencies.value
    mline = self.maxes.plot(freqs, mag, **kwargs)[0]
    pline = self.paxes.plot(freqs, phase, **kwargs)[0]
    return mline, pline
"def",
"add_frequencyseries",
"(",
"self",
",",
"spectrum",
",",
"dB",
"=",
"True",
",",
"power",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# parse spectrum arguments",
"kwargs",
".",
"setdefault",
"(",
"'label'",
",",
"spectrum",
".",
"name",
")",... | Plot the magnitude and phase of a complex-valued `FrequencySeries`
Parameters
----------
spectrum : `~gwpy.frequencyseries.FrequencySeries`
the (complex-valued) `FrequencySeries` to display
db : `bool`, optional, default: `True`
if `True`, display magnitude in decibels, otherwise display
amplitude.
power : `bool`, optional, default: `False`
give `True` to incidate that ``spectrum`` holds power values,
so ``dB = 10 * log(abs(spectrum))``, otherwise
``db = 20 * log(abs(spectrum))``. This argument is ignored if
``db=False``.
**kwargs
any other keyword arguments accepted by
:meth:`~matplotlib.axes.Axes.plot`
Returns
-------
mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>`
the lines drawn for the magnitude and phase of the filter. | [
"Plot",
"the",
"magnitude",
"and",
"phase",
"of",
"a",
"complex",
"-",
"valued",
"FrequencySeries"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/bode.py#L228-L269 | train | 211,222 |
def read_omega_scan_config(source):
    """Parse an Omega-scan configuration file into a `ChannelList`

    Parameters
    ----------
    source : `str`, `file`
        path of Omega configuration file to parse, or an open
        file-like object

    Returns
    -------
    channels : `ChannelList`
        the list of channels (in order) as parsed

    Raises
    ------
    RuntimeError
        if this method finds a line it cannot parse sensibly
    """
    out = ChannelList()
    # accept an open file object, or a path to open ourselves
    if isinstance(source, FILE_LIKE):
        fobj, close = source, False
    else:
        fobj, close = open(source, 'r'), True
    try:
        section = None
        for line in fobj:
            # skip blank lines and comments
            if line in ('', '\n') or line.startswith('#'):
                continue
            if line.startswith('['):
                # new section header: strip the surrounding '[...]\n'
                section = line[1:-2]
            elif line.startswith('{'):
                # channel block: the parser consumes lines from the same
                # iterator up to the closing '}'
                out.append(parse_omega_channel(fobj, section))
            else:
                raise RuntimeError("Failed to parse Omega config line:\n%s"
                                   % line)
    finally:
        if close:
            fobj.close()
    return out
"def",
"read_omega_scan_config",
"(",
"source",
")",
":",
"out",
"=",
"ChannelList",
"(",
")",
"append",
"=",
"out",
".",
"append",
"if",
"isinstance",
"(",
"source",
",",
"FILE_LIKE",
")",
":",
"close",
"=",
"False",
"else",
":",
"source",
"=",
"open",
... | Parse an Omega-scan configuration file into a `ChannelList`
Parameters
----------
source : `str`
path of Omega configuration file to parse
Returns
-------
channels : `ChannelList`
the list of channels (in order) as parsed
Raises
------
RuntimeError
if this method finds a line it cannot parse sensibly | [
"Parse",
"an",
"Omega",
"-",
"scan",
"configuration",
"file",
"into",
"a",
"ChannelList"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/io/omega.py#L41-L85 | train | 211,223 |
def parse_omega_channel(fobj, section=None):
    """Parse a `Channel` from an Omega-scan configuration file

    Parameters
    ----------
    fobj : `file`
        the open file-like object to parse, positioned just after the
        opening ``{`` of a channel block

    section : `str`
        name of section in which this channel should be recorded

    Returns
    -------
    channel : `Channel`
        the channel as parsed from this `file`
    """
    params = OrderedDict()
    # read 'key: value' pairs until the closing brace
    while True:
        line = next(fobj)
        if line == '}\n':
            break
        key, value = line.split(':', 1)
        # str.strip() already removes whitespace from both ends, so the
        # previous extra .rstrip() call was redundant
        params[key.strip()] = omega_param(value)
    out = Channel(params.get('channelName'),
                  sample_rate=params.get('sampleFrequency'),
                  frametype=params.get('frameType'),
                  frequency_range=params.get('searchFrequencyRange'))
    # record the config section and raw parameters for round-tripping
    out.group = section
    out.params = params
    return out
"def",
"parse_omega_channel",
"(",
"fobj",
",",
"section",
"=",
"None",
")",
":",
"params",
"=",
"OrderedDict",
"(",
")",
"while",
"True",
":",
"line",
"=",
"next",
"(",
"fobj",
")",
"if",
"line",
"==",
"'}\\n'",
":",
"break",
"key",
",",
"value",
"=... | Parse a `Channel` from an Omega-scan configuration file
Parameters
----------
fobj : `file`
the open file-like object to parse
section : `str`
name of section in which this channel should be recorded
Returns
-------
channel : `Channel`
the channel as parsed from this `file` | [
"Parse",
"a",
"Channel",
"from",
"an",
"Omega",
"-",
"scan",
"configuration",
"file"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/io/omega.py#L88-L116 | train | 211,224 |
def omega_param(val):
    """Parse a value from an Omega-scan configuration file

    This method tries to parse matlab-syntax parameters into a `str`,
    `float`, or `tuple`.

    Parameters
    ----------
    val : `str`
        the raw right-hand side of a ``key: value`` config entry

    Returns
    -------
    value : `str`, `float`, or `tuple` of `float`
        - a `str` if ``val`` is quoted (with ``'`` or ``"``)
        - a `tuple` of `float` if ``val`` is a matlab-style vector
          (e.g. ``[1 2 3]``)
        - a `float` otherwise
    """
    # str.strip() removes whitespace from both ends already, so the
    # previous trailing .rstrip() call was redundant
    val = val.strip()
    if val.startswith(('"', "'")):
        # quoted string: drop the surrounding quotes
        return str(val[1:-1])
    if val.startswith('['):
        # matlab-style vector: '[1 2 3]' -> (1.0, 2.0, 3.0)
        return tuple(map(float, val[1:-1].split()))
    # otherwise assume a simple number
    return float(val)
"def",
"omega_param",
"(",
"val",
")",
":",
"val",
"=",
"val",
".",
"strip",
"(",
")",
".",
"rstrip",
"(",
")",
"if",
"val",
".",
"startswith",
"(",
"(",
"'\"'",
",",
"\"'\"",
")",
")",
":",
"return",
"str",
"(",
"val",
"[",
"1",
":",
"-",
"1... | Parse a value from an Omega-scan configuration file
This method tries to parse matlab-syntax parameters into a `str`,
`float`, or `tuple` | [
"Parse",
"a",
"value",
"from",
"an",
"Omega",
"-",
"scan",
"configuration",
"file"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/io/omega.py#L119-L130 | train | 211,225 |
def write_omega_scan_config(channellist, fobj, header=True):
    """Write a `ChannelList` to an Omega-pipeline scan configuration file

    This method is dumb and assumes the channels are sorted in the right
    order already.

    Parameters
    ----------
    channellist : `ChannelList`
        the channels to write, already sorted into their output order

    fobj : `file`, `str`
        an open file object, or the path of a file to create

    header : `bool`, optional
        if `True` (default), write a descriptive comment header first
    """
    # accept an open file object, or a path to open ourselves
    if isinstance(fobj, FILE_LIKE):
        stream, close = fobj, False
    else:
        stream, close = open(fobj, 'w'), True
    try:
        if header:
            print('# Q Scan configuration file', file=stream)
            print('# Generated with GWpy from a ChannelList', file=stream)
        group = None
        for channel in channellist:
            # start a new section whenever the group changes
            if channel.group != group:
                group = channel.group
                print('\n[%s]' % group, file=stream)
            print("", file=stream)
            print_omega_channel(channel, file=stream)
    finally:
        if close:
            stream.close()
"def",
"write_omega_scan_config",
"(",
"channellist",
",",
"fobj",
",",
"header",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"fobj",
",",
"FILE_LIKE",
")",
":",
"close",
"=",
"False",
"else",
":",
"fobj",
"=",
"open",
"(",
"fobj",
",",
"'w'",
")",... | Write a `ChannelList` to an Omega-pipeline scan configuration file
This method is dumb and assumes the channels are sorted in the right
order already | [
"Write",
"a",
"ChannelList",
"to",
"an",
"Omega",
"-",
"pipeline",
"scan",
"configuration",
"file"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/io/omega.py#L135-L161 | train | 211,226 |
def print_omega_channel(channel, file=sys.stdout):
    """Print a `Channel` in Omega-pipeline scan format

    Parameters
    ----------
    channel : `Channel`
        the channel to serialise

    file : `file`, optional
        the open file object to write into, default: stdout

    Raises
    ------
    KeyError
        if either of the required ``channelName`` or ``frameType``
        parameters cannot be determined for this channel
    """
    print('{', file=file)
    # start from any parameters already recorded on the channel
    try:
        params = channel.params.copy()
    except AttributeError:
        params = OrderedDict()
    # fill in defaults from the channel's own metadata
    params.setdefault('channelName', str(channel))
    params.setdefault('alwaysPlotFlag', int(params.pop('important', False)))
    if channel.frametype:
        params.setdefault('frameType', channel.frametype)
    if channel.sample_rate is not None:
        params.setdefault('sampleFrequency',
                          channel.sample_rate.to('Hz').value)
    if channel.frequency_range is not None:
        low, high = channel.frequency_range.to('Hz').value
        params.setdefault('searchFrequencyRange', (low, high))
    if 'qlow' in params or 'qhigh' in params:
        qlow = params.pop('qlow', 'sqrt(11)')
        qhigh = params.pop('qhigh', 64)
        params.setdefault('searchQRange', (qlow, qhigh))
    # these two parameters are required by Omega
    for required in ('channelName', 'frameType'):
        if required not in params:
            raise KeyError("No %r defined for %s" % (required, str(channel)))
    # serialise each parameter in matlab-ish syntax
    for name, value in params.items():
        if isinstance(value, tuple):
            value = '[%s]' % ' '.join(map(str, value))
        elif isinstance(value, float) and value.is_integer():
            value = int(value)
        elif isinstance(value, str):
            value = repr(value)
        print(' {0: <30} {1}'.format('%s:' % str(name), value), file=file)
    print('}', file=file)
"def",
"print_omega_channel",
"(",
"channel",
",",
"file",
"=",
"sys",
".",
"stdout",
")",
":",
"print",
"(",
"'{'",
",",
"file",
"=",
"file",
")",
"try",
":",
"params",
"=",
"channel",
".",
"params",
".",
"copy",
"(",
")",
"except",
"AttributeError",
... | Print a `Channel` in Omega-pipeline scan format | [
"Print",
"a",
"Channel",
"in",
"Omega",
"-",
"pipeline",
"scan",
"format"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/io/omega.py#L165-L200 | train | 211,227 |
gwpy/gwpy | gwpy/io/nds2.py | _get_nds2_name | def _get_nds2_name(channel):
"""Returns the NDS2-formatted name for a channel
Understands how to format NDS name strings from
`gwpy.detector.Channel` and `nds2.channel` objects
"""
if hasattr(channel, 'ndsname'): # gwpy.detector.Channel
return channel.ndsname
if hasattr(channel, 'channel_type'): # nds2.channel
return '%s,%s' % (channel.name,
channel.channel_type_to_string(channel.channel_type))
return str(channel) | python | def _get_nds2_name(channel):
"""Returns the NDS2-formatted name for a channel
Understands how to format NDS name strings from
`gwpy.detector.Channel` and `nds2.channel` objects
"""
if hasattr(channel, 'ndsname'): # gwpy.detector.Channel
return channel.ndsname
if hasattr(channel, 'channel_type'): # nds2.channel
return '%s,%s' % (channel.name,
channel.channel_type_to_string(channel.channel_type))
return str(channel) | [
"def",
"_get_nds2_name",
"(",
"channel",
")",
":",
"if",
"hasattr",
"(",
"channel",
",",
"'ndsname'",
")",
":",
"# gwpy.detector.Channel",
"return",
"channel",
".",
"ndsname",
"if",
"hasattr",
"(",
"channel",
",",
"'channel_type'",
")",
":",
"# nds2.channel",
... | Returns the NDS2-formatted name for a channel
Understands how to format NDS name strings from
`gwpy.detector.Channel` and `nds2.channel` objects | [
"Returns",
"the",
"NDS2",
"-",
"formatted",
"name",
"for",
"a",
"channel"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/nds2.py#L180-L191 | train | 211,228 |
def parse_nds_env(env='NDSSERVER'):
    """Parse the NDSSERVER environment variable into a list of hosts

    Parameters
    ----------
    env : `str`, optional
        environment variable name to use for server order,
        default ``'NDSSERVER'``. The contents of this variable should
        be a comma-separated list of `host:port` strings, e.g.
        ``'nds1.server.com:80,nds2.server.com:80'``

    Returns
    -------
    hostiter : `list` of `tuple`
        a list of (unique) ``(str, int)`` tuples for each host:port
        pair; the port is `None` for entries with no ``:port`` suffix
    """
    hosts = []
    # default to '' so that an unset variable yields an empty list
    # instead of raising AttributeError on None.split(',')
    for host in os.getenv(env, '').split(','):
        if not host:  # skip empty entries (unset var, stray commas)
            continue
        try:
            host, port = host.rsplit(':', 1)
        except ValueError:  # no port given
            port = None
        else:
            port = int(port)
        if (host, port) not in hosts:  # preserve order, drop duplicates
            hosts.append((host, port))
    return hosts
"def",
"parse_nds_env",
"(",
"env",
"=",
"'NDSSERVER'",
")",
":",
"hosts",
"=",
"[",
"]",
"for",
"host",
"in",
"os",
".",
"getenv",
"(",
"env",
")",
".",
"split",
"(",
"','",
")",
":",
"try",
":",
"host",
",",
"port",
"=",
"host",
".",
"rsplit",
... | Parse the NDSSERVER environment variable into a list of hosts
Parameters
----------
env : `str`, optional
environment variable name to use for server order,
default ``'NDSSERVER'``. The contents of this variable should
be a comma-separated list of `host:port` strings, e.g.
``'nds1.server.com:80,nds2.server.com:80'``
Returns
-------
hostiter : `list` of `tuple`
a list of (unique) ``(str, int)`` tuples for each host:port
pair | [
"Parse",
"the",
"NDSSERVER",
"environment",
"variable",
"into",
"a",
"list",
"of",
"hosts"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/nds2.py#L202-L229 | train | 211,229 |
def connect(host, port=None):
    """Open an `nds2.connection` to a given host and port

    Parameters
    ----------
    host : `str`
        name of server with which to connect

    port : `int`, optional
        connection port

    Returns
    -------
    connection : `nds2.connection`
        a new open connection to the given NDS host
    """
    import nds2
    # NDS1 servers require an explicit port (8088 by convention)
    if port is None and NDS1_HOSTNAME.match(host):
        port = 8088
    args = (host,) if port is None else (host, port)
    return nds2.connection(*args)  # pylint: disable=no-member
"def",
"connect",
"(",
"host",
",",
"port",
"=",
"None",
")",
":",
"import",
"nds2",
"# pylint: disable=no-member",
"# set default port for NDS1 connections (required, I think)",
"if",
"port",
"is",
"None",
"and",
"NDS1_HOSTNAME",
".",
"match",
"(",
"host",
")",
":"... | Open an `nds2.connection` to a given host and port
Parameters
----------
host : `str`
name of server with which to connect
port : `int`, optional
connection port
Returns
-------
connection : `nds2.connection`
a new open connection to the given NDS host | [
"Open",
"an",
"nds2",
".",
"connection",
"to",
"a",
"given",
"host",
"and",
"port"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/nds2.py#L281-L306 | train | 211,230 |
def auth_connect(host, port=None):
    """Open an `nds2.connection` handling simple authentication errors

    This method will catch exceptions related to kerberos authentication,
    and execute a kinit() for the user before attempting to connect again.

    Parameters
    ----------
    host : `str`
        name of server with which to connect

    port : `int`, optional
        connection port

    Returns
    -------
    connection : `nds2.connection`
        a new open connection to the given NDS host
    """
    try:
        return connect(host, port)
    except RuntimeError as exc:
        # anything other than a SASL authentication failure is fatal
        if 'Request SASL authentication' not in str(exc):
            raise
    # refresh kerberos credentials, then retry once
    warnings.warn('Error authenticating against {0}:{1}'.format(host, port),
                  NDSWarning)
    kinit()
    return connect(host, port)
"def",
"auth_connect",
"(",
"host",
",",
"port",
"=",
"None",
")",
":",
"try",
":",
"return",
"connect",
"(",
"host",
",",
"port",
")",
"except",
"RuntimeError",
"as",
"exc",
":",
"if",
"'Request SASL authentication'",
"not",
"in",
"str",
"(",
"exc",
")"... | Open an `nds2.connection` handling simple authentication errors
This method will catch exceptions related to kerberos authentication,
and execute a kinit() for the user before attempting to connect again.
Parameters
----------
host : `str`
name of server with which to connect
port : `int`, optional
connection port
Returns
-------
connection : `nds2.connection`
a new open connection to the given NDS host | [
"Open",
"an",
"nds2",
".",
"connection",
"handling",
"simple",
"authentication",
"errors"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/nds2.py#L309-L336 | train | 211,231 |
def open_connection(func):
    """Decorate a function to create a `nds2.connection` if required

    If the wrapped function is called without a ``connection`` keyword,
    the ``host`` (and optional ``port``) keywords are consumed to open
    a new authenticated connection, which is then passed through.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):  # pylint: disable=missing-docstring
        if kwargs.get('connection') is None:
            try:
                host = kwargs.pop('host')
            except KeyError:
                raise TypeError("one of `connection` or `host` is required "
                                "to query NDS2 server")
            kwargs['connection'] = auth_connect(host, kwargs.pop('port', None))
        return func(*args, **kwargs)
    return wrapper
"def",
"open_connection",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapped_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=missing-docstring",
"if",
"kwargs",
".",
"get",
"(",
"'connection'",
",",
"None"... | Decorate a function to create a `nds2.connection` if required | [
"Decorate",
"a",
"function",
"to",
"create",
"a",
"nds2",
".",
"connection",
"if",
"required"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/nds2.py#L339-L352 | train | 211,232 |
def parse_nds2_enums(func):
    """Decorate a function to translate a type string into an integer

    The ``type`` and ``dtype`` keyword arguments of the wrapped function
    are normalised to integer enum values before the call: missing or
    `None` values become the 'any' mask, and non-integer values are
    looked up by name.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):  # pylint: disable=missing-docstring
        for name, enum_ in (('type', Nds2ChannelType),
                            ('dtype', Nds2DataType)):
            value = kwargs.get(name, None)
            if value is None:
                kwargs[name] = enum_.any()
            elif not isinstance(value, int):
                kwargs[name] = enum_.find(value).value
        return func(*args, **kwargs)
    return wrapper
"def",
"parse_nds2_enums",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapped_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=missing-docstring",
"for",
"kwd",
",",
"enum_",
"in",
"(",
"(",
"'type'",
","... | Decorate a function to translate a type string into an integer | [
"Decorate",
"a",
"function",
"to",
"translate",
"a",
"type",
"string",
"into",
"an",
"integer"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/nds2.py#L355-L367 | train | 211,233 |
gwpy/gwpy | gwpy/io/nds2.py | reset_epoch | def reset_epoch(func):
"""Wrap a function to reset the epoch when finished
This is useful for functions that wish to use `connection.set_epoch`.
"""
@wraps(func)
def wrapped_func(*args, **kwargs): # pylint: disable=missing-docstring
connection = kwargs.get('connection', None)
epoch = connection.current_epoch() if connection else None
try:
return func(*args, **kwargs)
finally:
if epoch is not None:
connection.set_epoch(epoch.gps_start, epoch.gps_stop)
return wrapped_func | python | def reset_epoch(func):
"""Wrap a function to reset the epoch when finished
This is useful for functions that wish to use `connection.set_epoch`.
"""
@wraps(func)
def wrapped_func(*args, **kwargs): # pylint: disable=missing-docstring
connection = kwargs.get('connection', None)
epoch = connection.current_epoch() if connection else None
try:
return func(*args, **kwargs)
finally:
if epoch is not None:
connection.set_epoch(epoch.gps_start, epoch.gps_stop)
return wrapped_func | [
"def",
"reset_epoch",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapped_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=missing-docstring",
"connection",
"=",
"kwargs",
".",
"get",
"(",
"'connection'",
",... | Wrap a function to reset the epoch when finished
This is useful for functions that wish to use `connection.set_epoch`. | [
"Wrap",
"a",
"function",
"to",
"reset",
"the",
"epoch",
"when",
"finished"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/nds2.py#L370-L384 | train | 211,234 |
gwpy/gwpy | gwpy/io/nds2.py | find_channels | def find_channels(channels, connection=None, host=None, port=None,
sample_rate=None, type=Nds2ChannelType.any(),
dtype=Nds2DataType.any(), unique=False, epoch='ALL'):
# pylint: disable=unused-argument,redefined-builtin
"""Query an NDS2 server for channel information
Parameters
----------
channels : `list` of `str`
list of channel names to query, each can include bash-style globs
connection : `nds2.connection`, optional
open NDS2 connection to use for query
host : `str`, optional
name of NDS2 server to query, required if ``connection`` is not
given
port : `int`, optional
port number on host to use for NDS2 connection
sample_rate : `int`, `float`, `tuple`, optional
a single number, representing a specific sample rate to match,
or a tuple representing a ``(low, high)` interval to match
type : `int`, optional
the NDS2 channel type to match
dtype : `int`, optional
the NDS2 data type to match
unique : `bool`, optional, default: `False`
require one (and only one) match per channel
epoch : `str`, `tuple` of `int`, optional
the NDS epoch to restrict to, either the name of a known epoch,
or a 2-tuple of GPS ``[start, stop)`` times
Returns
-------
channels : `list` of `nds2.channel`
list of NDS2 channel objects
See also
--------
nds2.connection.find_channels
for documentation on the underlying query method
Examples
--------
>>> from gwpy.io.nds2 import find_channels
>>> find_channels(['G1:DER_DATA_H'], host='nds.ligo.caltech.edu')
[<G1:DER_DATA_H (16384Hz, RDS, FLOAT64)>]
"""
# set epoch
if not isinstance(epoch, tuple):
epoch = (epoch or 'All',)
connection.set_epoch(*epoch)
# format sample_rate as tuple for find_channels call
if isinstance(sample_rate, (int, float)):
sample_rate = (sample_rate, sample_rate)
elif sample_rate is None:
sample_rate = tuple()
# query for channels
out = []
for name in _get_nds2_names(channels):
out.extend(_find_channel(connection, name, type, dtype, sample_rate,
unique=unique))
return out | python | def find_channels(channels, connection=None, host=None, port=None,
sample_rate=None, type=Nds2ChannelType.any(),
dtype=Nds2DataType.any(), unique=False, epoch='ALL'):
# pylint: disable=unused-argument,redefined-builtin
"""Query an NDS2 server for channel information
Parameters
----------
channels : `list` of `str`
list of channel names to query, each can include bash-style globs
connection : `nds2.connection`, optional
open NDS2 connection to use for query
host : `str`, optional
name of NDS2 server to query, required if ``connection`` is not
given
port : `int`, optional
port number on host to use for NDS2 connection
sample_rate : `int`, `float`, `tuple`, optional
a single number, representing a specific sample rate to match,
or a tuple representing a ``(low, high)` interval to match
type : `int`, optional
the NDS2 channel type to match
dtype : `int`, optional
the NDS2 data type to match
unique : `bool`, optional, default: `False`
require one (and only one) match per channel
epoch : `str`, `tuple` of `int`, optional
the NDS epoch to restrict to, either the name of a known epoch,
or a 2-tuple of GPS ``[start, stop)`` times
Returns
-------
channels : `list` of `nds2.channel`
list of NDS2 channel objects
See also
--------
nds2.connection.find_channels
for documentation on the underlying query method
Examples
--------
>>> from gwpy.io.nds2 import find_channels
>>> find_channels(['G1:DER_DATA_H'], host='nds.ligo.caltech.edu')
[<G1:DER_DATA_H (16384Hz, RDS, FLOAT64)>]
"""
# set epoch
if not isinstance(epoch, tuple):
epoch = (epoch or 'All',)
connection.set_epoch(*epoch)
# format sample_rate as tuple for find_channels call
if isinstance(sample_rate, (int, float)):
sample_rate = (sample_rate, sample_rate)
elif sample_rate is None:
sample_rate = tuple()
# query for channels
out = []
for name in _get_nds2_names(channels):
out.extend(_find_channel(connection, name, type, dtype, sample_rate,
unique=unique))
return out | [
"def",
"find_channels",
"(",
"channels",
",",
"connection",
"=",
"None",
",",
"host",
"=",
"None",
",",
"port",
"=",
"None",
",",
"sample_rate",
"=",
"None",
",",
"type",
"=",
"Nds2ChannelType",
".",
"any",
"(",
")",
",",
"dtype",
"=",
"Nds2DataType",
... | Query an NDS2 server for channel information
Parameters
----------
channels : `list` of `str`
list of channel names to query, each can include bash-style globs
connection : `nds2.connection`, optional
open NDS2 connection to use for query
host : `str`, optional
name of NDS2 server to query, required if ``connection`` is not
given
port : `int`, optional
port number on host to use for NDS2 connection
sample_rate : `int`, `float`, `tuple`, optional
a single number, representing a specific sample rate to match,
or a tuple representing a ``(low, high)` interval to match
type : `int`, optional
the NDS2 channel type to match
dtype : `int`, optional
the NDS2 data type to match
unique : `bool`, optional, default: `False`
require one (and only one) match per channel
epoch : `str`, `tuple` of `int`, optional
the NDS epoch to restrict to, either the name of a known epoch,
or a 2-tuple of GPS ``[start, stop)`` times
Returns
-------
channels : `list` of `nds2.channel`
list of NDS2 channel objects
See also
--------
nds2.connection.find_channels
for documentation on the underlying query method
Examples
--------
>>> from gwpy.io.nds2 import find_channels
>>> find_channels(['G1:DER_DATA_H'], host='nds.ligo.caltech.edu')
[<G1:DER_DATA_H (16384Hz, RDS, FLOAT64)>] | [
"Query",
"an",
"NDS2",
"server",
"for",
"channel",
"information"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/nds2.py#L392-L462 | train | 211,235 |
gwpy/gwpy | gwpy/io/nds2.py | _find_channel | def _find_channel(connection, name, ctype, dtype, sample_rate, unique=False):
"""Internal method to find a single channel
Parameters
----------
connection : `nds2.connection`, optional
open NDS2 connection to use for query
name : `str`
the name of the channel to find
ctype : `int`
the NDS2 channel type to match
dtype : `int`
the NDS2 data type to match
sample_rate : `tuple`
a pre-formatted rate tuple (see `find_channels`)
unique : `bool`, optional, default: `False`
require one (and only one) match per channel
Returns
-------
channels : `list` of `nds2.channel`
list of NDS2 channel objects, if `unique=True` is given the list
is guaranteed to have only one element.
See also
--------
nds2.connection.find_channels
for documentation on the underlying query method
"""
# parse channel type from name,
# e.g. 'L1:GDS-CALIB_STRAIN,reduced' -> 'L1:GDS-CALIB_STRAIN', 'reduced'
name, ctype = _strip_ctype(name, ctype, connection.get_protocol())
# query NDS2
found = connection.find_channels(name, ctype, dtype, *sample_rate)
# if don't care about defaults, just return now
if not unique:
return found
# if two results, remove 'online' copy (if present)
# (if no online channels present, this does nothing)
if len(found) == 2:
found = [c for c in found if
c.channel_type != Nds2ChannelType.ONLINE.value]
# if not unique result, panic
if len(found) != 1:
raise ValueError("unique NDS2 channel match not found for %r"
% name)
return found | python | def _find_channel(connection, name, ctype, dtype, sample_rate, unique=False):
"""Internal method to find a single channel
Parameters
----------
connection : `nds2.connection`, optional
open NDS2 connection to use for query
name : `str`
the name of the channel to find
ctype : `int`
the NDS2 channel type to match
dtype : `int`
the NDS2 data type to match
sample_rate : `tuple`
a pre-formatted rate tuple (see `find_channels`)
unique : `bool`, optional, default: `False`
require one (and only one) match per channel
Returns
-------
channels : `list` of `nds2.channel`
list of NDS2 channel objects, if `unique=True` is given the list
is guaranteed to have only one element.
See also
--------
nds2.connection.find_channels
for documentation on the underlying query method
"""
# parse channel type from name,
# e.g. 'L1:GDS-CALIB_STRAIN,reduced' -> 'L1:GDS-CALIB_STRAIN', 'reduced'
name, ctype = _strip_ctype(name, ctype, connection.get_protocol())
# query NDS2
found = connection.find_channels(name, ctype, dtype, *sample_rate)
# if don't care about defaults, just return now
if not unique:
return found
# if two results, remove 'online' copy (if present)
# (if no online channels present, this does nothing)
if len(found) == 2:
found = [c for c in found if
c.channel_type != Nds2ChannelType.ONLINE.value]
# if not unique result, panic
if len(found) != 1:
raise ValueError("unique NDS2 channel match not found for %r"
% name)
return found | [
"def",
"_find_channel",
"(",
"connection",
",",
"name",
",",
"ctype",
",",
"dtype",
",",
"sample_rate",
",",
"unique",
"=",
"False",
")",
":",
"# parse channel type from name,",
"# e.g. 'L1:GDS-CALIB_STRAIN,reduced' -> 'L1:GDS-CALIB_STRAIN', 'reduced'",
"name",
",",
"ctyp... | Internal method to find a single channel
Parameters
----------
connection : `nds2.connection`, optional
open NDS2 connection to use for query
name : `str`
the name of the channel to find
ctype : `int`
the NDS2 channel type to match
dtype : `int`
the NDS2 data type to match
sample_rate : `tuple`
a pre-formatted rate tuple (see `find_channels`)
unique : `bool`, optional, default: `False`
require one (and only one) match per channel
Returns
-------
channels : `list` of `nds2.channel`
list of NDS2 channel objects, if `unique=True` is given the list
is guaranteed to have only one element.
See also
--------
nds2.connection.find_channels
for documentation on the underlying query method | [
"Internal",
"method",
"to",
"find",
"a",
"single",
"channel"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/nds2.py#L465-L521 | train | 211,236 |
gwpy/gwpy | gwpy/io/nds2.py | _strip_ctype | def _strip_ctype(name, ctype, protocol=2):
"""Strip the ctype from a channel name for the given nds server version
This is needed because NDS1 servers store trend channels _including_
the suffix, but not raw channels, and NDS2 doesn't do this.
"""
# parse channel type from name (e.g. 'L1:GDS-CALIB_STRAIN,reduced')
try:
name, ctypestr = name.rsplit(',', 1)
except ValueError:
pass
else:
ctype = Nds2ChannelType.find(ctypestr).value
# NDS1 stores channels with trend suffix, so we put it back:
if protocol == 1 and ctype in (
Nds2ChannelType.STREND.value,
Nds2ChannelType.MTREND.value
):
name += ',{0}'.format(ctypestr)
return name, ctype | python | def _strip_ctype(name, ctype, protocol=2):
"""Strip the ctype from a channel name for the given nds server version
This is needed because NDS1 servers store trend channels _including_
the suffix, but not raw channels, and NDS2 doesn't do this.
"""
# parse channel type from name (e.g. 'L1:GDS-CALIB_STRAIN,reduced')
try:
name, ctypestr = name.rsplit(',', 1)
except ValueError:
pass
else:
ctype = Nds2ChannelType.find(ctypestr).value
# NDS1 stores channels with trend suffix, so we put it back:
if protocol == 1 and ctype in (
Nds2ChannelType.STREND.value,
Nds2ChannelType.MTREND.value
):
name += ',{0}'.format(ctypestr)
return name, ctype | [
"def",
"_strip_ctype",
"(",
"name",
",",
"ctype",
",",
"protocol",
"=",
"2",
")",
":",
"# parse channel type from name (e.g. 'L1:GDS-CALIB_STRAIN,reduced')",
"try",
":",
"name",
",",
"ctypestr",
"=",
"name",
".",
"rsplit",
"(",
"','",
",",
"1",
")",
"except",
... | Strip the ctype from a channel name for the given nds server version
This is needed because NDS1 servers store trend channels _including_
the suffix, but not raw channels, and NDS2 doesn't do this. | [
"Strip",
"the",
"ctype",
"from",
"a",
"channel",
"name",
"for",
"the",
"given",
"nds",
"server",
"version"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/nds2.py#L524-L543 | train | 211,237 |
gwpy/gwpy | gwpy/io/nds2.py | get_availability | def get_availability(channels, start, end,
connection=None, host=None, port=None):
# pylint: disable=unused-argument
"""Query an NDS2 server for data availability
Parameters
----------
channels : `list` of `str`
list of channel names to query; this list is mapped to NDS channel
names using :func:`find_channels`.
start : `int`
GPS start time of query
end : `int`
GPS end time of query
connection : `nds2.connection`, optional
open NDS2 connection to use for query
host : `str`, optional
name of NDS2 server to query, required if ``connection`` is not
given
port : `int`, optional
port number on host to use for NDS2 connection
Returns
-------
segdict : `~gwpy.segments.SegmentListDict`
dict of ``(name, SegmentList)`` pairs
Raises
------
ValueError
if the given channel name cannot be mapped uniquely to a name
in the NDS server database.
See also
--------
nds2.connection.get_availability
for documentation on the underlying query method
"""
from ..segments import (Segment, SegmentList, SegmentListDict)
connection.set_epoch(start, end)
# map user-given real names to NDS names
names = list(map(
_get_nds2_name, find_channels(channels, epoch=(start, end),
connection=connection, unique=True),
))
# query for availability
result = connection.get_availability(names)
# map to segment types
out = SegmentListDict()
for name, result in zip(channels, result):
out[name] = SegmentList([Segment(s.gps_start, s.gps_stop) for s in
result.simple_list()])
return out | python | def get_availability(channels, start, end,
connection=None, host=None, port=None):
# pylint: disable=unused-argument
"""Query an NDS2 server for data availability
Parameters
----------
channels : `list` of `str`
list of channel names to query; this list is mapped to NDS channel
names using :func:`find_channels`.
start : `int`
GPS start time of query
end : `int`
GPS end time of query
connection : `nds2.connection`, optional
open NDS2 connection to use for query
host : `str`, optional
name of NDS2 server to query, required if ``connection`` is not
given
port : `int`, optional
port number on host to use for NDS2 connection
Returns
-------
segdict : `~gwpy.segments.SegmentListDict`
dict of ``(name, SegmentList)`` pairs
Raises
------
ValueError
if the given channel name cannot be mapped uniquely to a name
in the NDS server database.
See also
--------
nds2.connection.get_availability
for documentation on the underlying query method
"""
from ..segments import (Segment, SegmentList, SegmentListDict)
connection.set_epoch(start, end)
# map user-given real names to NDS names
names = list(map(
_get_nds2_name, find_channels(channels, epoch=(start, end),
connection=connection, unique=True),
))
# query for availability
result = connection.get_availability(names)
# map to segment types
out = SegmentListDict()
for name, result in zip(channels, result):
out[name] = SegmentList([Segment(s.gps_start, s.gps_stop) for s in
result.simple_list()])
return out | [
"def",
"get_availability",
"(",
"channels",
",",
"start",
",",
"end",
",",
"connection",
"=",
"None",
",",
"host",
"=",
"None",
",",
"port",
"=",
"None",
")",
":",
"# pylint: disable=unused-argument",
"from",
".",
".",
"segments",
"import",
"(",
"Segment",
... | Query an NDS2 server for data availability
Parameters
----------
channels : `list` of `str`
list of channel names to query; this list is mapped to NDS channel
names using :func:`find_channels`.
start : `int`
GPS start time of query
end : `int`
GPS end time of query
connection : `nds2.connection`, optional
open NDS2 connection to use for query
host : `str`, optional
name of NDS2 server to query, required if ``connection`` is not
given
port : `int`, optional
port number on host to use for NDS2 connection
Returns
-------
segdict : `~gwpy.segments.SegmentListDict`
dict of ``(name, SegmentList)`` pairs
Raises
------
ValueError
if the given channel name cannot be mapped uniquely to a name
in the NDS server database.
See also
--------
nds2.connection.get_availability
for documentation on the underlying query method | [
"Query",
"an",
"NDS2",
"server",
"for",
"data",
"availability"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/nds2.py#L548-L605 | train | 211,238 |
gwpy/gwpy | gwpy/io/nds2.py | minute_trend_times | def minute_trend_times(start, end):
"""Expand a [start, end) interval for use in querying for minute trends
NDS2 requires start and end times for minute trends to be a multiple of
60 (to exactly match the time of a minute-trend sample), so this function
expands the given ``[start, end)`` interval to the nearest multiples.
Parameters
----------
start : `int`
GPS start time of query
end : `int`
GPS end time of query
Returns
-------
mstart : `int`
``start`` rounded down to nearest multiple of 60
mend : `int`
``end`` rounded up to nearest multiple of 60
"""
if start % 60:
start = int(start) // 60 * 60
if end % 60:
end = int(end) // 60 * 60 + 60
return int(start), int(end) | python | def minute_trend_times(start, end):
"""Expand a [start, end) interval for use in querying for minute trends
NDS2 requires start and end times for minute trends to be a multiple of
60 (to exactly match the time of a minute-trend sample), so this function
expands the given ``[start, end)`` interval to the nearest multiples.
Parameters
----------
start : `int`
GPS start time of query
end : `int`
GPS end time of query
Returns
-------
mstart : `int`
``start`` rounded down to nearest multiple of 60
mend : `int`
``end`` rounded up to nearest multiple of 60
"""
if start % 60:
start = int(start) // 60 * 60
if end % 60:
end = int(end) // 60 * 60 + 60
return int(start), int(end) | [
"def",
"minute_trend_times",
"(",
"start",
",",
"end",
")",
":",
"if",
"start",
"%",
"60",
":",
"start",
"=",
"int",
"(",
"start",
")",
"//",
"60",
"*",
"60",
"if",
"end",
"%",
"60",
":",
"end",
"=",
"int",
"(",
"end",
")",
"//",
"60",
"*",
"... | Expand a [start, end) interval for use in querying for minute trends
NDS2 requires start and end times for minute trends to be a multiple of
60 (to exactly match the time of a minute-trend sample), so this function
expands the given ``[start, end)`` interval to the nearest multiples.
Parameters
----------
start : `int`
GPS start time of query
end : `int`
GPS end time of query
Returns
-------
mstart : `int`
``start`` rounded down to nearest multiple of 60
mend : `int`
``end`` rounded up to nearest multiple of 60 | [
"Expand",
"a",
"[",
"start",
"end",
")",
"interval",
"for",
"use",
"in",
"querying",
"for",
"minute",
"trends"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/nds2.py#L608-L634 | train | 211,239 |
gwpy/gwpy | gwpy/io/nds2.py | Nds2ChannelType.find | def find(cls, name):
"""Returns the NDS2 channel type corresponding to the given name
"""
try:
return cls._member_map_[name]
except KeyError:
for ctype in cls._member_map_.values():
if ctype.name == name:
return ctype
raise ValueError('%s is not a valid %s' % (name, cls.__name__)) | python | def find(cls, name):
"""Returns the NDS2 channel type corresponding to the given name
"""
try:
return cls._member_map_[name]
except KeyError:
for ctype in cls._member_map_.values():
if ctype.name == name:
return ctype
raise ValueError('%s is not a valid %s' % (name, cls.__name__)) | [
"def",
"find",
"(",
"cls",
",",
"name",
")",
":",
"try",
":",
"return",
"cls",
".",
"_member_map_",
"[",
"name",
"]",
"except",
"KeyError",
":",
"for",
"ctype",
"in",
"cls",
".",
"_member_map_",
".",
"values",
"(",
")",
":",
"if",
"ctype",
".",
"na... | Returns the NDS2 channel type corresponding to the given name | [
"Returns",
"the",
"NDS2",
"channel",
"type",
"corresponding",
"to",
"the",
"given",
"name"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/nds2.py#L98-L107 | train | 211,240 |
gwpy/gwpy | gwpy/io/nds2.py | Nds2DataType.find | def find(cls, dtype):
"""Returns the NDS2 type corresponding to the given python type
"""
try:
return cls._member_map_[dtype]
except KeyError:
try:
dtype = numpy.dtype(dtype).type
except TypeError:
for ndstype in cls._member_map_.values():
if ndstype.value is dtype:
return ndstype
else:
for ndstype in cls._member_map_.values():
if ndstype.value and ndstype.numpy_dtype is dtype:
return ndstype
raise ValueError('%s is not a valid %s' % (dtype, cls.__name__)) | python | def find(cls, dtype):
"""Returns the NDS2 type corresponding to the given python type
"""
try:
return cls._member_map_[dtype]
except KeyError:
try:
dtype = numpy.dtype(dtype).type
except TypeError:
for ndstype in cls._member_map_.values():
if ndstype.value is dtype:
return ndstype
else:
for ndstype in cls._member_map_.values():
if ndstype.value and ndstype.numpy_dtype is dtype:
return ndstype
raise ValueError('%s is not a valid %s' % (dtype, cls.__name__)) | [
"def",
"find",
"(",
"cls",
",",
"dtype",
")",
":",
"try",
":",
"return",
"cls",
".",
"_member_map_",
"[",
"dtype",
"]",
"except",
"KeyError",
":",
"try",
":",
"dtype",
"=",
"numpy",
".",
"dtype",
"(",
"dtype",
")",
".",
"type",
"except",
"TypeError",... | Returns the NDS2 type corresponding to the given python type | [
"Returns",
"the",
"NDS2",
"type",
"corresponding",
"to",
"the",
"given",
"python",
"type"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/nds2.py#L139-L155 | train | 211,241 |
gwpy/gwpy | gwpy/io/datafind.py | reconnect | def reconnect(connection):
"""Open a new datafind connection based on an existing connection
This is required because of https://git.ligo.org/lscsoft/glue/issues/1
Parameters
----------
connection : :class:`~gwdatafind.http.HTTPConnection` or `FflConnection`
a connection object (doesn't need to be open)
Returns
-------
newconn : :class:`~gwdatafind.http.HTTPConnection` or `FflConnection`
the new open connection to the same `host:port` server
"""
if isinstance(connection, FflConnection):
return type(connection)(connection.ffldir)
kw = {'context': connection._context} if connection.port != 80 else {}
return connection.__class__(connection.host, port=connection.port, **kw) | python | def reconnect(connection):
"""Open a new datafind connection based on an existing connection
This is required because of https://git.ligo.org/lscsoft/glue/issues/1
Parameters
----------
connection : :class:`~gwdatafind.http.HTTPConnection` or `FflConnection`
a connection object (doesn't need to be open)
Returns
-------
newconn : :class:`~gwdatafind.http.HTTPConnection` or `FflConnection`
the new open connection to the same `host:port` server
"""
if isinstance(connection, FflConnection):
return type(connection)(connection.ffldir)
kw = {'context': connection._context} if connection.port != 80 else {}
return connection.__class__(connection.host, port=connection.port, **kw) | [
"def",
"reconnect",
"(",
"connection",
")",
":",
"if",
"isinstance",
"(",
"connection",
",",
"FflConnection",
")",
":",
"return",
"type",
"(",
"connection",
")",
"(",
"connection",
".",
"ffldir",
")",
"kw",
"=",
"{",
"'context'",
":",
"connection",
".",
... | Open a new datafind connection based on an existing connection
This is required because of https://git.ligo.org/lscsoft/glue/issues/1
Parameters
----------
connection : :class:`~gwdatafind.http.HTTPConnection` or `FflConnection`
a connection object (doesn't need to be open)
Returns
-------
newconn : :class:`~gwdatafind.http.HTTPConnection` or `FflConnection`
the new open connection to the same `host:port` server | [
"Open",
"a",
"new",
"datafind",
"connection",
"based",
"on",
"an",
"existing",
"connection"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/datafind.py#L240-L258 | train | 211,242 |
gwpy/gwpy | gwpy/io/datafind.py | _type_priority | def _type_priority(ifo, ftype, trend=None):
"""Prioritise the given GWF type based on its name or trend status.
This is essentially an ad-hoc ordering function based on internal knowledge
of how LIGO does GWF type naming.
"""
# if looking for a trend channel, prioritise the matching type
for trendname, trend_regex in [
('m-trend', MINUTE_TREND_TYPE),
('s-trend', SECOND_TREND_TYPE),
]:
if trend == trendname and trend_regex.match(ftype):
return 0, len(ftype)
# otherwise rank this type according to priority
for reg, prio in {
HIGH_PRIORITY_TYPE: 1,
re.compile(r'[A-Z]\d_C'): 6,
LOW_PRIORITY_TYPE: 10,
MINUTE_TREND_TYPE: 10,
SECOND_TREND_TYPE: 10,
}.items():
if reg.search(ftype):
return prio, len(ftype)
return 5, len(ftype) | python | def _type_priority(ifo, ftype, trend=None):
"""Prioritise the given GWF type based on its name or trend status.
This is essentially an ad-hoc ordering function based on internal knowledge
of how LIGO does GWF type naming.
"""
# if looking for a trend channel, prioritise the matching type
for trendname, trend_regex in [
('m-trend', MINUTE_TREND_TYPE),
('s-trend', SECOND_TREND_TYPE),
]:
if trend == trendname and trend_regex.match(ftype):
return 0, len(ftype)
# otherwise rank this type according to priority
for reg, prio in {
HIGH_PRIORITY_TYPE: 1,
re.compile(r'[A-Z]\d_C'): 6,
LOW_PRIORITY_TYPE: 10,
MINUTE_TREND_TYPE: 10,
SECOND_TREND_TYPE: 10,
}.items():
if reg.search(ftype):
return prio, len(ftype)
return 5, len(ftype) | [
"def",
"_type_priority",
"(",
"ifo",
",",
"ftype",
",",
"trend",
"=",
"None",
")",
":",
"# if looking for a trend channel, prioritise the matching type",
"for",
"trendname",
",",
"trend_regex",
"in",
"[",
"(",
"'m-trend'",
",",
"MINUTE_TREND_TYPE",
")",
",",
"(",
... | Prioritise the given GWF type based on its name or trend status.
This is essentially an ad-hoc ordering function based on internal knowledge
of how LIGO does GWF type naming. | [
"Prioritise",
"the",
"given",
"GWF",
"type",
"based",
"on",
"its",
"name",
"or",
"trend",
"status",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/datafind.py#L261-L286 | train | 211,243 |
gwpy/gwpy | gwpy/io/datafind.py | on_tape | def on_tape(*files):
"""Determine whether any of the given files are on tape
Parameters
----------
*files : `str`
one or more paths to GWF files
Returns
-------
True/False : `bool`
`True` if any of the files are determined to be on tape,
otherwise `False`
"""
for path in files:
try:
if os.stat(path).st_blocks == 0:
return True
except AttributeError: # windows doesn't have st_blocks
return False
return False | python | def on_tape(*files):
"""Determine whether any of the given files are on tape
Parameters
----------
*files : `str`
one or more paths to GWF files
Returns
-------
True/False : `bool`
`True` if any of the files are determined to be on tape,
otherwise `False`
"""
for path in files:
try:
if os.stat(path).st_blocks == 0:
return True
except AttributeError: # windows doesn't have st_blocks
return False
return False | [
"def",
"on_tape",
"(",
"*",
"files",
")",
":",
"for",
"path",
"in",
"files",
":",
"try",
":",
"if",
"os",
".",
"stat",
"(",
"path",
")",
".",
"st_blocks",
"==",
"0",
":",
"return",
"True",
"except",
"AttributeError",
":",
"# windows doesn't have st_block... | Determine whether any of the given files are on tape
Parameters
----------
*files : `str`
one or more paths to GWF files
Returns
-------
True/False : `bool`
`True` if any of the files are determined to be on tape,
otherwise `False` | [
"Determine",
"whether",
"any",
"of",
"the",
"given",
"files",
"are",
"on",
"tape"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/datafind.py#L289-L309 | train | 211,244 |
gwpy/gwpy | gwpy/io/datafind.py | with_connection | def with_connection(func):
"""Decorate a function to open a new datafind connection if required
This method will inspect the ``connection`` keyword, and if `None`
(or missing), will use the ``host`` and ``port`` keywords to open
a new connection and pass it as ``connection=<new>`` to ``func``.
"""
@wraps(func)
def wrapped(*args, **kwargs):
if kwargs.get('connection') is None:
kwargs['connection'] = _choose_connection(host=kwargs.get('host'),
port=kwargs.get('port'))
try:
return func(*args, **kwargs)
except HTTPException:
kwargs['connection'] = reconnect(kwargs['connection'])
return func(*args, **kwargs)
return wrapped | python | def with_connection(func):
"""Decorate a function to open a new datafind connection if required
This method will inspect the ``connection`` keyword, and if `None`
(or missing), will use the ``host`` and ``port`` keywords to open
a new connection and pass it as ``connection=<new>`` to ``func``.
"""
@wraps(func)
def wrapped(*args, **kwargs):
if kwargs.get('connection') is None:
kwargs['connection'] = _choose_connection(host=kwargs.get('host'),
port=kwargs.get('port'))
try:
return func(*args, **kwargs)
except HTTPException:
kwargs['connection'] = reconnect(kwargs['connection'])
return func(*args, **kwargs)
return wrapped | [
"def",
"with_connection",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"kwargs",
".",
"get",
"(",
"'connection'",
")",
"is",
"None",
":",
"kwargs",
"[",
"'connec... | Decorate a function to open a new datafind connection if required
This method will inspect the ``connection`` keyword, and if `None`
(or missing), will use the ``host`` and ``port`` keywords to open
a new connection and pass it as ``connection=<new>`` to ``func``. | [
"Decorate",
"a",
"function",
"to",
"open",
"a",
"new",
"datafind",
"connection",
"if",
"required"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/datafind.py#L320-L337 | train | 211,245 |
gwpy/gwpy | gwpy/io/datafind.py | find_best_frametype | def find_best_frametype(channel, start, end,
frametype_match=None, allow_tape=True,
connection=None, host=None, port=None):
"""Intelligently select the best frametype from which to read this channel
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the channel to be found
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of period of interest,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS end time of period of interest,
any input parseable by `~gwpy.time.to_gps` is fine
host : `str`, optional
name of datafind host to use
port : `int`, optional
port on datafind host to use
frametype_match : `str`, optiona
regular expression to use for frametype `str` matching
allow_tape : `bool`, optional
do not test types whose frame files are stored on tape (not on
spinning disk)
Returns
-------
frametype : `str`
the best matching frametype for the ``channel`` in the
``[start, end)`` interval
Raises
------
ValueError
if no valid frametypes are found
Examples
--------
>>> from gwpy.io.datafind import find_best_frametype
>>> find_best_frametype('L1:GDS-CALIB_STRAIN', 1126259460, 1126259464)
'L1_HOFT_C00'
"""
try:
return find_frametype(channel, gpstime=(start, end),
frametype_match=frametype_match,
allow_tape=allow_tape, on_gaps='error',
connection=connection, host=host, port=port)
except RuntimeError: # gaps (or something else went wrong)
ftout = find_frametype(channel, gpstime=(start, end),
frametype_match=frametype_match,
return_all=True, allow_tape=allow_tape,
on_gaps='ignore', connection=connection,
host=host, port=port)
try:
if isinstance(ftout, dict):
return {key: ftout[key][0] for key in ftout}
return ftout[0]
except IndexError:
raise ValueError("Cannot find any valid frametypes for channel(s)") | python | def find_best_frametype(channel, start, end,
frametype_match=None, allow_tape=True,
connection=None, host=None, port=None):
"""Intelligently select the best frametype from which to read this channel
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the channel to be found
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of period of interest,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS end time of period of interest,
any input parseable by `~gwpy.time.to_gps` is fine
host : `str`, optional
name of datafind host to use
port : `int`, optional
port on datafind host to use
frametype_match : `str`, optiona
regular expression to use for frametype `str` matching
allow_tape : `bool`, optional
do not test types whose frame files are stored on tape (not on
spinning disk)
Returns
-------
frametype : `str`
the best matching frametype for the ``channel`` in the
``[start, end)`` interval
Raises
------
ValueError
if no valid frametypes are found
Examples
--------
>>> from gwpy.io.datafind import find_best_frametype
>>> find_best_frametype('L1:GDS-CALIB_STRAIN', 1126259460, 1126259464)
'L1_HOFT_C00'
"""
try:
return find_frametype(channel, gpstime=(start, end),
frametype_match=frametype_match,
allow_tape=allow_tape, on_gaps='error',
connection=connection, host=host, port=port)
except RuntimeError: # gaps (or something else went wrong)
ftout = find_frametype(channel, gpstime=(start, end),
frametype_match=frametype_match,
return_all=True, allow_tape=allow_tape,
on_gaps='ignore', connection=connection,
host=host, port=port)
try:
if isinstance(ftout, dict):
return {key: ftout[key][0] for key in ftout}
return ftout[0]
except IndexError:
raise ValueError("Cannot find any valid frametypes for channel(s)") | [
"def",
"find_best_frametype",
"(",
"channel",
",",
"start",
",",
"end",
",",
"frametype_match",
"=",
"None",
",",
"allow_tape",
"=",
"True",
",",
"connection",
"=",
"None",
",",
"host",
"=",
"None",
",",
"port",
"=",
"None",
")",
":",
"try",
":",
"retu... | Intelligently select the best frametype from which to read this channel
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the channel to be found
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of period of interest,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS end time of period of interest,
any input parseable by `~gwpy.time.to_gps` is fine
host : `str`, optional
name of datafind host to use
port : `int`, optional
port on datafind host to use
frametype_match : `str`, optiona
regular expression to use for frametype `str` matching
allow_tape : `bool`, optional
do not test types whose frame files are stored on tape (not on
spinning disk)
Returns
-------
frametype : `str`
the best matching frametype for the ``channel`` in the
``[start, end)`` interval
Raises
------
ValueError
if no valid frametypes are found
Examples
--------
>>> from gwpy.io.datafind import find_best_frametype
>>> find_best_frametype('L1:GDS-CALIB_STRAIN', 1126259460, 1126259464)
'L1_HOFT_C00' | [
"Intelligently",
"select",
"the",
"best",
"frametype",
"from",
"which",
"to",
"read",
"this",
"channel"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/datafind.py#L547-L611 | train | 211,246 |
gwpy/gwpy | gwpy/io/datafind.py | find_types | def find_types(observatory, match=None, trend=None,
connection=None, **connection_kw):
"""Find the available data types for a given observatory.
See also
--------
gwdatafind.http.HTTPConnection.find_types
FflConnection.find_types
for details on the underlying method(s)
"""
return sorted(connection.find_types(observatory, match=match),
key=lambda x: _type_priority(observatory, x, trend=trend)) | python | def find_types(observatory, match=None, trend=None,
connection=None, **connection_kw):
"""Find the available data types for a given observatory.
See also
--------
gwdatafind.http.HTTPConnection.find_types
FflConnection.find_types
for details on the underlying method(s)
"""
return sorted(connection.find_types(observatory, match=match),
key=lambda x: _type_priority(observatory, x, trend=trend)) | [
"def",
"find_types",
"(",
"observatory",
",",
"match",
"=",
"None",
",",
"trend",
"=",
"None",
",",
"connection",
"=",
"None",
",",
"*",
"*",
"connection_kw",
")",
":",
"return",
"sorted",
"(",
"connection",
".",
"find_types",
"(",
"observatory",
",",
"m... | Find the available data types for a given observatory.
See also
--------
gwdatafind.http.HTTPConnection.find_types
FflConnection.find_types
for details on the underlying method(s) | [
"Find",
"the",
"available",
"data",
"types",
"for",
"a",
"given",
"observatory",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/datafind.py#L615-L626 | train | 211,247 |
gwpy/gwpy | gwpy/io/datafind.py | find_urls | def find_urls(observatory, frametype, start, end, on_gaps='error',
connection=None, **connection_kw):
"""Find the URLs of files of a given data type in a GPS interval.
See also
--------
gwdatafind.http.HTTPConnection.find_urls
FflConnection.find_urls
for details on the underlying method(s)
"""
return connection.find_urls(observatory, frametype, start, end,
on_gaps=on_gaps) | python | def find_urls(observatory, frametype, start, end, on_gaps='error',
connection=None, **connection_kw):
"""Find the URLs of files of a given data type in a GPS interval.
See also
--------
gwdatafind.http.HTTPConnection.find_urls
FflConnection.find_urls
for details on the underlying method(s)
"""
return connection.find_urls(observatory, frametype, start, end,
on_gaps=on_gaps) | [
"def",
"find_urls",
"(",
"observatory",
",",
"frametype",
",",
"start",
",",
"end",
",",
"on_gaps",
"=",
"'error'",
",",
"connection",
"=",
"None",
",",
"*",
"*",
"connection_kw",
")",
":",
"return",
"connection",
".",
"find_urls",
"(",
"observatory",
",",... | Find the URLs of files of a given data type in a GPS interval.
See also
--------
gwdatafind.http.HTTPConnection.find_urls
FflConnection.find_urls
for details on the underlying method(s) | [
"Find",
"the",
"URLs",
"of",
"files",
"of",
"a",
"given",
"data",
"type",
"in",
"a",
"GPS",
"interval",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/datafind.py#L630-L641 | train | 211,248 |
gwpy/gwpy | gwpy/io/datafind.py | FflConnection.ffl_path | def ffl_path(self, site, frametype):
"""Returns the path of the FFL file for the given site and frametype
Examples
--------
>>> from gwpy.io.datafind import FflConnection
>>> conn = FflConnection()
>>> print(conn.ffl_path('V', 'V1Online'))
/virgoData/ffl/V1Online.ffl
"""
try:
return self.paths[(site, frametype)]
except KeyError:
self._find_paths()
return self.paths[(site, frametype)] | python | def ffl_path(self, site, frametype):
"""Returns the path of the FFL file for the given site and frametype
Examples
--------
>>> from gwpy.io.datafind import FflConnection
>>> conn = FflConnection()
>>> print(conn.ffl_path('V', 'V1Online'))
/virgoData/ffl/V1Online.ffl
"""
try:
return self.paths[(site, frametype)]
except KeyError:
self._find_paths()
return self.paths[(site, frametype)] | [
"def",
"ffl_path",
"(",
"self",
",",
"site",
",",
"frametype",
")",
":",
"try",
":",
"return",
"self",
".",
"paths",
"[",
"(",
"site",
",",
"frametype",
")",
"]",
"except",
"KeyError",
":",
"self",
".",
"_find_paths",
"(",
")",
"return",
"self",
".",... | Returns the path of the FFL file for the given site and frametype
Examples
--------
>>> from gwpy.io.datafind import FflConnection
>>> conn = FflConnection()
>>> print(conn.ffl_path('V', 'V1Online'))
/virgoData/ffl/V1Online.ffl | [
"Returns",
"the",
"path",
"of",
"the",
"FFL",
"file",
"for",
"the",
"given",
"site",
"and",
"frametype"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/datafind.py#L163-L177 | train | 211,249 |
gwpy/gwpy | gwpy/io/datafind.py | FflConnection.find_types | def find_types(self, site=None, match=r'^(?!lastfile|spectro|\.).*'):
"""Return the list of known data types.
This is just the basename of each FFL file found in the
FFL directory (minus the ``.ffl`` extension)
"""
self._find_paths()
types = [tag for (site_, tag) in self.paths if site in (None, site_)]
if match is not None:
match = re.compile(match)
return list(filter(match.search, types))
return types | python | def find_types(self, site=None, match=r'^(?!lastfile|spectro|\.).*'):
"""Return the list of known data types.
This is just the basename of each FFL file found in the
FFL directory (minus the ``.ffl`` extension)
"""
self._find_paths()
types = [tag for (site_, tag) in self.paths if site in (None, site_)]
if match is not None:
match = re.compile(match)
return list(filter(match.search, types))
return types | [
"def",
"find_types",
"(",
"self",
",",
"site",
"=",
"None",
",",
"match",
"=",
"r'^(?!lastfile|spectro|\\.).*'",
")",
":",
"self",
".",
"_find_paths",
"(",
")",
"types",
"=",
"[",
"tag",
"for",
"(",
"site_",
",",
"tag",
")",
"in",
"self",
".",
"paths",... | Return the list of known data types.
This is just the basename of each FFL file found in the
FFL directory (minus the ``.ffl`` extension) | [
"Return",
"the",
"list",
"of",
"known",
"data",
"types",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/datafind.py#L179-L190 | train | 211,250 |
gwpy/gwpy | gwpy/io/datafind.py | FflConnection.find_urls | def find_urls(self, site, frametype, gpsstart, gpsend,
match=None, on_gaps='warn'):
"""Find all files of the given type in the [start, end) GPS interval.
"""
span = Segment(gpsstart, gpsend)
cache = [e for e in self._read_ffl_cache(site, frametype) if
e.observatory == site and e.description == frametype and
e.segment.intersects(span)]
urls = [e.path for e in cache]
missing = SegmentList([span]) - cache_segments(cache)
if match:
match = re.compile(match)
urls = list(filter(match.search, urls))
# no missing data or don't care, return
if on_gaps == 'ignore' or not missing:
return urls
# handle missing data
msg = 'Missing segments: \n{0}'.format('\n'.join(map(str, missing)))
if on_gaps == 'warn':
warnings.warn(msg)
return urls
raise RuntimeError(msg) | python | def find_urls(self, site, frametype, gpsstart, gpsend,
match=None, on_gaps='warn'):
"""Find all files of the given type in the [start, end) GPS interval.
"""
span = Segment(gpsstart, gpsend)
cache = [e for e in self._read_ffl_cache(site, frametype) if
e.observatory == site and e.description == frametype and
e.segment.intersects(span)]
urls = [e.path for e in cache]
missing = SegmentList([span]) - cache_segments(cache)
if match:
match = re.compile(match)
urls = list(filter(match.search, urls))
# no missing data or don't care, return
if on_gaps == 'ignore' or not missing:
return urls
# handle missing data
msg = 'Missing segments: \n{0}'.format('\n'.join(map(str, missing)))
if on_gaps == 'warn':
warnings.warn(msg)
return urls
raise RuntimeError(msg) | [
"def",
"find_urls",
"(",
"self",
",",
"site",
",",
"frametype",
",",
"gpsstart",
",",
"gpsend",
",",
"match",
"=",
"None",
",",
"on_gaps",
"=",
"'warn'",
")",
":",
"span",
"=",
"Segment",
"(",
"gpsstart",
",",
"gpsend",
")",
"cache",
"=",
"[",
"e",
... | Find all files of the given type in the [start, end) GPS interval. | [
"Find",
"all",
"files",
"of",
"the",
"given",
"type",
"in",
"the",
"[",
"start",
"end",
")",
"GPS",
"interval",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/datafind.py#L192-L216 | train | 211,251 |
gwpy/gwpy | gwpy/types/io/ligolw.py | read_series | def read_series(source, name, match=None):
"""Read a `Series` from LIGO_LW-XML
Parameters
----------
source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`
file path or open LIGO_LW-format XML file
name : `str`
name of the relevant `LIGO_LW` element to read
match : `dict`, optional
dict of (key, value) `Param` pairs to match correct LIGO_LW element,
this is useful if a single file contains multiple `LIGO_LW` elements
with the same name
"""
from ligo.lw.ligolw import (LIGO_LW, Time, Array, Dim)
from ligo.lw.param import get_param
# read document
xmldoc = read_ligolw(source, contenthandler=series_contenthandler())
# parse match dict
if match is None:
match = dict()
def _is_match(elem):
try:
if elem.Name != name:
return False
except AttributeError: # Name is not set
return False
for key, value in match.items():
try:
if get_param(elem, key).pcdata != value:
return False
except ValueError: # no Param with this Name
return False
return True
# parse out correct element
matches = filter(_is_match, xmldoc.getElementsByTagName(LIGO_LW.tagName))
try:
elem, = matches
except ValueError as exc:
if not matches:
exc.args = ("no LIGO_LW elements found matching request",)
else:
exc.args = ('multiple LIGO_LW elements found matching request, '
'please consider using `match=` to select the '
'correct element',)
raise
# get data
array, = elem.getElementsByTagName(Array.tagName)
# parse dimensions
dims = array.getElementsByTagName(Dim.tagName)
xdim = dims[0]
x0 = xdim.Start
dx = xdim.Scale
xunit = xdim.Unit
try:
ndim = dims[1].n
except IndexError:
pass
else:
if ndim > 2:
raise ValueError("Cannot parse LIGO_LW Array with {} "
"dimensions".format(ndim))
# parse metadata
array_kw = {
'name': array.Name,
'unit': array.Unit,
'xunit': xunit,
}
try:
array_kw['epoch'] = to_gps(
elem.getElementsByTagName(Time.tagName)[0].pcdata)
except IndexError:
pass
for key in ('channel',):
try:
array_kw[key] = get_param(elem, key)
except ValueError:
pass
# build Series
try:
xindex, value = array.array
except ValueError: # not two dimensions stored
return Series(array.array[0], x0=x0, dx=dx, **array_kw)
return Series(value, xindex=xindex, **array_kw) | python | def read_series(source, name, match=None):
"""Read a `Series` from LIGO_LW-XML
Parameters
----------
source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`
file path or open LIGO_LW-format XML file
name : `str`
name of the relevant `LIGO_LW` element to read
match : `dict`, optional
dict of (key, value) `Param` pairs to match correct LIGO_LW element,
this is useful if a single file contains multiple `LIGO_LW` elements
with the same name
"""
from ligo.lw.ligolw import (LIGO_LW, Time, Array, Dim)
from ligo.lw.param import get_param
# read document
xmldoc = read_ligolw(source, contenthandler=series_contenthandler())
# parse match dict
if match is None:
match = dict()
def _is_match(elem):
try:
if elem.Name != name:
return False
except AttributeError: # Name is not set
return False
for key, value in match.items():
try:
if get_param(elem, key).pcdata != value:
return False
except ValueError: # no Param with this Name
return False
return True
# parse out correct element
matches = filter(_is_match, xmldoc.getElementsByTagName(LIGO_LW.tagName))
try:
elem, = matches
except ValueError as exc:
if not matches:
exc.args = ("no LIGO_LW elements found matching request",)
else:
exc.args = ('multiple LIGO_LW elements found matching request, '
'please consider using `match=` to select the '
'correct element',)
raise
# get data
array, = elem.getElementsByTagName(Array.tagName)
# parse dimensions
dims = array.getElementsByTagName(Dim.tagName)
xdim = dims[0]
x0 = xdim.Start
dx = xdim.Scale
xunit = xdim.Unit
try:
ndim = dims[1].n
except IndexError:
pass
else:
if ndim > 2:
raise ValueError("Cannot parse LIGO_LW Array with {} "
"dimensions".format(ndim))
# parse metadata
array_kw = {
'name': array.Name,
'unit': array.Unit,
'xunit': xunit,
}
try:
array_kw['epoch'] = to_gps(
elem.getElementsByTagName(Time.tagName)[0].pcdata)
except IndexError:
pass
for key in ('channel',):
try:
array_kw[key] = get_param(elem, key)
except ValueError:
pass
# build Series
try:
xindex, value = array.array
except ValueError: # not two dimensions stored
return Series(array.array[0], x0=x0, dx=dx, **array_kw)
return Series(value, xindex=xindex, **array_kw) | [
"def",
"read_series",
"(",
"source",
",",
"name",
",",
"match",
"=",
"None",
")",
":",
"from",
"ligo",
".",
"lw",
".",
"ligolw",
"import",
"(",
"LIGO_LW",
",",
"Time",
",",
"Array",
",",
"Dim",
")",
"from",
"ligo",
".",
"lw",
".",
"param",
"import"... | Read a `Series` from LIGO_LW-XML
Parameters
----------
source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`
file path or open LIGO_LW-format XML file
name : `str`
name of the relevant `LIGO_LW` element to read
match : `dict`, optional
dict of (key, value) `Param` pairs to match correct LIGO_LW element,
this is useful if a single file contains multiple `LIGO_LW` elements
with the same name | [
"Read",
"a",
"Series",
"from",
"LIGO_LW",
"-",
"XML"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/io/ligolw.py#L48-L141 | train | 211,252 |
gwpy/gwpy | gwpy/cli/coherence.py | Coherence.make_plot | def make_plot(self):
"""Generate the coherence plot from all time series
"""
args = self.args
fftlength = float(args.secpfft)
overlap = args.overlap
self.log(2, "Calculating spectrum secpfft: %s, overlap: %s" %
(fftlength, overlap))
if overlap is not None:
overlap *= fftlength
self.log(3, 'Reference channel: ' + self.ref_chan)
# group data by segment
groups = OrderedDict()
for series in self.timeseries:
seg = series.span
try:
groups[seg][series.channel.name] = series
except KeyError:
groups[seg] = OrderedDict()
groups[seg][series.channel.name] = series
# -- plot
plot = Plot(figsize=self.figsize, dpi=self.dpi)
ax = plot.gca()
self.spectra = []
# calculate coherence
for seg in groups:
refts = groups[seg].pop(self.ref_chan)
for name in groups[seg]:
series = groups[seg][name]
coh = series.coherence(refts, fftlength=fftlength,
overlap=overlap, window=args.window)
label = name
if len(self.start_list) > 1:
label += ', {0}'.format(series.epoch.gps)
if self.usetex:
label = label_to_latex(label)
ax.plot(coh, label=label)
self.spectra.append(coh)
if args.xscale == 'log' and not args.xmin:
args.xmin = 1/fftlength
return plot | python | def make_plot(self):
"""Generate the coherence plot from all time series
"""
args = self.args
fftlength = float(args.secpfft)
overlap = args.overlap
self.log(2, "Calculating spectrum secpfft: %s, overlap: %s" %
(fftlength, overlap))
if overlap is not None:
overlap *= fftlength
self.log(3, 'Reference channel: ' + self.ref_chan)
# group data by segment
groups = OrderedDict()
for series in self.timeseries:
seg = series.span
try:
groups[seg][series.channel.name] = series
except KeyError:
groups[seg] = OrderedDict()
groups[seg][series.channel.name] = series
# -- plot
plot = Plot(figsize=self.figsize, dpi=self.dpi)
ax = plot.gca()
self.spectra = []
# calculate coherence
for seg in groups:
refts = groups[seg].pop(self.ref_chan)
for name in groups[seg]:
series = groups[seg][name]
coh = series.coherence(refts, fftlength=fftlength,
overlap=overlap, window=args.window)
label = name
if len(self.start_list) > 1:
label += ', {0}'.format(series.epoch.gps)
if self.usetex:
label = label_to_latex(label)
ax.plot(coh, label=label)
self.spectra.append(coh)
if args.xscale == 'log' and not args.xmin:
args.xmin = 1/fftlength
return plot | [
"def",
"make_plot",
"(",
"self",
")",
":",
"args",
"=",
"self",
".",
"args",
"fftlength",
"=",
"float",
"(",
"args",
".",
"secpfft",
")",
"overlap",
"=",
"args",
".",
"overlap",
"self",
".",
"log",
"(",
"2",
",",
"\"Calculating spectrum secpfft: %s, overla... | Generate the coherence plot from all time series | [
"Generate",
"the",
"coherence",
"plot",
"from",
"all",
"time",
"series"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/cli/coherence.py#L73-L123 | train | 211,253 |
gwpy/gwpy | gwpy/cli/coherence.py | Coherence.set_legend | def set_legend(self):
"""Create a legend for this product
"""
leg = super(Coherence, self).set_legend()
if leg is not None:
leg.set_title('Coherence with:')
return leg | python | def set_legend(self):
"""Create a legend for this product
"""
leg = super(Coherence, self).set_legend()
if leg is not None:
leg.set_title('Coherence with:')
return leg | [
"def",
"set_legend",
"(",
"self",
")",
":",
"leg",
"=",
"super",
"(",
"Coherence",
",",
"self",
")",
".",
"set_legend",
"(",
")",
"if",
"leg",
"is",
"not",
"None",
":",
"leg",
".",
"set_title",
"(",
"'Coherence with:'",
")",
"return",
"leg"
] | Create a legend for this product | [
"Create",
"a",
"legend",
"for",
"this",
"product"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/cli/coherence.py#L125-L131 | train | 211,254 |
gwpy/gwpy | gwpy/detector/units.py | parse_unit | def parse_unit(name, parse_strict='warn', format='gwpy'):
"""Attempt to intelligently parse a `str` as a `~astropy.units.Unit`
Parameters
----------
name : `str`
unit name to parse
parse_strict : `str`
one of 'silent', 'warn', or 'raise' depending on how pedantic
you want the parser to be
format : `~astropy.units.format.Base`
the formatter class to use when parsing the unit string
Returns
-------
unit : `~astropy.units.UnitBase`
the unit parsed by `~astropy.units.Unit`
Raises
------
ValueError
if the unit cannot be parsed and `parse_strict='raise'`
"""
if name is None or isinstance(name, units.UnitBase):
return name
try: # have we already identified this unit as unrecognised?
return UNRECOGNIZED_UNITS[name]
except KeyError: # no, this is new
# pylint: disable=unexpected-keyword-arg
try:
return units.Unit(name, parse_strict='raise')
except ValueError as exc:
if (parse_strict == 'raise' or
'did not parse as unit' not in str(exc)):
raise
# try again using out own lenient parser
GWpyFormat.warn = parse_strict != 'silent'
return units.Unit(name, parse_strict='silent', format=format)
finally:
GWpyFormat.warn = True | python | def parse_unit(name, parse_strict='warn', format='gwpy'):
"""Attempt to intelligently parse a `str` as a `~astropy.units.Unit`
Parameters
----------
name : `str`
unit name to parse
parse_strict : `str`
one of 'silent', 'warn', or 'raise' depending on how pedantic
you want the parser to be
format : `~astropy.units.format.Base`
the formatter class to use when parsing the unit string
Returns
-------
unit : `~astropy.units.UnitBase`
the unit parsed by `~astropy.units.Unit`
Raises
------
ValueError
if the unit cannot be parsed and `parse_strict='raise'`
"""
if name is None or isinstance(name, units.UnitBase):
return name
try: # have we already identified this unit as unrecognised?
return UNRECOGNIZED_UNITS[name]
except KeyError: # no, this is new
# pylint: disable=unexpected-keyword-arg
try:
return units.Unit(name, parse_strict='raise')
except ValueError as exc:
if (parse_strict == 'raise' or
'did not parse as unit' not in str(exc)):
raise
# try again using out own lenient parser
GWpyFormat.warn = parse_strict != 'silent'
return units.Unit(name, parse_strict='silent', format=format)
finally:
GWpyFormat.warn = True | [
"def",
"parse_unit",
"(",
"name",
",",
"parse_strict",
"=",
"'warn'",
",",
"format",
"=",
"'gwpy'",
")",
":",
"if",
"name",
"is",
"None",
"or",
"isinstance",
"(",
"name",
",",
"units",
".",
"UnitBase",
")",
":",
"return",
"name",
"try",
":",
"# have we... | Attempt to intelligently parse a `str` as a `~astropy.units.Unit`
Parameters
----------
name : `str`
unit name to parse
parse_strict : `str`
one of 'silent', 'warn', or 'raise' depending on how pedantic
you want the parser to be
format : `~astropy.units.format.Base`
the formatter class to use when parsing the unit string
Returns
-------
unit : `~astropy.units.UnitBase`
the unit parsed by `~astropy.units.Unit`
Raises
------
ValueError
if the unit cannot be parsed and `parse_strict='raise'` | [
"Attempt",
"to",
"intelligently",
"parse",
"a",
"str",
"as",
"a",
"~astropy",
".",
"units",
".",
"Unit"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/units.py#L97-L139 | train | 211,255 |
gwpy/gwpy | gwpy/table/io/gwf.py | _row_from_frevent | def _row_from_frevent(frevent, columns, selection):
"""Generate a table row from an FrEvent
Filtering (``selection``) is done here, rather than in the table reader,
to enable filtering on columns that aren't being returned.
"""
# read params
params = dict(frevent.GetParam())
params['time'] = float(LIGOTimeGPS(*frevent.GetGTime()))
params['amplitude'] = frevent.GetAmplitude()
params['probability'] = frevent.GetProbability()
params['timeBefore'] = frevent.GetTimeBefore()
params['timeAfter'] = frevent.GetTimeAfter()
params['comment'] = frevent.GetComment()
# filter
if not all(op_(params[c], t) for c, op_, t in selection):
return None
# return event as list
return [params[c] for c in columns] | python | def _row_from_frevent(frevent, columns, selection):
"""Generate a table row from an FrEvent
Filtering (``selection``) is done here, rather than in the table reader,
to enable filtering on columns that aren't being returned.
"""
# read params
params = dict(frevent.GetParam())
params['time'] = float(LIGOTimeGPS(*frevent.GetGTime()))
params['amplitude'] = frevent.GetAmplitude()
params['probability'] = frevent.GetProbability()
params['timeBefore'] = frevent.GetTimeBefore()
params['timeAfter'] = frevent.GetTimeAfter()
params['comment'] = frevent.GetComment()
# filter
if not all(op_(params[c], t) for c, op_, t in selection):
return None
# return event as list
return [params[c] for c in columns] | [
"def",
"_row_from_frevent",
"(",
"frevent",
",",
"columns",
",",
"selection",
")",
":",
"# read params",
"params",
"=",
"dict",
"(",
"frevent",
".",
"GetParam",
"(",
")",
")",
"params",
"[",
"'time'",
"]",
"=",
"float",
"(",
"LIGOTimeGPS",
"(",
"*",
"fre... | Generate a table row from an FrEvent
Filtering (``selection``) is done here, rather than in the table reader,
to enable filtering on columns that aren't being returned. | [
"Generate",
"a",
"table",
"row",
"from",
"an",
"FrEvent"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/gwf.py#L45-L63 | train | 211,256 |
gwpy/gwpy | gwpy/table/io/gwf.py | table_to_gwf | def table_to_gwf(table, filename, name, **kwargs):
"""Create a new `~frameCPP.FrameH` and fill it with data
Parameters
----------
table : `~astropy.table.Table`
the data to write
filename : `str`
the name of the file to write into
**kwargs
other keyword arguments (see below for references)
See Also
--------
gwpy.io.gwf.create_frame
gwpy.io.gwf.write_frames
for documentation of keyword arguments
"""
from LDAStools.frameCPP import (FrEvent, GPSTime)
# create frame
write_kw = {key: kwargs.pop(key) for
key in ('compression', 'compression_level') if key in kwargs}
frame = io_gwf.create_frame(name=name, **kwargs)
# append row by row
names = table.dtype.names
for row in table:
rowd = dict((n, row[n]) for n in names)
gps = LIGOTimeGPS(rowd.pop('time', 0))
frame.AppendFrEvent(FrEvent(
str(name),
str(rowd.pop('comment', '')),
str(rowd.pop('inputs', '')),
GPSTime(gps.gpsSeconds, gps.gpsNanoSeconds),
float(rowd.pop('timeBefore', 0)),
float(rowd.pop('timeAfter', 0)),
int(rowd.pop('eventStatus', 0)),
float(rowd.pop('amplitude', 0)),
float(rowd.pop('probability', -1)),
str(rowd.pop('statistics', '')),
list(rowd.items()), # remaining params as tuple
))
# write frame to file
io_gwf.write_frames(filename, [frame], **write_kw) | python | def table_to_gwf(table, filename, name, **kwargs):
"""Create a new `~frameCPP.FrameH` and fill it with data
Parameters
----------
table : `~astropy.table.Table`
the data to write
filename : `str`
the name of the file to write into
**kwargs
other keyword arguments (see below for references)
See Also
--------
gwpy.io.gwf.create_frame
gwpy.io.gwf.write_frames
for documentation of keyword arguments
"""
from LDAStools.frameCPP import (FrEvent, GPSTime)
# create frame
write_kw = {key: kwargs.pop(key) for
key in ('compression', 'compression_level') if key in kwargs}
frame = io_gwf.create_frame(name=name, **kwargs)
# append row by row
names = table.dtype.names
for row in table:
rowd = dict((n, row[n]) for n in names)
gps = LIGOTimeGPS(rowd.pop('time', 0))
frame.AppendFrEvent(FrEvent(
str(name),
str(rowd.pop('comment', '')),
str(rowd.pop('inputs', '')),
GPSTime(gps.gpsSeconds, gps.gpsNanoSeconds),
float(rowd.pop('timeBefore', 0)),
float(rowd.pop('timeAfter', 0)),
int(rowd.pop('eventStatus', 0)),
float(rowd.pop('amplitude', 0)),
float(rowd.pop('probability', -1)),
str(rowd.pop('statistics', '')),
list(rowd.items()), # remaining params as tuple
))
# write frame to file
io_gwf.write_frames(filename, [frame], **write_kw) | [
"def",
"table_to_gwf",
"(",
"table",
",",
"filename",
",",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"LDAStools",
".",
"frameCPP",
"import",
"(",
"FrEvent",
",",
"GPSTime",
")",
"# create frame",
"write_kw",
"=",
"{",
"key",
":",
"kwargs",
".",
... | Create a new `~frameCPP.FrameH` and fill it with data
Parameters
----------
table : `~astropy.table.Table`
the data to write
filename : `str`
the name of the file to write into
**kwargs
other keyword arguments (see below for references)
See Also
--------
gwpy.io.gwf.create_frame
gwpy.io.gwf.write_frames
for documentation of keyword arguments | [
"Create",
"a",
"new",
"~frameCPP",
".",
"FrameH",
"and",
"fill",
"it",
"with",
"data"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/gwf.py#L115-L162 | train | 211,257 |
gwpy/gwpy | gwpy/frequencyseries/frequencyseries.py | FrequencySeries.read | def read(cls, source, *args, **kwargs):
"""Read data into a `FrequencySeries`
Arguments and keywords depend on the output format, see the
online documentation for full details for each format, the
parameters below are common to most formats.
Parameters
----------
source : `str`, `list`
Source of data, any of the following:
- `str` path of single data file,
- `str` path of LAL-format cache file,
- `list` of paths.
*args
Other arguments are (in general) specific to the given
``format``.
format : `str`, optional
Source format identifier. If not given, the format will be
detected if possible. See below for list of acceptable
formats.
**kwargs
Other keywords are (in general) specific to the given ``format``.
Notes
-----"""
return io_registry.read(cls, source, *args, **kwargs) | python | def read(cls, source, *args, **kwargs):
"""Read data into a `FrequencySeries`
Arguments and keywords depend on the output format, see the
online documentation for full details for each format, the
parameters below are common to most formats.
Parameters
----------
source : `str`, `list`
Source of data, any of the following:
- `str` path of single data file,
- `str` path of LAL-format cache file,
- `list` of paths.
*args
Other arguments are (in general) specific to the given
``format``.
format : `str`, optional
Source format identifier. If not given, the format will be
detected if possible. See below for list of acceptable
formats.
**kwargs
Other keywords are (in general) specific to the given ``format``.
Notes
-----"""
return io_registry.read(cls, source, *args, **kwargs) | [
"def",
"read",
"(",
"cls",
",",
"source",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"io_registry",
".",
"read",
"(",
"cls",
",",
"source",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Read data into a `FrequencySeries`
Arguments and keywords depend on the output format, see the
online documentation for full details for each format, the
parameters below are common to most formats.
Parameters
----------
source : `str`, `list`
Source of data, any of the following:
- `str` path of single data file,
- `str` path of LAL-format cache file,
- `list` of paths.
*args
Other arguments are (in general) specific to the given
``format``.
format : `str`, optional
Source format identifier. If not given, the format will be
detected if possible. See below for list of acceptable
formats.
**kwargs
Other keywords are (in general) specific to the given ``format``.
Notes
----- | [
"Read",
"data",
"into",
"a",
"FrequencySeries"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/frequencyseries/frequencyseries.py#L133-L163 | train | 211,258 |
gwpy/gwpy | gwpy/frequencyseries/frequencyseries.py | FrequencySeries.ifft | def ifft(self):
"""Compute the one-dimensional discrete inverse Fourier
transform of this `FrequencySeries`.
Returns
-------
out : :class:`~gwpy.timeseries.TimeSeries`
the normalised, real-valued `TimeSeries`.
See Also
--------
:mod:`scipy.fftpack` for the definition of the DFT and conventions
used.
Notes
-----
This method applies the necessary normalisation such that the
condition holds:
>>> timeseries = TimeSeries([1.0, 0.0, -1.0, 0.0], sample_rate=1.0)
>>> timeseries.fft().ifft() == timeseries
"""
from ..timeseries import TimeSeries
nout = (self.size - 1) * 2
# Undo normalization from TimeSeries.fft
# The DC component does not have the factor of two applied
# so we account for it here
dift = npfft.irfft(self.value * nout) / 2
new = TimeSeries(dift, epoch=self.epoch, channel=self.channel,
unit=self.unit, dx=1/self.dx/nout)
return new | python | def ifft(self):
"""Compute the one-dimensional discrete inverse Fourier
transform of this `FrequencySeries`.
Returns
-------
out : :class:`~gwpy.timeseries.TimeSeries`
the normalised, real-valued `TimeSeries`.
See Also
--------
:mod:`scipy.fftpack` for the definition of the DFT and conventions
used.
Notes
-----
This method applies the necessary normalisation such that the
condition holds:
>>> timeseries = TimeSeries([1.0, 0.0, -1.0, 0.0], sample_rate=1.0)
>>> timeseries.fft().ifft() == timeseries
"""
from ..timeseries import TimeSeries
nout = (self.size - 1) * 2
# Undo normalization from TimeSeries.fft
# The DC component does not have the factor of two applied
# so we account for it here
dift = npfft.irfft(self.value * nout) / 2
new = TimeSeries(dift, epoch=self.epoch, channel=self.channel,
unit=self.unit, dx=1/self.dx/nout)
return new | [
"def",
"ifft",
"(",
"self",
")",
":",
"from",
".",
".",
"timeseries",
"import",
"TimeSeries",
"nout",
"=",
"(",
"self",
".",
"size",
"-",
"1",
")",
"*",
"2",
"# Undo normalization from TimeSeries.fft",
"# The DC component does not have the factor of two applied",
"#... | Compute the one-dimensional discrete inverse Fourier
transform of this `FrequencySeries`.
Returns
-------
out : :class:`~gwpy.timeseries.TimeSeries`
the normalised, real-valued `TimeSeries`.
See Also
--------
:mod:`scipy.fftpack` for the definition of the DFT and conventions
used.
Notes
-----
This method applies the necessary normalisation such that the
condition holds:
>>> timeseries = TimeSeries([1.0, 0.0, -1.0, 0.0], sample_rate=1.0)
>>> timeseries.fft().ifft() == timeseries | [
"Compute",
"the",
"one",
"-",
"dimensional",
"discrete",
"inverse",
"Fourier",
"transform",
"of",
"this",
"FrequencySeries",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/frequencyseries/frequencyseries.py#L202-L232 | train | 211,259 |
gwpy/gwpy | gwpy/frequencyseries/frequencyseries.py | FrequencySeries.interpolate | def interpolate(self, df):
"""Interpolate this `FrequencySeries` to a new resolution.
Parameters
----------
df : `float`
desired frequency resolution of the interpolated `FrequencySeries`,
in Hz
Returns
-------
out : `FrequencySeries`
the interpolated version of the input `FrequencySeries`
See Also
--------
numpy.interp
for the underlying 1-D linear interpolation scheme
"""
f0 = self.f0.decompose().value
N = (self.size - 1) * (self.df.decompose().value / df) + 1
fsamples = numpy.arange(0, numpy.rint(N), dtype=self.dtype) * df + f0
out = type(self)(numpy.interp(fsamples, self.frequencies.value,
self.value))
out.__array_finalize__(self)
out.f0 = f0
out.df = df
return out | python | def interpolate(self, df):
"""Interpolate this `FrequencySeries` to a new resolution.
Parameters
----------
df : `float`
desired frequency resolution of the interpolated `FrequencySeries`,
in Hz
Returns
-------
out : `FrequencySeries`
the interpolated version of the input `FrequencySeries`
See Also
--------
numpy.interp
for the underlying 1-D linear interpolation scheme
"""
f0 = self.f0.decompose().value
N = (self.size - 1) * (self.df.decompose().value / df) + 1
fsamples = numpy.arange(0, numpy.rint(N), dtype=self.dtype) * df + f0
out = type(self)(numpy.interp(fsamples, self.frequencies.value,
self.value))
out.__array_finalize__(self)
out.f0 = f0
out.df = df
return out | [
"def",
"interpolate",
"(",
"self",
",",
"df",
")",
":",
"f0",
"=",
"self",
".",
"f0",
".",
"decompose",
"(",
")",
".",
"value",
"N",
"=",
"(",
"self",
".",
"size",
"-",
"1",
")",
"*",
"(",
"self",
".",
"df",
".",
"decompose",
"(",
")",
".",
... | Interpolate this `FrequencySeries` to a new resolution.
Parameters
----------
df : `float`
desired frequency resolution of the interpolated `FrequencySeries`,
in Hz
Returns
-------
out : `FrequencySeries`
the interpolated version of the input `FrequencySeries`
See Also
--------
numpy.interp
for the underlying 1-D linear interpolation scheme | [
"Interpolate",
"this",
"FrequencySeries",
"to",
"a",
"new",
"resolution",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/frequencyseries/frequencyseries.py#L271-L298 | train | 211,260 |
gwpy/gwpy | gwpy/frequencyseries/frequencyseries.py | FrequencySeries.from_lal | def from_lal(cls, lalfs, copy=True):
"""Generate a new `FrequencySeries` from a LAL `FrequencySeries` of any type
"""
from ..utils.lal import from_lal_unit
try:
unit = from_lal_unit(lalfs.sampleUnits)
except TypeError:
unit = None
channel = Channel(lalfs.name, unit=unit,
dtype=lalfs.data.data.dtype)
return cls(lalfs.data.data, channel=channel, f0=lalfs.f0,
df=lalfs.deltaF, epoch=float(lalfs.epoch),
dtype=lalfs.data.data.dtype, copy=copy) | python | def from_lal(cls, lalfs, copy=True):
"""Generate a new `FrequencySeries` from a LAL `FrequencySeries` of any type
"""
from ..utils.lal import from_lal_unit
try:
unit = from_lal_unit(lalfs.sampleUnits)
except TypeError:
unit = None
channel = Channel(lalfs.name, unit=unit,
dtype=lalfs.data.data.dtype)
return cls(lalfs.data.data, channel=channel, f0=lalfs.f0,
df=lalfs.deltaF, epoch=float(lalfs.epoch),
dtype=lalfs.data.data.dtype, copy=copy) | [
"def",
"from_lal",
"(",
"cls",
",",
"lalfs",
",",
"copy",
"=",
"True",
")",
":",
"from",
".",
".",
"utils",
".",
"lal",
"import",
"from_lal_unit",
"try",
":",
"unit",
"=",
"from_lal_unit",
"(",
"lalfs",
".",
"sampleUnits",
")",
"except",
"TypeError",
"... | Generate a new `FrequencySeries` from a LAL `FrequencySeries` of any type | [
"Generate",
"a",
"new",
"FrequencySeries",
"from",
"a",
"LAL",
"FrequencySeries",
"of",
"any",
"type"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/frequencyseries/frequencyseries.py#L343-L355 | train | 211,261 |
gwpy/gwpy | gwpy/frequencyseries/frequencyseries.py | FrequencySeries.from_pycbc | def from_pycbc(cls, fs, copy=True):
"""Convert a `pycbc.types.frequencyseries.FrequencySeries` into
a `FrequencySeries`
Parameters
----------
fs : `pycbc.types.frequencyseries.FrequencySeries`
the input PyCBC `~pycbc.types.frequencyseries.FrequencySeries`
array
copy : `bool`, optional, default: `True`
if `True`, copy these data to a new array
Returns
-------
spectrum : `FrequencySeries`
a GWpy version of the input frequency series
"""
return cls(fs.data, f0=0, df=fs.delta_f, epoch=fs.epoch, copy=copy) | python | def from_pycbc(cls, fs, copy=True):
"""Convert a `pycbc.types.frequencyseries.FrequencySeries` into
a `FrequencySeries`
Parameters
----------
fs : `pycbc.types.frequencyseries.FrequencySeries`
the input PyCBC `~pycbc.types.frequencyseries.FrequencySeries`
array
copy : `bool`, optional, default: `True`
if `True`, copy these data to a new array
Returns
-------
spectrum : `FrequencySeries`
a GWpy version of the input frequency series
"""
return cls(fs.data, f0=0, df=fs.delta_f, epoch=fs.epoch, copy=copy) | [
"def",
"from_pycbc",
"(",
"cls",
",",
"fs",
",",
"copy",
"=",
"True",
")",
":",
"return",
"cls",
"(",
"fs",
".",
"data",
",",
"f0",
"=",
"0",
",",
"df",
"=",
"fs",
".",
"delta_f",
",",
"epoch",
"=",
"fs",
".",
"epoch",
",",
"copy",
"=",
"copy... | Convert a `pycbc.types.frequencyseries.FrequencySeries` into
a `FrequencySeries`
Parameters
----------
fs : `pycbc.types.frequencyseries.FrequencySeries`
the input PyCBC `~pycbc.types.frequencyseries.FrequencySeries`
array
copy : `bool`, optional, default: `True`
if `True`, copy these data to a new array
Returns
-------
spectrum : `FrequencySeries`
a GWpy version of the input frequency series | [
"Convert",
"a",
"pycbc",
".",
"types",
".",
"frequencyseries",
".",
"FrequencySeries",
"into",
"a",
"FrequencySeries"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/frequencyseries/frequencyseries.py#L393-L411 | train | 211,262 |
gwpy/gwpy | gwpy/frequencyseries/frequencyseries.py | FrequencySeries.to_pycbc | def to_pycbc(self, copy=True):
"""Convert this `FrequencySeries` into a
`~pycbc.types.frequencyseries.FrequencySeries`
Parameters
----------
copy : `bool`, optional, default: `True`
if `True`, copy these data to a new array
Returns
-------
frequencyseries : `pycbc.types.frequencyseries.FrequencySeries`
a PyCBC representation of this `FrequencySeries`
"""
from pycbc import types
if self.epoch is None:
epoch = None
else:
epoch = self.epoch.gps
return types.FrequencySeries(self.value,
delta_f=self.df.to('Hz').value,
epoch=epoch, copy=copy) | python | def to_pycbc(self, copy=True):
"""Convert this `FrequencySeries` into a
`~pycbc.types.frequencyseries.FrequencySeries`
Parameters
----------
copy : `bool`, optional, default: `True`
if `True`, copy these data to a new array
Returns
-------
frequencyseries : `pycbc.types.frequencyseries.FrequencySeries`
a PyCBC representation of this `FrequencySeries`
"""
from pycbc import types
if self.epoch is None:
epoch = None
else:
epoch = self.epoch.gps
return types.FrequencySeries(self.value,
delta_f=self.df.to('Hz').value,
epoch=epoch, copy=copy) | [
"def",
"to_pycbc",
"(",
"self",
",",
"copy",
"=",
"True",
")",
":",
"from",
"pycbc",
"import",
"types",
"if",
"self",
".",
"epoch",
"is",
"None",
":",
"epoch",
"=",
"None",
"else",
":",
"epoch",
"=",
"self",
".",
"epoch",
".",
"gps",
"return",
"typ... | Convert this `FrequencySeries` into a
`~pycbc.types.frequencyseries.FrequencySeries`
Parameters
----------
copy : `bool`, optional, default: `True`
if `True`, copy these data to a new array
Returns
-------
frequencyseries : `pycbc.types.frequencyseries.FrequencySeries`
a PyCBC representation of this `FrequencySeries` | [
"Convert",
"this",
"FrequencySeries",
"into",
"a",
"~pycbc",
".",
"types",
".",
"frequencyseries",
".",
"FrequencySeries"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/frequencyseries/frequencyseries.py#L413-L435 | train | 211,263 |
gwpy/gwpy | gwpy/timeseries/io/losc.py | _fetch_losc_data_file | def _fetch_losc_data_file(url, *args, **kwargs):
"""Internal function for fetching a single LOSC file and returning a Series
"""
cls = kwargs.pop('cls', TimeSeries)
cache = kwargs.pop('cache', None)
verbose = kwargs.pop('verbose', False)
# match file format
if url.endswith('.gz'):
ext = os.path.splitext(url[:-3])[-1]
else:
ext = os.path.splitext(url)[-1]
if ext == '.hdf5':
kwargs.setdefault('format', 'hdf5.losc')
elif ext == '.txt':
kwargs.setdefault('format', 'ascii.losc')
elif ext == '.gwf':
kwargs.setdefault('format', 'gwf')
with _download_file(url, cache, verbose=verbose) as rem:
# get channel for GWF if not given
if ext == ".gwf" and (not args or args[0] is None):
args = (_gwf_channel(rem, cls, kwargs.get("verbose")),)
if verbose:
print('Reading data...', end=' ')
try:
series = cls.read(rem, *args, **kwargs)
except Exception as exc:
if verbose:
print('')
exc.args = ("Failed to read LOSC data from %r: %s"
% (url, str(exc)),)
raise
else:
# parse bits from unit in GWF
if ext == '.gwf' and isinstance(series, StateVector):
try:
bits = {}
for bit in str(series.unit).split():
a, b = bit.split(':', 1)
bits[int(a)] = b
series.bits = bits
series.override_unit('')
except (TypeError, ValueError): # don't care, bad LOSC
pass
if verbose:
print('[Done]')
return series | python | def _fetch_losc_data_file(url, *args, **kwargs):
"""Internal function for fetching a single LOSC file and returning a Series
"""
cls = kwargs.pop('cls', TimeSeries)
cache = kwargs.pop('cache', None)
verbose = kwargs.pop('verbose', False)
# match file format
if url.endswith('.gz'):
ext = os.path.splitext(url[:-3])[-1]
else:
ext = os.path.splitext(url)[-1]
if ext == '.hdf5':
kwargs.setdefault('format', 'hdf5.losc')
elif ext == '.txt':
kwargs.setdefault('format', 'ascii.losc')
elif ext == '.gwf':
kwargs.setdefault('format', 'gwf')
with _download_file(url, cache, verbose=verbose) as rem:
# get channel for GWF if not given
if ext == ".gwf" and (not args or args[0] is None):
args = (_gwf_channel(rem, cls, kwargs.get("verbose")),)
if verbose:
print('Reading data...', end=' ')
try:
series = cls.read(rem, *args, **kwargs)
except Exception as exc:
if verbose:
print('')
exc.args = ("Failed to read LOSC data from %r: %s"
% (url, str(exc)),)
raise
else:
# parse bits from unit in GWF
if ext == '.gwf' and isinstance(series, StateVector):
try:
bits = {}
for bit in str(series.unit).split():
a, b = bit.split(':', 1)
bits[int(a)] = b
series.bits = bits
series.override_unit('')
except (TypeError, ValueError): # don't care, bad LOSC
pass
if verbose:
print('[Done]')
return series | [
"def",
"_fetch_losc_data_file",
"(",
"url",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cls",
"=",
"kwargs",
".",
"pop",
"(",
"'cls'",
",",
"TimeSeries",
")",
"cache",
"=",
"kwargs",
".",
"pop",
"(",
"'cache'",
",",
"None",
")",
"verbose",
... | Internal function for fetching a single LOSC file and returning a Series | [
"Internal",
"function",
"for",
"fetching",
"a",
"single",
"LOSC",
"file",
"and",
"returning",
"a",
"Series"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/losc.py#L68-L116 | train | 211,264 |
gwpy/gwpy | gwpy/timeseries/io/losc.py | _overlapping | def _overlapping(files):
"""Quick method to see if a file list contains overlapping files
"""
segments = set()
for path in files:
seg = file_segment(path)
for s in segments:
if seg.intersects(s):
return True
segments.add(seg)
return False | python | def _overlapping(files):
"""Quick method to see if a file list contains overlapping files
"""
segments = set()
for path in files:
seg = file_segment(path)
for s in segments:
if seg.intersects(s):
return True
segments.add(seg)
return False | [
"def",
"_overlapping",
"(",
"files",
")",
":",
"segments",
"=",
"set",
"(",
")",
"for",
"path",
"in",
"files",
":",
"seg",
"=",
"file_segment",
"(",
"path",
")",
"for",
"s",
"in",
"segments",
":",
"if",
"seg",
".",
"intersects",
"(",
"s",
")",
":",... | Quick method to see if a file list contains overlapping files | [
"Quick",
"method",
"to",
"see",
"if",
"a",
"file",
"list",
"contains",
"overlapping",
"files"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/losc.py#L119-L129 | train | 211,265 |
gwpy/gwpy | gwpy/timeseries/io/losc.py | fetch_losc_data | def fetch_losc_data(detector, start, end, cls=TimeSeries, **kwargs):
"""Fetch LOSC data for a given detector
This function is for internal purposes only, all users should instead
use the interface provided by `TimeSeries.fetch_open_data` (and similar
for `StateVector.fetch_open_data`).
"""
# format arguments
start = to_gps(start)
end = to_gps(end)
span = Segment(start, end)
kwargs.update({
'start': start,
'end': end,
})
# find URLs (requires gwopensci)
url_kw = {key: kwargs.pop(key) for key in GWOSC_LOCATE_KWARGS if
key in kwargs}
if 'sample_rate' in url_kw: # format as Hertz
url_kw['sample_rate'] = Quantity(url_kw['sample_rate'], 'Hz').value
cache = get_urls(detector, int(start), int(ceil(end)), **url_kw)
# if event dataset, pick shortest file that covers the request
# -- this is a bit hacky, and presumes that only an event dataset
# -- would be produced with overlapping files.
# -- This should probably be improved to use dataset information
if len(cache) and _overlapping(cache):
cache.sort(key=lambda x: abs(file_segment(x)))
for url in cache:
a, b = file_segment(url)
if a <= start and b >= end:
cache = [url]
break
if kwargs.get('verbose', False): # get_urls() guarantees len(cache) >= 1
host = urlparse(cache[0]).netloc
print("Fetched {0} URLs from {1} for [{2} .. {3}))".format(
len(cache), host, int(start), int(ceil(end))))
is_gwf = cache[0].endswith('.gwf')
if is_gwf and len(cache):
args = (kwargs.pop('channel', None),)
else:
args = ()
# read data
out = None
kwargs['cls'] = cls
for url in cache:
keep = file_segment(url) & span
new = _fetch_losc_data_file(url, *args, **kwargs).crop(
*keep, copy=False)
if is_gwf and (not args or args[0] is None):
args = (new.name,)
if out is None:
out = new.copy()
else:
out.append(new, resize=True)
return out | python | def fetch_losc_data(detector, start, end, cls=TimeSeries, **kwargs):
"""Fetch LOSC data for a given detector
This function is for internal purposes only, all users should instead
use the interface provided by `TimeSeries.fetch_open_data` (and similar
for `StateVector.fetch_open_data`).
"""
# format arguments
start = to_gps(start)
end = to_gps(end)
span = Segment(start, end)
kwargs.update({
'start': start,
'end': end,
})
# find URLs (requires gwopensci)
url_kw = {key: kwargs.pop(key) for key in GWOSC_LOCATE_KWARGS if
key in kwargs}
if 'sample_rate' in url_kw: # format as Hertz
url_kw['sample_rate'] = Quantity(url_kw['sample_rate'], 'Hz').value
cache = get_urls(detector, int(start), int(ceil(end)), **url_kw)
# if event dataset, pick shortest file that covers the request
# -- this is a bit hacky, and presumes that only an event dataset
# -- would be produced with overlapping files.
# -- This should probably be improved to use dataset information
if len(cache) and _overlapping(cache):
cache.sort(key=lambda x: abs(file_segment(x)))
for url in cache:
a, b = file_segment(url)
if a <= start and b >= end:
cache = [url]
break
if kwargs.get('verbose', False): # get_urls() guarantees len(cache) >= 1
host = urlparse(cache[0]).netloc
print("Fetched {0} URLs from {1} for [{2} .. {3}))".format(
len(cache), host, int(start), int(ceil(end))))
is_gwf = cache[0].endswith('.gwf')
if is_gwf and len(cache):
args = (kwargs.pop('channel', None),)
else:
args = ()
# read data
out = None
kwargs['cls'] = cls
for url in cache:
keep = file_segment(url) & span
new = _fetch_losc_data_file(url, *args, **kwargs).crop(
*keep, copy=False)
if is_gwf and (not args or args[0] is None):
args = (new.name,)
if out is None:
out = new.copy()
else:
out.append(new, resize=True)
return out | [
"def",
"fetch_losc_data",
"(",
"detector",
",",
"start",
",",
"end",
",",
"cls",
"=",
"TimeSeries",
",",
"*",
"*",
"kwargs",
")",
":",
"# format arguments",
"start",
"=",
"to_gps",
"(",
"start",
")",
"end",
"=",
"to_gps",
"(",
"end",
")",
"span",
"=",
... | Fetch LOSC data for a given detector
This function is for internal purposes only, all users should instead
use the interface provided by `TimeSeries.fetch_open_data` (and similar
for `StateVector.fetch_open_data`). | [
"Fetch",
"LOSC",
"data",
"for",
"a",
"given",
"detector"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/losc.py#L134-L191 | train | 211,266 |
gwpy/gwpy | gwpy/timeseries/io/losc.py | read_losc_hdf5 | def read_losc_hdf5(h5f, path='strain/Strain',
start=None, end=None, copy=False):
"""Read a `TimeSeries` from a LOSC-format HDF file.
Parameters
----------
h5f : `str`, `h5py.HLObject`
path of HDF5 file, or open `H5File`
path : `str`
name of HDF5 dataset to read.
Returns
-------
data : `~gwpy.timeseries.TimeSeries`
a new `TimeSeries` containing the data read from disk
"""
dataset = io_hdf5.find_dataset(h5f, path)
# read data
nddata = dataset[()]
# read metadata
xunit = parse_unit(dataset.attrs['Xunits'])
epoch = dataset.attrs['Xstart']
dt = Quantity(dataset.attrs['Xspacing'], xunit)
unit = dataset.attrs['Yunits']
# build and return
return TimeSeries(nddata, epoch=epoch, sample_rate=(1/dt).to('Hertz'),
unit=unit, name=path.rsplit('/', 1)[1],
copy=copy).crop(start=start, end=end) | python | def read_losc_hdf5(h5f, path='strain/Strain',
start=None, end=None, copy=False):
"""Read a `TimeSeries` from a LOSC-format HDF file.
Parameters
----------
h5f : `str`, `h5py.HLObject`
path of HDF5 file, or open `H5File`
path : `str`
name of HDF5 dataset to read.
Returns
-------
data : `~gwpy.timeseries.TimeSeries`
a new `TimeSeries` containing the data read from disk
"""
dataset = io_hdf5.find_dataset(h5f, path)
# read data
nddata = dataset[()]
# read metadata
xunit = parse_unit(dataset.attrs['Xunits'])
epoch = dataset.attrs['Xstart']
dt = Quantity(dataset.attrs['Xspacing'], xunit)
unit = dataset.attrs['Yunits']
# build and return
return TimeSeries(nddata, epoch=epoch, sample_rate=(1/dt).to('Hertz'),
unit=unit, name=path.rsplit('/', 1)[1],
copy=copy).crop(start=start, end=end) | [
"def",
"read_losc_hdf5",
"(",
"h5f",
",",
"path",
"=",
"'strain/Strain'",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"copy",
"=",
"False",
")",
":",
"dataset",
"=",
"io_hdf5",
".",
"find_dataset",
"(",
"h5f",
",",
"path",
")",
"# read dat... | Read a `TimeSeries` from a LOSC-format HDF file.
Parameters
----------
h5f : `str`, `h5py.HLObject`
path of HDF5 file, or open `H5File`
path : `str`
name of HDF5 dataset to read.
Returns
-------
data : `~gwpy.timeseries.TimeSeries`
a new `TimeSeries` containing the data read from disk | [
"Read",
"a",
"TimeSeries",
"from",
"a",
"LOSC",
"-",
"format",
"HDF",
"file",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/losc.py#L197-L225 | train | 211,267 |
gwpy/gwpy | gwpy/timeseries/io/losc.py | read_losc_hdf5_state | def read_losc_hdf5_state(f, path='quality/simple', start=None, end=None,
copy=False):
"""Read a `StateVector` from a LOSC-format HDF file.
Parameters
----------
f : `str`, `h5py.HLObject`
path of HDF5 file, or open `H5File`
path : `str`
path of HDF5 dataset to read.
start : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
start GPS time of desired data
end : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
end GPS time of desired data
copy : `bool`, default: `False`
create a fresh-memory copy of the underlying array
Returns
-------
data : `~gwpy.timeseries.TimeSeries`
a new `TimeSeries` containing the data read from disk
"""
# find data
dataset = io_hdf5.find_dataset(f, '%s/DQmask' % path)
maskset = io_hdf5.find_dataset(f, '%s/DQDescriptions' % path)
# read data
nddata = dataset[()]
bits = [bytes.decode(bytes(b), 'utf-8') for b in maskset[()]]
# read metadata
epoch = dataset.attrs['Xstart']
try:
dt = dataset.attrs['Xspacing']
except KeyError:
dt = Quantity(1, 's')
else:
xunit = parse_unit(dataset.attrs['Xunits'])
dt = Quantity(dt, xunit)
return StateVector(nddata, bits=bits, t0=epoch, name='Data quality',
dx=dt, copy=copy).crop(start=start, end=end) | python | def read_losc_hdf5_state(f, path='quality/simple', start=None, end=None,
copy=False):
"""Read a `StateVector` from a LOSC-format HDF file.
Parameters
----------
f : `str`, `h5py.HLObject`
path of HDF5 file, or open `H5File`
path : `str`
path of HDF5 dataset to read.
start : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
start GPS time of desired data
end : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
end GPS time of desired data
copy : `bool`, default: `False`
create a fresh-memory copy of the underlying array
Returns
-------
data : `~gwpy.timeseries.TimeSeries`
a new `TimeSeries` containing the data read from disk
"""
# find data
dataset = io_hdf5.find_dataset(f, '%s/DQmask' % path)
maskset = io_hdf5.find_dataset(f, '%s/DQDescriptions' % path)
# read data
nddata = dataset[()]
bits = [bytes.decode(bytes(b), 'utf-8') for b in maskset[()]]
# read metadata
epoch = dataset.attrs['Xstart']
try:
dt = dataset.attrs['Xspacing']
except KeyError:
dt = Quantity(1, 's')
else:
xunit = parse_unit(dataset.attrs['Xunits'])
dt = Quantity(dt, xunit)
return StateVector(nddata, bits=bits, t0=epoch, name='Data quality',
dx=dt, copy=copy).crop(start=start, end=end) | [
"def",
"read_losc_hdf5_state",
"(",
"f",
",",
"path",
"=",
"'quality/simple'",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"copy",
"=",
"False",
")",
":",
"# find data",
"dataset",
"=",
"io_hdf5",
".",
"find_dataset",
"(",
"f",
",",
"'%s/DQm... | Read a `StateVector` from a LOSC-format HDF file.
Parameters
----------
f : `str`, `h5py.HLObject`
path of HDF5 file, or open `H5File`
path : `str`
path of HDF5 dataset to read.
start : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
start GPS time of desired data
end : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
end GPS time of desired data
copy : `bool`, default: `False`
create a fresh-memory copy of the underlying array
Returns
-------
data : `~gwpy.timeseries.TimeSeries`
a new `TimeSeries` containing the data read from disk | [
"Read",
"a",
"StateVector",
"from",
"a",
"LOSC",
"-",
"format",
"HDF",
"file",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/losc.py#L229-L271 | train | 211,268 |
gwpy/gwpy | gwpy/timeseries/io/losc.py | _gwf_channel | def _gwf_channel(path, series_class=TimeSeries, verbose=False):
"""Find the right channel name for a LOSC GWF file
"""
channels = list(io_gwf.iter_channel_names(file_path(path)))
if issubclass(series_class, StateVector):
regex = DQMASK_CHANNEL_REGEX
else:
regex = STRAIN_CHANNEL_REGEX
found, = list(filter(regex.match, channels))
if verbose:
print("Using channel {0!r}".format(found))
return found | python | def _gwf_channel(path, series_class=TimeSeries, verbose=False):
"""Find the right channel name for a LOSC GWF file
"""
channels = list(io_gwf.iter_channel_names(file_path(path)))
if issubclass(series_class, StateVector):
regex = DQMASK_CHANNEL_REGEX
else:
regex = STRAIN_CHANNEL_REGEX
found, = list(filter(regex.match, channels))
if verbose:
print("Using channel {0!r}".format(found))
return found | [
"def",
"_gwf_channel",
"(",
"path",
",",
"series_class",
"=",
"TimeSeries",
",",
"verbose",
"=",
"False",
")",
":",
"channels",
"=",
"list",
"(",
"io_gwf",
".",
"iter_channel_names",
"(",
"file_path",
"(",
"path",
")",
")",
")",
"if",
"issubclass",
"(",
... | Find the right channel name for a LOSC GWF file | [
"Find",
"the",
"right",
"channel",
"name",
"for",
"a",
"LOSC",
"GWF",
"file"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/losc.py#L274-L285 | train | 211,269 |
def from_segwizard(source, gpstype=LIGOTimeGPS, strict=True):
    """Read segments from a segwizard format file into a `SegmentList`

    Parameters
    ----------
    source : `file`, `str`
        An open file, or file path, from which to read.

    gpstype : `type`, optional
        The numeric type to which to cast times (from `str`) when reading.

    strict : `bool`, optional
        Check that recorded duration matches ``end-start`` for all segments;
        only used when reading from a 3+-column file.

    Returns
    -------
    segments : `~gwpy.segments.SegmentList`
        The list of segments as parsed from the file.

    Notes
    -----
    This method is adapted from original code written by Kipp Cannon and
    distributed under GPLv3.
    """
    # given a path: open it and recurse with the file object
    if isinstance(source, string_types):
        with open(source, 'r') as fobj:
            return from_segwizard(fobj, gpstype=gpstype, strict=strict)
    # otherwise parse the open file line by line
    segments = SegmentList()
    pattern = None
    for row in source:
        if row.startswith(('#', ';')):  # skip comment lines
            continue
        # sniff the column layout from the first data row
        if pattern is None:
            pattern = _line_format(row)
        fields, = pattern.findall(row)
        # the last three fields are (start, end[, duration])
        segments.append(
            _format_segment(fields[-3:], gpstype=gpstype, strict=strict))
    return segments
"""Read segments from a segwizard format file into a `SegmentList`
Parameters
----------
source : `file`, `str`
An open file, or file path, from which to read
gpstype : `type`, optional
The numeric type to which to cast times (from `str`) when reading.
strict : `bool`, optional
Check that recorded duration matches ``end-start`` for all segments;
only used when reading from a 3+-column file.
Returns
-------
segments : `~gwpy.segments.SegmentList`
The list of segments as parsed from the file.
Notes
-----
This method is adapted from original code written by Kipp Cannon and
distributed under GPLv3.
"""
# read file path
if isinstance(source, string_types):
with open(source, 'r') as fobj:
return from_segwizard(fobj, gpstype=gpstype, strict=strict)
# read file object
out = SegmentList()
fmt_pat = None
for line in source:
if line.startswith(('#', ';')): # comment
continue
# determine line format
if fmt_pat is None:
fmt_pat = _line_format(line)
# parse line
tokens, = fmt_pat.findall(line)
out.append(_format_segment(tokens[-3:], gpstype=gpstype,
strict=strict))
return out | [
"def",
"from_segwizard",
"(",
"source",
",",
"gpstype",
"=",
"LIGOTimeGPS",
",",
"strict",
"=",
"True",
")",
":",
"# read file path",
"if",
"isinstance",
"(",
"source",
",",
"string_types",
")",
":",
"with",
"open",
"(",
"source",
",",
"'r'",
")",
"as",
... | Read segments from a segwizard format file into a `SegmentList`
Parameters
----------
source : `file`, `str`
An open file, or file path, from which to read
gpstype : `type`, optional
The numeric type to which to cast times (from `str`) when reading.
strict : `bool`, optional
Check that recorded duration matches ``end-start`` for all segments;
only used when reading from a 3+-column file.
Returns
-------
segments : `~gwpy.segments.SegmentList`
The list of segments as parsed from the file.
Notes
-----
This method is adapted from original code written by Kipp Cannon and
distributed under GPLv3. | [
"Read",
"segments",
"from",
"a",
"segwizard",
"format",
"file",
"into",
"a",
"SegmentList"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/segwizard.py#L50-L93 | train | 211,270 |
def _line_format(line):
    """Return the column-format regex that matches ``line``.

    The four-, three-, and two-column patterns are tried in turn; the
    first that matches is returned, and `ValueError` is raised if none do.
    """
    for candidate in (FOUR_COL_REGEX, THREE_COL_REGEX, TWO_COL_REGEX):
        if candidate.match(line):
            return candidate
    raise ValueError("unable to parse segment from line {!r}".format(line))
"""Determine the column format pattern for a line in an ASCII segment file.
"""
for pat in (FOUR_COL_REGEX, THREE_COL_REGEX, TWO_COL_REGEX):
if pat.match(line):
return pat
raise ValueError("unable to parse segment from line {!r}".format(line)) | [
"def",
"_line_format",
"(",
"line",
")",
":",
"for",
"pat",
"in",
"(",
"FOUR_COL_REGEX",
",",
"THREE_COL_REGEX",
",",
"TWO_COL_REGEX",
")",
":",
"if",
"pat",
".",
"match",
"(",
"line",
")",
":",
"return",
"pat",
"raise",
"ValueError",
"(",
"\"unable to par... | Determine the column format pattern for a line in an ASCII segment file. | [
"Determine",
"the",
"column",
"format",
"pattern",
"for",
"a",
"line",
"in",
"an",
"ASCII",
"segment",
"file",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/segwizard.py#L96-L102 | train | 211,271 |
def _format_segment(tokens, strict=True, gpstype=LIGOTimeGPS):
    """Build a `Segment` from a list of tokens parsed from an ASCII file.

    ``tokens`` holds either ``(start, end)`` or ``(start, end, duration)``
    strings; in the three-column case, when ``strict`` is true, the
    recorded duration must equal ``end - start`` or `ValueError` is raised.
    """
    try:
        start, end, duration = tokens
    except ValueError:
        # only two columns: nothing to validate
        return Segment(*map(gpstype, tokens))
    segment = Segment(gpstype(start), gpstype(end))
    if strict and not float(abs(segment)) == float(duration):
        raise ValueError(
            "segment {0!r} has incorrect duration {1!r}".format(
                segment, duration),
        )
    return segment
"""Format a list of tokens parsed from an ASCII file into a segment.
"""
try:
start, end, dur = tokens
except ValueError: # two-columns
return Segment(*map(gpstype, tokens))
seg = Segment(gpstype(start), gpstype(end))
if strict and not float(abs(seg)) == float(dur):
raise ValueError(
"segment {0!r} has incorrect duration {1!r}".format(seg, dur),
)
return seg | [
"def",
"_format_segment",
"(",
"tokens",
",",
"strict",
"=",
"True",
",",
"gpstype",
"=",
"LIGOTimeGPS",
")",
":",
"try",
":",
"start",
",",
"end",
",",
"dur",
"=",
"tokens",
"except",
"ValueError",
":",
"# two-columns",
"return",
"Segment",
"(",
"*",
"m... | Format a list of tokens parsed from an ASCII file into a segment. | [
"Format",
"a",
"list",
"of",
"tokens",
"parsed",
"from",
"an",
"ASCII",
"file",
"into",
"a",
"segment",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/segwizard.py#L105-L117 | train | 211,272 |
def to_segwizard(segs, target, header=True, coltype=LIGOTimeGPS):
    """Write the given `SegmentList` to a file in SegWizard format.

    Parameters
    ----------
    segs : :class:`~gwpy.segments.SegmentList`
        The list of segments to write.

    target : `file`, `str`
        An open file, or file path, to which to write.

    header : `bool`, optional
        Print a column header into the file, default: `True`.

    coltype : `type`, optional
        The numerical type in which to cast times before printing.

    Notes
    -----
    This method is adapted from original code written by Kipp Cannon and
    distributed under GPLv3.
    """
    # given a path: open it and recurse with the file object
    if isinstance(target, string_types):
        with open(target, 'w') as fobj:
            return to_segwizard(segs, fobj, header=header, coltype=coltype)
    # otherwise write one tab-separated row per segment
    if header:
        print('# seg\tstart\tstop\tduration', file=target)
    for index, segment in enumerate(segs):
        start = coltype(segment[0])
        stop = coltype(segment[1])
        duration = float(stop - start)
        print(
            '\t'.join(map(str, (index, start, stop, duration))),
            file=target,
        )
"""Write the given `SegmentList` to a file in SegWizard format.
Parameters
----------
segs : :class:`~gwpy.segments.SegmentList`
The list of segments to write.
target : `file`, `str`
An open file, or file path, to which to write.
header : `bool`, optional
Print a column header into the file, default: `True`.
coltype : `type`, optional
The numerical type in which to cast times before printing.
Notes
-----
This method is adapted from original code written by Kipp Cannon and
distributed under GPLv3.
"""
# write file path
if isinstance(target, string_types):
with open(target, 'w') as fobj:
return to_segwizard(segs, fobj, header=header, coltype=coltype)
# write file object
if header:
print('# seg\tstart\tstop\tduration', file=target)
for i, seg in enumerate(segs):
a = coltype(seg[0])
b = coltype(seg[1])
c = float(b - a)
print(
'\t'.join(map(str, (i, a, b, c))),
file=target,
) | [
"def",
"to_segwizard",
"(",
"segs",
",",
"target",
",",
"header",
"=",
"True",
",",
"coltype",
"=",
"LIGOTimeGPS",
")",
":",
"# write file path",
"if",
"isinstance",
"(",
"target",
",",
"string_types",
")",
":",
"with",
"open",
"(",
"target",
",",
"'w'",
... | Write the given `SegmentList` to a file in SegWizard format.
Parameters
----------
segs : :class:`~gwpy.segments.SegmentList`
The list of segments to write.
target : `file`, `str`
An open file, or file path, to which to write.
header : `bool`, optional
Print a column header into the file, default: `True`.
coltype : `type`, optional
The numerical type in which to cast times before printing.
Notes
-----
This method is adapted from original code written by Kipp Cannon and
distributed under GPLv3. | [
"Write",
"the",
"given",
"SegmentList",
"to",
"a",
"file",
"in",
"SegWizard",
"format",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/segwizard.py#L123-L160 | train | 211,273 |
def gopen(name, *args, **kwargs):
    """Open a file handling optional gzipping

    If ``name`` endswith ``'.gz'``, or if the GZIP file signature is
    found at the beginning of the file, the file will be opened with
    `gzip.open`, otherwise a regular file will be returned from `open`.

    Parameters
    ----------
    name : `str`
        path (name) of file to open.

    *args, **kwargs
        other arguments to pass to either `open` for regular files, or
        `gzip.open` for gzipped files.

    Returns
    -------
    file : `io.TextIoBase`, `file`, `gzip.GzipFile`
        the open file object
    """
    # trust the file extension first
    if name.endswith('.gz'):
        return gzip.open(name, *args, **kwargs)
    # otherwise sniff the magic bytes at the start of the file
    # NOTE(review): this presumes the mode yields bytes comparable to the
    # module-level GZIP_SIGNATURE — confirm for text-mode callers
    fobj = open(name, *args, **kwargs)
    signature = fobj.read(3)
    fobj.seek(0)
    if signature == GZIP_SIGNATURE:
        # reopen via gzip; GzipFile would not close the original handle
        fobj.close()
        return gzip.open(name, *args, **kwargs)
    return fobj
"""Open a file handling optional gzipping
If ``name`` endswith ``'.gz'``, or if the GZIP file signature is
found at the beginning of the file, the file will be opened with
`gzip.open`, otherwise a regular file will be returned from `open`.
Parameters
----------
name : `str`
path (name) of file to open.
*args, **kwargs
other arguments to pass to either `open` for regular files, or
`gzip.open` for gzipped files.
Returns
-------
file : `io.TextIoBase`, `file`, `gzip.GzipFile`
the open file object
"""
# filename declares gzip
if name.endswith('.gz'):
return gzip.open(name, *args, **kwargs)
# open regular file
fobj = open(name, *args, **kwargs)
sig = fobj.read(3)
fobj.seek(0)
if sig == GZIP_SIGNATURE: # file signature declares gzip
fobj.close() # GzipFile won't close orig file when it closes
return gzip.open(name, *args, **kwargs)
return fobj | [
"def",
"gopen",
"(",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# filename declares gzip",
"if",
"name",
".",
"endswith",
"(",
"'.gz'",
")",
":",
"return",
"gzip",
".",
"open",
"(",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kwarg... | Open a file handling optional gzipping
If ``name`` endswith ``'.gz'``, or if the GZIP file signature is
found at the beginning of the file, the file will be opened with
`gzip.open`, otherwise a regular file will be returned from `open`.
Parameters
----------
name : `str`
path (name) of file to open.
*args, **kwargs
other arguments to pass to either `open` for regular files, or
`gzip.open` for gzipped files.
Returns
-------
file : `io.TextIoBase`, `file`, `gzip.GzipFile`
the open file object | [
"Open",
"a",
"file",
"handling",
"optional",
"gzipping"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/utils.py#L74-L106 | train | 211,274 |
def file_list(flist):
    """Parse a number of possible input types into a list of filepaths.

    Parameters
    ----------
    flist : `file-like` or `list-like` iterable
        the input data container, normally just a single file path, or a
        list of paths, but can generally be any of the following

        - `str` representing a single file path (or comma-separated
          collection)
        - open `file` or `~gzip.GzipFile` object
        - :class:`~lal.utils.CacheEntry`
        - `str` with ``.cache`` or ``.lcf`` extension
        - simple `list` or `tuple` of `str` paths

    Returns
    -------
    files : `list`
        `list` of `str` file paths

    Raises
    ------
    ValueError
        if the input `flist` cannot be interpreted as any of the above
        inputs
    """
    # cache file: read it and return the paths it contains
    if (isinstance(flist, string_types) and
            flist.endswith(('.cache', '.lcf', '.ffl'))):
        from .cache import read_cache
        return read_cache(flist)
    # plain string: may be a comma-separated collection of names
    if isinstance(flist, string_types):
        return flist.split(',')
    # list or tuple: resolve each entry to a path
    if isinstance(flist, (list, tuple)):
        return [file_path(entry) for entry in flist]
    # anything else: try to resolve it as a single file-like object
    try:
        return [file_path(flist)]
    except ValueError as exc:
        exc.args = (
            "Could not parse input {!r} as one or more "
            "file-like objects".format(flist),
        )
        raise
"""Parse a number of possible input types into a list of filepaths.
Parameters
----------
flist : `file-like` or `list-like` iterable
the input data container, normally just a single file path, or a list
of paths, but can generally be any of the following
- `str` representing a single file path (or comma-separated collection)
- open `file` or `~gzip.GzipFile` object
- :class:`~lal.utils.CacheEntry`
- `str` with ``.cache`` or ``.lcf`` extension
- simple `list` or `tuple` of `str` paths
Returns
-------
files : `list`
`list` of `str` file paths
Raises
------
ValueError
if the input `flist` cannot be interpreted as any of the above inputs
"""
# open a cache file and return list of paths
if (isinstance(flist, string_types) and
flist.endswith(('.cache', '.lcf', '.ffl'))):
from .cache import read_cache
return read_cache(flist)
# separate comma-separate list of names
if isinstance(flist, string_types):
return flist.split(',')
# parse list of entries (of some format)
if isinstance(flist, (list, tuple)):
return list(map(file_path, flist))
# otherwise parse a single entry
try:
return [file_path(flist)]
except ValueError as exc:
exc.args = (
"Could not parse input {!r} as one or more "
"file-like objects".format(flist),
)
raise | [
"def",
"file_list",
"(",
"flist",
")",
":",
"# open a cache file and return list of paths",
"if",
"(",
"isinstance",
"(",
"flist",
",",
"string_types",
")",
"and",
"flist",
".",
"endswith",
"(",
"(",
"'.cache'",
",",
"'.lcf'",
",",
"'.ffl'",
")",
")",
")",
"... | Parse a number of possible input types into a list of filepaths.
Parameters
----------
flist : `file-like` or `list-like` iterable
the input data container, normally just a single file path, or a list
of paths, but can generally be any of the following
- `str` representing a single file path (or comma-separated collection)
- open `file` or `~gzip.GzipFile` object
- :class:`~lal.utils.CacheEntry`
- `str` with ``.cache`` or ``.lcf`` extension
- simple `list` or `tuple` of `str` paths
Returns
-------
files : `list`
`list` of `str` file paths
Raises
------
ValueError
if the input `flist` cannot be interpreted as any of the above inputs | [
"Parse",
"a",
"number",
"of",
"possible",
"input",
"types",
"into",
"a",
"list",
"of",
"filepaths",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/utils.py#L111-L158 | train | 211,275 |
def file_path(fobj):
    """Determine the path of a file.

    This doesn't do any sanity checking to check that the file
    actually exists, or is readable.

    Parameters
    ----------
    fobj : `file`, `str`, `CacheEntry`, ...
        the file object or path to parse

    Returns
    -------
    path : `str`
        the path of the underlying file

    Raises
    ------
    ValueError
        if a file path cannot be determined

    Examples
    --------
    >>> from gwpy.io.utils import file_path
    >>> file_path("test.txt")
    'test.txt'
    >>> file_path(open("test.txt", "r"))
    'test.txt'
    >>> file_path("file:///home/user/test.txt")
    '/home/user/test.txt'
    """
    if isinstance(fobj, string_types):
        # strip an optional file:// scheme from a path string
        if fobj.startswith("file:"):
            return urlparse(fobj).path
        return fobj
    # open file objects carry their path as the 'name' attribute
    if isinstance(fobj, FILE_LIKE) and hasattr(fobj, "name"):
        return fobj.name
    # fall back to a CacheEntry-style 'path' attribute
    try:
        return fobj.path
    except AttributeError:
        raise ValueError("Cannot parse file name for {!r}".format(fobj))
"""Determine the path of a file.
This doesn't do any sanity checking to check that the file
actually exists, or is readable.
Parameters
----------
fobj : `file`, `str`, `CacheEntry`, ...
the file object or path to parse
Returns
-------
path : `str`
the path of the underlying file
Raises
------
ValueError
        if a file path cannot be determined
Examples
--------
>>> from gwpy.io.utils import file_path
>>> file_path("test.txt")
'test.txt'
>>> file_path(open("test.txt", "r"))
'test.txt'
>>> file_path("file:///home/user/test.txt")
'/home/user/test.txt'
"""
if isinstance(fobj, string_types) and fobj.startswith("file:"):
return urlparse(fobj).path
if isinstance(fobj, string_types):
return fobj
if (isinstance(fobj, FILE_LIKE) and hasattr(fobj, "name")):
return fobj.name
try:
return fobj.path
except AttributeError:
raise ValueError("Cannot parse file name for {!r}".format(fobj)) | [
"def",
"file_path",
"(",
"fobj",
")",
":",
"if",
"isinstance",
"(",
"fobj",
",",
"string_types",
")",
"and",
"fobj",
".",
"startswith",
"(",
"\"file:\"",
")",
":",
"return",
"urlparse",
"(",
"fobj",
")",
".",
"path",
"if",
"isinstance",
"(",
"fobj",
",... | Determine the path of a file.
This doesn't do any sanity checking to check that the file
actually exists, or is readable.
Parameters
----------
fobj : `file`, `str`, `CacheEntry`, ...
the file object or path to parse
Returns
-------
path : `str`
the path of the underlying file
Raises
------
ValueError
        if a file path cannot be determined
Examples
--------
>>> from gwpy.io.utils import file_path
>>> file_path("test.txt")
'test.txt'
>>> file_path(open("test.txt", "r"))
'test.txt'
>>> file_path("file:///home/user/test.txt")
'/home/user/test.txt' | [
"Determine",
"the",
"path",
"of",
"a",
"file",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/utils.py#L161-L201 | train | 211,276 |
def process_in_out_queues(func, q_in, q_out):
    """Drain an input `Queue` through ``func``, queueing each result.

    Parameters
    ----------
    func : `callable`
        any function that can take an element of the input `Queue` as
        the only argument

    q_in : `multiprocessing.queue.Queue`
        the input `Queue`, holding ``(index, argument)`` pairs

    q_out : `multiprocessing.queue.Queue`
        the output `Queue`, receiving ``(index, result)`` pairs

    Notes
    -----
    To close the input `Queue`, add ``(None, None)`` as the last item.
    """
    while True:
        idx, item = q_in.get()
        # a None index is the sentinel marking the end of the input
        if idx is None:
            break
        q_out.put((idx, func(item)))
"""Iterate through a Queue, call, ``func`, and Queue the result
Parameters
----------
func : `callable`
any function that can take an element of the input `Queue` as
the only argument
q_in : `multiprocessing.queue.Queue`
the input `Queue`
q_out : `multiprocessing.queue.Queue`
the output `Queue`
Notes
-----
To close the input `Queue`, add ``(None, None)` as the last item
"""
while True:
        # pick item out of input queue
idx, arg = q_in.get()
if idx is None: # sentinel
break
# execute method and put the result in the output queue
q_out.put((idx, func(arg))) | [
"def",
"process_in_out_queues",
"(",
"func",
",",
"q_in",
",",
"q_out",
")",
":",
"while",
"True",
":",
"# pick item out of input wqueue",
"idx",
",",
"arg",
"=",
"q_in",
".",
"get",
"(",
")",
"if",
"idx",
"is",
"None",
":",
"# sentinel",
"break",
"# execu... | Iterate through a Queue, call, ``func`, and Queue the result
Parameters
----------
func : `callable`
any function that can take an element of the input `Queue` as
the only argument
q_in : `multiprocessing.queue.Queue`
the input `Queue`
q_out : `multiprocessing.queue.Queue`
the output `Queue`
Notes
-----
To close the input `Queue`, add ``(None, None)` as the last item | [
"Iterate",
"through",
"a",
"Queue",
"call",
"func",
"and",
"Queue",
"the",
"result"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/utils/mp.py#L30-L55 | train | 211,277 |
def multiprocess_with_queues(nproc, func, inputs, verbose=False,
                             **progress_kw):
    """Map a function over a list of inputs using multiprocess

    This essentially duplicates `multiprocess.map` but allows for
    arbitrary functions (that aren't necessarily importable)

    Parameters
    ----------
    nproc : `int`
        number of processes to use, if ``1`` is given, the current process
        is used, and no child processes are forked

    func : `callable`
        the function to call in each iteration, should take a single
        argument that is the next element from ``inputs``

    inputs : `iterable`
        iterable (e.g. `list`) of inputs, each element of which is
        passed to ``func`` in one of the child processes

    verbose : `bool`, `str`, optional
        if `True`, print progress to the console as a bar, pass a
        `str` to customise the heading for the progress bar, default:
        `False` (default heading ``'Processing:'`` if ``verbose=True``)

    Returns
    -------
    outputs : `list`
        the `list` of results from calling ``func(x)`` for each element
        of ``inputs``
    """
    # multiprocessing (via fork) is unavailable on Windows, so fall back
    # to serial processing there
    if nproc != 1 and os.name == 'nt':
        warnings.warn(
            "multiprocessing is currently not supported on Windows, see "
            "https://github.com/gwpy/gwpy/issues/880, will continue with "
            "serial procesing (nproc=1)")
        nproc = 1
    # the 'raise_exceptions' keyword is deprecated: exceptions are now
    # always re-raised in the parent (see loop over results at the end)
    if progress_kw.pop('raise_exceptions', None) is not None:
        warnings.warn("the `raise_exceptions` keyword to "
                      "multiprocess_with_queues is deprecated, and will be "
                      "removed in a future release, all exceptions will be "
                      "raised if they occur", DeprecationWarning)
    # create progress bar for verbose output
    if bool(verbose):
        if not isinstance(verbose, bool):
            # a string 'verbose' customises the progress-bar heading
            progress_kw['desc'] = str(verbose)
        if isinstance(inputs, (list, tuple)):
            progress_kw.setdefault('total', len(inputs))
        pbar = progress_bar(**progress_kw)
    else:
        pbar = None

    # -------------------------------------------

    def _inner(x):
        """Run function capturing errors
        """
        try:
            return func(x)
        except Exception as exc:  # pylint: disable=broad-except
            # serial mode: re-raise immediately; parallel mode: return the
            # exception object so it travels back through the output queue
            # and is re-raised by the parent after all results arrive
            if nproc == 1:
                raise
            return exc
        finally:
            # in parallel mode the parent updates the bar as results
            # arrive, so only update here when running serially
            if pbar and nproc == 1:
                pbar.update()

    # -------------------------------------------
    # shortcut single process

    if nproc == 1:
        return list(map(_inner, inputs))

    # -------------------------------------------
    # create input and output queues

    q_in = Queue()
    q_out = Queue()

    # create child processes and start
    proclist = [Process(target=process_in_out_queues,
                        args=(_inner, q_in, q_out)) for _ in range(nproc)]
    for proc in proclist:
        proc.daemon = True
        proc.start()

    # populate queue (no need to block in serial put())
    sent = [q_in.put(x, block=False) for x in enumerate(inputs)]
    for _ in range(nproc):  # add sentinel for each process
        q_in.put((None, None))

    # get results (in completion order, not input order)
    res = []
    for _ in range(len(sent)):
        x = q_out.get()
        if pbar:
            pbar.update()
        res.append(x)

    # close processes and unwrap results
    for proc in proclist:
        proc.join()
    if pbar:
        pbar.close()

    # unwrap results in order (each item is an (index, result) pair)
    results = [out for _, out in sorted(res, key=itemgetter(0))]

    # re-raise here any exceptions captured in the children
    for res in results:
        if isinstance(res, Exception):
            raise res

    return results
**progress_kw):
"""Map a function over a list of inputs using multiprocess
This essentially duplicates `multiprocess.map` but allows for
arbitrary functions (that aren't necessarily importable)
Parameters
----------
nproc : `int`
number of processes to use, if ``1`` is given, the current process
is used, and no child processes are forked
func : `callable`
the function to call in each iteration, should take a single
argument that is the next element from ``inputs``
inputs : `iterable`
iterable (e.g. `list`) of inputs, each element of which is
passed to ``func`` in one of the child processes
verbose : `bool`, `str`, optional
if `True`, print progress to the console as a bar, pass a
`str` to customise the heading for the progress bar, default: `False`,
(default heading ``'Processing:'`` if ``verbose=True`)
Returns
-------
outputs : `list`
the `list` of results from calling ``func(x)`` for each element
of ``inputs``
"""
if nproc != 1 and os.name == 'nt':
warnings.warn(
"multiprocessing is currently not supported on Windows, see "
"https://github.com/gwpy/gwpy/issues/880, will continue with "
"serial procesing (nproc=1)")
nproc = 1
if progress_kw.pop('raise_exceptions', None) is not None:
warnings.warn("the `raise_exceptions` keyword to "
"multiprocess_with_queues is deprecated, and will be "
"removed in a future release, all exceptions will be "
"raised if they occur", DeprecationWarning)
# create progress bar for verbose output
if bool(verbose):
if not isinstance(verbose, bool):
progress_kw['desc'] = str(verbose)
if isinstance(inputs, (list, tuple)):
progress_kw.setdefault('total', len(inputs))
pbar = progress_bar(**progress_kw)
else:
pbar = None
# -------------------------------------------
def _inner(x):
"""Run function capturing errors
"""
try:
return func(x)
except Exception as exc: # pylint: disable=broad-except
if nproc == 1:
raise
return exc
finally:
if pbar and nproc == 1:
pbar.update()
# -------------------------------------------
# shortcut single process
if nproc == 1:
return list(map(_inner, inputs))
# -------------------------------------------
# create input and output queues
q_in = Queue()
q_out = Queue()
# create child processes and start
proclist = [Process(target=process_in_out_queues,
args=(_inner, q_in, q_out)) for _ in range(nproc)]
for proc in proclist:
proc.daemon = True
proc.start()
# populate queue (no need to block in serial put())
sent = [q_in.put(x, block=False) for x in enumerate(inputs)]
for _ in range(nproc): # add sentinel for each process
q_in.put((None, None))
# get results
res = []
for _ in range(len(sent)):
x = q_out.get()
if pbar:
pbar.update()
res.append(x)
# close processes and unwrap results
for proc in proclist:
proc.join()
if pbar:
pbar.close()
# unwrap results in order
results = [out for _, out in sorted(res, key=itemgetter(0))]
# raise exceptions here
for res in results:
if isinstance(res, Exception):
raise res
return results | [
"def",
"multiprocess_with_queues",
"(",
"nproc",
",",
"func",
",",
"inputs",
",",
"verbose",
"=",
"False",
",",
"*",
"*",
"progress_kw",
")",
":",
"if",
"nproc",
"!=",
"1",
"and",
"os",
".",
"name",
"==",
"'nt'",
":",
"warnings",
".",
"warn",
"(",
"\... | Map a function over a list of inputs using multiprocess
This essentially duplicates `multiprocess.map` but allows for
arbitrary functions (that aren't necessarily importable)
Parameters
----------
nproc : `int`
number of processes to use, if ``1`` is given, the current process
is used, and no child processes are forked
func : `callable`
the function to call in each iteration, should take a single
argument that is the next element from ``inputs``
inputs : `iterable`
iterable (e.g. `list`) of inputs, each element of which is
passed to ``func`` in one of the child processes
verbose : `bool`, `str`, optional
if `True`, print progress to the console as a bar, pass a
`str` to customise the heading for the progress bar, default: `False`,
(default heading ``'Processing:'`` if ``verbose=True`)
Returns
-------
outputs : `list`
the `list` of results from calling ``func(x)`` for each element
of ``inputs`` | [
"Map",
"a",
"function",
"over",
"a",
"list",
"of",
"inputs",
"using",
"multiprocess"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/utils/mp.py#L58-L176 | train | 211,278 |
def epoch(self):
    """GPS epoch associated with these data

    :type: `~astropy.time.Time`
    """
    try:
        gps = self._epoch
    except AttributeError:
        # no epoch has ever been set; record and return the default
        self._epoch = None
        return self._epoch
    if gps is None:
        return None
    # split the GPS float into (integer, fractional) seconds for Time
    frac, whole = modf(gps)
    return Time(whole, frac, format='gps', scale='utc')
"""GPS epoch associated with these data
:type: `~astropy.time.Time`
"""
try:
if self._epoch is None:
return None
return Time(*modf(self._epoch)[::-1], format='gps', scale='utc')
except AttributeError:
self._epoch = None
return self._epoch | [
"def",
"epoch",
"(",
"self",
")",
":",
"try",
":",
"if",
"self",
".",
"_epoch",
"is",
"None",
":",
"return",
"None",
"return",
"Time",
"(",
"*",
"modf",
"(",
"self",
".",
"_epoch",
")",
"[",
":",
":",
"-",
"1",
"]",
",",
"format",
"=",
"'gps'",... | GPS epoch associated with these data
:type: `~astropy.time.Time` | [
"GPS",
"epoch",
"associated",
"with",
"these",
"data"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/array.py#L307-L318 | train | 211,279 |
def override_unit(self, unit, parse_strict='raise'):
    """Forcefully reset the unit of these data

    Use of this method is discouraged in favour of `to()`,
    which performs accurate conversions from one unit to another.
    The method should really only be used when the original unit of the
    array is plain wrong.

    Parameters
    ----------
    unit : `~astropy.units.Unit`, `str`
        the unit to force onto this array

    parse_strict : `str`, optional
        how to handle errors in the unit parsing, default is to
        raise the underlying exception from `astropy.units`

    Raises
    ------
    ValueError
        if a `str` cannot be parsed as a valid unit
    """
    # parse first so that a bad unit never clobbers the current one
    new_unit = parse_unit(unit, parse_strict=parse_strict)
    self._unit = new_unit
"""Forcefully reset the unit of these data
Use of this method is discouraged in favour of `to()`,
which performs accurate conversions from one unit to another.
The method should really only be used when the original unit of the
array is plain wrong.
Parameters
----------
unit : `~astropy.units.Unit`, `str`
the unit to force onto this array
parse_strict : `str`, optional
how to handle errors in the unit parsing, default is to
raise the underlying exception from `astropy.units`
Raises
------
ValueError
if a `str` cannot be parsed as a valid unit
"""
self._unit = parse_unit(unit, parse_strict=parse_strict) | [
"def",
"override_unit",
"(",
"self",
",",
"unit",
",",
"parse_strict",
"=",
"'raise'",
")",
":",
"self",
".",
"_unit",
"=",
"parse_unit",
"(",
"unit",
",",
"parse_strict",
"=",
"parse_strict",
")"
] | Forcefully reset the unit of these data
Use of this method is discouraged in favour of `to()`,
which performs accurate conversions from one unit to another.
The method should really only be used when the original unit of the
array is plain wrong.
Parameters
----------
unit : `~astropy.units.Unit`, `str`
the unit to force onto this array
parse_strict : `str`, optional
how to handle errors in the unit parsing, default is to
raise the underlying exception from `astropy.units`
Raises
------
ValueError
if a `str` cannot be parsed as a valid unit | [
"Forcefully",
"reset",
"the",
"unit",
"of",
"these",
"data"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/array.py#L418-L439 | train | 211,280 |
def flatten(self, order='C'):
    """Return a copy of the array collapsed into one dimension.

    Any index information is removed as part of the flattening,
    and the result is returned as a `~astropy.units.Quantity` array.

    Parameters
    ----------
    order : {'C', 'F', 'A', 'K'}, optional
        'C' means to flatten in row-major (C-style) order.
        'F' means to flatten in column-major (Fortran-
        style) order. 'A' means to flatten in column-major
        order if `a` is Fortran *contiguous* in memory,
        row-major order otherwise. 'K' means to flatten
        `a` in the order the elements occur in memory.
        The default is 'C'.

    Returns
    -------
    y : `~astropy.units.Quantity`
        A copy of the input array, flattened to one dimension.

    See Also
    --------
    ravel : Return a flattened array.
    flat : A 1-D flat iterator over the array.

    Examples
    --------
    >>> a = Array([[1,2], [3,4]], unit='m', name='Test')
    >>> a.flatten()
    <Quantity [1., 2., 3., 4.] m>
    """
    # flatten via the parent class, then drop the Array metadata by
    # re-viewing the result as a plain Quantity
    flattened = super(Array, self).flatten(order=order)
    return flattened.view(Quantity)
"""Return a copy of the array collapsed into one dimension.
Any index information is removed as part of the flattening,
and the result is returned as a `~astropy.units.Quantity` array.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order.
'F' means to flatten in column-major (Fortran-
style) order. 'A' means to flatten in column-major
order if `a` is Fortran *contiguous* in memory,
row-major order otherwise. 'K' means to flatten
`a` in the order the elements occur in memory.
The default is 'C'.
Returns
-------
y : `~astropy.units.Quantity`
A copy of the input array, flattened to one dimension.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the array.
Examples
--------
>>> a = Array([[1,2], [3,4]], unit='m', name='Test')
>>> a.flatten()
<Quantity [1., 2., 3., 4.] m>
"""
return super(Array, self).flatten(order=order).view(Quantity) | [
"def",
"flatten",
"(",
"self",
",",
"order",
"=",
"'C'",
")",
":",
"return",
"super",
"(",
"Array",
",",
"self",
")",
".",
"flatten",
"(",
"order",
"=",
"order",
")",
".",
"view",
"(",
"Quantity",
")"
] | Return a copy of the array collapsed into one dimension.
Any index information is removed as part of the flattening,
and the result is returned as a `~astropy.units.Quantity` array.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order.
'F' means to flatten in column-major (Fortran-
style) order. 'A' means to flatten in column-major
order if `a` is Fortran *contiguous* in memory,
row-major order otherwise. 'K' means to flatten
`a` in the order the elements occur in memory.
The default is 'C'.
Returns
-------
y : `~astropy.units.Quantity`
A copy of the input array, flattened to one dimension.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the array.
Examples
--------
>>> a = Array([[1,2], [3,4]], unit='m', name='Test')
>>> a.flatten()
<Quantity [1., 2., 3., 4.] m> | [
"Return",
"a",
"copy",
"of",
"the",
"array",
"collapsed",
"into",
"one",
"dimension",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/array.py#L441-L474 | train | 211,281 |
gwpy/gwpy | gwpy/spectrogram/coherence.py | from_timeseries | def from_timeseries(ts1, ts2, stride, fftlength=None, overlap=None,
window=None, nproc=1, **kwargs):
"""Calculate the coherence `Spectrogram` between two `TimeSeries`.
Parameters
----------
timeseries : :class:`~gwpy.timeseries.TimeSeries`
input time-series to process.
stride : `float`
number of seconds in single PSD (column of spectrogram).
fftlength : `float`
number of seconds in single FFT.
overlap : `int`, optiona, default: fftlength
number of seconds of overlap between FFTs, defaults to no overlap
window : `timeseries.window.Window`, optional, default: `None`
window function to apply to timeseries prior to FFT.
nproc : `int`, default: ``1``
maximum number of independent frame reading processes, default
is set to single-process file reading.
Returns
-------
spectrogram : :class:`~gwpy.spectrogram.Spectrogram`
time-frequency power spectrogram as generated from the
input time-series.
"""
# format FFT parameters
if fftlength is None:
fftlength = stride / 2.
# get size of spectrogram
nsteps = int(ts1.size // (stride * ts1.sample_rate.value))
nproc = min(nsteps, nproc)
# single-process return
if nsteps == 0 or nproc == 1:
return _from_timeseries(ts1, ts2, stride, fftlength=fftlength,
overlap=overlap, window=window, **kwargs)
# wrap spectrogram generator
def _specgram(queue_, tsa, tsb):
try:
queue_.put(_from_timeseries(tsa, tsb, stride, fftlength=fftlength,
overlap=overlap, window=window,
**kwargs))
except Exception as exc: # pylint: disable=broad-except
queue_.put(exc)
# otherwise build process list
stepperproc = int(ceil(nsteps / nproc))
nsamp = [stepperproc * ts.sample_rate.value * stride for ts in (ts1, ts2)]
queue = ProcessQueue(nproc)
processlist = []
for i in range(nproc):
process = Process(target=_specgram,
args=(queue, ts1[i * nsamp[0]:(i + 1) * nsamp[0]],
ts2[i * nsamp[1]:(i + 1) * nsamp[1]]))
process.daemon = True
processlist.append(process)
process.start()
if ((i + 1) * nsamp[0]) >= ts1.size:
break
# get data
data = []
for process in processlist:
result = queue.get()
if isinstance(result, Exception):
raise result
else:
data.append(result)
# and block
for process in processlist:
process.join()
# format and return
out = SpectrogramList(*data)
out.sort(key=lambda spec: spec.epoch.gps)
return out.join() | python | def from_timeseries(ts1, ts2, stride, fftlength=None, overlap=None,
window=None, nproc=1, **kwargs):
"""Calculate the coherence `Spectrogram` between two `TimeSeries`.
Parameters
----------
timeseries : :class:`~gwpy.timeseries.TimeSeries`
input time-series to process.
stride : `float`
number of seconds in single PSD (column of spectrogram).
fftlength : `float`
number of seconds in single FFT.
overlap : `int`, optiona, default: fftlength
number of seconds of overlap between FFTs, defaults to no overlap
window : `timeseries.window.Window`, optional, default: `None`
window function to apply to timeseries prior to FFT.
nproc : `int`, default: ``1``
maximum number of independent frame reading processes, default
is set to single-process file reading.
Returns
-------
spectrogram : :class:`~gwpy.spectrogram.Spectrogram`
time-frequency power spectrogram as generated from the
input time-series.
"""
# format FFT parameters
if fftlength is None:
fftlength = stride / 2.
# get size of spectrogram
nsteps = int(ts1.size // (stride * ts1.sample_rate.value))
nproc = min(nsteps, nproc)
# single-process return
if nsteps == 0 or nproc == 1:
return _from_timeseries(ts1, ts2, stride, fftlength=fftlength,
overlap=overlap, window=window, **kwargs)
# wrap spectrogram generator
def _specgram(queue_, tsa, tsb):
try:
queue_.put(_from_timeseries(tsa, tsb, stride, fftlength=fftlength,
overlap=overlap, window=window,
**kwargs))
except Exception as exc: # pylint: disable=broad-except
queue_.put(exc)
# otherwise build process list
stepperproc = int(ceil(nsteps / nproc))
nsamp = [stepperproc * ts.sample_rate.value * stride for ts in (ts1, ts2)]
queue = ProcessQueue(nproc)
processlist = []
for i in range(nproc):
process = Process(target=_specgram,
args=(queue, ts1[i * nsamp[0]:(i + 1) * nsamp[0]],
ts2[i * nsamp[1]:(i + 1) * nsamp[1]]))
process.daemon = True
processlist.append(process)
process.start()
if ((i + 1) * nsamp[0]) >= ts1.size:
break
# get data
data = []
for process in processlist:
result = queue.get()
if isinstance(result, Exception):
raise result
else:
data.append(result)
# and block
for process in processlist:
process.join()
# format and return
out = SpectrogramList(*data)
out.sort(key=lambda spec: spec.epoch.gps)
return out.join() | [
"def",
"from_timeseries",
"(",
"ts1",
",",
"ts2",
",",
"stride",
",",
"fftlength",
"=",
"None",
",",
"overlap",
"=",
"None",
",",
"window",
"=",
"None",
",",
"nproc",
"=",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"# format FFT parameters",
"if",
"fftlen... | Calculate the coherence `Spectrogram` between two `TimeSeries`.
Parameters
----------
timeseries : :class:`~gwpy.timeseries.TimeSeries`
input time-series to process.
stride : `float`
number of seconds in single PSD (column of spectrogram).
fftlength : `float`
number of seconds in single FFT.
overlap : `int`, optiona, default: fftlength
number of seconds of overlap between FFTs, defaults to no overlap
window : `timeseries.window.Window`, optional, default: `None`
window function to apply to timeseries prior to FFT.
nproc : `int`, default: ``1``
maximum number of independent frame reading processes, default
is set to single-process file reading.
Returns
-------
spectrogram : :class:`~gwpy.spectrogram.Spectrogram`
time-frequency power spectrogram as generated from the
input time-series. | [
"Calculate",
"the",
"coherence",
"Spectrogram",
"between",
"two",
"TimeSeries",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/spectrogram/coherence.py#L89-L169 | train | 211,282 |
gwpy/gwpy | gwpy/detector/io/clf.py | read_channel_list_file | def read_channel_list_file(*source):
"""Read a `~gwpy.detector.ChannelList` from a Channel List File
"""
# read file(s)
config = configparser.ConfigParser(dict_type=OrderedDict)
source = file_list(source)
success_ = config.read(*source)
if len(success_) != len(source):
raise IOError("Failed to read one or more CLF files")
# create channel list
out = ChannelList()
out.source = source
append = out.append
# loop over all groups and channels
for group in config.sections():
params = OrderedDict(config.items(group))
channels = params.pop('channels').strip('\n').split('\n')
if 'flow' in params or 'fhigh' in params:
low = params.pop('flow', 0)
high = params.pop('fhigh', inf)
if isinstance(high, string_types) and high.lower() == 'nyquist':
high = inf
frange = float(low), float(high)
else:
frange = None
for channel in channels:
try:
match = CHANNEL_DEFINITION.match(channel).groupdict()
except AttributeError as exc:
exc.args = ('Cannot parse %r as channel list entry' % channel,)
raise
# remove Nones from match
match = dict((k, v) for k, v in match.items() if v is not None)
match.setdefault('safe', 'safe')
match.setdefault('fidelity', 'clean')
# create channel and copy group params
safe = match.get('safe', 'safe').lower() != 'unsafe'
channel = Channel(match.pop('name'), frequency_range=frange,
safe=safe, sample_rate=match.pop('sample_rate'))
channel.params = params.copy()
channel.params.update(match)
channel.group = group
# extract those params for which the Channel has an attribute
for key in ['frametype']:
setattr(channel, key, channel.params.pop(key, None))
append(channel)
return out | python | def read_channel_list_file(*source):
"""Read a `~gwpy.detector.ChannelList` from a Channel List File
"""
# read file(s)
config = configparser.ConfigParser(dict_type=OrderedDict)
source = file_list(source)
success_ = config.read(*source)
if len(success_) != len(source):
raise IOError("Failed to read one or more CLF files")
# create channel list
out = ChannelList()
out.source = source
append = out.append
# loop over all groups and channels
for group in config.sections():
params = OrderedDict(config.items(group))
channels = params.pop('channels').strip('\n').split('\n')
if 'flow' in params or 'fhigh' in params:
low = params.pop('flow', 0)
high = params.pop('fhigh', inf)
if isinstance(high, string_types) and high.lower() == 'nyquist':
high = inf
frange = float(low), float(high)
else:
frange = None
for channel in channels:
try:
match = CHANNEL_DEFINITION.match(channel).groupdict()
except AttributeError as exc:
exc.args = ('Cannot parse %r as channel list entry' % channel,)
raise
# remove Nones from match
match = dict((k, v) for k, v in match.items() if v is not None)
match.setdefault('safe', 'safe')
match.setdefault('fidelity', 'clean')
# create channel and copy group params
safe = match.get('safe', 'safe').lower() != 'unsafe'
channel = Channel(match.pop('name'), frequency_range=frange,
safe=safe, sample_rate=match.pop('sample_rate'))
channel.params = params.copy()
channel.params.update(match)
channel.group = group
# extract those params for which the Channel has an attribute
for key in ['frametype']:
setattr(channel, key, channel.params.pop(key, None))
append(channel)
return out | [
"def",
"read_channel_list_file",
"(",
"*",
"source",
")",
":",
"# read file(s)",
"config",
"=",
"configparser",
".",
"ConfigParser",
"(",
"dict_type",
"=",
"OrderedDict",
")",
"source",
"=",
"file_list",
"(",
"source",
")",
"success_",
"=",
"config",
".",
"rea... | Read a `~gwpy.detector.ChannelList` from a Channel List File | [
"Read",
"a",
"~gwpy",
".",
"detector",
".",
"ChannelList",
"from",
"a",
"Channel",
"List",
"File"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/io/clf.py#L90-L136 | train | 211,283 |
gwpy/gwpy | gwpy/detector/io/clf.py | write_channel_list_file | def write_channel_list_file(channels, fobj):
"""Write a `~gwpy.detector.ChannelList` to a INI-format channel list file
"""
if not isinstance(fobj, FILE_LIKE):
with open(fobj, "w") as fobj:
return write_channel_list_file(channels, fobj)
out = configparser.ConfigParser(dict_type=OrderedDict)
for channel in channels:
group = channel.group
if not out.has_section(group):
out.add_section(group)
for param, value in channel.params.items():
out.set(group, param, value)
if channel.sample_rate is not None:
entry = '%s %s' % (str(channel),
str(channel.sample_rate.to('Hz').value))
else:
entry = str(channel)
entry += ' %s' % channel.params.get('safe', 'safe')
entry += ' %s' % channel.params.get('fidelity', 'clean')
try:
clist = out.get(group, 'channels')
except configparser.NoOptionError:
out.set(group, 'channels', '\n%s' % entry)
else:
out.set(group, 'channels', clist + '\n%s' % entry)
out.write(fobj) | python | def write_channel_list_file(channels, fobj):
"""Write a `~gwpy.detector.ChannelList` to a INI-format channel list file
"""
if not isinstance(fobj, FILE_LIKE):
with open(fobj, "w") as fobj:
return write_channel_list_file(channels, fobj)
out = configparser.ConfigParser(dict_type=OrderedDict)
for channel in channels:
group = channel.group
if not out.has_section(group):
out.add_section(group)
for param, value in channel.params.items():
out.set(group, param, value)
if channel.sample_rate is not None:
entry = '%s %s' % (str(channel),
str(channel.sample_rate.to('Hz').value))
else:
entry = str(channel)
entry += ' %s' % channel.params.get('safe', 'safe')
entry += ' %s' % channel.params.get('fidelity', 'clean')
try:
clist = out.get(group, 'channels')
except configparser.NoOptionError:
out.set(group, 'channels', '\n%s' % entry)
else:
out.set(group, 'channels', clist + '\n%s' % entry)
out.write(fobj) | [
"def",
"write_channel_list_file",
"(",
"channels",
",",
"fobj",
")",
":",
"if",
"not",
"isinstance",
"(",
"fobj",
",",
"FILE_LIKE",
")",
":",
"with",
"open",
"(",
"fobj",
",",
"\"w\"",
")",
"as",
"fobj",
":",
"return",
"write_channel_list_file",
"(",
"chan... | Write a `~gwpy.detector.ChannelList` to a INI-format channel list file | [
"Write",
"a",
"~gwpy",
".",
"detector",
".",
"ChannelList",
"to",
"a",
"INI",
"-",
"format",
"channel",
"list",
"file"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/io/clf.py#L139-L167 | train | 211,284 |
gwpy/gwpy | gwpy/signal/spectral/_registry.py | register_method | def register_method(func, name=None, deprecated=False):
"""Register a method of calculating an average spectrogram.
Parameters
----------
func : `callable`
function to execute
name : `str`, optional
name of the method, defaults to ``func.__name__``
deprecated : `bool`, optional
whether this method is deprecated (`True`) or not (`False`)
Returns
-------
name : `str`
the registered name of the function, which may differ
pedantically from what was given by the user.
"""
# warn about deprecated functions
if deprecated:
func = deprecated_function(
func,
"the {0!r} PSD methods is deprecated, and will be removed "
"in a future release, please consider using {1!r} instead".format(
name, name.split('-', 1)[1],
),
)
if name is None:
name = func.__name__
name = _format_name(name)
METHODS[name] = func
return name | python | def register_method(func, name=None, deprecated=False):
"""Register a method of calculating an average spectrogram.
Parameters
----------
func : `callable`
function to execute
name : `str`, optional
name of the method, defaults to ``func.__name__``
deprecated : `bool`, optional
whether this method is deprecated (`True`) or not (`False`)
Returns
-------
name : `str`
the registered name of the function, which may differ
pedantically from what was given by the user.
"""
# warn about deprecated functions
if deprecated:
func = deprecated_function(
func,
"the {0!r} PSD methods is deprecated, and will be removed "
"in a future release, please consider using {1!r} instead".format(
name, name.split('-', 1)[1],
),
)
if name is None:
name = func.__name__
name = _format_name(name)
METHODS[name] = func
return name | [
"def",
"register_method",
"(",
"func",
",",
"name",
"=",
"None",
",",
"deprecated",
"=",
"False",
")",
":",
"# warn about deprecated functions",
"if",
"deprecated",
":",
"func",
"=",
"deprecated_function",
"(",
"func",
",",
"\"the {0!r} PSD methods is deprecated, and ... | Register a method of calculating an average spectrogram.
Parameters
----------
func : `callable`
function to execute
name : `str`, optional
name of the method, defaults to ``func.__name__``
deprecated : `bool`, optional
whether this method is deprecated (`True`) or not (`False`)
Returns
-------
name : `str`
the registered name of the function, which may differ
pedantically from what was given by the user. | [
"Register",
"a",
"method",
"of",
"calculating",
"an",
"average",
"spectrogram",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_registry.py#L36-L70 | train | 211,285 |
gwpy/gwpy | gwpy/signal/spectral/_registry.py | get_method | def get_method(name):
"""Return the PSD method registered with the given name.
"""
# find method
name = _format_name(name)
try:
return METHODS[name]
except KeyError as exc:
exc.args = ("no PSD method registered with name {0!r}".format(name),)
raise | python | def get_method(name):
"""Return the PSD method registered with the given name.
"""
# find method
name = _format_name(name)
try:
return METHODS[name]
except KeyError as exc:
exc.args = ("no PSD method registered with name {0!r}".format(name),)
raise | [
"def",
"get_method",
"(",
"name",
")",
":",
"# find method",
"name",
"=",
"_format_name",
"(",
"name",
")",
"try",
":",
"return",
"METHODS",
"[",
"name",
"]",
"except",
"KeyError",
"as",
"exc",
":",
"exc",
".",
"args",
"=",
"(",
"\"no PSD method registered... | Return the PSD method registered with the given name. | [
"Return",
"the",
"PSD",
"method",
"registered",
"with",
"the",
"given",
"name",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_registry.py#L73-L82 | train | 211,286 |
gwpy/gwpy | gwpy/astro/range.py | inspiral_range_psd | def inspiral_range_psd(psd, snr=8, mass1=1.4, mass2=1.4, horizon=False):
"""Compute the inspiral sensitive distance PSD from a GW strain PSD
Parameters
----------
psd : `~gwpy.frequencyseries.FrequencySeries`
the instrumental power-spectral-density data
snr : `float`, optional
the signal-to-noise ratio for which to calculate range,
default: `8`
mass1 : `float`, `~astropy.units.Quantity`, optional
the mass (`float` assumed in solar masses) of the first binary
component, default: `1.4`
mass2 : `float`, `~astropy.units.Quantity`, optional
the mass (`float` assumed in solar masses) of the second binary
component, default: `1.4`
horizon : `bool`, optional
if `True`, return the maximal 'horizon' sensitive distance, otherwise
return the angle-averaged range, default: `False`
Returns
-------
rspec : `~gwpy.frequencyseries.FrequencySeries`
the calculated inspiral sensitivity PSD [Mpc^2 / Hz]
"""
# compute chirp mass and symmetric mass ratio
mass1 = units.Quantity(mass1, 'solMass').to('kg')
mass2 = units.Quantity(mass2, 'solMass').to('kg')
mtotal = mass1 + mass2
mchirp = (mass1 * mass2) ** (3/5.) / mtotal ** (1/5.)
# compute ISCO
fisco = (constants.c ** 3 / (constants.G * 6**1.5 * pi * mtotal)).to('Hz')
# calculate integral pre-factor
prefactor = (
(1.77**2 * 5 * constants.c ** (1/3.) *
(mchirp * constants.G / constants.c ** 2) ** (5/3.)) /
(96 * pi ** (4/3.) * snr ** 2)
)
# calculate inspiral range ASD in m^2/Hz
integrand = 1 / psd * psd.frequencies ** (-7/3.) * prefactor
# restrict to ISCO
integrand = integrand[psd.frequencies.value < fisco.value]
# normalize and return
if integrand.f0.value == 0.0:
integrand[0] = 0.0
if horizon:
integrand *= 2.26 ** 2
return integrand.to('Mpc^2 / Hz') | python | def inspiral_range_psd(psd, snr=8, mass1=1.4, mass2=1.4, horizon=False):
"""Compute the inspiral sensitive distance PSD from a GW strain PSD
Parameters
----------
psd : `~gwpy.frequencyseries.FrequencySeries`
the instrumental power-spectral-density data
snr : `float`, optional
the signal-to-noise ratio for which to calculate range,
default: `8`
mass1 : `float`, `~astropy.units.Quantity`, optional
the mass (`float` assumed in solar masses) of the first binary
component, default: `1.4`
mass2 : `float`, `~astropy.units.Quantity`, optional
the mass (`float` assumed in solar masses) of the second binary
component, default: `1.4`
horizon : `bool`, optional
if `True`, return the maximal 'horizon' sensitive distance, otherwise
return the angle-averaged range, default: `False`
Returns
-------
rspec : `~gwpy.frequencyseries.FrequencySeries`
the calculated inspiral sensitivity PSD [Mpc^2 / Hz]
"""
# compute chirp mass and symmetric mass ratio
mass1 = units.Quantity(mass1, 'solMass').to('kg')
mass2 = units.Quantity(mass2, 'solMass').to('kg')
mtotal = mass1 + mass2
mchirp = (mass1 * mass2) ** (3/5.) / mtotal ** (1/5.)
# compute ISCO
fisco = (constants.c ** 3 / (constants.G * 6**1.5 * pi * mtotal)).to('Hz')
# calculate integral pre-factor
prefactor = (
(1.77**2 * 5 * constants.c ** (1/3.) *
(mchirp * constants.G / constants.c ** 2) ** (5/3.)) /
(96 * pi ** (4/3.) * snr ** 2)
)
# calculate inspiral range ASD in m^2/Hz
integrand = 1 / psd * psd.frequencies ** (-7/3.) * prefactor
# restrict to ISCO
integrand = integrand[psd.frequencies.value < fisco.value]
# normalize and return
if integrand.f0.value == 0.0:
integrand[0] = 0.0
if horizon:
integrand *= 2.26 ** 2
return integrand.to('Mpc^2 / Hz') | [
"def",
"inspiral_range_psd",
"(",
"psd",
",",
"snr",
"=",
"8",
",",
"mass1",
"=",
"1.4",
",",
"mass2",
"=",
"1.4",
",",
"horizon",
"=",
"False",
")",
":",
"# compute chirp mass and symmetric mass ratio",
"mass1",
"=",
"units",
".",
"Quantity",
"(",
"mass1",
... | Compute the inspiral sensitive distance PSD from a GW strain PSD
Parameters
----------
psd : `~gwpy.frequencyseries.FrequencySeries`
the instrumental power-spectral-density data
snr : `float`, optional
the signal-to-noise ratio for which to calculate range,
default: `8`
mass1 : `float`, `~astropy.units.Quantity`, optional
the mass (`float` assumed in solar masses) of the first binary
component, default: `1.4`
mass2 : `float`, `~astropy.units.Quantity`, optional
the mass (`float` assumed in solar masses) of the second binary
component, default: `1.4`
horizon : `bool`, optional
if `True`, return the maximal 'horizon' sensitive distance, otherwise
return the angle-averaged range, default: `False`
Returns
-------
rspec : `~gwpy.frequencyseries.FrequencySeries`
the calculated inspiral sensitivity PSD [Mpc^2 / Hz] | [
"Compute",
"the",
"inspiral",
"sensitive",
"distance",
"PSD",
"from",
"a",
"GW",
"strain",
"PSD"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/astro/range.py#L45-L101 | train | 211,287 |
gwpy/gwpy | gwpy/astro/range.py | inspiral_range | def inspiral_range(psd, snr=8, mass1=1.4, mass2=1.4, fmin=None, fmax=None,
horizon=False):
"""Calculate the inspiral sensitive distance from a GW strain PSD
The method returns the distance (in megaparsecs) to which an compact
binary inspiral with the given component masses would be detectable
given the instrumental PSD. The calculation is as defined in:
https://dcc.ligo.org/LIGO-T030276/public
Parameters
----------
psd : `~gwpy.frequencyseries.FrequencySeries`
the instrumental power-spectral-density data
snr : `float`, optional
the signal-to-noise ratio for which to calculate range,
default: `8`
mass1 : `float`, `~astropy.units.Quantity`, optional
the mass (`float` assumed in solar masses) of the first binary
component, default: `1.4`
mass2 : `float`, `~astropy.units.Quantity`, optional
the mass (`float` assumed in solar masses) of the second binary
component, default: `1.4`
fmin : `float`, optional
the lower frequency cut-off of the integral, default: `psd.df`
fmax : `float`, optional
the maximum frequency limit of the integral, defaults to
innermost stable circular orbit (ISCO) frequency
horizon : `bool`, optional
if `True`, return the maximal 'horizon' sensitive distance, otherwise
return the angle-averaged range, default: `False`
Returns
-------
range : `~astropy.units.Quantity`
the calculated inspiral range [Mpc]
Examples
--------
Grab some data for LIGO-Livingston around GW150914 and generate a PSD
>>> from gwpy.timeseries import TimeSeries
>>> hoft = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
>>> hoff = hoft.psd(fftlength=4)
Now we can calculate the :func:`inspiral_range`:
>>> from gwpy.astro import inspiral_range
>>> r = inspiral_range(hoff, fmin=30)
>>> print(r)
70.4612102889 Mpc
"""
mass1 = units.Quantity(mass1, 'solMass').to('kg')
mass2 = units.Quantity(mass2, 'solMass').to('kg')
mtotal = mass1 + mass2
# compute ISCO
fisco = (constants.c ** 3 / (constants.G * 6**1.5 * pi * mtotal)).to('Hz')
# format frequency limits
fmax = units.Quantity(fmax or fisco, 'Hz')
if fmax > fisco:
warnings.warn("Upper frequency bound greater than %s-%s ISCO "
"frequency of %s, using ISCO" % (mass1, mass2, fisco))
fmax = fisco
if fmin is None:
fmin = psd.df # avoid using 0 as lower limit
fmin = units.Quantity(fmin, 'Hz')
# integrate
f = psd.frequencies.to('Hz')
condition = (f >= fmin) & (f < fmax)
integrand = inspiral_range_psd(psd[condition], snr=snr, mass1=mass1,
mass2=mass2, horizon=horizon)
result = units.Quantity(
integrate.trapz(integrand.value, f.value[condition]),
unit=integrand.unit * units.Hertz)
return (result ** (1/2.)).to('Mpc') | python | def inspiral_range(psd, snr=8, mass1=1.4, mass2=1.4, fmin=None, fmax=None,
horizon=False):
"""Calculate the inspiral sensitive distance from a GW strain PSD
The method returns the distance (in megaparsecs) to which an compact
binary inspiral with the given component masses would be detectable
given the instrumental PSD. The calculation is as defined in:
https://dcc.ligo.org/LIGO-T030276/public
Parameters
----------
psd : `~gwpy.frequencyseries.FrequencySeries`
the instrumental power-spectral-density data
snr : `float`, optional
the signal-to-noise ratio for which to calculate range,
default: `8`
mass1 : `float`, `~astropy.units.Quantity`, optional
the mass (`float` assumed in solar masses) of the first binary
component, default: `1.4`
mass2 : `float`, `~astropy.units.Quantity`, optional
the mass (`float` assumed in solar masses) of the second binary
component, default: `1.4`
fmin : `float`, optional
the lower frequency cut-off of the integral, default: `psd.df`
fmax : `float`, optional
the maximum frequency limit of the integral, defaults to
innermost stable circular orbit (ISCO) frequency
horizon : `bool`, optional
if `True`, return the maximal 'horizon' sensitive distance, otherwise
return the angle-averaged range, default: `False`
Returns
-------
range : `~astropy.units.Quantity`
the calculated inspiral range [Mpc]
Examples
--------
Grab some data for LIGO-Livingston around GW150914 and generate a PSD
>>> from gwpy.timeseries import TimeSeries
>>> hoft = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
>>> hoff = hoft.psd(fftlength=4)
Now we can calculate the :func:`inspiral_range`:
>>> from gwpy.astro import inspiral_range
>>> r = inspiral_range(hoff, fmin=30)
>>> print(r)
70.4612102889 Mpc
"""
mass1 = units.Quantity(mass1, 'solMass').to('kg')
mass2 = units.Quantity(mass2, 'solMass').to('kg')
mtotal = mass1 + mass2
# compute ISCO
fisco = (constants.c ** 3 / (constants.G * 6**1.5 * pi * mtotal)).to('Hz')
# format frequency limits
fmax = units.Quantity(fmax or fisco, 'Hz')
if fmax > fisco:
warnings.warn("Upper frequency bound greater than %s-%s ISCO "
"frequency of %s, using ISCO" % (mass1, mass2, fisco))
fmax = fisco
if fmin is None:
fmin = psd.df # avoid using 0 as lower limit
fmin = units.Quantity(fmin, 'Hz')
# integrate
f = psd.frequencies.to('Hz')
condition = (f >= fmin) & (f < fmax)
integrand = inspiral_range_psd(psd[condition], snr=snr, mass1=mass1,
mass2=mass2, horizon=horizon)
result = units.Quantity(
integrate.trapz(integrand.value, f.value[condition]),
unit=integrand.unit * units.Hertz)
return (result ** (1/2.)).to('Mpc') | [
"def",
"inspiral_range",
"(",
"psd",
",",
"snr",
"=",
"8",
",",
"mass1",
"=",
"1.4",
",",
"mass2",
"=",
"1.4",
",",
"fmin",
"=",
"None",
",",
"fmax",
"=",
"None",
",",
"horizon",
"=",
"False",
")",
":",
"mass1",
"=",
"units",
".",
"Quantity",
"("... | Calculate the inspiral sensitive distance from a GW strain PSD
The method returns the distance (in megaparsecs) to which an compact
binary inspiral with the given component masses would be detectable
given the instrumental PSD. The calculation is as defined in:
https://dcc.ligo.org/LIGO-T030276/public
Parameters
----------
psd : `~gwpy.frequencyseries.FrequencySeries`
the instrumental power-spectral-density data
snr : `float`, optional
the signal-to-noise ratio for which to calculate range,
default: `8`
mass1 : `float`, `~astropy.units.Quantity`, optional
the mass (`float` assumed in solar masses) of the first binary
component, default: `1.4`
mass2 : `float`, `~astropy.units.Quantity`, optional
the mass (`float` assumed in solar masses) of the second binary
component, default: `1.4`
fmin : `float`, optional
the lower frequency cut-off of the integral, default: `psd.df`
fmax : `float`, optional
the maximum frequency limit of the integral, defaults to
innermost stable circular orbit (ISCO) frequency
horizon : `bool`, optional
if `True`, return the maximal 'horizon' sensitive distance, otherwise
return the angle-averaged range, default: `False`
Returns
-------
range : `~astropy.units.Quantity`
the calculated inspiral range [Mpc]
Examples
--------
Grab some data for LIGO-Livingston around GW150914 and generate a PSD
>>> from gwpy.timeseries import TimeSeries
>>> hoft = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
>>> hoff = hoft.psd(fftlength=4)
Now we can calculate the :func:`inspiral_range`:
>>> from gwpy.astro import inspiral_range
>>> r = inspiral_range(hoff, fmin=30)
>>> print(r)
70.4612102889 Mpc | [
"Calculate",
"the",
"inspiral",
"sensitive",
"distance",
"from",
"a",
"GW",
"strain",
"PSD"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/astro/range.py#L104-L188 | train | 211,288 |
gwpy/gwpy | gwpy/astro/range.py | burst_range_spectrum | def burst_range_spectrum(psd, snr=8, energy=1e-2):
"""Calculate the frequency-dependent GW burst range from a strain PSD
Parameters
----------
psd : `~gwpy.frequencyseries.FrequencySeries`
the instrumental power-spectral-density data
snr : `float`, optional
the signal-to-noise ratio for which to calculate range,
default: `8`
energy : `float`, optional
the relative energy output of the GW burst,
default: `0.01` (GRB-like burst)
Returns
-------
rangespec : `~gwpy.frequencyseries.FrequencySeries`
the burst range `FrequencySeries` [Mpc (default)]
"""
# calculate frequency dependent range in parsecs
a = (constants.G * energy * constants.M_sun * 0.4 /
(pi**2 * constants.c))**(1/2.)
dspec = psd ** (-1/2.) * a / (snr * psd.frequencies)
# convert to output unit
rspec = dspec.to('Mpc')
# rescale 0 Hertz (which has 0 range always)
if rspec.f0.value == 0.0:
rspec[0] = 0.0
return rspec | python | def burst_range_spectrum(psd, snr=8, energy=1e-2):
"""Calculate the frequency-dependent GW burst range from a strain PSD
Parameters
----------
psd : `~gwpy.frequencyseries.FrequencySeries`
the instrumental power-spectral-density data
snr : `float`, optional
the signal-to-noise ratio for which to calculate range,
default: `8`
energy : `float`, optional
the relative energy output of the GW burst,
default: `0.01` (GRB-like burst)
Returns
-------
rangespec : `~gwpy.frequencyseries.FrequencySeries`
the burst range `FrequencySeries` [Mpc (default)]
"""
# calculate frequency dependent range in parsecs
a = (constants.G * energy * constants.M_sun * 0.4 /
(pi**2 * constants.c))**(1/2.)
dspec = psd ** (-1/2.) * a / (snr * psd.frequencies)
# convert to output unit
rspec = dspec.to('Mpc')
# rescale 0 Hertz (which has 0 range always)
if rspec.f0.value == 0.0:
rspec[0] = 0.0
return rspec | [
"def",
"burst_range_spectrum",
"(",
"psd",
",",
"snr",
"=",
"8",
",",
"energy",
"=",
"1e-2",
")",
":",
"# calculate frequency dependent range in parsecs",
"a",
"=",
"(",
"constants",
".",
"G",
"*",
"energy",
"*",
"constants",
".",
"M_sun",
"*",
"0.4",
"/",
... | Calculate the frequency-dependent GW burst range from a strain PSD
Parameters
----------
psd : `~gwpy.frequencyseries.FrequencySeries`
the instrumental power-spectral-density data
snr : `float`, optional
the signal-to-noise ratio for which to calculate range,
default: `8`
energy : `float`, optional
the relative energy output of the GW burst,
default: `0.01` (GRB-like burst)
Returns
-------
rangespec : `~gwpy.frequencyseries.FrequencySeries`
the burst range `FrequencySeries` [Mpc (default)] | [
"Calculate",
"the",
"frequency",
"-",
"dependent",
"GW",
"burst",
"range",
"from",
"a",
"strain",
"PSD"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/astro/range.py#L192-L225 | train | 211,289 |
gwpy/gwpy | gwpy/astro/range.py | burst_range | def burst_range(psd, snr=8, energy=1e-2, fmin=100, fmax=500):
"""Calculate the integrated GRB-like GW burst range from a strain PSD
Parameters
----------
psd : `~gwpy.frequencyseries.FrequencySeries`
the instrumental power-spectral-density data
snr : `float`, optional
the signal-to-noise ratio for which to calculate range,
default: ``8``
energy : `float`, optional
the relative energy output of the GW burst, defaults to ``1e-2``
for a GRB-like burst
fmin : `float`, optional
the lower frequency cutoff of the burst range integral,
default: ``100 Hz``
fmax : `float`, optional
the upper frequency cutoff of the burst range integral,
default: ``500 Hz``
Returns
-------
range : `~astropy.units.Quantity`
the GRB-like-burst sensitive range [Mpc (default)]
Examples
--------
Grab some data for LIGO-Livingston around GW150914 and generate a PSD
>>> from gwpy.timeseries import TimeSeries
>>> hoft = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
>>> hoff = hoft.psd(fftlength=4)
Now we can calculate the :func:`burst_range`:
>>> from gwpy.astro import burst_range
>>> r = burst_range(hoff, fmin=30)
>>> print(r)
42.5055584195 Mpc
"""
freqs = psd.frequencies.value
# restrict integral
if not fmin:
fmin = psd.f0
if not fmax:
fmax = psd.span[1]
condition = (freqs >= fmin) & (freqs < fmax)
# calculate integrand and integrate
integrand = burst_range_spectrum(
psd[condition], snr=snr, energy=energy) ** 3
result = integrate.trapz(integrand.value, freqs[condition])
# normalize and return
r = units.Quantity(result / (fmax - fmin), unit=integrand.unit) ** (1/3.)
return r.to('Mpc') | python | def burst_range(psd, snr=8, energy=1e-2, fmin=100, fmax=500):
"""Calculate the integrated GRB-like GW burst range from a strain PSD
Parameters
----------
psd : `~gwpy.frequencyseries.FrequencySeries`
the instrumental power-spectral-density data
snr : `float`, optional
the signal-to-noise ratio for which to calculate range,
default: ``8``
energy : `float`, optional
the relative energy output of the GW burst, defaults to ``1e-2``
for a GRB-like burst
fmin : `float`, optional
the lower frequency cutoff of the burst range integral,
default: ``100 Hz``
fmax : `float`, optional
the upper frequency cutoff of the burst range integral,
default: ``500 Hz``
Returns
-------
range : `~astropy.units.Quantity`
the GRB-like-burst sensitive range [Mpc (default)]
Examples
--------
Grab some data for LIGO-Livingston around GW150914 and generate a PSD
>>> from gwpy.timeseries import TimeSeries
>>> hoft = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
>>> hoff = hoft.psd(fftlength=4)
Now we can calculate the :func:`burst_range`:
>>> from gwpy.astro import burst_range
>>> r = burst_range(hoff, fmin=30)
>>> print(r)
42.5055584195 Mpc
"""
freqs = psd.frequencies.value
# restrict integral
if not fmin:
fmin = psd.f0
if not fmax:
fmax = psd.span[1]
condition = (freqs >= fmin) & (freqs < fmax)
# calculate integrand and integrate
integrand = burst_range_spectrum(
psd[condition], snr=snr, energy=energy) ** 3
result = integrate.trapz(integrand.value, freqs[condition])
# normalize and return
r = units.Quantity(result / (fmax - fmin), unit=integrand.unit) ** (1/3.)
return r.to('Mpc') | [
"def",
"burst_range",
"(",
"psd",
",",
"snr",
"=",
"8",
",",
"energy",
"=",
"1e-2",
",",
"fmin",
"=",
"100",
",",
"fmax",
"=",
"500",
")",
":",
"freqs",
"=",
"psd",
".",
"frequencies",
".",
"value",
"# restrict integral",
"if",
"not",
"fmin",
":",
... | Calculate the integrated GRB-like GW burst range from a strain PSD
Parameters
----------
psd : `~gwpy.frequencyseries.FrequencySeries`
the instrumental power-spectral-density data
snr : `float`, optional
the signal-to-noise ratio for which to calculate range,
default: ``8``
energy : `float`, optional
the relative energy output of the GW burst, defaults to ``1e-2``
for a GRB-like burst
fmin : `float`, optional
the lower frequency cutoff of the burst range integral,
default: ``100 Hz``
fmax : `float`, optional
the upper frequency cutoff of the burst range integral,
default: ``500 Hz``
Returns
-------
range : `~astropy.units.Quantity`
the GRB-like-burst sensitive range [Mpc (default)]
Examples
--------
Grab some data for LIGO-Livingston around GW150914 and generate a PSD
>>> from gwpy.timeseries import TimeSeries
>>> hoft = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
>>> hoff = hoft.psd(fftlength=4)
Now we can calculate the :func:`burst_range`:
>>> from gwpy.astro import burst_range
>>> r = burst_range(hoff, fmin=30)
>>> print(r)
42.5055584195 Mpc | [
"Calculate",
"the",
"integrated",
"GRB",
"-",
"like",
"GW",
"burst",
"range",
"from",
"a",
"strain",
"PSD"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/astro/range.py#L228-L285 | train | 211,290 |
gwpy/gwpy | gwpy/plot/colors.py | format_norm | def format_norm(kwargs, current=None):
"""Format a `~matplotlib.colors.Normalize` from a set of kwargs
Returns
-------
norm, kwargs
the formatted `Normalize` instance, and the remaining keywords
"""
norm = kwargs.pop('norm', current) or 'linear'
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
clim = kwargs.pop('clim', (vmin, vmax)) or (None, None)
clip = kwargs.pop('clip', None)
if norm == 'linear':
norm = colors.Normalize()
elif norm == 'log':
norm = colors.LogNorm()
elif not isinstance(norm, colors.Normalize):
raise ValueError("unrecognised value for norm {!r}".format(norm))
for attr, value in (('vmin', clim[0]), ('vmax', clim[1]), ('clip', clip)):
if value is not None:
setattr(norm, attr, value)
return norm, kwargs | python | def format_norm(kwargs, current=None):
"""Format a `~matplotlib.colors.Normalize` from a set of kwargs
Returns
-------
norm, kwargs
the formatted `Normalize` instance, and the remaining keywords
"""
norm = kwargs.pop('norm', current) or 'linear'
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
clim = kwargs.pop('clim', (vmin, vmax)) or (None, None)
clip = kwargs.pop('clip', None)
if norm == 'linear':
norm = colors.Normalize()
elif norm == 'log':
norm = colors.LogNorm()
elif not isinstance(norm, colors.Normalize):
raise ValueError("unrecognised value for norm {!r}".format(norm))
for attr, value in (('vmin', clim[0]), ('vmax', clim[1]), ('clip', clip)):
if value is not None:
setattr(norm, attr, value)
return norm, kwargs | [
"def",
"format_norm",
"(",
"kwargs",
",",
"current",
"=",
"None",
")",
":",
"norm",
"=",
"kwargs",
".",
"pop",
"(",
"'norm'",
",",
"current",
")",
"or",
"'linear'",
"vmin",
"=",
"kwargs",
".",
"pop",
"(",
"'vmin'",
",",
"None",
")",
"vmax",
"=",
"k... | Format a `~matplotlib.colors.Normalize` from a set of kwargs
Returns
-------
norm, kwargs
the formatted `Normalize` instance, and the remaining keywords | [
"Format",
"a",
"~matplotlib",
".",
"colors",
".",
"Normalize",
"from",
"a",
"set",
"of",
"kwargs"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/colors.py#L99-L124 | train | 211,291 |
gwpy/gwpy | gwpy/io/gwf.py | identify_gwf | def identify_gwf(origin, filepath, fileobj, *args, **kwargs):
"""Identify a filename or file object as GWF
This function is overloaded in that it will also identify a cache file
as 'gwf' if the first entry in the cache contains a GWF file extension
"""
# pylint: disable=unused-argument
# try and read file descriptor
if fileobj is not None:
loc = fileobj.tell()
fileobj.seek(0)
try:
if fileobj.read(4) == GWF_SIGNATURE:
return True
finally:
fileobj.seek(loc)
if filepath is not None:
if filepath.endswith('.gwf'):
return True
if filepath.endswith(('.lcf', '.cache')):
try:
cache = read_cache(filepath)
except IOError:
return False
else:
if cache[0].path.endswith('.gwf'):
return True | python | def identify_gwf(origin, filepath, fileobj, *args, **kwargs):
"""Identify a filename or file object as GWF
This function is overloaded in that it will also identify a cache file
as 'gwf' if the first entry in the cache contains a GWF file extension
"""
# pylint: disable=unused-argument
# try and read file descriptor
if fileobj is not None:
loc = fileobj.tell()
fileobj.seek(0)
try:
if fileobj.read(4) == GWF_SIGNATURE:
return True
finally:
fileobj.seek(loc)
if filepath is not None:
if filepath.endswith('.gwf'):
return True
if filepath.endswith(('.lcf', '.cache')):
try:
cache = read_cache(filepath)
except IOError:
return False
else:
if cache[0].path.endswith('.gwf'):
return True | [
"def",
"identify_gwf",
"(",
"origin",
",",
"filepath",
",",
"fileobj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=unused-argument",
"# try and read file descriptor",
"if",
"fileobj",
"is",
"not",
"None",
":",
"loc",
"=",
"fileobj",
... | Identify a filename or file object as GWF
This function is overloaded in that it will also identify a cache file
as 'gwf' if the first entry in the cache contains a GWF file extension | [
"Identify",
"a",
"filename",
"or",
"file",
"object",
"as",
"GWF"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/gwf.py#L42-L69 | train | 211,292 |
gwpy/gwpy | gwpy/io/gwf.py | open_gwf | def open_gwf(filename, mode='r'):
"""Open a filename for reading or writing GWF format data
Parameters
----------
filename : `str`
the path to read from, or write to
mode : `str`, optional
either ``'r'`` (read) or ``'w'`` (write)
Returns
-------
`LDAStools.frameCPP.IFrameFStream`
the input frame stream (if `mode='r'`), or
`LDAStools.frameCPP.IFrameFStream`
the output frame stream (if `mode='w'`)
"""
if mode not in ('r', 'w'):
raise ValueError("mode must be either 'r' or 'w'")
from LDAStools import frameCPP
filename = urlparse(filename).path # strip file://localhost or similar
if mode == 'r':
return frameCPP.IFrameFStream(str(filename))
return frameCPP.OFrameFStream(str(filename)) | python | def open_gwf(filename, mode='r'):
"""Open a filename for reading or writing GWF format data
Parameters
----------
filename : `str`
the path to read from, or write to
mode : `str`, optional
either ``'r'`` (read) or ``'w'`` (write)
Returns
-------
`LDAStools.frameCPP.IFrameFStream`
the input frame stream (if `mode='r'`), or
`LDAStools.frameCPP.IFrameFStream`
the output frame stream (if `mode='w'`)
"""
if mode not in ('r', 'w'):
raise ValueError("mode must be either 'r' or 'w'")
from LDAStools import frameCPP
filename = urlparse(filename).path # strip file://localhost or similar
if mode == 'r':
return frameCPP.IFrameFStream(str(filename))
return frameCPP.OFrameFStream(str(filename)) | [
"def",
"open_gwf",
"(",
"filename",
",",
"mode",
"=",
"'r'",
")",
":",
"if",
"mode",
"not",
"in",
"(",
"'r'",
",",
"'w'",
")",
":",
"raise",
"ValueError",
"(",
"\"mode must be either 'r' or 'w'\"",
")",
"from",
"LDAStools",
"import",
"frameCPP",
"filename",
... | Open a filename for reading or writing GWF format data
Parameters
----------
filename : `str`
the path to read from, or write to
mode : `str`, optional
either ``'r'`` (read) or ``'w'`` (write)
Returns
-------
`LDAStools.frameCPP.IFrameFStream`
the input frame stream (if `mode='r'`), or
`LDAStools.frameCPP.IFrameFStream`
the output frame stream (if `mode='w'`) | [
"Open",
"a",
"filename",
"for",
"reading",
"or",
"writing",
"GWF",
"format",
"data"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/gwf.py#L72-L96 | train | 211,293 |
gwpy/gwpy | gwpy/io/gwf.py | write_frames | def write_frames(filename, frames, compression=257, compression_level=6):
"""Write a list of frame objects to a file
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
filename : `str`
path to write into
frames : `list` of `LDAStools.frameCPP.FrameH`
list of frames to write into file
compression : `int`, optional
enum value for compression scheme, default is ``GZIP``
compression_level : `int`, optional
compression level for given scheme
"""
from LDAStools import frameCPP
# open stream
stream = open_gwf(filename, 'w')
# write frames one-by-one
if isinstance(frames, frameCPP.FrameH):
frames = [frames]
for frame in frames:
stream.WriteFrame(frame, compression, compression_level) | python | def write_frames(filename, frames, compression=257, compression_level=6):
"""Write a list of frame objects to a file
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
filename : `str`
path to write into
frames : `list` of `LDAStools.frameCPP.FrameH`
list of frames to write into file
compression : `int`, optional
enum value for compression scheme, default is ``GZIP``
compression_level : `int`, optional
compression level for given scheme
"""
from LDAStools import frameCPP
# open stream
stream = open_gwf(filename, 'w')
# write frames one-by-one
if isinstance(frames, frameCPP.FrameH):
frames = [frames]
for frame in frames:
stream.WriteFrame(frame, compression, compression_level) | [
"def",
"write_frames",
"(",
"filename",
",",
"frames",
",",
"compression",
"=",
"257",
",",
"compression_level",
"=",
"6",
")",
":",
"from",
"LDAStools",
"import",
"frameCPP",
"# open stream",
"stream",
"=",
"open_gwf",
"(",
"filename",
",",
"'w'",
")",
"# w... | Write a list of frame objects to a file
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
filename : `str`
path to write into
frames : `list` of `LDAStools.frameCPP.FrameH`
list of frames to write into file
compression : `int`, optional
enum value for compression scheme, default is ``GZIP``
compression_level : `int`, optional
compression level for given scheme | [
"Write",
"a",
"list",
"of",
"frame",
"objects",
"to",
"a",
"file"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/gwf.py#L99-L127 | train | 211,294 |
gwpy/gwpy | gwpy/io/gwf.py | get_channel_type | def get_channel_type(channel, framefile):
"""Find the channel type in a given GWF file
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
name of data channel to find
framefile : `str`
path of GWF file in which to search
Returns
-------
ctype : `str`
the type of the channel ('adc', 'sim', or 'proc')
Raises
------
ValueError
if the channel is not found in the table-of-contents
"""
channel = str(channel)
for name, type_ in _iter_channels(framefile):
if channel == name:
return type_
raise ValueError("%s not found in table-of-contents for %s"
% (channel, framefile)) | python | def get_channel_type(channel, framefile):
"""Find the channel type in a given GWF file
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
name of data channel to find
framefile : `str`
path of GWF file in which to search
Returns
-------
ctype : `str`
the type of the channel ('adc', 'sim', or 'proc')
Raises
------
ValueError
if the channel is not found in the table-of-contents
"""
channel = str(channel)
for name, type_ in _iter_channels(framefile):
if channel == name:
return type_
raise ValueError("%s not found in table-of-contents for %s"
% (channel, framefile)) | [
"def",
"get_channel_type",
"(",
"channel",
",",
"framefile",
")",
":",
"channel",
"=",
"str",
"(",
"channel",
")",
"for",
"name",
",",
"type_",
"in",
"_iter_channels",
"(",
"framefile",
")",
":",
"if",
"channel",
"==",
"name",
":",
"return",
"type_",
"ra... | Find the channel type in a given GWF file
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
name of data channel to find
framefile : `str`
path of GWF file in which to search
Returns
-------
ctype : `str`
the type of the channel ('adc', 'sim', or 'proc')
Raises
------
ValueError
if the channel is not found in the table-of-contents | [
"Find",
"the",
"channel",
"type",
"in",
"a",
"given",
"GWF",
"file"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/gwf.py#L205-L233 | train | 211,295 |
gwpy/gwpy | gwpy/io/gwf.py | channel_in_frame | def channel_in_frame(channel, framefile):
"""Determine whether a channel is stored in this framefile
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
channel : `str`
name of channel to find
framefile : `str`
path of GWF file to test
Returns
-------
inframe : `bool`
whether this channel is included in the table of contents for
the given framefile
"""
channel = str(channel)
for name in iter_channel_names(framefile):
if channel == name:
return True
return False | python | def channel_in_frame(channel, framefile):
"""Determine whether a channel is stored in this framefile
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
channel : `str`
name of channel to find
framefile : `str`
path of GWF file to test
Returns
-------
inframe : `bool`
whether this channel is included in the table of contents for
the given framefile
"""
channel = str(channel)
for name in iter_channel_names(framefile):
if channel == name:
return True
return False | [
"def",
"channel_in_frame",
"(",
"channel",
",",
"framefile",
")",
":",
"channel",
"=",
"str",
"(",
"channel",
")",
"for",
"name",
"in",
"iter_channel_names",
"(",
"framefile",
")",
":",
"if",
"channel",
"==",
"name",
":",
"return",
"True",
"return",
"False... | Determine whether a channel is stored in this framefile
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
channel : `str`
name of channel to find
framefile : `str`
path of GWF file to test
Returns
-------
inframe : `bool`
whether this channel is included in the table of contents for
the given framefile | [
"Determine",
"whether",
"a",
"channel",
"is",
"stored",
"in",
"this",
"framefile"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/gwf.py#L236-L259 | train | 211,296 |
gwpy/gwpy | gwpy/io/gwf.py | _iter_channels | def _iter_channels(framefile):
"""Yields the name and type of each channel in a GWF file TOC
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
framefile : `str`, `LDAStools.frameCPP.IFrameFStream`
path of GWF file, or open file stream, to read
"""
from LDAStools import frameCPP
if not isinstance(framefile, frameCPP.IFrameFStream):
framefile = open_gwf(framefile, 'r')
toc = framefile.GetTOC()
for typename in ('Sim', 'Proc', 'ADC'):
typen = typename.lower()
for name in getattr(toc, 'Get{0}'.format(typename))():
yield name, typen | python | def _iter_channels(framefile):
"""Yields the name and type of each channel in a GWF file TOC
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
framefile : `str`, `LDAStools.frameCPP.IFrameFStream`
path of GWF file, or open file stream, to read
"""
from LDAStools import frameCPP
if not isinstance(framefile, frameCPP.IFrameFStream):
framefile = open_gwf(framefile, 'r')
toc = framefile.GetTOC()
for typename in ('Sim', 'Proc', 'ADC'):
typen = typename.lower()
for name in getattr(toc, 'Get{0}'.format(typename))():
yield name, typen | [
"def",
"_iter_channels",
"(",
"framefile",
")",
":",
"from",
"LDAStools",
"import",
"frameCPP",
"if",
"not",
"isinstance",
"(",
"framefile",
",",
"frameCPP",
".",
"IFrameFStream",
")",
":",
"framefile",
"=",
"open_gwf",
"(",
"framefile",
",",
"'r'",
")",
"to... | Yields the name and type of each channel in a GWF file TOC
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
framefile : `str`, `LDAStools.frameCPP.IFrameFStream`
path of GWF file, or open file stream, to read | [
"Yields",
"the",
"name",
"and",
"type",
"of",
"each",
"channel",
"in",
"a",
"GWF",
"file",
"TOC"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/gwf.py#L305-L322 | train | 211,297 |
gwpy/gwpy | gwpy/io/gwf.py | data_segments | def data_segments(paths, channel, warn=True):
"""Returns the segments containing data for a channel
**Requires:** |LDAStools.frameCPP|_
A frame is considered to contain data if a valid FrData structure
(of any type) exists for the channel in that frame. No checks
are directly made against the underlying FrVect structures.
Parameters
----------
paths : `list` of `str`
a list of GWF file paths
channel : `str`
the name to check in each frame
warn : `bool`, optional
emit a `UserWarning` when a channel is not found in a frame
Returns
-------
segments : `~gwpy.segments.SegmentList`
the list of segments containing data
"""
segments = SegmentList()
for path in paths:
segments.extend(_gwf_channel_segments(path, channel, warn=warn))
return segments.coalesce() | python | def data_segments(paths, channel, warn=True):
"""Returns the segments containing data for a channel
**Requires:** |LDAStools.frameCPP|_
A frame is considered to contain data if a valid FrData structure
(of any type) exists for the channel in that frame. No checks
are directly made against the underlying FrVect structures.
Parameters
----------
paths : `list` of `str`
a list of GWF file paths
channel : `str`
the name to check in each frame
warn : `bool`, optional
emit a `UserWarning` when a channel is not found in a frame
Returns
-------
segments : `~gwpy.segments.SegmentList`
the list of segments containing data
"""
segments = SegmentList()
for path in paths:
segments.extend(_gwf_channel_segments(path, channel, warn=warn))
return segments.coalesce() | [
"def",
"data_segments",
"(",
"paths",
",",
"channel",
",",
"warn",
"=",
"True",
")",
":",
"segments",
"=",
"SegmentList",
"(",
")",
"for",
"path",
"in",
"paths",
":",
"segments",
".",
"extend",
"(",
"_gwf_channel_segments",
"(",
"path",
",",
"channel",
"... | Returns the segments containing data for a channel
**Requires:** |LDAStools.frameCPP|_
A frame is considered to contain data if a valid FrData structure
(of any type) exists for the channel in that frame. No checks
are directly made against the underlying FrVect structures.
Parameters
----------
paths : `list` of `str`
a list of GWF file paths
channel : `str`
the name to check in each frame
warn : `bool`, optional
emit a `UserWarning` when a channel is not found in a frame
Returns
-------
segments : `~gwpy.segments.SegmentList`
the list of segments containing data | [
"Returns",
"the",
"segments",
"containing",
"data",
"for",
"a",
"channel"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/gwf.py#L325-L353 | train | 211,298 |
gwpy/gwpy | gwpy/io/gwf.py | _gwf_channel_segments | def _gwf_channel_segments(path, channel, warn=True):
"""Yields the segments containing data for ``channel`` in this GWF path
"""
stream = open_gwf(path)
# get segments for frames
toc = stream.GetTOC()
secs = toc.GetGTimeS()
nano = toc.GetGTimeN()
dur = toc.GetDt()
readers = [getattr(stream, 'ReadFr{0}Data'.format(type_.title())) for
type_ in ("proc", "sim", "adc")]
# for each segment, try and read the data for this channel
for i, (s, ns, dt) in enumerate(zip(secs, nano, dur)):
for read in readers:
try:
read(i, channel)
except (IndexError, ValueError):
continue
readers = [read] # use this one from now on
epoch = LIGOTimeGPS(s, ns)
yield Segment(epoch, epoch + dt)
break
else: # none of the readers worked for this channel, warn
if warn:
warnings.warn(
"{0!r} not found in frame {1} of {2}".format(
channel, i, path),
) | python | def _gwf_channel_segments(path, channel, warn=True):
"""Yields the segments containing data for ``channel`` in this GWF path
"""
stream = open_gwf(path)
# get segments for frames
toc = stream.GetTOC()
secs = toc.GetGTimeS()
nano = toc.GetGTimeN()
dur = toc.GetDt()
readers = [getattr(stream, 'ReadFr{0}Data'.format(type_.title())) for
type_ in ("proc", "sim", "adc")]
# for each segment, try and read the data for this channel
for i, (s, ns, dt) in enumerate(zip(secs, nano, dur)):
for read in readers:
try:
read(i, channel)
except (IndexError, ValueError):
continue
readers = [read] # use this one from now on
epoch = LIGOTimeGPS(s, ns)
yield Segment(epoch, epoch + dt)
break
else: # none of the readers worked for this channel, warn
if warn:
warnings.warn(
"{0!r} not found in frame {1} of {2}".format(
channel, i, path),
) | [
"def",
"_gwf_channel_segments",
"(",
"path",
",",
"channel",
",",
"warn",
"=",
"True",
")",
":",
"stream",
"=",
"open_gwf",
"(",
"path",
")",
"# get segments for frames",
"toc",
"=",
"stream",
".",
"GetTOC",
"(",
")",
"secs",
"=",
"toc",
".",
"GetGTimeS",
... | Yields the segments containing data for ``channel`` in this GWF path | [
"Yields",
"the",
"segments",
"containing",
"data",
"for",
"channel",
"in",
"this",
"GWF",
"path"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/gwf.py#L356-L385 | train | 211,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.