repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
gwpy/gwpy | gwpy/frequencyseries/_fdcommon.py | fdfilter | def fdfilter(data, *filt, **kwargs):
"""Filter a frequency-domain data object
See Also
--------
gwpy.frequencyseries.FrequencySeries.filter
gwpy.spectrogram.Spectrogram.filter
"""
# parse keyword args
inplace = kwargs.pop('inplace', False)
analog = kwargs.pop('analog', False)
fs = kwargs.pop('sample_rate', None)
if kwargs:
raise TypeError("filter() got an unexpected keyword argument '%s'"
% list(kwargs.keys())[0])
# parse filter
if fs is None:
fs = 2 * (data.shape[-1] * data.df).to('Hz').value
form, filt = parse_filter(filt, analog=analog, sample_rate=fs)
lti = signal.lti(*filt)
# generate frequency response
freqs = data.frequencies.value.copy()
fresp = numpy.nan_to_num(abs(lti.freqresp(w=freqs)[1]))
# apply to array
if inplace:
data *= fresp
return data
new = data * fresp
return new | python | def fdfilter(data, *filt, **kwargs):
"""Filter a frequency-domain data object
See Also
--------
gwpy.frequencyseries.FrequencySeries.filter
gwpy.spectrogram.Spectrogram.filter
"""
# parse keyword args
inplace = kwargs.pop('inplace', False)
analog = kwargs.pop('analog', False)
fs = kwargs.pop('sample_rate', None)
if kwargs:
raise TypeError("filter() got an unexpected keyword argument '%s'"
% list(kwargs.keys())[0])
# parse filter
if fs is None:
fs = 2 * (data.shape[-1] * data.df).to('Hz').value
form, filt = parse_filter(filt, analog=analog, sample_rate=fs)
lti = signal.lti(*filt)
# generate frequency response
freqs = data.frequencies.value.copy()
fresp = numpy.nan_to_num(abs(lti.freqresp(w=freqs)[1]))
# apply to array
if inplace:
data *= fresp
return data
new = data * fresp
return new | [
"def",
"fdfilter",
"(",
"data",
",",
"*",
"filt",
",",
"*",
"*",
"kwargs",
")",
":",
"# parse keyword args",
"inplace",
"=",
"kwargs",
".",
"pop",
"(",
"'inplace'",
",",
"False",
")",
"analog",
"=",
"kwargs",
".",
"pop",
"(",
"'analog'",
",",
"False",
... | Filter a frequency-domain data object
See Also
--------
gwpy.frequencyseries.FrequencySeries.filter
gwpy.spectrogram.Spectrogram.filter | [
"Filter",
"a",
"frequency",
"-",
"domain",
"data",
"object"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/frequencyseries/_fdcommon.py#L33-L64 | train | 211,300 |
gwpy/gwpy | gwpy/time/__main__.py | main | def main(args=None):
"""Parse command-line arguments, tconvert inputs, and print
"""
# define command line arguments
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-V", "--version", action="version",
version=__version__,
help="show version number and exit")
parser.add_argument("-l", "--local", action="store_true", default=False,
help="print datetimes in local timezone")
parser.add_argument("-f", "--format", type=str, action="store",
default=r"%Y-%m-%d %H:%M:%S.%f %Z",
help="output datetime format (default: %(default)r)")
parser.add_argument("input", help="GPS or datetime string to convert",
nargs="*")
# parse and convert
args = parser.parse_args(args)
input_ = " ".join(args.input)
output = tconvert(input_)
# print (now with timezones!)
if isinstance(output, datetime.datetime):
output = output.replace(tzinfo=tz.tzutc())
if args.local:
output = output.astimezone(tz.tzlocal())
print(output.strftime(args.format))
else:
print(output) | python | def main(args=None):
"""Parse command-line arguments, tconvert inputs, and print
"""
# define command line arguments
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-V", "--version", action="version",
version=__version__,
help="show version number and exit")
parser.add_argument("-l", "--local", action="store_true", default=False,
help="print datetimes in local timezone")
parser.add_argument("-f", "--format", type=str, action="store",
default=r"%Y-%m-%d %H:%M:%S.%f %Z",
help="output datetime format (default: %(default)r)")
parser.add_argument("input", help="GPS or datetime string to convert",
nargs="*")
# parse and convert
args = parser.parse_args(args)
input_ = " ".join(args.input)
output = tconvert(input_)
# print (now with timezones!)
if isinstance(output, datetime.datetime):
output = output.replace(tzinfo=tz.tzutc())
if args.local:
output = output.astimezone(tz.tzlocal())
print(output.strftime(args.format))
else:
print(output) | [
"def",
"main",
"(",
"args",
"=",
"None",
")",
":",
"# define command line arguments",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"__doc__",
")",
"parser",
".",
"add_argument",
"(",
"\"-V\"",
",",
"\"--version\"",
",",
"action",
... | Parse command-line arguments, tconvert inputs, and print | [
"Parse",
"command",
"-",
"line",
"arguments",
"tconvert",
"inputs",
"and",
"print"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/time/__main__.py#L35-L63 | train | 211,301 |
gwpy/gwpy | gwpy/cli/cliproduct.py | timer | def timer(func):
"""Time a method and print its duration after return
"""
name = func.__name__
@wraps(func)
def timed_func(self, *args, **kwargs): # pylint: disable=missing-docstring
_start = time.time()
out = func(self, *args, **kwargs)
self.log(2, '{0} took {1:.1f} sec'.format(name, time.time() - _start))
return out
return timed_func | python | def timer(func):
"""Time a method and print its duration after return
"""
name = func.__name__
@wraps(func)
def timed_func(self, *args, **kwargs): # pylint: disable=missing-docstring
_start = time.time()
out = func(self, *args, **kwargs)
self.log(2, '{0} took {1:.1f} sec'.format(name, time.time() - _start))
return out
return timed_func | [
"def",
"timer",
"(",
"func",
")",
":",
"name",
"=",
"func",
".",
"__name__",
"@",
"wraps",
"(",
"func",
")",
"def",
"timed_func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=missing-docstring",
"_start",
"=",
"t... | Time a method and print its duration after return | [
"Time",
"a",
"method",
"and",
"print",
"its",
"duration",
"after",
"return"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/cli/cliproduct.py#L57-L69 | train | 211,302 |
gwpy/gwpy | gwpy/cli/cliproduct.py | to_float | def to_float(unit):
"""Factory to build a converter from quantity string to float
Examples
--------
>>> conv = to_float('Hz')
>>> conv('4 mHz')
>>> 0.004
"""
def converter(x):
"""Convert the input to a `float` in %s
"""
return Quantity(x, unit).value
converter.__doc__ %= str(unit) # pylint: disable=no-member
return converter | python | def to_float(unit):
"""Factory to build a converter from quantity string to float
Examples
--------
>>> conv = to_float('Hz')
>>> conv('4 mHz')
>>> 0.004
"""
def converter(x):
"""Convert the input to a `float` in %s
"""
return Quantity(x, unit).value
converter.__doc__ %= str(unit) # pylint: disable=no-member
return converter | [
"def",
"to_float",
"(",
"unit",
")",
":",
"def",
"converter",
"(",
"x",
")",
":",
"\"\"\"Convert the input to a `float` in %s\n \"\"\"",
"return",
"Quantity",
"(",
"x",
",",
"unit",
")",
".",
"value",
"converter",
".",
"__doc__",
"%=",
"str",
"(",
"unit... | Factory to build a converter from quantity string to float
Examples
--------
>>> conv = to_float('Hz')
>>> conv('4 mHz')
>>> 0.004 | [
"Factory",
"to",
"build",
"a",
"converter",
"from",
"quantity",
"string",
"to",
"float"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/cli/cliproduct.py#L72-L87 | train | 211,303 |
gwpy/gwpy | gwpy/plot/axes.py | log_norm | def log_norm(func):
"""Wrap ``func`` to handle custom gwpy keywords for a LogNorm colouring
"""
@wraps(func)
def decorated_func(*args, **kwargs):
norm, kwargs = format_norm(kwargs)
kwargs['norm'] = norm
return func(*args, **kwargs)
return decorated_func | python | def log_norm(func):
"""Wrap ``func`` to handle custom gwpy keywords for a LogNorm colouring
"""
@wraps(func)
def decorated_func(*args, **kwargs):
norm, kwargs = format_norm(kwargs)
kwargs['norm'] = norm
return func(*args, **kwargs)
return decorated_func | [
"def",
"log_norm",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"decorated_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"norm",
",",
"kwargs",
"=",
"format_norm",
"(",
"kwargs",
")",
"kwargs",
"[",
"'norm'",
"]",
"="... | Wrap ``func`` to handle custom gwpy keywords for a LogNorm colouring | [
"Wrap",
"func",
"to",
"handle",
"custom",
"gwpy",
"keywords",
"for",
"a",
"LogNorm",
"colouring"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/axes.py#L51-L59 | train | 211,304 |
gwpy/gwpy | gwpy/plot/axes.py | xlim_as_gps | def xlim_as_gps(func):
"""Wrap ``func`` to handle pass limit inputs through `gwpy.time.to_gps`
"""
@wraps(func)
def wrapped_func(self, left=None, right=None, **kw):
if right is None and numpy.iterable(left):
left, right = left
kw['left'] = left
kw['right'] = right
gpsscale = self.get_xscale() in GPS_SCALES
for key in ('left', 'right'):
if gpsscale:
try:
kw[key] = numpy.longdouble(str(to_gps(kw[key])))
except TypeError:
pass
return func(self, **kw)
return wrapped_func | python | def xlim_as_gps(func):
"""Wrap ``func`` to handle pass limit inputs through `gwpy.time.to_gps`
"""
@wraps(func)
def wrapped_func(self, left=None, right=None, **kw):
if right is None and numpy.iterable(left):
left, right = left
kw['left'] = left
kw['right'] = right
gpsscale = self.get_xscale() in GPS_SCALES
for key in ('left', 'right'):
if gpsscale:
try:
kw[key] = numpy.longdouble(str(to_gps(kw[key])))
except TypeError:
pass
return func(self, **kw)
return wrapped_func | [
"def",
"xlim_as_gps",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapped_func",
"(",
"self",
",",
"left",
"=",
"None",
",",
"right",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"if",
"right",
"is",
"None",
"and",
"numpy",
".",... | Wrap ``func`` to handle pass limit inputs through `gwpy.time.to_gps` | [
"Wrap",
"func",
"to",
"handle",
"pass",
"limit",
"inputs",
"through",
"gwpy",
".",
"time",
".",
"to_gps"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/axes.py#L62-L79 | train | 211,305 |
gwpy/gwpy | gwpy/plot/axes.py | restore_grid | def restore_grid(func):
"""Wrap ``func`` to preserve the Axes current grid settings.
"""
@wraps(func)
def wrapped_func(self, *args, **kwargs):
grid = (self.xaxis._gridOnMinor, self.xaxis._gridOnMajor,
self.yaxis._gridOnMinor, self.yaxis._gridOnMajor)
try:
return func(self, *args, **kwargs)
finally:
# reset grid
self.xaxis.grid(grid[0], which="minor")
self.xaxis.grid(grid[1], which="major")
self.yaxis.grid(grid[2], which="minor")
self.yaxis.grid(grid[3], which="major")
return wrapped_func | python | def restore_grid(func):
"""Wrap ``func`` to preserve the Axes current grid settings.
"""
@wraps(func)
def wrapped_func(self, *args, **kwargs):
grid = (self.xaxis._gridOnMinor, self.xaxis._gridOnMajor,
self.yaxis._gridOnMinor, self.yaxis._gridOnMajor)
try:
return func(self, *args, **kwargs)
finally:
# reset grid
self.xaxis.grid(grid[0], which="minor")
self.xaxis.grid(grid[1], which="major")
self.yaxis.grid(grid[2], which="minor")
self.yaxis.grid(grid[3], which="major")
return wrapped_func | [
"def",
"restore_grid",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapped_func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"grid",
"=",
"(",
"self",
".",
"xaxis",
".",
"_gridOnMinor",
",",
"self",
".",
... | Wrap ``func`` to preserve the Axes current grid settings. | [
"Wrap",
"func",
"to",
"preserve",
"the",
"Axes",
"current",
"grid",
"settings",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/axes.py#L82-L97 | train | 211,306 |
gwpy/gwpy | gwpy/plot/axes.py | Axes.set_epoch | def set_epoch(self, epoch):
"""Set the epoch for the current GPS scale.
This method will fail if the current X-axis scale isn't one of
the GPS scales. See :ref:`gwpy-plot-gps` for more details.
Parameters
----------
epoch : `float`, `str`
GPS-compatible time or date object, anything parseable by
:func:`~gwpy.time.to_gps` is fine.
"""
scale = self.get_xscale()
return self.set_xscale(scale, epoch=epoch) | python | def set_epoch(self, epoch):
"""Set the epoch for the current GPS scale.
This method will fail if the current X-axis scale isn't one of
the GPS scales. See :ref:`gwpy-plot-gps` for more details.
Parameters
----------
epoch : `float`, `str`
GPS-compatible time or date object, anything parseable by
:func:`~gwpy.time.to_gps` is fine.
"""
scale = self.get_xscale()
return self.set_xscale(scale, epoch=epoch) | [
"def",
"set_epoch",
"(",
"self",
",",
"epoch",
")",
":",
"scale",
"=",
"self",
".",
"get_xscale",
"(",
")",
"return",
"self",
".",
"set_xscale",
"(",
"scale",
",",
"epoch",
"=",
"epoch",
")"
] | Set the epoch for the current GPS scale.
This method will fail if the current X-axis scale isn't one of
the GPS scales. See :ref:`gwpy-plot-gps` for more details.
Parameters
----------
epoch : `float`, `str`
GPS-compatible time or date object, anything parseable by
:func:`~gwpy.time.to_gps` is fine. | [
"Set",
"the",
"epoch",
"for",
"the",
"current",
"GPS",
"scale",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/axes.py#L149-L162 | train | 211,307 |
gwpy/gwpy | gwpy/plot/axes.py | Axes.imshow | def imshow(self, array, *args, **kwargs):
"""Display an image, i.e. data on a 2D regular raster.
If ``array`` is a :class:`~gwpy.types.Array2D` (e.g. a
:class:`~gwpy.spectrogram.Spectrogram`), then the defaults are
_different_ to those in the upstream
:meth:`~matplotlib.axes.Axes.imshow` method. Namely, the defaults are
- ``origin='lower'`` (coordinates start in lower-left corner)
- ``aspect='auto'`` (pixels are not forced to be square)
- ``interpolation='none'`` (no image interpolation is used)
In all other usage, the defaults from the upstream matplotlib method
are unchanged.
Parameters
----------
array : array-like or PIL image
The image data.
*args, **kwargs
All arguments and keywords are passed to the inherited
:meth:`~matplotlib.axes.Axes.imshow` method.
See Also
--------
matplotlib.axes.Axes.imshow
for details of the image rendering
"""
if isinstance(array, Array2D):
return self._imshow_array2d(array, *args, **kwargs)
image = super(Axes, self).imshow(array, *args, **kwargs)
self.autoscale(enable=None, axis='both', tight=None)
return image | python | def imshow(self, array, *args, **kwargs):
"""Display an image, i.e. data on a 2D regular raster.
If ``array`` is a :class:`~gwpy.types.Array2D` (e.g. a
:class:`~gwpy.spectrogram.Spectrogram`), then the defaults are
_different_ to those in the upstream
:meth:`~matplotlib.axes.Axes.imshow` method. Namely, the defaults are
- ``origin='lower'`` (coordinates start in lower-left corner)
- ``aspect='auto'`` (pixels are not forced to be square)
- ``interpolation='none'`` (no image interpolation is used)
In all other usage, the defaults from the upstream matplotlib method
are unchanged.
Parameters
----------
array : array-like or PIL image
The image data.
*args, **kwargs
All arguments and keywords are passed to the inherited
:meth:`~matplotlib.axes.Axes.imshow` method.
See Also
--------
matplotlib.axes.Axes.imshow
for details of the image rendering
"""
if isinstance(array, Array2D):
return self._imshow_array2d(array, *args, **kwargs)
image = super(Axes, self).imshow(array, *args, **kwargs)
self.autoscale(enable=None, axis='both', tight=None)
return image | [
"def",
"imshow",
"(",
"self",
",",
"array",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"array",
",",
"Array2D",
")",
":",
"return",
"self",
".",
"_imshow_array2d",
"(",
"array",
",",
"*",
"args",
",",
"*",
"*",
... | Display an image, i.e. data on a 2D regular raster.
If ``array`` is a :class:`~gwpy.types.Array2D` (e.g. a
:class:`~gwpy.spectrogram.Spectrogram`), then the defaults are
_different_ to those in the upstream
:meth:`~matplotlib.axes.Axes.imshow` method. Namely, the defaults are
- ``origin='lower'`` (coordinates start in lower-left corner)
- ``aspect='auto'`` (pixels are not forced to be square)
- ``interpolation='none'`` (no image interpolation is used)
In all other usage, the defaults from the upstream matplotlib method
are unchanged.
Parameters
----------
array : array-like or PIL image
The image data.
*args, **kwargs
All arguments and keywords are passed to the inherited
:meth:`~matplotlib.axes.Axes.imshow` method.
See Also
--------
matplotlib.axes.Axes.imshow
for details of the image rendering | [
"Display",
"an",
"image",
"i",
".",
"e",
".",
"data",
"on",
"a",
"2D",
"regular",
"raster",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/axes.py#L202-L236 | train | 211,308 |
gwpy/gwpy | gwpy/plot/axes.py | Axes._imshow_array2d | def _imshow_array2d(self, array, origin='lower', interpolation='none',
aspect='auto', **kwargs):
"""Render an `~gwpy.types.Array2D` using `Axes.imshow`
"""
# NOTE: If you change the defaults for this method, please update
# the docstring for `imshow` above.
# calculate extent
extent = tuple(array.xspan) + tuple(array.yspan)
if self.get_xscale() == 'log' and extent[0] == 0.:
extent = (1e-300,) + extent[1:]
if self.get_yscale() == 'log' and extent[2] == 0.:
extent = extent[:2] + (1e-300,) + extent[3:]
kwargs.setdefault('extent', extent)
return self.imshow(array.value.T, origin=origin, aspect=aspect,
interpolation=interpolation, **kwargs) | python | def _imshow_array2d(self, array, origin='lower', interpolation='none',
aspect='auto', **kwargs):
"""Render an `~gwpy.types.Array2D` using `Axes.imshow`
"""
# NOTE: If you change the defaults for this method, please update
# the docstring for `imshow` above.
# calculate extent
extent = tuple(array.xspan) + tuple(array.yspan)
if self.get_xscale() == 'log' and extent[0] == 0.:
extent = (1e-300,) + extent[1:]
if self.get_yscale() == 'log' and extent[2] == 0.:
extent = extent[:2] + (1e-300,) + extent[3:]
kwargs.setdefault('extent', extent)
return self.imshow(array.value.T, origin=origin, aspect=aspect,
interpolation=interpolation, **kwargs) | [
"def",
"_imshow_array2d",
"(",
"self",
",",
"array",
",",
"origin",
"=",
"'lower'",
",",
"interpolation",
"=",
"'none'",
",",
"aspect",
"=",
"'auto'",
",",
"*",
"*",
"kwargs",
")",
":",
"# NOTE: If you change the defaults for this method, please update",
"# th... | Render an `~gwpy.types.Array2D` using `Axes.imshow` | [
"Render",
"an",
"~gwpy",
".",
"types",
".",
"Array2D",
"using",
"Axes",
".",
"imshow"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/axes.py#L238-L254 | train | 211,309 |
gwpy/gwpy | gwpy/plot/axes.py | Axes.pcolormesh | def pcolormesh(self, *args, **kwargs):
"""Create a pseudocolor plot with a non-regular rectangular grid.
When using GWpy, this method can be called with a single argument
that is an :class:`~gwpy.types.Array2D`, for which the ``X`` and ``Y``
coordinate arrays will be determined from the indexing.
In all other usage, all ``args`` and ``kwargs`` are passed directly
to :meth:`~matplotlib.axes.Axes.pcolormesh`.
Notes
-----
Unlike the upstream :meth:`matplotlib.axes.Axes.pcolormesh`,
this method respects the current grid settings.
See Also
--------
matplotlib.axes.Axes.pcolormesh
"""
if len(args) == 1 and isinstance(args[0], Array2D):
return self._pcolormesh_array2d(*args, **kwargs)
return super(Axes, self).pcolormesh(*args, **kwargs) | python | def pcolormesh(self, *args, **kwargs):
"""Create a pseudocolor plot with a non-regular rectangular grid.
When using GWpy, this method can be called with a single argument
that is an :class:`~gwpy.types.Array2D`, for which the ``X`` and ``Y``
coordinate arrays will be determined from the indexing.
In all other usage, all ``args`` and ``kwargs`` are passed directly
to :meth:`~matplotlib.axes.Axes.pcolormesh`.
Notes
-----
Unlike the upstream :meth:`matplotlib.axes.Axes.pcolormesh`,
this method respects the current grid settings.
See Also
--------
matplotlib.axes.Axes.pcolormesh
"""
if len(args) == 1 and isinstance(args[0], Array2D):
return self._pcolormesh_array2d(*args, **kwargs)
return super(Axes, self).pcolormesh(*args, **kwargs) | [
"def",
"pcolormesh",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"1",
"and",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"Array2D",
")",
":",
"return",
"self",
".",
"_pcolormesh_array2d"... | Create a pseudocolor plot with a non-regular rectangular grid.
When using GWpy, this method can be called with a single argument
that is an :class:`~gwpy.types.Array2D`, for which the ``X`` and ``Y``
coordinate arrays will be determined from the indexing.
In all other usage, all ``args`` and ``kwargs`` are passed directly
to :meth:`~matplotlib.axes.Axes.pcolormesh`.
Notes
-----
Unlike the upstream :meth:`matplotlib.axes.Axes.pcolormesh`,
this method respects the current grid settings.
See Also
--------
matplotlib.axes.Axes.pcolormesh | [
"Create",
"a",
"pseudocolor",
"plot",
"with",
"a",
"non",
"-",
"regular",
"rectangular",
"grid",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/axes.py#L258-L279 | train | 211,310 |
gwpy/gwpy | gwpy/plot/axes.py | Axes._pcolormesh_array2d | def _pcolormesh_array2d(self, array, *args, **kwargs):
"""Render an `~gwpy.types.Array2D` using `Axes.pcolormesh`
"""
x = numpy.concatenate((array.xindex.value, array.xspan[-1:]))
y = numpy.concatenate((array.yindex.value, array.yspan[-1:]))
xcoord, ycoord = numpy.meshgrid(x, y, copy=False, sparse=True)
return self.pcolormesh(xcoord, ycoord, array.value.T, *args, **kwargs) | python | def _pcolormesh_array2d(self, array, *args, **kwargs):
"""Render an `~gwpy.types.Array2D` using `Axes.pcolormesh`
"""
x = numpy.concatenate((array.xindex.value, array.xspan[-1:]))
y = numpy.concatenate((array.yindex.value, array.yspan[-1:]))
xcoord, ycoord = numpy.meshgrid(x, y, copy=False, sparse=True)
return self.pcolormesh(xcoord, ycoord, array.value.T, *args, **kwargs) | [
"def",
"_pcolormesh_array2d",
"(",
"self",
",",
"array",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"x",
"=",
"numpy",
".",
"concatenate",
"(",
"(",
"array",
".",
"xindex",
".",
"value",
",",
"array",
".",
"xspan",
"[",
"-",
"1",
":",
"... | Render an `~gwpy.types.Array2D` using `Axes.pcolormesh` | [
"Render",
"an",
"~gwpy",
".",
"types",
".",
"Array2D",
"using",
"Axes",
".",
"pcolormesh"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/axes.py#L281-L287 | train | 211,311 |
gwpy/gwpy | gwpy/plot/axes.py | Axes.plot_mmm | def plot_mmm(self, data, lower=None, upper=None, **kwargs):
"""Plot a `Series` as a line, with a shaded region around it.
The ``data`` `Series` is drawn, while the ``lower`` and ``upper``
`Series` are plotted lightly below and above, with a fill
between them and the ``data``.
All three `Series` should have the same `~Series.index` array.
Parameters
----------
data : `~gwpy.types.Series`
Data to plot normally.
lower : `~gwpy.types.Series`
Lower boundary (on Y-axis) for shade.
upper : `~gwpy.types.Series`
Upper boundary (on Y-axis) for shade.
**kwargs
Any other keyword arguments acceptable for
:meth:`~matplotlib.Axes.plot`.
Returns
-------
artists : `tuple`
All of the drawn artists:
- `~matplotlib.lines.Line2d` for ``data``,
- `~matplotlib.lines.Line2D` for ``lower``, if given
- `~matplotlib.lines.Line2D` for ``upper``, if given
- `~matplitlib.collections.PolyCollection` for shading
See Also
--------
matplotlib.axes.Axes.plot
for a full description of acceptable ``*args`` and ``**kwargs``
"""
alpha = kwargs.pop('alpha', .1)
# plot mean
line, = self.plot(data, **kwargs)
out = [line]
# modify keywords for shading
kwargs.update({
'label': '',
'linewidth': line.get_linewidth() / 2,
'color': line.get_color(),
'alpha': alpha * 2,
})
# plot lower and upper Series
fill = [data.xindex.value, data.value, data.value]
for i, bound in enumerate((lower, upper)):
if bound is not None:
out.extend(self.plot(bound, **kwargs))
fill[i+1] = bound.value
# fill between
out.append(self.fill_between(
*fill, alpha=alpha, color=kwargs['color'],
rasterized=kwargs.get('rasterized', True)))
return out | python | def plot_mmm(self, data, lower=None, upper=None, **kwargs):
"""Plot a `Series` as a line, with a shaded region around it.
The ``data`` `Series` is drawn, while the ``lower`` and ``upper``
`Series` are plotted lightly below and above, with a fill
between them and the ``data``.
All three `Series` should have the same `~Series.index` array.
Parameters
----------
data : `~gwpy.types.Series`
Data to plot normally.
lower : `~gwpy.types.Series`
Lower boundary (on Y-axis) for shade.
upper : `~gwpy.types.Series`
Upper boundary (on Y-axis) for shade.
**kwargs
Any other keyword arguments acceptable for
:meth:`~matplotlib.Axes.plot`.
Returns
-------
artists : `tuple`
All of the drawn artists:
- `~matplotlib.lines.Line2d` for ``data``,
- `~matplotlib.lines.Line2D` for ``lower``, if given
- `~matplotlib.lines.Line2D` for ``upper``, if given
- `~matplitlib.collections.PolyCollection` for shading
See Also
--------
matplotlib.axes.Axes.plot
for a full description of acceptable ``*args`` and ``**kwargs``
"""
alpha = kwargs.pop('alpha', .1)
# plot mean
line, = self.plot(data, **kwargs)
out = [line]
# modify keywords for shading
kwargs.update({
'label': '',
'linewidth': line.get_linewidth() / 2,
'color': line.get_color(),
'alpha': alpha * 2,
})
# plot lower and upper Series
fill = [data.xindex.value, data.value, data.value]
for i, bound in enumerate((lower, upper)):
if bound is not None:
out.extend(self.plot(bound, **kwargs))
fill[i+1] = bound.value
# fill between
out.append(self.fill_between(
*fill, alpha=alpha, color=kwargs['color'],
rasterized=kwargs.get('rasterized', True)))
return out | [
"def",
"plot_mmm",
"(",
"self",
",",
"data",
",",
"lower",
"=",
"None",
",",
"upper",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"alpha",
"=",
"kwargs",
".",
"pop",
"(",
"'alpha'",
",",
".1",
")",
"# plot mean",
"line",
",",
"=",
"self",
"."... | Plot a `Series` as a line, with a shaded region around it.
The ``data`` `Series` is drawn, while the ``lower`` and ``upper``
`Series` are plotted lightly below and above, with a fill
between them and the ``data``.
All three `Series` should have the same `~Series.index` array.
Parameters
----------
data : `~gwpy.types.Series`
Data to plot normally.
lower : `~gwpy.types.Series`
Lower boundary (on Y-axis) for shade.
upper : `~gwpy.types.Series`
Upper boundary (on Y-axis) for shade.
**kwargs
Any other keyword arguments acceptable for
:meth:`~matplotlib.Axes.plot`.
Returns
-------
artists : `tuple`
All of the drawn artists:
- `~matplotlib.lines.Line2d` for ``data``,
- `~matplotlib.lines.Line2D` for ``lower``, if given
- `~matplotlib.lines.Line2D` for ``upper``, if given
- `~matplitlib.collections.PolyCollection` for shading
See Also
--------
matplotlib.axes.Axes.plot
for a full description of acceptable ``*args`` and ``**kwargs`` | [
"Plot",
"a",
"Series",
"as",
"a",
"line",
"with",
"a",
"shaded",
"region",
"around",
"it",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/axes.py#L335-L400 | train | 211,312 |
gwpy/gwpy | gwpy/plot/axes.py | Axes.tile | def tile(self, x, y, w, h, color=None,
anchor='center', edgecolors='face', linewidth=0.8,
**kwargs):
"""Plot rectanguler tiles based onto these `Axes`.
``x`` and ``y`` give the anchor point for each tile, with
``w`` and ``h`` giving the extent in the X and Y axis respectively.
Parameters
----------
x, y, w, h : `array_like`, shape (n, )
Input data
color : `array_like`, shape (n, )
Array of amplitudes for tile color
anchor : `str`, optional
Anchor point for tiles relative to ``(x, y)`` coordinates, one of
- ``'center'`` - center tile on ``(x, y)``
- ``'ll'`` - ``(x, y)`` defines lower-left corner of tile
- ``'lr'`` - ``(x, y)`` defines lower-right corner of tile
- ``'ul'`` - ``(x, y)`` defines upper-left corner of tile
- ``'ur'`` - ``(x, y)`` defines upper-right corner of tile
**kwargs
Other keywords are passed to
:meth:`~matplotlib.collections.PolyCollection`
Returns
-------
collection : `~matplotlib.collections.PolyCollection`
the collection of tiles drawn
Examples
--------
>>> import numpy
>>> from matplotlib import pyplot
>>> import gwpy.plot # to get gwpy's Axes
>>> x = numpy.arange(10)
>>> y = numpy.arange(x.size)
>>> w = numpy.ones_like(x) * .8
>>> h = numpy.ones_like(x) * .8
>>> fig = pyplot.figure()
>>> ax = fig.gca()
>>> ax.tile(x, y, w, h, anchor='ll')
>>> pyplot.show()
"""
# get color and sort
if color is not None and kwargs.get('c_sort', True):
sortidx = color.argsort()
x = x[sortidx]
y = y[sortidx]
w = w[sortidx]
h = h[sortidx]
color = color[sortidx]
# define how to make a polygon for each tile
if anchor == 'll':
def _poly(x, y, w, h):
return ((x, y), (x, y+h), (x+w, y+h), (x+w, y))
elif anchor == 'lr':
def _poly(x, y, w, h):
return ((x-w, y), (x-w, y+h), (x, y+h), (x, y))
elif anchor == 'ul':
def _poly(x, y, w, h):
return ((x, y-h), (x, y), (x+w, y), (x+w, y-h))
elif anchor == 'ur':
def _poly(x, y, w, h):
return ((x-w, y-h), (x-w, y), (x, y), (x, y-h))
elif anchor == 'center':
def _poly(x, y, w, h):
return ((x-w/2., y-h/2.), (x-w/2., y+h/2.),
(x+w/2., y+h/2.), (x+w/2., y-h/2.))
else:
raise ValueError("Unrecognised tile anchor {!r}".format(anchor))
# build collection
cmap = kwargs.pop('cmap', rcParams['image.cmap'])
coll = PolyCollection((_poly(*tile) for tile in zip(x, y, w, h)),
edgecolors=edgecolors, linewidth=linewidth,
**kwargs)
if color is not None:
coll.set_array(color)
coll.set_cmap(cmap)
out = self.add_collection(coll)
self.autoscale_view()
return out | python | def tile(self, x, y, w, h, color=None,
anchor='center', edgecolors='face', linewidth=0.8,
**kwargs):
"""Plot rectanguler tiles based onto these `Axes`.
``x`` and ``y`` give the anchor point for each tile, with
``w`` and ``h`` giving the extent in the X and Y axis respectively.
Parameters
----------
x, y, w, h : `array_like`, shape (n, )
Input data
color : `array_like`, shape (n, )
Array of amplitudes for tile color
anchor : `str`, optional
Anchor point for tiles relative to ``(x, y)`` coordinates, one of
- ``'center'`` - center tile on ``(x, y)``
- ``'ll'`` - ``(x, y)`` defines lower-left corner of tile
- ``'lr'`` - ``(x, y)`` defines lower-right corner of tile
- ``'ul'`` - ``(x, y)`` defines upper-left corner of tile
- ``'ur'`` - ``(x, y)`` defines upper-right corner of tile
**kwargs
Other keywords are passed to
:meth:`~matplotlib.collections.PolyCollection`
Returns
-------
collection : `~matplotlib.collections.PolyCollection`
the collection of tiles drawn
Examples
--------
>>> import numpy
>>> from matplotlib import pyplot
>>> import gwpy.plot # to get gwpy's Axes
>>> x = numpy.arange(10)
>>> y = numpy.arange(x.size)
>>> w = numpy.ones_like(x) * .8
>>> h = numpy.ones_like(x) * .8
>>> fig = pyplot.figure()
>>> ax = fig.gca()
>>> ax.tile(x, y, w, h, anchor='ll')
>>> pyplot.show()
"""
# get color and sort
if color is not None and kwargs.get('c_sort', True):
sortidx = color.argsort()
x = x[sortidx]
y = y[sortidx]
w = w[sortidx]
h = h[sortidx]
color = color[sortidx]
# define how to make a polygon for each tile
if anchor == 'll':
def _poly(x, y, w, h):
return ((x, y), (x, y+h), (x+w, y+h), (x+w, y))
elif anchor == 'lr':
def _poly(x, y, w, h):
return ((x-w, y), (x-w, y+h), (x, y+h), (x, y))
elif anchor == 'ul':
def _poly(x, y, w, h):
return ((x, y-h), (x, y), (x+w, y), (x+w, y-h))
elif anchor == 'ur':
def _poly(x, y, w, h):
return ((x-w, y-h), (x-w, y), (x, y), (x, y-h))
elif anchor == 'center':
def _poly(x, y, w, h):
return ((x-w/2., y-h/2.), (x-w/2., y+h/2.),
(x+w/2., y+h/2.), (x+w/2., y-h/2.))
else:
raise ValueError("Unrecognised tile anchor {!r}".format(anchor))
# build collection
cmap = kwargs.pop('cmap', rcParams['image.cmap'])
coll = PolyCollection((_poly(*tile) for tile in zip(x, y, w, h)),
edgecolors=edgecolors, linewidth=linewidth,
**kwargs)
if color is not None:
coll.set_array(color)
coll.set_cmap(cmap)
out = self.add_collection(coll)
self.autoscale_view()
return out | [
"def",
"tile",
"(",
"self",
",",
"x",
",",
"y",
",",
"w",
",",
"h",
",",
"color",
"=",
"None",
",",
"anchor",
"=",
"'center'",
",",
"edgecolors",
"=",
"'face'",
",",
"linewidth",
"=",
"0.8",
",",
"*",
"*",
"kwargs",
")",
":",
"# get color and sort"... | Plot rectanguler tiles based onto these `Axes`.
``x`` and ``y`` give the anchor point for each tile, with
``w`` and ``h`` giving the extent in the X and Y axis respectively.
Parameters
----------
x, y, w, h : `array_like`, shape (n, )
Input data
color : `array_like`, shape (n, )
Array of amplitudes for tile color
anchor : `str`, optional
Anchor point for tiles relative to ``(x, y)`` coordinates, one of
- ``'center'`` - center tile on ``(x, y)``
- ``'ll'`` - ``(x, y)`` defines lower-left corner of tile
- ``'lr'`` - ``(x, y)`` defines lower-right corner of tile
- ``'ul'`` - ``(x, y)`` defines upper-left corner of tile
- ``'ur'`` - ``(x, y)`` defines upper-right corner of tile
**kwargs
Other keywords are passed to
:meth:`~matplotlib.collections.PolyCollection`
Returns
-------
collection : `~matplotlib.collections.PolyCollection`
the collection of tiles drawn
Examples
--------
>>> import numpy
>>> from matplotlib import pyplot
>>> import gwpy.plot # to get gwpy's Axes
>>> x = numpy.arange(10)
>>> y = numpy.arange(x.size)
>>> w = numpy.ones_like(x) * .8
>>> h = numpy.ones_like(x) * .8
>>> fig = pyplot.figure()
>>> ax = fig.gca()
>>> ax.tile(x, y, w, h, anchor='ll')
>>> pyplot.show() | [
"Plot",
"rectanguler",
"tiles",
"based",
"onto",
"these",
"Axes",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/axes.py#L402-L492 | train | 211,313 |
gwpy/gwpy | gwpy/plot/axes.py | Axes.colorbar | def colorbar(self, mappable=None, **kwargs):
"""Add a `~matplotlib.colorbar.Colorbar` to these `Axes`
Parameters
----------
mappable : matplotlib data collection, optional
collection against which to map the colouring, default will
be the last added mappable artist (collection or image)
fraction : `float`, optional
fraction of space to steal from these `Axes` to make space
for the new axes, default is ``0.`` if ``use_axesgrid=True``
is given (default), otherwise default is ``.15`` to match
the upstream matplotlib default.
**kwargs
other keyword arguments to be passed to the
:meth:`Plot.colorbar` generator
Returns
-------
cbar : `~matplotlib.colorbar.Colorbar`
the newly added `Colorbar`
See Also
--------
Plot.colorbar
"""
fig = self.get_figure()
if kwargs.get('use_axesgrid', True):
kwargs.setdefault('fraction', 0.)
if kwargs.get('fraction', 0.) == 0.:
kwargs.setdefault('use_axesgrid', True)
mappable, kwargs = gcbar.process_colorbar_kwargs(
fig, mappable=mappable, ax=self, **kwargs)
if isinstance(fig, Plot):
# either we have created colorbar Axes using axesgrid1, or
# the user already gave use_axesgrid=False, so we forcefully
# disable axesgrid here in case fraction == 0., which causes
# gridspec colorbars to fail.
kwargs['use_axesgrid'] = False
return fig.colorbar(mappable, **kwargs) | python | def colorbar(self, mappable=None, **kwargs):
"""Add a `~matplotlib.colorbar.Colorbar` to these `Axes`
Parameters
----------
mappable : matplotlib data collection, optional
collection against which to map the colouring, default will
be the last added mappable artist (collection or image)
fraction : `float`, optional
fraction of space to steal from these `Axes` to make space
for the new axes, default is ``0.`` if ``use_axesgrid=True``
is given (default), otherwise default is ``.15`` to match
the upstream matplotlib default.
**kwargs
other keyword arguments to be passed to the
:meth:`Plot.colorbar` generator
Returns
-------
cbar : `~matplotlib.colorbar.Colorbar`
the newly added `Colorbar`
See Also
--------
Plot.colorbar
"""
fig = self.get_figure()
if kwargs.get('use_axesgrid', True):
kwargs.setdefault('fraction', 0.)
if kwargs.get('fraction', 0.) == 0.:
kwargs.setdefault('use_axesgrid', True)
mappable, kwargs = gcbar.process_colorbar_kwargs(
fig, mappable=mappable, ax=self, **kwargs)
if isinstance(fig, Plot):
# either we have created colorbar Axes using axesgrid1, or
# the user already gave use_axesgrid=False, so we forcefully
# disable axesgrid here in case fraction == 0., which causes
# gridspec colorbars to fail.
kwargs['use_axesgrid'] = False
return fig.colorbar(mappable, **kwargs) | [
"def",
"colorbar",
"(",
"self",
",",
"mappable",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"fig",
"=",
"self",
".",
"get_figure",
"(",
")",
"if",
"kwargs",
".",
"get",
"(",
"'use_axesgrid'",
",",
"True",
")",
":",
"kwargs",
".",
"setdefault",
... | Add a `~matplotlib.colorbar.Colorbar` to these `Axes`
Parameters
----------
mappable : matplotlib data collection, optional
collection against which to map the colouring, default will
be the last added mappable artist (collection or image)
fraction : `float`, optional
fraction of space to steal from these `Axes` to make space
for the new axes, default is ``0.`` if ``use_axesgrid=True``
is given (default), otherwise default is ``.15`` to match
the upstream matplotlib default.
**kwargs
other keyword arguments to be passed to the
:meth:`Plot.colorbar` generator
Returns
-------
cbar : `~matplotlib.colorbar.Colorbar`
the newly added `Colorbar`
See Also
--------
Plot.colorbar | [
"Add",
"a",
"~matplotlib",
".",
"colorbar",
".",
"Colorbar",
"to",
"these",
"Axes"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/axes.py#L515-L556 | train | 211,314 |
gwpy/gwpy | gwpy/time/_tconvert.py | tconvert | def tconvert(gpsordate='now'):
"""Convert GPS times to ISO-format date-times and vice-versa.
Parameters
----------
gpsordate : `float`, `astropy.time.Time`, `datetime.datetime`, ...
input gps or date to convert, many input types are supported
Returns
-------
date : `datetime.datetime` or `LIGOTimeGPS`
converted gps or date
Notes
-----
If the input object is a `float` or `LIGOTimeGPS`, it will get
converted from GPS format into a `datetime.datetime`, otherwise
the input will be converted into `LIGOTimeGPS`.
Examples
--------
Integers and floats are automatically converted from GPS to
`datetime.datetime`:
>>> from gwpy.time import tconvert
>>> tconvert(0)
datetime.datetime(1980, 1, 6, 0, 0)
>>> tconvert(1126259462.3910)
datetime.datetime(2015, 9, 14, 9, 50, 45, 391000)
while strings are automatically converted to `~gwpy.time.LIGOTimeGPS`:
>>> to_gps('Sep 14 2015 09:50:45.391')
LIGOTimeGPS(1126259462, 391000000)
Additionally, a few special-case words as supported, which all return
`~gwpy.time.LIGOTimeGPS`:
>>> tconvert('now')
>>> tconvert('today')
>>> tconvert('tomorrow')
>>> tconvert('yesterday')
"""
# convert from GPS into datetime
try:
float(gpsordate) # if we can 'float' it, then its probably a GPS time
except (TypeError, ValueError):
return to_gps(gpsordate)
return from_gps(gpsordate) | python | def tconvert(gpsordate='now'):
"""Convert GPS times to ISO-format date-times and vice-versa.
Parameters
----------
gpsordate : `float`, `astropy.time.Time`, `datetime.datetime`, ...
input gps or date to convert, many input types are supported
Returns
-------
date : `datetime.datetime` or `LIGOTimeGPS`
converted gps or date
Notes
-----
If the input object is a `float` or `LIGOTimeGPS`, it will get
converted from GPS format into a `datetime.datetime`, otherwise
the input will be converted into `LIGOTimeGPS`.
Examples
--------
Integers and floats are automatically converted from GPS to
`datetime.datetime`:
>>> from gwpy.time import tconvert
>>> tconvert(0)
datetime.datetime(1980, 1, 6, 0, 0)
>>> tconvert(1126259462.3910)
datetime.datetime(2015, 9, 14, 9, 50, 45, 391000)
while strings are automatically converted to `~gwpy.time.LIGOTimeGPS`:
>>> to_gps('Sep 14 2015 09:50:45.391')
LIGOTimeGPS(1126259462, 391000000)
Additionally, a few special-case words as supported, which all return
`~gwpy.time.LIGOTimeGPS`:
>>> tconvert('now')
>>> tconvert('today')
>>> tconvert('tomorrow')
>>> tconvert('yesterday')
"""
# convert from GPS into datetime
try:
float(gpsordate) # if we can 'float' it, then its probably a GPS time
except (TypeError, ValueError):
return to_gps(gpsordate)
return from_gps(gpsordate) | [
"def",
"tconvert",
"(",
"gpsordate",
"=",
"'now'",
")",
":",
"# convert from GPS into datetime",
"try",
":",
"float",
"(",
"gpsordate",
")",
"# if we can 'float' it, then its probably a GPS time",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"return",
"to_... | Convert GPS times to ISO-format date-times and vice-versa.
Parameters
----------
gpsordate : `float`, `astropy.time.Time`, `datetime.datetime`, ...
input gps or date to convert, many input types are supported
Returns
-------
date : `datetime.datetime` or `LIGOTimeGPS`
converted gps or date
Notes
-----
If the input object is a `float` or `LIGOTimeGPS`, it will get
converted from GPS format into a `datetime.datetime`, otherwise
the input will be converted into `LIGOTimeGPS`.
Examples
--------
Integers and floats are automatically converted from GPS to
`datetime.datetime`:
>>> from gwpy.time import tconvert
>>> tconvert(0)
datetime.datetime(1980, 1, 6, 0, 0)
>>> tconvert(1126259462.3910)
datetime.datetime(2015, 9, 14, 9, 50, 45, 391000)
while strings are automatically converted to `~gwpy.time.LIGOTimeGPS`:
>>> to_gps('Sep 14 2015 09:50:45.391')
LIGOTimeGPS(1126259462, 391000000)
Additionally, a few special-case words as supported, which all return
`~gwpy.time.LIGOTimeGPS`:
>>> tconvert('now')
>>> tconvert('today')
>>> tconvert('tomorrow')
>>> tconvert('yesterday') | [
"Convert",
"GPS",
"times",
"to",
"ISO",
"-",
"format",
"date",
"-",
"times",
"and",
"vice",
"-",
"versa",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/time/_tconvert.py#L42-L90 | train | 211,315 |
gwpy/gwpy | gwpy/time/_tconvert.py | from_gps | def from_gps(gps):
"""Convert a GPS time into a `datetime.datetime`.
Parameters
----------
gps : `LIGOTimeGPS`, `int`, `float`
GPS time to convert
Returns
-------
datetime : `datetime.datetime`
ISO-format datetime equivalent of input GPS time
Examples
--------
>>> from_gps(1167264018)
datetime.datetime(2017, 1, 1, 0, 0)
>>> from_gps(1126259462.3910)
datetime.datetime(2015, 9, 14, 9, 50, 45, 391000)
"""
try:
gps = LIGOTimeGPS(gps)
except (ValueError, TypeError, RuntimeError):
gps = LIGOTimeGPS(float(gps))
sec, nano = gps.gpsSeconds, gps.gpsNanoSeconds
date = Time(sec, format='gps', scale='utc').datetime
return date + datetime.timedelta(microseconds=nano*1e-3) | python | def from_gps(gps):
"""Convert a GPS time into a `datetime.datetime`.
Parameters
----------
gps : `LIGOTimeGPS`, `int`, `float`
GPS time to convert
Returns
-------
datetime : `datetime.datetime`
ISO-format datetime equivalent of input GPS time
Examples
--------
>>> from_gps(1167264018)
datetime.datetime(2017, 1, 1, 0, 0)
>>> from_gps(1126259462.3910)
datetime.datetime(2015, 9, 14, 9, 50, 45, 391000)
"""
try:
gps = LIGOTimeGPS(gps)
except (ValueError, TypeError, RuntimeError):
gps = LIGOTimeGPS(float(gps))
sec, nano = gps.gpsSeconds, gps.gpsNanoSeconds
date = Time(sec, format='gps', scale='utc').datetime
return date + datetime.timedelta(microseconds=nano*1e-3) | [
"def",
"from_gps",
"(",
"gps",
")",
":",
"try",
":",
"gps",
"=",
"LIGOTimeGPS",
"(",
"gps",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
",",
"RuntimeError",
")",
":",
"gps",
"=",
"LIGOTimeGPS",
"(",
"float",
"(",
"gps",
")",
")",
"sec",
",",
... | Convert a GPS time into a `datetime.datetime`.
Parameters
----------
gps : `LIGOTimeGPS`, `int`, `float`
GPS time to convert
Returns
-------
datetime : `datetime.datetime`
ISO-format datetime equivalent of input GPS time
Examples
--------
>>> from_gps(1167264018)
datetime.datetime(2017, 1, 1, 0, 0)
>>> from_gps(1126259462.3910)
datetime.datetime(2015, 9, 14, 9, 50, 45, 391000) | [
"Convert",
"a",
"GPS",
"time",
"into",
"a",
"datetime",
".",
"datetime",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/time/_tconvert.py#L176-L202 | train | 211,316 |
gwpy/gwpy | gwpy/time/_tconvert.py | _str_to_datetime | def _str_to_datetime(datestr):
"""Convert `str` to `datetime.datetime`.
"""
# try known string
try:
return DATE_STRINGS[str(datestr).lower()]()
except KeyError: # any other string
pass
# use maya
try:
import maya
return maya.when(datestr).datetime()
except ImportError:
pass
# use dateutil.parse
with warnings.catch_warnings():
# don't allow lazy passing of time-zones
warnings.simplefilter("error", RuntimeWarning)
try:
return dateparser.parse(datestr)
except RuntimeWarning:
raise ValueError("Cannot parse date string with timezone "
"without maya, please install maya")
except (ValueError, TypeError) as exc: # improve error reporting
exc.args = ("Cannot parse date string {0!r}: {1}".format(
datestr, exc.args[0]),)
raise | python | def _str_to_datetime(datestr):
"""Convert `str` to `datetime.datetime`.
"""
# try known string
try:
return DATE_STRINGS[str(datestr).lower()]()
except KeyError: # any other string
pass
# use maya
try:
import maya
return maya.when(datestr).datetime()
except ImportError:
pass
# use dateutil.parse
with warnings.catch_warnings():
# don't allow lazy passing of time-zones
warnings.simplefilter("error", RuntimeWarning)
try:
return dateparser.parse(datestr)
except RuntimeWarning:
raise ValueError("Cannot parse date string with timezone "
"without maya, please install maya")
except (ValueError, TypeError) as exc: # improve error reporting
exc.args = ("Cannot parse date string {0!r}: {1}".format(
datestr, exc.args[0]),)
raise | [
"def",
"_str_to_datetime",
"(",
"datestr",
")",
":",
"# try known string",
"try",
":",
"return",
"DATE_STRINGS",
"[",
"str",
"(",
"datestr",
")",
".",
"lower",
"(",
")",
"]",
"(",
")",
"except",
"KeyError",
":",
"# any other string",
"pass",
"# use maya",
"t... | Convert `str` to `datetime.datetime`. | [
"Convert",
"str",
"to",
"datetime",
".",
"datetime",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/time/_tconvert.py#L236-L264 | train | 211,317 |
gwpy/gwpy | gwpy/time/_tconvert.py | _time_to_gps | def _time_to_gps(time):
"""Convert a `Time` into `LIGOTimeGPS`.
This method uses `datetime.datetime` underneath, which restricts
to microsecond precision by design. This should probably be fixed...
Parameters
----------
time : `~astropy.time.Time`
formatted `Time` object to convert
Returns
-------
gps : `LIGOTimeGPS`
Nano-second precision `LIGOTimeGPS` time
"""
time = time.utc
date = time.datetime
micro = date.microsecond if isinstance(date, datetime.datetime) else 0
return LIGOTimeGPS(int(time.gps), int(micro*1e3)) | python | def _time_to_gps(time):
"""Convert a `Time` into `LIGOTimeGPS`.
This method uses `datetime.datetime` underneath, which restricts
to microsecond precision by design. This should probably be fixed...
Parameters
----------
time : `~astropy.time.Time`
formatted `Time` object to convert
Returns
-------
gps : `LIGOTimeGPS`
Nano-second precision `LIGOTimeGPS` time
"""
time = time.utc
date = time.datetime
micro = date.microsecond if isinstance(date, datetime.datetime) else 0
return LIGOTimeGPS(int(time.gps), int(micro*1e3)) | [
"def",
"_time_to_gps",
"(",
"time",
")",
":",
"time",
"=",
"time",
".",
"utc",
"date",
"=",
"time",
".",
"datetime",
"micro",
"=",
"date",
".",
"microsecond",
"if",
"isinstance",
"(",
"date",
",",
"datetime",
".",
"datetime",
")",
"else",
"0",
"return"... | Convert a `Time` into `LIGOTimeGPS`.
This method uses `datetime.datetime` underneath, which restricts
to microsecond precision by design. This should probably be fixed...
Parameters
----------
time : `~astropy.time.Time`
formatted `Time` object to convert
Returns
-------
gps : `LIGOTimeGPS`
Nano-second precision `LIGOTimeGPS` time | [
"Convert",
"a",
"Time",
"into",
"LIGOTimeGPS",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/time/_tconvert.py#L274-L293 | train | 211,318 |
gwpy/gwpy | gwpy/io/hdf5.py | with_read_hdf5 | def with_read_hdf5(func):
"""Decorate an HDF5-reading function to open a filepath if needed
``func`` should be written to presume an `h5py.Group` as the first
positional argument.
"""
@wraps(func)
def decorated_func(fobj, *args, **kwargs):
# pylint: disable=missing-docstring
if not isinstance(fobj, h5py.HLObject):
if isinstance(fobj, FILE_LIKE):
fobj = fobj.name
with h5py.File(fobj, 'r') as h5f:
return func(h5f, *args, **kwargs)
return func(fobj, *args, **kwargs)
return decorated_func | python | def with_read_hdf5(func):
"""Decorate an HDF5-reading function to open a filepath if needed
``func`` should be written to presume an `h5py.Group` as the first
positional argument.
"""
@wraps(func)
def decorated_func(fobj, *args, **kwargs):
# pylint: disable=missing-docstring
if not isinstance(fobj, h5py.HLObject):
if isinstance(fobj, FILE_LIKE):
fobj = fobj.name
with h5py.File(fobj, 'r') as h5f:
return func(h5f, *args, **kwargs)
return func(fobj, *args, **kwargs)
return decorated_func | [
"def",
"with_read_hdf5",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"decorated_func",
"(",
"fobj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=missing-docstring",
"if",
"not",
"isinstance",
"(",
"fobj",
",",
... | Decorate an HDF5-reading function to open a filepath if needed
``func`` should be written to presume an `h5py.Group` as the first
positional argument. | [
"Decorate",
"an",
"HDF5",
"-",
"reading",
"function",
"to",
"open",
"a",
"filepath",
"if",
"needed"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/hdf5.py#L46-L62 | train | 211,319 |
gwpy/gwpy | gwpy/io/hdf5.py | find_dataset | def find_dataset(h5o, path=None):
"""Find and return the relevant dataset inside the given H5 object
If ``path=None`` is given, and ``h5o`` contains a single dataset, that
will be returned
Parameters
----------
h5o : `h5py.File`, `h5py.Group`
the HDF5 object in which to search
path : `str`, optional
the path (relative to ``h5o``) of the desired data set
Returns
-------
dset : `h5py.Dataset`
the recovered dataset object
Raises
------
ValueError
if ``path=None`` and the HDF5 object contains multiple datasets
KeyError
if ``path`` is given but is not found within the HDF5 object
"""
# find dataset
if isinstance(h5o, h5py.Dataset):
return h5o
elif path is None and len(h5o) == 1:
path = list(h5o.keys())[0]
elif path is None:
raise ValueError("Please specify the HDF5 path via the "
"``path=`` keyword argument")
return h5o[path] | python | def find_dataset(h5o, path=None):
"""Find and return the relevant dataset inside the given H5 object
If ``path=None`` is given, and ``h5o`` contains a single dataset, that
will be returned
Parameters
----------
h5o : `h5py.File`, `h5py.Group`
the HDF5 object in which to search
path : `str`, optional
the path (relative to ``h5o``) of the desired data set
Returns
-------
dset : `h5py.Dataset`
the recovered dataset object
Raises
------
ValueError
if ``path=None`` and the HDF5 object contains multiple datasets
KeyError
if ``path`` is given but is not found within the HDF5 object
"""
# find dataset
if isinstance(h5o, h5py.Dataset):
return h5o
elif path is None and len(h5o) == 1:
path = list(h5o.keys())[0]
elif path is None:
raise ValueError("Please specify the HDF5 path via the "
"``path=`` keyword argument")
return h5o[path] | [
"def",
"find_dataset",
"(",
"h5o",
",",
"path",
"=",
"None",
")",
":",
"# find dataset",
"if",
"isinstance",
"(",
"h5o",
",",
"h5py",
".",
"Dataset",
")",
":",
"return",
"h5o",
"elif",
"path",
"is",
"None",
"and",
"len",
"(",
"h5o",
")",
"==",
"1",
... | Find and return the relevant dataset inside the given H5 object
If ``path=None`` is given, and ``h5o`` contains a single dataset, that
will be returned
Parameters
----------
h5o : `h5py.File`, `h5py.Group`
the HDF5 object in which to search
path : `str`, optional
the path (relative to ``h5o``) of the desired data set
Returns
-------
dset : `h5py.Dataset`
the recovered dataset object
Raises
------
ValueError
if ``path=None`` and the HDF5 object contains multiple datasets
KeyError
if ``path`` is given but is not found within the HDF5 object | [
"Find",
"and",
"return",
"the",
"relevant",
"dataset",
"inside",
"the",
"given",
"H5",
"object"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/hdf5.py#L65-L99 | train | 211,320 |
gwpy/gwpy | gwpy/io/hdf5.py | with_write_hdf5 | def with_write_hdf5(func):
"""Decorate an HDF5-writing function to open a filepath if needed
``func`` should be written to take the object to be written as the
first argument, and then presume an `h5py.Group` as the second.
This method uses keywords ``append`` and ``overwrite`` as follows if
the output file already exists:
- ``append=False, overwrite=False``: raise `~exceptions.IOError`
- ``append=True``: open in mode ``a``
- ``append=False, overwrite=True``: open in mode ``w``
"""
@wraps(func)
def decorated_func(obj, fobj, *args, **kwargs):
# pylint: disable=missing-docstring
if not isinstance(fobj, h5py.HLObject):
append = kwargs.get('append', False)
overwrite = kwargs.get('overwrite', False)
if os.path.exists(fobj) and not (overwrite or append):
raise IOError("File exists: %s" % fobj)
with h5py.File(fobj, 'a' if append else 'w') as h5f:
return func(obj, h5f, *args, **kwargs)
return func(obj, fobj, *args, **kwargs)
return decorated_func | python | def with_write_hdf5(func):
"""Decorate an HDF5-writing function to open a filepath if needed
``func`` should be written to take the object to be written as the
first argument, and then presume an `h5py.Group` as the second.
This method uses keywords ``append`` and ``overwrite`` as follows if
the output file already exists:
- ``append=False, overwrite=False``: raise `~exceptions.IOError`
- ``append=True``: open in mode ``a``
- ``append=False, overwrite=True``: open in mode ``w``
"""
@wraps(func)
def decorated_func(obj, fobj, *args, **kwargs):
# pylint: disable=missing-docstring
if not isinstance(fobj, h5py.HLObject):
append = kwargs.get('append', False)
overwrite = kwargs.get('overwrite', False)
if os.path.exists(fobj) and not (overwrite or append):
raise IOError("File exists: %s" % fobj)
with h5py.File(fobj, 'a' if append else 'w') as h5f:
return func(obj, h5f, *args, **kwargs)
return func(obj, fobj, *args, **kwargs)
return decorated_func | [
"def",
"with_write_hdf5",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"decorated_func",
"(",
"obj",
",",
"fobj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=missing-docstring",
"if",
"not",
"isinstance",
"(",
... | Decorate an HDF5-writing function to open a filepath if needed
``func`` should be written to take the object to be written as the
first argument, and then presume an `h5py.Group` as the second.
This method uses keywords ``append`` and ``overwrite`` as follows if
the output file already exists:
- ``append=False, overwrite=False``: raise `~exceptions.IOError`
- ``append=True``: open in mode ``a``
- ``append=False, overwrite=True``: open in mode ``w`` | [
"Decorate",
"an",
"HDF5",
"-",
"writing",
"function",
"to",
"open",
"a",
"filepath",
"if",
"needed"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/hdf5.py#L104-L129 | train | 211,321 |
gwpy/gwpy | gwpy/io/hdf5.py | create_dataset | def create_dataset(parent, path, overwrite=False, **kwargs):
"""Create a new dataset inside the parent HDF5 object
Parameters
----------
parent : `h5py.Group`, `h5py.File`
the object in which to create a new dataset
path : `str`
the path at which to create the new dataset
overwrite : `bool`
if `True`, delete any existing dataset at the desired path,
default: `False`
**kwargs
other arguments are passed directly to
:meth:`h5py.Group.create_dataset`
Returns
-------
dataset : `h5py.Dataset`
the newly created dataset
"""
# force deletion of existing dataset
if path in parent and overwrite:
del parent[path]
# create new dataset with improved error handling
try:
return parent.create_dataset(path, **kwargs)
except RuntimeError as exc:
if str(exc) == 'Unable to create link (Name already exists)':
exc.args = ('{0}: {1!r}, pass overwrite=True '
'to ignore existing datasets'.format(str(exc), path),)
raise | python | def create_dataset(parent, path, overwrite=False, **kwargs):
"""Create a new dataset inside the parent HDF5 object
Parameters
----------
parent : `h5py.Group`, `h5py.File`
the object in which to create a new dataset
path : `str`
the path at which to create the new dataset
overwrite : `bool`
if `True`, delete any existing dataset at the desired path,
default: `False`
**kwargs
other arguments are passed directly to
:meth:`h5py.Group.create_dataset`
Returns
-------
dataset : `h5py.Dataset`
the newly created dataset
"""
# force deletion of existing dataset
if path in parent and overwrite:
del parent[path]
# create new dataset with improved error handling
try:
return parent.create_dataset(path, **kwargs)
except RuntimeError as exc:
if str(exc) == 'Unable to create link (Name already exists)':
exc.args = ('{0}: {1!r}, pass overwrite=True '
'to ignore existing datasets'.format(str(exc), path),)
raise | [
"def",
"create_dataset",
"(",
"parent",
",",
"path",
",",
"overwrite",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# force deletion of existing dataset",
"if",
"path",
"in",
"parent",
"and",
"overwrite",
":",
"del",
"parent",
"[",
"path",
"]",
"# creat... | Create a new dataset inside the parent HDF5 object
Parameters
----------
parent : `h5py.Group`, `h5py.File`
the object in which to create a new dataset
path : `str`
the path at which to create the new dataset
overwrite : `bool`
if `True`, delete any existing dataset at the desired path,
default: `False`
**kwargs
other arguments are passed directly to
:meth:`h5py.Group.create_dataset`
Returns
-------
dataset : `h5py.Dataset`
the newly created dataset | [
"Create",
"a",
"new",
"dataset",
"inside",
"the",
"parent",
"HDF5",
"object"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/hdf5.py#L132-L167 | train | 211,322 |
gwpy/gwpy | gwpy/table/io/sql.py | format_db_selection | def format_db_selection(selection, engine=None):
"""Format a column filter selection as a SQL database WHERE string
"""
# parse selection for SQL query
if selection is None:
return ''
selections = []
for col, op_, value in parse_column_filters(selection):
if engine and engine.name == 'postgresql':
col = '"%s"' % col
try:
opstr = [key for key in OPERATORS if OPERATORS[key] is op_][0]
except KeyError:
raise ValueError("Cannot format database 'WHERE' command with "
"selection operator %r" % op_)
selections.append('{0} {1} {2!r}'.format(col, opstr, value))
if selections:
return 'WHERE %s' % ' AND '.join(selections)
return '' | python | def format_db_selection(selection, engine=None):
"""Format a column filter selection as a SQL database WHERE string
"""
# parse selection for SQL query
if selection is None:
return ''
selections = []
for col, op_, value in parse_column_filters(selection):
if engine and engine.name == 'postgresql':
col = '"%s"' % col
try:
opstr = [key for key in OPERATORS if OPERATORS[key] is op_][0]
except KeyError:
raise ValueError("Cannot format database 'WHERE' command with "
"selection operator %r" % op_)
selections.append('{0} {1} {2!r}'.format(col, opstr, value))
if selections:
return 'WHERE %s' % ' AND '.join(selections)
return '' | [
"def",
"format_db_selection",
"(",
"selection",
",",
"engine",
"=",
"None",
")",
":",
"# parse selection for SQL query",
"if",
"selection",
"is",
"None",
":",
"return",
"''",
"selections",
"=",
"[",
"]",
"for",
"col",
",",
"op_",
",",
"value",
"in",
"parse_c... | Format a column filter selection as a SQL database WHERE string | [
"Format",
"a",
"column",
"filter",
"selection",
"as",
"a",
"SQL",
"database",
"WHERE",
"string"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/sql.py#L29-L47 | train | 211,323 |
gwpy/gwpy | gwpy/table/io/sql.py | fetch | def fetch(engine, tablename, columns=None, selection=None, **kwargs):
"""Fetch data from an SQL table into an `EventTable`
Parameters
----------
engine : `sqlalchemy.engine.Engine`
the database engine to use when connecting
table : `str`,
The name of table you are attempting to receive triggers
from.
selection
other filters you would like to supply
underlying reader method for the given format
.. note::
For now it will attempt to automatically connect you
to a specific DB. In the future, this may be an input
argument.
Returns
-------
table : `GravitySpyTable`
"""
import pandas as pd
# parse columns for SQL query
if columns is None:
columnstr = '*'
else:
columnstr = ', '.join('"%s"' % c for c in columns)
# parse selection for SQL query
selectionstr = format_db_selection(selection, engine=engine)
# build SQL query
qstr = 'SELECT %s FROM %s %s' % (columnstr, tablename, selectionstr)
# perform query
tab = pd.read_sql(qstr, engine, **kwargs)
# Convert unicode columns to string
types = tab.apply(lambda x: pd.api.types.infer_dtype(x.values))
if not tab.empty:
for col in types[types == 'unicode'].index:
tab[col] = tab[col].astype(str)
return Table.from_pandas(tab).filled() | python | def fetch(engine, tablename, columns=None, selection=None, **kwargs):
"""Fetch data from an SQL table into an `EventTable`
Parameters
----------
engine : `sqlalchemy.engine.Engine`
the database engine to use when connecting
table : `str`,
The name of table you are attempting to receive triggers
from.
selection
other filters you would like to supply
underlying reader method for the given format
.. note::
For now it will attempt to automatically connect you
to a specific DB. In the future, this may be an input
argument.
Returns
-------
table : `GravitySpyTable`
"""
import pandas as pd
# parse columns for SQL query
if columns is None:
columnstr = '*'
else:
columnstr = ', '.join('"%s"' % c for c in columns)
# parse selection for SQL query
selectionstr = format_db_selection(selection, engine=engine)
# build SQL query
qstr = 'SELECT %s FROM %s %s' % (columnstr, tablename, selectionstr)
# perform query
tab = pd.read_sql(qstr, engine, **kwargs)
# Convert unicode columns to string
types = tab.apply(lambda x: pd.api.types.infer_dtype(x.values))
if not tab.empty:
for col in types[types == 'unicode'].index:
tab[col] = tab[col].astype(str)
return Table.from_pandas(tab).filled() | [
"def",
"fetch",
"(",
"engine",
",",
"tablename",
",",
"columns",
"=",
"None",
",",
"selection",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"pandas",
"as",
"pd",
"# parse columns for SQL query",
"if",
"columns",
"is",
"None",
":",
"columnstr"... | Fetch data from an SQL table into an `EventTable`
Parameters
----------
engine : `sqlalchemy.engine.Engine`
the database engine to use when connecting
table : `str`,
The name of table you are attempting to receive triggers
from.
selection
other filters you would like to supply
underlying reader method for the given format
.. note::
For now it will attempt to automatically connect you
to a specific DB. In the future, this may be an input
argument.
Returns
-------
table : `GravitySpyTable` | [
"Fetch",
"data",
"from",
"an",
"SQL",
"table",
"into",
"an",
"EventTable"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/sql.py#L50-L100 | train | 211,324 |
gwpy/gwpy | gwpy/signal/qtransform.py | q_scan | def q_scan(data, mismatch=DEFAULT_MISMATCH, qrange=DEFAULT_QRANGE,
frange=DEFAULT_FRANGE, duration=None, sampling=None,
**kwargs):
"""Transform data by scanning over a `QTiling`
This utility is provided mainly to allow direct manipulation of the
`QTiling.transform` output. Most users probably just want to use
:meth:`~gwpy.timeseries.TimeSeries.q_transform`, which wraps around this.
Parameters
----------
data : `~gwpy.timeseries.TimeSeries` or `ndarray`
the time- or frequency-domain input data
mismatch : `float`, optional
maximum allowed fractional mismatch between neighbouring tiles
qrange : `tuple` of `float`, optional
`(low, high)` range of Qs to scan
frange : `tuple` of `float`, optional
`(low, high)` range of frequencies to scan
duration : `float`, optional
duration (seconds) of input, required if `data` is not a `TimeSeries`
sampling : `float`, optional
sample rate (Hertz) of input, required if `data` is not a `TimeSeries`
**kwargs
other keyword arguments to be passed to :meth:`QTiling.transform`,
including ``'epoch'`` and ``'search'``
Returns
-------
qgram : `QGram`
the raw output of :meth:`QTiling.transform`
far : `float`
expected false alarm rate (Hertz) of white Gaussian noise with the
same peak energy and total duration as `qgram`
"""
from gwpy.timeseries import TimeSeries
# prepare input
if isinstance(data, TimeSeries):
duration = abs(data.span)
sampling = data.sample_rate.to('Hz').value
kwargs.update({'epoch': data.t0.value})
data = data.fft().value
# return a raw Q-transform and its significance
qgram, N = QTiling(duration, sampling, mismatch=mismatch, qrange=qrange,
frange=frange).transform(data, **kwargs)
far = 1.5 * N * numpy.exp(-qgram.peak['energy']) / duration
return (qgram, far) | python | def q_scan(data, mismatch=DEFAULT_MISMATCH, qrange=DEFAULT_QRANGE,
frange=DEFAULT_FRANGE, duration=None, sampling=None,
**kwargs):
"""Transform data by scanning over a `QTiling`
This utility is provided mainly to allow direct manipulation of the
`QTiling.transform` output. Most users probably just want to use
:meth:`~gwpy.timeseries.TimeSeries.q_transform`, which wraps around this.
Parameters
----------
data : `~gwpy.timeseries.TimeSeries` or `ndarray`
the time- or frequency-domain input data
mismatch : `float`, optional
maximum allowed fractional mismatch between neighbouring tiles
qrange : `tuple` of `float`, optional
`(low, high)` range of Qs to scan
frange : `tuple` of `float`, optional
`(low, high)` range of frequencies to scan
duration : `float`, optional
duration (seconds) of input, required if `data` is not a `TimeSeries`
sampling : `float`, optional
sample rate (Hertz) of input, required if `data` is not a `TimeSeries`
**kwargs
other keyword arguments to be passed to :meth:`QTiling.transform`,
including ``'epoch'`` and ``'search'``
Returns
-------
qgram : `QGram`
the raw output of :meth:`QTiling.transform`
far : `float`
expected false alarm rate (Hertz) of white Gaussian noise with the
same peak energy and total duration as `qgram`
"""
from gwpy.timeseries import TimeSeries
# prepare input
if isinstance(data, TimeSeries):
duration = abs(data.span)
sampling = data.sample_rate.to('Hz').value
kwargs.update({'epoch': data.t0.value})
data = data.fft().value
# return a raw Q-transform and its significance
qgram, N = QTiling(duration, sampling, mismatch=mismatch, qrange=qrange,
frange=frange).transform(data, **kwargs)
far = 1.5 * N * numpy.exp(-qgram.peak['energy']) / duration
return (qgram, far) | [
"def",
"q_scan",
"(",
"data",
",",
"mismatch",
"=",
"DEFAULT_MISMATCH",
",",
"qrange",
"=",
"DEFAULT_QRANGE",
",",
"frange",
"=",
"DEFAULT_FRANGE",
",",
"duration",
"=",
"None",
",",
"sampling",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"gw... | Transform data by scanning over a `QTiling`
This utility is provided mainly to allow direct manipulation of the
`QTiling.transform` output. Most users probably just want to use
:meth:`~gwpy.timeseries.TimeSeries.q_transform`, which wraps around this.
Parameters
----------
data : `~gwpy.timeseries.TimeSeries` or `ndarray`
the time- or frequency-domain input data
mismatch : `float`, optional
maximum allowed fractional mismatch between neighbouring tiles
qrange : `tuple` of `float`, optional
`(low, high)` range of Qs to scan
frange : `tuple` of `float`, optional
`(low, high)` range of frequencies to scan
duration : `float`, optional
duration (seconds) of input, required if `data` is not a `TimeSeries`
sampling : `float`, optional
sample rate (Hertz) of input, required if `data` is not a `TimeSeries`
**kwargs
other keyword arguments to be passed to :meth:`QTiling.transform`,
including ``'epoch'`` and ``'search'``
Returns
-------
qgram : `QGram`
the raw output of :meth:`QTiling.transform`
far : `float`
expected false alarm rate (Hertz) of white Gaussian noise with the
same peak energy and total duration as `qgram` | [
"Transform",
"data",
"by",
"scanning",
"over",
"a",
"QTiling"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/qtransform.py#L635-L688 | train | 211,325 |
gwpy/gwpy | gwpy/signal/qtransform.py | QTiling._iter_qs | def _iter_qs(self):
"""Iterate over the Q values
"""
# work out how many Qs we need
cumum = log(self.qrange[1] / self.qrange[0]) / 2**(1/2.)
nplanes = int(max(ceil(cumum / self.deltam), 1))
dq = cumum / nplanes # pylint: disable=invalid-name
for i in xrange(nplanes):
yield self.qrange[0] * exp(2**(1/2.) * dq * (i + .5)) | python | def _iter_qs(self):
"""Iterate over the Q values
"""
# work out how many Qs we need
cumum = log(self.qrange[1] / self.qrange[0]) / 2**(1/2.)
nplanes = int(max(ceil(cumum / self.deltam), 1))
dq = cumum / nplanes # pylint: disable=invalid-name
for i in xrange(nplanes):
yield self.qrange[0] * exp(2**(1/2.) * dq * (i + .5)) | [
"def",
"_iter_qs",
"(",
"self",
")",
":",
"# work out how many Qs we need",
"cumum",
"=",
"log",
"(",
"self",
".",
"qrange",
"[",
"1",
"]",
"/",
"self",
".",
"qrange",
"[",
"0",
"]",
")",
"/",
"2",
"**",
"(",
"1",
"/",
"2.",
")",
"nplanes",
"=",
... | Iterate over the Q values | [
"Iterate",
"over",
"the",
"Q",
"values"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/qtransform.py#L147-L155 | train | 211,326 |
gwpy/gwpy | gwpy/signal/qtransform.py | QTiling.transform | def transform(self, fseries, **kwargs):
"""Compute the time-frequency plane at fixed Q with the most
significant tile
Parameters
----------
fseries : `~gwpy.timeseries.FrequencySeries`
the complex FFT of a time-series data set
**kwargs
other keyword arguments to pass to `QPlane.transform`
Returns
-------
out : `QGram`
signal energies over the time-frequency plane containing the most
significant tile
N : `int`
estimated number of statistically independent tiles
See Also
--------
QPlane.transform
compute the Q-transform over a single time-frequency plane
"""
weight = 1 + numpy.log10(self.qrange[1]/self.qrange[0]) / numpy.sqrt(2)
nind, nplanes, peak, result = (0, 0, 0, None)
# identify the plane with the loudest tile
for plane in self:
nplanes += 1
nind += sum([1 + row.ntiles * row.deltam for row in plane])
result = plane.transform(fseries, **kwargs)
if result.peak['energy'] > peak:
out = result
peak = out.peak['energy']
return (out, nind * weight / nplanes) | python | def transform(self, fseries, **kwargs):
"""Compute the time-frequency plane at fixed Q with the most
significant tile
Parameters
----------
fseries : `~gwpy.timeseries.FrequencySeries`
the complex FFT of a time-series data set
**kwargs
other keyword arguments to pass to `QPlane.transform`
Returns
-------
out : `QGram`
signal energies over the time-frequency plane containing the most
significant tile
N : `int`
estimated number of statistically independent tiles
See Also
--------
QPlane.transform
compute the Q-transform over a single time-frequency plane
"""
weight = 1 + numpy.log10(self.qrange[1]/self.qrange[0]) / numpy.sqrt(2)
nind, nplanes, peak, result = (0, 0, 0, None)
# identify the plane with the loudest tile
for plane in self:
nplanes += 1
nind += sum([1 + row.ntiles * row.deltam for row in plane])
result = plane.transform(fseries, **kwargs)
if result.peak['energy'] > peak:
out = result
peak = out.peak['energy']
return (out, nind * weight / nplanes) | [
"def",
"transform",
"(",
"self",
",",
"fseries",
",",
"*",
"*",
"kwargs",
")",
":",
"weight",
"=",
"1",
"+",
"numpy",
".",
"log10",
"(",
"self",
".",
"qrange",
"[",
"1",
"]",
"/",
"self",
".",
"qrange",
"[",
"0",
"]",
")",
"/",
"numpy",
".",
... | Compute the time-frequency plane at fixed Q with the most
significant tile
Parameters
----------
fseries : `~gwpy.timeseries.FrequencySeries`
the complex FFT of a time-series data set
**kwargs
other keyword arguments to pass to `QPlane.transform`
Returns
-------
out : `QGram`
signal energies over the time-frequency plane containing the most
significant tile
N : `int`
estimated number of statistically independent tiles
See Also
--------
QPlane.transform
compute the Q-transform over a single time-frequency plane | [
"Compute",
"the",
"time",
"-",
"frequency",
"plane",
"at",
"fixed",
"Q",
"with",
"the",
"most",
"significant",
"tile"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/qtransform.py#L166-L202 | train | 211,327 |
gwpy/gwpy | gwpy/signal/qtransform.py | QPlane.farray | def farray(self):
"""Array of frequencies for the lower-edge of each frequency bin
:type: `numpy.ndarray`
"""
bandwidths = 2 * pi ** (1/2.) * self.frequencies / self.q
return self.frequencies - bandwidths / 2. | python | def farray(self):
"""Array of frequencies for the lower-edge of each frequency bin
:type: `numpy.ndarray`
"""
bandwidths = 2 * pi ** (1/2.) * self.frequencies / self.q
return self.frequencies - bandwidths / 2. | [
"def",
"farray",
"(",
"self",
")",
":",
"bandwidths",
"=",
"2",
"*",
"pi",
"**",
"(",
"1",
"/",
"2.",
")",
"*",
"self",
".",
"frequencies",
"/",
"self",
".",
"q",
"return",
"self",
".",
"frequencies",
"-",
"bandwidths",
"/",
"2."
] | Array of frequencies for the lower-edge of each frequency bin
:type: `numpy.ndarray` | [
"Array",
"of",
"frequencies",
"for",
"the",
"lower",
"-",
"edge",
"of",
"each",
"frequency",
"bin"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/qtransform.py#L272-L278 | train | 211,328 |
gwpy/gwpy | gwpy/signal/qtransform.py | QTile.ntiles | def ntiles(self):
"""The number of tiles in this row
:type: `int`
"""
tcum_mismatch = self.duration * 2 * pi * self.frequency / self.q
return next_power_of_two(tcum_mismatch / self.deltam) | python | def ntiles(self):
"""The number of tiles in this row
:type: `int`
"""
tcum_mismatch = self.duration * 2 * pi * self.frequency / self.q
return next_power_of_two(tcum_mismatch / self.deltam) | [
"def",
"ntiles",
"(",
"self",
")",
":",
"tcum_mismatch",
"=",
"self",
".",
"duration",
"*",
"2",
"*",
"pi",
"*",
"self",
".",
"frequency",
"/",
"self",
".",
"q",
"return",
"next_power_of_two",
"(",
"tcum_mismatch",
"/",
"self",
".",
"deltam",
")"
] | The number of tiles in this row
:type: `int` | [
"The",
"number",
"of",
"tiles",
"in",
"this",
"row"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/qtransform.py#L344-L350 | train | 211,329 |
gwpy/gwpy | gwpy/signal/qtransform.py | QTile.get_window | def get_window(self):
"""Generate the bi-square window for this row
Returns
-------
window : `numpy.ndarray`
"""
# real frequencies
wfrequencies = self._get_indices() / self.duration
# dimensionless frequencies
xfrequencies = wfrequencies * self.qprime / self.frequency
# normalize and generate bi-square window
norm = self.ntiles / (self.duration * self.sampling) * (
315 * self.qprime / (128 * self.frequency)) ** (1/2.)
return (1 - xfrequencies ** 2) ** 2 * norm | python | def get_window(self):
"""Generate the bi-square window for this row
Returns
-------
window : `numpy.ndarray`
"""
# real frequencies
wfrequencies = self._get_indices() / self.duration
# dimensionless frequencies
xfrequencies = wfrequencies * self.qprime / self.frequency
# normalize and generate bi-square window
norm = self.ntiles / (self.duration * self.sampling) * (
315 * self.qprime / (128 * self.frequency)) ** (1/2.)
return (1 - xfrequencies ** 2) ** 2 * norm | [
"def",
"get_window",
"(",
"self",
")",
":",
"# real frequencies",
"wfrequencies",
"=",
"self",
".",
"_get_indices",
"(",
")",
"/",
"self",
".",
"duration",
"# dimensionless frequencies",
"xfrequencies",
"=",
"wfrequencies",
"*",
"self",
".",
"qprime",
"/",
"self... | Generate the bi-square window for this row
Returns
-------
window : `numpy.ndarray` | [
"Generate",
"the",
"bi",
"-",
"square",
"window",
"for",
"this",
"row"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/qtransform.py#L364-L378 | train | 211,330 |
gwpy/gwpy | gwpy/signal/qtransform.py | QTile.get_data_indices | def get_data_indices(self):
"""Returns the index array of interesting frequencies for this row
"""
return numpy.round(self._get_indices() + 1 +
self.frequency * self.duration).astype(int) | python | def get_data_indices(self):
"""Returns the index array of interesting frequencies for this row
"""
return numpy.round(self._get_indices() + 1 +
self.frequency * self.duration).astype(int) | [
"def",
"get_data_indices",
"(",
"self",
")",
":",
"return",
"numpy",
".",
"round",
"(",
"self",
".",
"_get_indices",
"(",
")",
"+",
"1",
"+",
"self",
".",
"frequency",
"*",
"self",
".",
"duration",
")",
".",
"astype",
"(",
"int",
")"
] | Returns the index array of interesting frequencies for this row | [
"Returns",
"the",
"index",
"array",
"of",
"interesting",
"frequencies",
"for",
"this",
"row"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/qtransform.py#L380-L384 | train | 211,331 |
gwpy/gwpy | gwpy/signal/qtransform.py | QGram.interpolate | def interpolate(self, tres="<default>", fres="<default>", logf=False,
outseg=None):
"""Interpolate this `QGram` over a regularly-gridded spectrogram
Parameters
----------
tres : `float`, optional
desired time resolution (seconds) of output `Spectrogram`,
default is `abs(outseg) / 1000.`
fres : `float`, `int`, `None`, optional
desired frequency resolution (Hertz) of output `Spectrogram`,
or, if ``logf=True``, the number of frequency samples;
give `None` to skip this step and return the original resolution,
default is 0.5 Hz or 500 frequency samples
logf : `bool`, optional
boolean switch to enable (`True`) or disable (`False`) use of
log-sampled frequencies in the output `Spectrogram`
outseg : `~gwpy.segments.Segment`, optional
GPS `[start, stop)` segment for output `Spectrogram`,
default is the full duration of the input
Returns
-------
out : `~gwpy.spectrogram.Spectrogram`
output `Spectrogram` of normalised Q energy
See Also
--------
scipy.interpolate
this method uses `~scipy.interpolate.InterpolatedUnivariateSpline`
to cast all frequency rows to a common time-axis, and then
`~scipy.interpolate.interp2d` to apply the desired frequency
resolution across the band
Notes
-----
This method will return a `Spectrogram` of dtype ``float32`` if
``norm`` is given, and ``float64`` otherwise.
To optimize plot rendering with `~matplotlib.axes.Axes.pcolormesh`,
the output `~gwpy.spectrogram.Spectrogram` can be given a log-sampled
frequency axis by passing `logf=True` at runtime. The `fres` argument
is then the number of points on the frequency axis. Note, this is
incompatible with `~matplotlib.axes.Axes.imshow`.
It is also highly recommended to use the `outseg` keyword argument
when only a small window around a given GPS time is of interest.
"""
from scipy.interpolate import (interp2d, InterpolatedUnivariateSpline)
from ..spectrogram import Spectrogram
if outseg is None:
outseg = self.energies[0].span
frequencies = self.plane.frequencies
dtype = self.energies[0].dtype
# build regular Spectrogram from peak-Q data by interpolating each
# (Q, frequency) `TimeSeries` to have the same time resolution
if tres == "<default>":
tres = abs(Segment(outseg)) / 1000.
xout = numpy.arange(*outseg, step=tres)
nx = xout.size
ny = frequencies.size
out = Spectrogram(numpy.empty((nx, ny), dtype=dtype),
t0=outseg[0], dt=tres, frequencies=frequencies)
# record Q in output
out.q = self.plane.q
# interpolate rows
for i, row in enumerate(self.energies):
xrow = numpy.arange(row.x0.value, (row.x0 + row.duration).value,
row.dx.value)
interp = InterpolatedUnivariateSpline(xrow, row.value)
out[:, i] = interp(xout).astype(dtype, casting="same_kind",
copy=False)
if fres is None:
return out
# interpolate the spectrogram to increase its frequency resolution
# --- this is done because Duncan doesn't like interpolated images
# since they don't support log scaling
interp = interp2d(xout, frequencies, out.value.T, kind='cubic')
if not logf:
if fres == "<default>":
fres = .5
outfreq = numpy.arange(
self.plane.frange[0], self.plane.frange[1], fres,
dtype=dtype)
else:
if fres == "<default>":
fres = 500
# using `~numpy.logspace` here to support numpy-1.7.1 for EPEL7,
# but numpy-1.12.0 introduced the function `~numpy.geomspace`
logfmin = numpy.log10(self.plane.frange[0])
logfmax = numpy.log10(self.plane.frange[1])
outfreq = numpy.logspace(logfmin, logfmax, num=int(fres))
new = type(out)(
interp(xout, outfreq).T.astype(
dtype, casting="same_kind", copy=False),
t0=outseg[0], dt=tres, frequencies=outfreq,
)
new.q = self.plane.q
return new | python | def interpolate(self, tres="<default>", fres="<default>", logf=False,
outseg=None):
"""Interpolate this `QGram` over a regularly-gridded spectrogram
Parameters
----------
tres : `float`, optional
desired time resolution (seconds) of output `Spectrogram`,
default is `abs(outseg) / 1000.`
fres : `float`, `int`, `None`, optional
desired frequency resolution (Hertz) of output `Spectrogram`,
or, if ``logf=True``, the number of frequency samples;
give `None` to skip this step and return the original resolution,
default is 0.5 Hz or 500 frequency samples
logf : `bool`, optional
boolean switch to enable (`True`) or disable (`False`) use of
log-sampled frequencies in the output `Spectrogram`
outseg : `~gwpy.segments.Segment`, optional
GPS `[start, stop)` segment for output `Spectrogram`,
default is the full duration of the input
Returns
-------
out : `~gwpy.spectrogram.Spectrogram`
output `Spectrogram` of normalised Q energy
See Also
--------
scipy.interpolate
this method uses `~scipy.interpolate.InterpolatedUnivariateSpline`
to cast all frequency rows to a common time-axis, and then
`~scipy.interpolate.interp2d` to apply the desired frequency
resolution across the band
Notes
-----
This method will return a `Spectrogram` of dtype ``float32`` if
``norm`` is given, and ``float64`` otherwise.
To optimize plot rendering with `~matplotlib.axes.Axes.pcolormesh`,
the output `~gwpy.spectrogram.Spectrogram` can be given a log-sampled
frequency axis by passing `logf=True` at runtime. The `fres` argument
is then the number of points on the frequency axis. Note, this is
incompatible with `~matplotlib.axes.Axes.imshow`.
It is also highly recommended to use the `outseg` keyword argument
when only a small window around a given GPS time is of interest.
"""
from scipy.interpolate import (interp2d, InterpolatedUnivariateSpline)
from ..spectrogram import Spectrogram
if outseg is None:
outseg = self.energies[0].span
frequencies = self.plane.frequencies
dtype = self.energies[0].dtype
# build regular Spectrogram from peak-Q data by interpolating each
# (Q, frequency) `TimeSeries` to have the same time resolution
if tres == "<default>":
tres = abs(Segment(outseg)) / 1000.
xout = numpy.arange(*outseg, step=tres)
nx = xout.size
ny = frequencies.size
out = Spectrogram(numpy.empty((nx, ny), dtype=dtype),
t0=outseg[0], dt=tres, frequencies=frequencies)
# record Q in output
out.q = self.plane.q
# interpolate rows
for i, row in enumerate(self.energies):
xrow = numpy.arange(row.x0.value, (row.x0 + row.duration).value,
row.dx.value)
interp = InterpolatedUnivariateSpline(xrow, row.value)
out[:, i] = interp(xout).astype(dtype, casting="same_kind",
copy=False)
if fres is None:
return out
# interpolate the spectrogram to increase its frequency resolution
# --- this is done because Duncan doesn't like interpolated images
# since they don't support log scaling
interp = interp2d(xout, frequencies, out.value.T, kind='cubic')
if not logf:
if fres == "<default>":
fres = .5
outfreq = numpy.arange(
self.plane.frange[0], self.plane.frange[1], fres,
dtype=dtype)
else:
if fres == "<default>":
fres = 500
# using `~numpy.logspace` here to support numpy-1.7.1 for EPEL7,
# but numpy-1.12.0 introduced the function `~numpy.geomspace`
logfmin = numpy.log10(self.plane.frange[0])
logfmax = numpy.log10(self.plane.frange[1])
outfreq = numpy.logspace(logfmin, logfmax, num=int(fres))
new = type(out)(
interp(xout, outfreq).T.astype(
dtype, casting="same_kind", copy=False),
t0=outseg[0], dt=tres, frequencies=outfreq,
)
new.q = self.plane.q
return new | [
"def",
"interpolate",
"(",
"self",
",",
"tres",
"=",
"\"<default>\"",
",",
"fres",
"=",
"\"<default>\"",
",",
"logf",
"=",
"False",
",",
"outseg",
"=",
"None",
")",
":",
"from",
"scipy",
".",
"interpolate",
"import",
"(",
"interp2d",
",",
"InterpolatedUniv... | Interpolate this `QGram` over a regularly-gridded spectrogram
Parameters
----------
tres : `float`, optional
desired time resolution (seconds) of output `Spectrogram`,
default is `abs(outseg) / 1000.`
fres : `float`, `int`, `None`, optional
desired frequency resolution (Hertz) of output `Spectrogram`,
or, if ``logf=True``, the number of frequency samples;
give `None` to skip this step and return the original resolution,
default is 0.5 Hz or 500 frequency samples
logf : `bool`, optional
boolean switch to enable (`True`) or disable (`False`) use of
log-sampled frequencies in the output `Spectrogram`
outseg : `~gwpy.segments.Segment`, optional
GPS `[start, stop)` segment for output `Spectrogram`,
default is the full duration of the input
Returns
-------
out : `~gwpy.spectrogram.Spectrogram`
output `Spectrogram` of normalised Q energy
See Also
--------
scipy.interpolate
this method uses `~scipy.interpolate.InterpolatedUnivariateSpline`
to cast all frequency rows to a common time-axis, and then
`~scipy.interpolate.interp2d` to apply the desired frequency
resolution across the band
Notes
-----
This method will return a `Spectrogram` of dtype ``float32`` if
``norm`` is given, and ``float64`` otherwise.
To optimize plot rendering with `~matplotlib.axes.Axes.pcolormesh`,
the output `~gwpy.spectrogram.Spectrogram` can be given a log-sampled
frequency axis by passing `logf=True` at runtime. The `fres` argument
is then the number of points on the frequency axis. Note, this is
incompatible with `~matplotlib.axes.Axes.imshow`.
It is also highly recommended to use the `outseg` keyword argument
when only a small window around a given GPS time is of interest. | [
"Interpolate",
"this",
"QGram",
"over",
"a",
"regularly",
"-",
"gridded",
"spectrogram"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/qtransform.py#L482-L583 | train | 211,332 |
gwpy/gwpy | gwpy/signal/qtransform.py | QGram.table | def table(self, snrthresh=5.5):
"""Represent this `QPlane` as an `EventTable`
Parameters
----------
snrthresh : `float`, optional
lower inclusive threshold on individual tile SNR to keep in the
table, default: 5.5
Returns
-------
out : `~gwpy.table.EventTable`
a table of time-frequency tiles on this `QPlane`
Notes
-----
Only tiles with signal energy greater than or equal to
`snrthresh ** 2 / 2` will be stored in the output `EventTable`.
"""
from ..table import EventTable
# get plane properties
freqs = self.plane.frequencies
bws = 2 * (freqs - self.plane.farray)
# collect table data as a recarray
names = ('time', 'frequency', 'duration', 'bandwidth', 'energy')
rec = numpy.recarray((0,), names=names, formats=['f8'] * len(names))
for f, bw, row in zip(freqs, bws, self.energies):
ind, = (row.value >= snrthresh ** 2 / 2.).nonzero()
new = ind.size
if new > 0:
rec.resize((rec.size + new,), refcheck=False)
rec['time'][-new:] = row.times.value[ind]
rec['frequency'][-new:] = f
rec['duration'][-new:] = row.dt.to('s').value
rec['bandwidth'][-new:] = bw
rec['energy'][-new:] = row.value[ind]
# save to a table
out = EventTable(rec, copy=False)
out.meta['q'] = self.plane.q
return out | python | def table(self, snrthresh=5.5):
"""Represent this `QPlane` as an `EventTable`
Parameters
----------
snrthresh : `float`, optional
lower inclusive threshold on individual tile SNR to keep in the
table, default: 5.5
Returns
-------
out : `~gwpy.table.EventTable`
a table of time-frequency tiles on this `QPlane`
Notes
-----
Only tiles with signal energy greater than or equal to
`snrthresh ** 2 / 2` will be stored in the output `EventTable`.
"""
from ..table import EventTable
# get plane properties
freqs = self.plane.frequencies
bws = 2 * (freqs - self.plane.farray)
# collect table data as a recarray
names = ('time', 'frequency', 'duration', 'bandwidth', 'energy')
rec = numpy.recarray((0,), names=names, formats=['f8'] * len(names))
for f, bw, row in zip(freqs, bws, self.energies):
ind, = (row.value >= snrthresh ** 2 / 2.).nonzero()
new = ind.size
if new > 0:
rec.resize((rec.size + new,), refcheck=False)
rec['time'][-new:] = row.times.value[ind]
rec['frequency'][-new:] = f
rec['duration'][-new:] = row.dt.to('s').value
rec['bandwidth'][-new:] = bw
rec['energy'][-new:] = row.value[ind]
# save to a table
out = EventTable(rec, copy=False)
out.meta['q'] = self.plane.q
return out | [
"def",
"table",
"(",
"self",
",",
"snrthresh",
"=",
"5.5",
")",
":",
"from",
".",
".",
"table",
"import",
"EventTable",
"# get plane properties",
"freqs",
"=",
"self",
".",
"plane",
".",
"frequencies",
"bws",
"=",
"2",
"*",
"(",
"freqs",
"-",
"self",
"... | Represent this `QPlane` as an `EventTable`
Parameters
----------
snrthresh : `float`, optional
lower inclusive threshold on individual tile SNR to keep in the
table, default: 5.5
Returns
-------
out : `~gwpy.table.EventTable`
a table of time-frequency tiles on this `QPlane`
Notes
-----
Only tiles with signal energy greater than or equal to
`snrthresh ** 2 / 2` will be stored in the output `EventTable`. | [
"Represent",
"this",
"QPlane",
"as",
"an",
"EventTable"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/qtransform.py#L585-L624 | train | 211,333 |
gwpy/gwpy | gwpy/types/index.py | Index.define | def define(cls, start, step, num, dtype=None):
"""Define a new `Index`.
The output is basically::
start + numpy.arange(num) * step
Parameters
----------
start : `Number`
The starting value of the index.
step : `Number`
The step size of the index.
num : `int`
The size of the index (number of samples).
dtype : `numpy.dtype`, `None`, optional
The desired dtype of the index, if not given, defaults
to the higher-precision dtype from ``start`` and ``step``.
Returns
-------
index : `Index`
A new `Index` created from the given parameters.
"""
if dtype is None:
dtype = max(
numpy.array(start, subok=True, copy=False).dtype,
numpy.array(step, subok=True, copy=False).dtype,
)
start = start.astype(dtype, copy=False)
step = step.astype(dtype, copy=False)
return cls(start + numpy.arange(num, dtype=dtype) * step, copy=False) | python | def define(cls, start, step, num, dtype=None):
"""Define a new `Index`.
The output is basically::
start + numpy.arange(num) * step
Parameters
----------
start : `Number`
The starting value of the index.
step : `Number`
The step size of the index.
num : `int`
The size of the index (number of samples).
dtype : `numpy.dtype`, `None`, optional
The desired dtype of the index, if not given, defaults
to the higher-precision dtype from ``start`` and ``step``.
Returns
-------
index : `Index`
A new `Index` created from the given parameters.
"""
if dtype is None:
dtype = max(
numpy.array(start, subok=True, copy=False).dtype,
numpy.array(step, subok=True, copy=False).dtype,
)
start = start.astype(dtype, copy=False)
step = step.astype(dtype, copy=False)
return cls(start + numpy.arange(num, dtype=dtype) * step, copy=False) | [
"def",
"define",
"(",
"cls",
",",
"start",
",",
"step",
",",
"num",
",",
"dtype",
"=",
"None",
")",
":",
"if",
"dtype",
"is",
"None",
":",
"dtype",
"=",
"max",
"(",
"numpy",
".",
"array",
"(",
"start",
",",
"subok",
"=",
"True",
",",
"copy",
"=... | Define a new `Index`.
The output is basically::
start + numpy.arange(num) * step
Parameters
----------
start : `Number`
The starting value of the index.
step : `Number`
The step size of the index.
num : `int`
The size of the index (number of samples).
dtype : `numpy.dtype`, `None`, optional
The desired dtype of the index, if not given, defaults
to the higher-precision dtype from ``start`` and ``step``.
Returns
-------
index : `Index`
A new `Index` created from the given parameters. | [
"Define",
"a",
"new",
"Index",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/index.py#L31-L65 | train | 211,334 |
gwpy/gwpy | gwpy/types/index.py | Index.regular | def regular(self):
"""`True` if this index is linearly increasing
"""
try:
return self.info.meta['regular']
except (TypeError, KeyError):
if self.info.meta is None:
self.info.meta = {}
self.info.meta['regular'] = self.is_regular()
return self.info.meta['regular'] | python | def regular(self):
"""`True` if this index is linearly increasing
"""
try:
return self.info.meta['regular']
except (TypeError, KeyError):
if self.info.meta is None:
self.info.meta = {}
self.info.meta['regular'] = self.is_regular()
return self.info.meta['regular'] | [
"def",
"regular",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"info",
".",
"meta",
"[",
"'regular'",
"]",
"except",
"(",
"TypeError",
",",
"KeyError",
")",
":",
"if",
"self",
".",
"info",
".",
"meta",
"is",
"None",
":",
"self",
".",
... | `True` if this index is linearly increasing | [
"True",
"if",
"this",
"index",
"is",
"linearly",
"increasing"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/index.py#L68-L77 | train | 211,335 |
gwpy/gwpy | gwpy/types/index.py | Index.is_regular | def is_regular(self):
"""Determine whether this `Index` contains linearly increasing samples
This also works for linear decrease
"""
if self.size <= 1:
return False
return numpy.isclose(numpy.diff(self.value, n=2), 0).all() | python | def is_regular(self):
"""Determine whether this `Index` contains linearly increasing samples
This also works for linear decrease
"""
if self.size <= 1:
return False
return numpy.isclose(numpy.diff(self.value, n=2), 0).all() | [
"def",
"is_regular",
"(",
"self",
")",
":",
"if",
"self",
".",
"size",
"<=",
"1",
":",
"return",
"False",
"return",
"numpy",
".",
"isclose",
"(",
"numpy",
".",
"diff",
"(",
"self",
".",
"value",
",",
"n",
"=",
"2",
")",
",",
"0",
")",
".",
"all... | Determine whether this `Index` contains linearly increasing samples
This also works for linear decrease | [
"Determine",
"whether",
"this",
"Index",
"contains",
"linearly",
"increasing",
"samples"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/index.py#L79-L86 | train | 211,336 |
gwpy/gwpy | gwpy/table/io/omicron.py | table_from_omicron | def table_from_omicron(source, *args, **kwargs):
"""Read an `EventTable` from an Omicron ROOT file
This function just redirects to the format='root' reader with appropriate
defaults.
"""
if not args: # only default treename if args not given
kwargs.setdefault('treename', 'triggers')
return EventTable.read(source, *args, format='root', **kwargs) | python | def table_from_omicron(source, *args, **kwargs):
"""Read an `EventTable` from an Omicron ROOT file
This function just redirects to the format='root' reader with appropriate
defaults.
"""
if not args: # only default treename if args not given
kwargs.setdefault('treename', 'triggers')
return EventTable.read(source, *args, format='root', **kwargs) | [
"def",
"table_from_omicron",
"(",
"source",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"args",
":",
"# only default treename if args not given",
"kwargs",
".",
"setdefault",
"(",
"'treename'",
",",
"'triggers'",
")",
"return",
"EventTable"... | Read an `EventTable` from an Omicron ROOT file
This function just redirects to the format='root' reader with appropriate
defaults. | [
"Read",
"an",
"EventTable",
"from",
"an",
"Omicron",
"ROOT",
"file"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/omicron.py#L28-L36 | train | 211,337 |
gwpy/gwpy | gwpy/plot/segments.py | SegmentAxes.plot | def plot(self, *args, **kwargs):
"""Plot data onto these axes
Parameters
----------
args
a single instance of
- `~gwpy.segments.DataQualityFlag`
- `~gwpy.segments.Segment`
- `~gwpy.segments.SegmentList`
- `~gwpy.segments.SegmentListDict`
or equivalent types upstream from :mod:`ligo.segments`
kwargs
keyword arguments applicable to `~matplotib.axes.Axes.plot`
Returns
-------
Line2D
the `~matplotlib.lines.Line2D` for this line layer
See Also
--------
:meth:`matplotlib.axes.Axes.plot`
for a full description of acceptable ``*args` and ``**kwargs``
"""
out = []
args = list(args)
while args:
try:
plotter = self._plot_method(args[0])
except TypeError:
break
out.append(plotter(args[0], **kwargs))
args.pop(0)
if args:
out.extend(super(SegmentAxes, self).plot(*args, **kwargs))
self.autoscale(enable=None, axis='both', tight=False)
return out | python | def plot(self, *args, **kwargs):
"""Plot data onto these axes
Parameters
----------
args
a single instance of
- `~gwpy.segments.DataQualityFlag`
- `~gwpy.segments.Segment`
- `~gwpy.segments.SegmentList`
- `~gwpy.segments.SegmentListDict`
or equivalent types upstream from :mod:`ligo.segments`
kwargs
keyword arguments applicable to `~matplotib.axes.Axes.plot`
Returns
-------
Line2D
the `~matplotlib.lines.Line2D` for this line layer
See Also
--------
:meth:`matplotlib.axes.Axes.plot`
for a full description of acceptable ``*args` and ``**kwargs``
"""
out = []
args = list(args)
while args:
try:
plotter = self._plot_method(args[0])
except TypeError:
break
out.append(plotter(args[0], **kwargs))
args.pop(0)
if args:
out.extend(super(SegmentAxes, self).plot(*args, **kwargs))
self.autoscale(enable=None, axis='both', tight=False)
return out | [
"def",
"plot",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"out",
"=",
"[",
"]",
"args",
"=",
"list",
"(",
"args",
")",
"while",
"args",
":",
"try",
":",
"plotter",
"=",
"self",
".",
"_plot_method",
"(",
"args",
"[",
"0",
... | Plot data onto these axes
Parameters
----------
args
a single instance of
- `~gwpy.segments.DataQualityFlag`
- `~gwpy.segments.Segment`
- `~gwpy.segments.SegmentList`
- `~gwpy.segments.SegmentListDict`
or equivalent types upstream from :mod:`ligo.segments`
kwargs
keyword arguments applicable to `~matplotib.axes.Axes.plot`
Returns
-------
Line2D
the `~matplotlib.lines.Line2D` for this line layer
See Also
--------
:meth:`matplotlib.axes.Axes.plot`
for a full description of acceptable ``*args` and ``**kwargs`` | [
"Plot",
"data",
"onto",
"these",
"axes"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/segments.py#L94-L134 | train | 211,338 |
gwpy/gwpy | gwpy/plot/segments.py | SegmentAxes.plot_dict | def plot_dict(self, flags, label='key', known='x', **kwargs):
"""Plot a `~gwpy.segments.DataQualityDict` onto these axes
Parameters
----------
flags : `~gwpy.segments.DataQualityDict`
data-quality dict to display
label : `str`, optional
labelling system to use, or fixed label for all `DataQualityFlags`.
Special values include
- ``'key'``: use the key of the `DataQualityDict`,
- ``'name'``: use the :attr:`~DataQualityFlag.name` of the
`DataQualityFlag`
If anything else, that fixed label will be used for all lines.
known : `str`, `dict`, `None`, default: '/'
display `known` segments with the given hatching, or give a
dict of keyword arguments to pass to
:meth:`~SegmentAxes.plot_segmentlist`, or `None` to hide.
**kwargs
any other keyword arguments acceptable for
`~matplotlib.patches.Rectangle`
Returns
-------
collection : `~matplotlib.patches.PatchCollection`
list of `~matplotlib.patches.Rectangle` patches
"""
out = []
for lab, flag in flags.items():
if label.lower() == 'name':
lab = flag.name
elif label.lower() != 'key':
lab = label
out.append(self.plot_flag(flag, label=to_string(lab), known=known,
**kwargs))
return out | python | def plot_dict(self, flags, label='key', known='x', **kwargs):
"""Plot a `~gwpy.segments.DataQualityDict` onto these axes
Parameters
----------
flags : `~gwpy.segments.DataQualityDict`
data-quality dict to display
label : `str`, optional
labelling system to use, or fixed label for all `DataQualityFlags`.
Special values include
- ``'key'``: use the key of the `DataQualityDict`,
- ``'name'``: use the :attr:`~DataQualityFlag.name` of the
`DataQualityFlag`
If anything else, that fixed label will be used for all lines.
known : `str`, `dict`, `None`, default: '/'
display `known` segments with the given hatching, or give a
dict of keyword arguments to pass to
:meth:`~SegmentAxes.plot_segmentlist`, or `None` to hide.
**kwargs
any other keyword arguments acceptable for
`~matplotlib.patches.Rectangle`
Returns
-------
collection : `~matplotlib.patches.PatchCollection`
list of `~matplotlib.patches.Rectangle` patches
"""
out = []
for lab, flag in flags.items():
if label.lower() == 'name':
lab = flag.name
elif label.lower() != 'key':
lab = label
out.append(self.plot_flag(flag, label=to_string(lab), known=known,
**kwargs))
return out | [
"def",
"plot_dict",
"(",
"self",
",",
"flags",
",",
"label",
"=",
"'key'",
",",
"known",
"=",
"'x'",
",",
"*",
"*",
"kwargs",
")",
":",
"out",
"=",
"[",
"]",
"for",
"lab",
",",
"flag",
"in",
"flags",
".",
"items",
"(",
")",
":",
"if",
"label",
... | Plot a `~gwpy.segments.DataQualityDict` onto these axes
Parameters
----------
flags : `~gwpy.segments.DataQualityDict`
data-quality dict to display
label : `str`, optional
labelling system to use, or fixed label for all `DataQualityFlags`.
Special values include
- ``'key'``: use the key of the `DataQualityDict`,
- ``'name'``: use the :attr:`~DataQualityFlag.name` of the
`DataQualityFlag`
If anything else, that fixed label will be used for all lines.
known : `str`, `dict`, `None`, default: '/'
display `known` segments with the given hatching, or give a
dict of keyword arguments to pass to
:meth:`~SegmentAxes.plot_segmentlist`, or `None` to hide.
**kwargs
any other keyword arguments acceptable for
`~matplotlib.patches.Rectangle`
Returns
-------
collection : `~matplotlib.patches.PatchCollection`
list of `~matplotlib.patches.Rectangle` patches | [
"Plot",
"a",
"~gwpy",
".",
"segments",
".",
"DataQualityDict",
"onto",
"these",
"axes"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/segments.py#L136-L176 | train | 211,339 |
gwpy/gwpy | gwpy/plot/segments.py | SegmentAxes.plot_flag | def plot_flag(self, flag, y=None, **kwargs):
"""Plot a `~gwpy.segments.DataQualityFlag` onto these axes.
Parameters
----------
flag : `~gwpy.segments.DataQualityFlag`
Data-quality flag to display.
y : `float`, optional
Y-axis value for new segments.
height : `float`, optional,
Height for each segment, default: `0.8`.
known : `str`, `dict`, `None`
One of the following
- ``'fancy'`` - to use fancy format (try it and see)
- ``'x'`` (or similar) - to use hatching
- `str` to specify ``facecolor`` for known segmentlist
- `dict` of kwargs to use
- `None` to ignore known segmentlist
**kwargs
Any other keyword arguments acceptable for
`~matplotlib.patches.Rectangle`.
Returns
-------
collection : `~matplotlib.patches.PatchCollection`
list of `~matplotlib.patches.Rectangle` patches for active
segments
"""
# get y axis position
if y is None:
y = self.get_next_y()
# default a 'good' flag to green segments and vice-versa
if flag.isgood:
kwargs.setdefault('facecolor', '#33cc33')
kwargs.setdefault('known', '#ff0000')
else:
kwargs.setdefault('facecolor', '#ff0000')
kwargs.setdefault('known', '#33cc33')
known = kwargs.pop('known')
# get flag name
name = kwargs.pop('label', flag.label or flag.name)
# make active collection
kwargs.setdefault('zorder', 0)
coll = self.plot_segmentlist(flag.active, y=y, label=name,
**kwargs)
# make known collection
if known not in (None, False):
known_kw = {
'facecolor': coll.get_facecolor()[0],
'collection': 'ignore',
'zorder': -1000,
}
if isinstance(known, dict):
known_kw.update(known)
elif known == 'fancy':
known_kw.update(height=kwargs.get('height', .8)*.05)
elif known in HATCHES:
known_kw.update(fill=False, hatch=known)
else:
known_kw.update(fill=True, facecolor=known,
height=kwargs.get('height', .8)*.5)
self.plot_segmentlist(flag.known, y=y, label=name, **known_kw)
return coll | python | def plot_flag(self, flag, y=None, **kwargs):
"""Plot a `~gwpy.segments.DataQualityFlag` onto these axes.
Parameters
----------
flag : `~gwpy.segments.DataQualityFlag`
Data-quality flag to display.
y : `float`, optional
Y-axis value for new segments.
height : `float`, optional,
Height for each segment, default: `0.8`.
known : `str`, `dict`, `None`
One of the following
- ``'fancy'`` - to use fancy format (try it and see)
- ``'x'`` (or similar) - to use hatching
- `str` to specify ``facecolor`` for known segmentlist
- `dict` of kwargs to use
- `None` to ignore known segmentlist
**kwargs
Any other keyword arguments acceptable for
`~matplotlib.patches.Rectangle`.
Returns
-------
collection : `~matplotlib.patches.PatchCollection`
list of `~matplotlib.patches.Rectangle` patches for active
segments
"""
# get y axis position
if y is None:
y = self.get_next_y()
# default a 'good' flag to green segments and vice-versa
if flag.isgood:
kwargs.setdefault('facecolor', '#33cc33')
kwargs.setdefault('known', '#ff0000')
else:
kwargs.setdefault('facecolor', '#ff0000')
kwargs.setdefault('known', '#33cc33')
known = kwargs.pop('known')
# get flag name
name = kwargs.pop('label', flag.label or flag.name)
# make active collection
kwargs.setdefault('zorder', 0)
coll = self.plot_segmentlist(flag.active, y=y, label=name,
**kwargs)
# make known collection
if known not in (None, False):
known_kw = {
'facecolor': coll.get_facecolor()[0],
'collection': 'ignore',
'zorder': -1000,
}
if isinstance(known, dict):
known_kw.update(known)
elif known == 'fancy':
known_kw.update(height=kwargs.get('height', .8)*.05)
elif known in HATCHES:
known_kw.update(fill=False, hatch=known)
else:
known_kw.update(fill=True, facecolor=known,
height=kwargs.get('height', .8)*.5)
self.plot_segmentlist(flag.known, y=y, label=name, **known_kw)
return coll | [
"def",
"plot_flag",
"(",
"self",
",",
"flag",
",",
"y",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# get y axis position",
"if",
"y",
"is",
"None",
":",
"y",
"=",
"self",
".",
"get_next_y",
"(",
")",
"# default a 'good' flag to green segments and vice-... | Plot a `~gwpy.segments.DataQualityFlag` onto these axes.
Parameters
----------
flag : `~gwpy.segments.DataQualityFlag`
Data-quality flag to display.
y : `float`, optional
Y-axis value for new segments.
height : `float`, optional,
Height for each segment, default: `0.8`.
known : `str`, `dict`, `None`
One of the following
- ``'fancy'`` - to use fancy format (try it and see)
- ``'x'`` (or similar) - to use hatching
- `str` to specify ``facecolor`` for known segmentlist
- `dict` of kwargs to use
- `None` to ignore known segmentlist
**kwargs
Any other keyword arguments acceptable for
`~matplotlib.patches.Rectangle`.
Returns
-------
collection : `~matplotlib.patches.PatchCollection`
list of `~matplotlib.patches.Rectangle` patches for active
segments | [
"Plot",
"a",
"~gwpy",
".",
"segments",
".",
"DataQualityFlag",
"onto",
"these",
"axes",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/segments.py#L184-L256 | train | 211,340 |
gwpy/gwpy | gwpy/plot/segments.py | SegmentAxes.plot_segmentlist | def plot_segmentlist(self, segmentlist, y=None, height=.8, label=None,
collection=True, rasterized=None, **kwargs):
"""Plot a `~gwpy.segments.SegmentList` onto these axes
Parameters
----------
segmentlist : `~gwpy.segments.SegmentList`
list of segments to display
y : `float`, optional
y-axis value for new segments
collection : `bool`, default: `True`
add all patches as a
`~matplotlib.collections.PatchCollection`, doesn't seem
to work for hatched rectangles
label : `str`, optional
custom descriptive name to print as y-axis tick label
**kwargs
any other keyword arguments acceptable for
`~matplotlib.patches.Rectangle`
Returns
-------
collection : `~matplotlib.patches.PatchCollection`
list of `~matplotlib.patches.Rectangle` patches
"""
# get colour
facecolor = kwargs.pop('facecolor', kwargs.pop('color', '#629fca'))
if is_color_like(facecolor):
kwargs.setdefault('edgecolor', tint(facecolor, factor=.5))
# get y
if y is None:
y = self.get_next_y()
# build patches
patches = [SegmentRectangle(seg, y, height=height, facecolor=facecolor,
**kwargs) for seg in segmentlist]
if collection: # map to PatchCollection
coll = PatchCollection(patches, match_original=patches,
zorder=kwargs.get('zorder', 1))
coll.set_rasterized(rasterized)
coll._ignore = collection == 'ignore'
coll._ypos = y
out = self.add_collection(coll)
# reset label with tex-formatting now
# matplotlib default label is applied by add_collection
# so we can only replace the leading underscore after
# this point
if label is None:
label = coll.get_label()
coll.set_label(to_string(label))
else:
out = []
for patch in patches:
patch.set_label(label)
patch.set_rasterized(rasterized)
label = ''
out.append(self.add_patch(patch))
self.autoscale(enable=None, axis='both', tight=False)
return out | python | def plot_segmentlist(self, segmentlist, y=None, height=.8, label=None,
collection=True, rasterized=None, **kwargs):
"""Plot a `~gwpy.segments.SegmentList` onto these axes
Parameters
----------
segmentlist : `~gwpy.segments.SegmentList`
list of segments to display
y : `float`, optional
y-axis value for new segments
collection : `bool`, default: `True`
add all patches as a
`~matplotlib.collections.PatchCollection`, doesn't seem
to work for hatched rectangles
label : `str`, optional
custom descriptive name to print as y-axis tick label
**kwargs
any other keyword arguments acceptable for
`~matplotlib.patches.Rectangle`
Returns
-------
collection : `~matplotlib.patches.PatchCollection`
list of `~matplotlib.patches.Rectangle` patches
"""
# get colour
facecolor = kwargs.pop('facecolor', kwargs.pop('color', '#629fca'))
if is_color_like(facecolor):
kwargs.setdefault('edgecolor', tint(facecolor, factor=.5))
# get y
if y is None:
y = self.get_next_y()
# build patches
patches = [SegmentRectangle(seg, y, height=height, facecolor=facecolor,
**kwargs) for seg in segmentlist]
if collection: # map to PatchCollection
coll = PatchCollection(patches, match_original=patches,
zorder=kwargs.get('zorder', 1))
coll.set_rasterized(rasterized)
coll._ignore = collection == 'ignore'
coll._ypos = y
out = self.add_collection(coll)
# reset label with tex-formatting now
# matplotlib default label is applied by add_collection
# so we can only replace the leading underscore after
# this point
if label is None:
label = coll.get_label()
coll.set_label(to_string(label))
else:
out = []
for patch in patches:
patch.set_label(label)
patch.set_rasterized(rasterized)
label = ''
out.append(self.add_patch(patch))
self.autoscale(enable=None, axis='both', tight=False)
return out | [
"def",
"plot_segmentlist",
"(",
"self",
",",
"segmentlist",
",",
"y",
"=",
"None",
",",
"height",
"=",
".8",
",",
"label",
"=",
"None",
",",
"collection",
"=",
"True",
",",
"rasterized",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# get colour",
... | Plot a `~gwpy.segments.SegmentList` onto these axes
Parameters
----------
segmentlist : `~gwpy.segments.SegmentList`
list of segments to display
y : `float`, optional
y-axis value for new segments
collection : `bool`, default: `True`
add all patches as a
`~matplotlib.collections.PatchCollection`, doesn't seem
to work for hatched rectangles
label : `str`, optional
custom descriptive name to print as y-axis tick label
**kwargs
any other keyword arguments acceptable for
`~matplotlib.patches.Rectangle`
Returns
-------
collection : `~matplotlib.patches.PatchCollection`
list of `~matplotlib.patches.Rectangle` patches | [
"Plot",
"a",
"~gwpy",
".",
"segments",
".",
"SegmentList",
"onto",
"these",
"axes"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/segments.py#L264-L328 | train | 211,341 |
gwpy/gwpy | gwpy/plot/segments.py | SegmentAxes.plot_segmentlistdict | def plot_segmentlistdict(self, segmentlistdict, y=None, dy=1, **kwargs):
"""Plot a `~gwpy.segments.SegmentListDict` onto
these axes
Parameters
----------
segmentlistdict : `~gwpy.segments.SegmentListDict`
(name, `~gwpy.segments.SegmentList`) dict
y : `float`, optional
starting y-axis value for new segmentlists
**kwargs
any other keyword arguments acceptable for
`~matplotlib.patches.Rectangle`
Returns
-------
collections : `list`
list of `~matplotlib.patches.PatchCollection` sets for
each segmentlist
"""
if y is None:
y = self.get_next_y()
collections = []
for name, segmentlist in segmentlistdict.items():
collections.append(self.plot_segmentlist(segmentlist, y=y,
label=name, **kwargs))
y += dy
return collections | python | def plot_segmentlistdict(self, segmentlistdict, y=None, dy=1, **kwargs):
"""Plot a `~gwpy.segments.SegmentListDict` onto
these axes
Parameters
----------
segmentlistdict : `~gwpy.segments.SegmentListDict`
(name, `~gwpy.segments.SegmentList`) dict
y : `float`, optional
starting y-axis value for new segmentlists
**kwargs
any other keyword arguments acceptable for
`~matplotlib.patches.Rectangle`
Returns
-------
collections : `list`
list of `~matplotlib.patches.PatchCollection` sets for
each segmentlist
"""
if y is None:
y = self.get_next_y()
collections = []
for name, segmentlist in segmentlistdict.items():
collections.append(self.plot_segmentlist(segmentlist, y=y,
label=name, **kwargs))
y += dy
return collections | [
"def",
"plot_segmentlistdict",
"(",
"self",
",",
"segmentlistdict",
",",
"y",
"=",
"None",
",",
"dy",
"=",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"y",
"is",
"None",
":",
"y",
"=",
"self",
".",
"get_next_y",
"(",
")",
"collections",
"=",
"[",... | Plot a `~gwpy.segments.SegmentListDict` onto
these axes
Parameters
----------
segmentlistdict : `~gwpy.segments.SegmentListDict`
(name, `~gwpy.segments.SegmentList`) dict
y : `float`, optional
starting y-axis value for new segmentlists
**kwargs
any other keyword arguments acceptable for
`~matplotlib.patches.Rectangle`
Returns
-------
collections : `list`
list of `~matplotlib.patches.PatchCollection` sets for
each segmentlist | [
"Plot",
"a",
"~gwpy",
".",
"segments",
".",
"SegmentListDict",
"onto",
"these",
"axes"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/segments.py#L330-L359 | train | 211,342 |
gwpy/gwpy | gwpy/plot/segments.py | SegmentAxes.get_collections | def get_collections(self, ignore=None):
"""Return the collections matching the given `_ignore` value
Parameters
----------
ignore : `bool`, or `None`
value of `_ignore` to match
Returns
-------
collections : `list`
if `ignore=None`, simply returns all collections, otherwise
returns those collections matching the `ignore` parameter
"""
if ignore is None:
return self.collections
return [c for c in self.collections if
getattr(c, '_ignore', None) == ignore] | python | def get_collections(self, ignore=None):
"""Return the collections matching the given `_ignore` value
Parameters
----------
ignore : `bool`, or `None`
value of `_ignore` to match
Returns
-------
collections : `list`
if `ignore=None`, simply returns all collections, otherwise
returns those collections matching the `ignore` parameter
"""
if ignore is None:
return self.collections
return [c for c in self.collections if
getattr(c, '_ignore', None) == ignore] | [
"def",
"get_collections",
"(",
"self",
",",
"ignore",
"=",
"None",
")",
":",
"if",
"ignore",
"is",
"None",
":",
"return",
"self",
".",
"collections",
"return",
"[",
"c",
"for",
"c",
"in",
"self",
".",
"collections",
"if",
"getattr",
"(",
"c",
",",
"'... | Return the collections matching the given `_ignore` value
Parameters
----------
ignore : `bool`, or `None`
value of `_ignore` to match
Returns
-------
collections : `list`
if `ignore=None`, simply returns all collections, otherwise
returns those collections matching the `ignore` parameter | [
"Return",
"the",
"collections",
"matching",
"the",
"given",
"_ignore",
"value"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/segments.py#L369-L386 | train | 211,343 |
gwpy/gwpy | gwpy/io/kerberos.py | parse_keytab | def parse_keytab(keytab):
"""Read the contents of a KRB5 keytab file, returning a list of
credentials listed within
Parameters
----------
keytab : `str`
path to keytab file
Returns
-------
creds : `list` of `tuple`
the (unique) list of `(username, realm, kvno)` as read from the
keytab file
Examples
--------
>>> from gwpy.io.kerberos import parse_keytab
>>> print(parse_keytab("creds.keytab"))
[('albert.einstein', 'LIGO.ORG', 1)]
"""
try:
out = subprocess.check_output(['klist', '-k', keytab],
stderr=subprocess.PIPE)
except OSError:
raise KerberosError("Failed to locate klist, cannot read keytab")
except subprocess.CalledProcessError:
raise KerberosError("Cannot read keytab {!r}".format(keytab))
principals = []
for line in out.splitlines():
if isinstance(line, bytes):
line = line.decode('utf-8')
try:
kvno, principal, = re.split(r'\s+', line.strip(' '), 1)
except ValueError:
continue
else:
if not kvno.isdigit():
continue
principals.append(tuple(principal.split('@')) + (int(kvno),))
# return unique, ordered list
return list(OrderedDict.fromkeys(principals).keys()) | python | def parse_keytab(keytab):
"""Read the contents of a KRB5 keytab file, returning a list of
credentials listed within
Parameters
----------
keytab : `str`
path to keytab file
Returns
-------
creds : `list` of `tuple`
the (unique) list of `(username, realm, kvno)` as read from the
keytab file
Examples
--------
>>> from gwpy.io.kerberos import parse_keytab
>>> print(parse_keytab("creds.keytab"))
[('albert.einstein', 'LIGO.ORG', 1)]
"""
try:
out = subprocess.check_output(['klist', '-k', keytab],
stderr=subprocess.PIPE)
except OSError:
raise KerberosError("Failed to locate klist, cannot read keytab")
except subprocess.CalledProcessError:
raise KerberosError("Cannot read keytab {!r}".format(keytab))
principals = []
for line in out.splitlines():
if isinstance(line, bytes):
line = line.decode('utf-8')
try:
kvno, principal, = re.split(r'\s+', line.strip(' '), 1)
except ValueError:
continue
else:
if not kvno.isdigit():
continue
principals.append(tuple(principal.split('@')) + (int(kvno),))
# return unique, ordered list
return list(OrderedDict.fromkeys(principals).keys()) | [
"def",
"parse_keytab",
"(",
"keytab",
")",
":",
"try",
":",
"out",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'klist'",
",",
"'-k'",
",",
"keytab",
"]",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"except",
"OSError",
":",
"raise",
"Ker... | Read the contents of a KRB5 keytab file, returning a list of
credentials listed within
Parameters
----------
keytab : `str`
path to keytab file
Returns
-------
creds : `list` of `tuple`
the (unique) list of `(username, realm, kvno)` as read from the
keytab file
Examples
--------
>>> from gwpy.io.kerberos import parse_keytab
>>> print(parse_keytab("creds.keytab"))
[('albert.einstein', 'LIGO.ORG', 1)] | [
"Read",
"the",
"contents",
"of",
"a",
"KRB5",
"keytab",
"file",
"returning",
"a",
"list",
"of",
"credentials",
"listed",
"within"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/kerberos.py#L180-L221 | train | 211,344 |
gwpy/gwpy | gwpy/types/array2d.py | Array2D.y0 | def y0(self):
"""Y-axis coordinate of the first data point
:type: `~astropy.units.Quantity` scalar
"""
try:
return self._y0
except AttributeError:
self._y0 = Quantity(0, self.yunit)
return self._y0 | python | def y0(self):
"""Y-axis coordinate of the first data point
:type: `~astropy.units.Quantity` scalar
"""
try:
return self._y0
except AttributeError:
self._y0 = Quantity(0, self.yunit)
return self._y0 | [
"def",
"y0",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"_y0",
"except",
"AttributeError",
":",
"self",
".",
"_y0",
"=",
"Quantity",
"(",
"0",
",",
"self",
".",
"yunit",
")",
"return",
"self",
".",
"_y0"
] | Y-axis coordinate of the first data point
:type: `~astropy.units.Quantity` scalar | [
"Y",
"-",
"axis",
"coordinate",
"of",
"the",
"first",
"data",
"point"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/array2d.py#L189-L198 | train | 211,345 |
gwpy/gwpy | gwpy/types/array2d.py | Array2D.dy | def dy(self):
"""Y-axis sample separation
:type: `~astropy.units.Quantity` scalar
"""
try:
return self._dy
except AttributeError:
try:
self._yindex
except AttributeError:
self._dy = Quantity(1, self.yunit)
else:
if not self.yindex.regular:
raise AttributeError(
"This series has an irregular y-axis "
"index, so 'dy' is not well defined")
self._dy = self.yindex[1] - self.yindex[0]
return self._dy | python | def dy(self):
"""Y-axis sample separation
:type: `~astropy.units.Quantity` scalar
"""
try:
return self._dy
except AttributeError:
try:
self._yindex
except AttributeError:
self._dy = Quantity(1, self.yunit)
else:
if not self.yindex.regular:
raise AttributeError(
"This series has an irregular y-axis "
"index, so 'dy' is not well defined")
self._dy = self.yindex[1] - self.yindex[0]
return self._dy | [
"def",
"dy",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"_dy",
"except",
"AttributeError",
":",
"try",
":",
"self",
".",
"_yindex",
"except",
"AttributeError",
":",
"self",
".",
"_dy",
"=",
"Quantity",
"(",
"1",
",",
"self",
".",
"yuni... | Y-axis sample separation
:type: `~astropy.units.Quantity` scalar | [
"Y",
"-",
"axis",
"sample",
"separation"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/array2d.py#L213-L231 | train | 211,346 |
gwpy/gwpy | gwpy/types/array2d.py | Array2D.yunit | def yunit(self):
"""Unit of Y-axis index
:type: `~astropy.units.Unit`
"""
try:
return self._dy.unit
except AttributeError:
try:
return self._y0.unit
except AttributeError:
return self._default_yunit | python | def yunit(self):
"""Unit of Y-axis index
:type: `~astropy.units.Unit`
"""
try:
return self._dy.unit
except AttributeError:
try:
return self._y0.unit
except AttributeError:
return self._default_yunit | [
"def",
"yunit",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"_dy",
".",
"unit",
"except",
"AttributeError",
":",
"try",
":",
"return",
"self",
".",
"_y0",
".",
"unit",
"except",
"AttributeError",
":",
"return",
"self",
".",
"_default_yunit"... | Unit of Y-axis index
:type: `~astropy.units.Unit` | [
"Unit",
"of",
"Y",
"-",
"axis",
"index"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/array2d.py#L245-L256 | train | 211,347 |
gwpy/gwpy | gwpy/types/array2d.py | Array2D.yindex | def yindex(self):
"""Positions of the data on the y-axis
:type: `~astropy.units.Quantity` array
"""
try:
return self._yindex
except AttributeError:
self._yindex = Index.define(self.y0, self.dy, self.shape[1])
return self._yindex | python | def yindex(self):
"""Positions of the data on the y-axis
:type: `~astropy.units.Quantity` array
"""
try:
return self._yindex
except AttributeError:
self._yindex = Index.define(self.y0, self.dy, self.shape[1])
return self._yindex | [
"def",
"yindex",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"_yindex",
"except",
"AttributeError",
":",
"self",
".",
"_yindex",
"=",
"Index",
".",
"define",
"(",
"self",
".",
"y0",
",",
"self",
".",
"dy",
",",
"self",
".",
"shape",
"... | Positions of the data on the y-axis
:type: `~astropy.units.Quantity` array | [
"Positions",
"of",
"the",
"data",
"on",
"the",
"y",
"-",
"axis"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/array2d.py#L260-L269 | train | 211,348 |
gwpy/gwpy | gwpy/types/array2d.py | Array2D.is_compatible | def is_compatible(self, other):
"""Check whether this array and ``other`` have compatible metadata
"""
super(Array2D, self).is_compatible(other)
# check y-axis metadata
if isinstance(other, type(self)):
try:
if not self.dy == other.dy:
raise ValueError("%s sample sizes do not match: "
"%s vs %s." % (type(self).__name__,
self.dy, other.dy))
except AttributeError:
raise ValueError("Series with irregular y-indexes cannot "
"be compatible")
return True | python | def is_compatible(self, other):
"""Check whether this array and ``other`` have compatible metadata
"""
super(Array2D, self).is_compatible(other)
# check y-axis metadata
if isinstance(other, type(self)):
try:
if not self.dy == other.dy:
raise ValueError("%s sample sizes do not match: "
"%s vs %s." % (type(self).__name__,
self.dy, other.dy))
except AttributeError:
raise ValueError("Series with irregular y-indexes cannot "
"be compatible")
return True | [
"def",
"is_compatible",
"(",
"self",
",",
"other",
")",
":",
"super",
"(",
"Array2D",
",",
"self",
")",
".",
"is_compatible",
"(",
"other",
")",
"# check y-axis metadata",
"if",
"isinstance",
"(",
"other",
",",
"type",
"(",
"self",
")",
")",
":",
"try",
... | Check whether this array and ``other`` have compatible metadata | [
"Check",
"whether",
"this",
"array",
"and",
"other",
"have",
"compatible",
"metadata"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/array2d.py#L308-L323 | train | 211,349 |
gwpy/gwpy | gwpy/segments/io/hdf5.py | find_flag_groups | def find_flag_groups(h5group, strict=True):
"""Returns all HDF5 Groups under the given group that contain a flag
The check is just that the sub-group has a ``'name'`` attribute, so its
not fool-proof by any means.
Parameters
----------
h5group : `h5py.Group`
the parent group in which to search
strict : `bool`, optional, default: `True`
if `True` raise an exception for any sub-group that doesn't have a
name, otherwise just return all of those that do
Raises
------
KeyError
if a sub-group doesn't have a ``'name'`` attribtue and ``strict=True``
"""
names = []
for group in h5group:
try:
names.append(h5group[group].attrs['name'])
except KeyError:
if strict:
raise
continue
return names | python | def find_flag_groups(h5group, strict=True):
"""Returns all HDF5 Groups under the given group that contain a flag
The check is just that the sub-group has a ``'name'`` attribute, so its
not fool-proof by any means.
Parameters
----------
h5group : `h5py.Group`
the parent group in which to search
strict : `bool`, optional, default: `True`
if `True` raise an exception for any sub-group that doesn't have a
name, otherwise just return all of those that do
Raises
------
KeyError
if a sub-group doesn't have a ``'name'`` attribtue and ``strict=True``
"""
names = []
for group in h5group:
try:
names.append(h5group[group].attrs['name'])
except KeyError:
if strict:
raise
continue
return names | [
"def",
"find_flag_groups",
"(",
"h5group",
",",
"strict",
"=",
"True",
")",
":",
"names",
"=",
"[",
"]",
"for",
"group",
"in",
"h5group",
":",
"try",
":",
"names",
".",
"append",
"(",
"h5group",
"[",
"group",
"]",
".",
"attrs",
"[",
"'name'",
"]",
... | Returns all HDF5 Groups under the given group that contain a flag
The check is just that the sub-group has a ``'name'`` attribute, so its
not fool-proof by any means.
Parameters
----------
h5group : `h5py.Group`
the parent group in which to search
strict : `bool`, optional, default: `True`
if `True` raise an exception for any sub-group that doesn't have a
name, otherwise just return all of those that do
Raises
------
KeyError
if a sub-group doesn't have a ``'name'`` attribtue and ``strict=True`` | [
"Returns",
"all",
"HDF5",
"Groups",
"under",
"the",
"given",
"group",
"that",
"contain",
"a",
"flag"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/hdf5.py#L47-L75 | train | 211,350 |
gwpy/gwpy | gwpy/segments/io/hdf5.py | _is_flag_group | def _is_flag_group(obj):
"""Returns `True` if `obj` is an `h5py.Group` that looks like
if contains a flag
"""
return (
isinstance(obj, h5py.Group) and
isinstance(obj.get("active"), h5py.Dataset) and
isinstance(obj.get("known"), h5py.Dataset)
) | python | def _is_flag_group(obj):
"""Returns `True` if `obj` is an `h5py.Group` that looks like
if contains a flag
"""
return (
isinstance(obj, h5py.Group) and
isinstance(obj.get("active"), h5py.Dataset) and
isinstance(obj.get("known"), h5py.Dataset)
) | [
"def",
"_is_flag_group",
"(",
"obj",
")",
":",
"return",
"(",
"isinstance",
"(",
"obj",
",",
"h5py",
".",
"Group",
")",
"and",
"isinstance",
"(",
"obj",
".",
"get",
"(",
"\"active\"",
")",
",",
"h5py",
".",
"Dataset",
")",
"and",
"isinstance",
"(",
"... | Returns `True` if `obj` is an `h5py.Group` that looks like
if contains a flag | [
"Returns",
"True",
"if",
"obj",
"is",
"an",
"h5py",
".",
"Group",
"that",
"looks",
"like",
"if",
"contains",
"a",
"flag"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/hdf5.py#L81-L89 | train | 211,351 |
gwpy/gwpy | gwpy/segments/io/hdf5.py | _find_flag_groups | def _find_flag_groups(h5f):
"""Return all groups in `h5f` that look like flags
"""
flag_groups = []
def _find(name, obj):
if _is_flag_group(obj):
flag_groups.append(name)
h5f.visititems(_find)
return flag_groups | python | def _find_flag_groups(h5f):
"""Return all groups in `h5f` that look like flags
"""
flag_groups = []
def _find(name, obj):
if _is_flag_group(obj):
flag_groups.append(name)
h5f.visititems(_find)
return flag_groups | [
"def",
"_find_flag_groups",
"(",
"h5f",
")",
":",
"flag_groups",
"=",
"[",
"]",
"def",
"_find",
"(",
"name",
",",
"obj",
")",
":",
"if",
"_is_flag_group",
"(",
"obj",
")",
":",
"flag_groups",
".",
"append",
"(",
"name",
")",
"h5f",
".",
"visititems",
... | Return all groups in `h5f` that look like flags | [
"Return",
"all",
"groups",
"in",
"h5f",
"that",
"look",
"like",
"flags"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/hdf5.py#L92-L102 | train | 211,352 |
gwpy/gwpy | gwpy/segments/io/hdf5.py | _get_flag_group | def _get_flag_group(h5f, path):
"""Determine the group to use in order to read a flag
"""
# if user chose the path, just use it
if path:
return h5f[path]
# if the user gave us the group directly, use it
if _is_flag_group(h5f):
return h5f
# otherwise try and find a single group that matches
try:
path, = _find_flag_groups(h5f)
except ValueError:
pass
else:
return h5f[path]
# if not exactly 1 valid group in the file, complain
raise ValueError(
"please pass a valid HDF5 Group, or specify the HDF5 Group "
"path via the ``path=`` keyword argument",
) | python | def _get_flag_group(h5f, path):
"""Determine the group to use in order to read a flag
"""
# if user chose the path, just use it
if path:
return h5f[path]
# if the user gave us the group directly, use it
if _is_flag_group(h5f):
return h5f
# otherwise try and find a single group that matches
try:
path, = _find_flag_groups(h5f)
except ValueError:
pass
else:
return h5f[path]
# if not exactly 1 valid group in the file, complain
raise ValueError(
"please pass a valid HDF5 Group, or specify the HDF5 Group "
"path via the ``path=`` keyword argument",
) | [
"def",
"_get_flag_group",
"(",
"h5f",
",",
"path",
")",
":",
"# if user chose the path, just use it",
"if",
"path",
":",
"return",
"h5f",
"[",
"path",
"]",
"# if the user gave us the group directly, use it",
"if",
"_is_flag_group",
"(",
"h5f",
")",
":",
"return",
"h... | Determine the group to use in order to read a flag | [
"Determine",
"the",
"group",
"to",
"use",
"in",
"order",
"to",
"read",
"a",
"flag"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/hdf5.py#L105-L128 | train | 211,353 |
gwpy/gwpy | gwpy/segments/io/hdf5.py | read_hdf5_flag | def read_hdf5_flag(h5f, path=None, gpstype=LIGOTimeGPS):
"""Read a `DataQualityFlag` object from an HDF5 file or group.
"""
# extract correct group
dataset = _get_flag_group(h5f, path)
# read dataset
active = SegmentList.read(dataset['active'], format='hdf5',
gpstype=gpstype)
try:
known = SegmentList.read(dataset['known'], format='hdf5',
gpstype=gpstype)
except KeyError as first_keyerror:
try:
known = SegmentList.read(dataset['valid'], format='hdf5',
gpstype=gpstype)
except KeyError:
raise first_keyerror
return DataQualityFlag(active=active, known=known, **dict(dataset.attrs)) | python | def read_hdf5_flag(h5f, path=None, gpstype=LIGOTimeGPS):
"""Read a `DataQualityFlag` object from an HDF5 file or group.
"""
# extract correct group
dataset = _get_flag_group(h5f, path)
# read dataset
active = SegmentList.read(dataset['active'], format='hdf5',
gpstype=gpstype)
try:
known = SegmentList.read(dataset['known'], format='hdf5',
gpstype=gpstype)
except KeyError as first_keyerror:
try:
known = SegmentList.read(dataset['valid'], format='hdf5',
gpstype=gpstype)
except KeyError:
raise first_keyerror
return DataQualityFlag(active=active, known=known, **dict(dataset.attrs)) | [
"def",
"read_hdf5_flag",
"(",
"h5f",
",",
"path",
"=",
"None",
",",
"gpstype",
"=",
"LIGOTimeGPS",
")",
":",
"# extract correct group",
"dataset",
"=",
"_get_flag_group",
"(",
"h5f",
",",
"path",
")",
"# read dataset",
"active",
"=",
"SegmentList",
".",
"read"... | Read a `DataQualityFlag` object from an HDF5 file or group. | [
"Read",
"a",
"DataQualityFlag",
"object",
"from",
"an",
"HDF5",
"file",
"or",
"group",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/hdf5.py#L132-L151 | train | 211,354 |
gwpy/gwpy | gwpy/segments/io/hdf5.py | read_hdf5_segmentlist | def read_hdf5_segmentlist(h5f, path=None, gpstype=LIGOTimeGPS, **kwargs):
"""Read a `SegmentList` object from an HDF5 file or group.
"""
# find dataset
dataset = io_hdf5.find_dataset(h5f, path=path)
segtable = Table.read(dataset, format='hdf5', **kwargs)
out = SegmentList()
for row in segtable:
start = LIGOTimeGPS(int(row['start_time']), int(row['start_time_ns']))
end = LIGOTimeGPS(int(row['end_time']), int(row['end_time_ns']))
if gpstype is LIGOTimeGPS:
out.append(Segment(start, end))
else:
out.append(Segment(gpstype(start), gpstype(end)))
return out | python | def read_hdf5_segmentlist(h5f, path=None, gpstype=LIGOTimeGPS, **kwargs):
"""Read a `SegmentList` object from an HDF5 file or group.
"""
# find dataset
dataset = io_hdf5.find_dataset(h5f, path=path)
segtable = Table.read(dataset, format='hdf5', **kwargs)
out = SegmentList()
for row in segtable:
start = LIGOTimeGPS(int(row['start_time']), int(row['start_time_ns']))
end = LIGOTimeGPS(int(row['end_time']), int(row['end_time_ns']))
if gpstype is LIGOTimeGPS:
out.append(Segment(start, end))
else:
out.append(Segment(gpstype(start), gpstype(end)))
return out | [
"def",
"read_hdf5_segmentlist",
"(",
"h5f",
",",
"path",
"=",
"None",
",",
"gpstype",
"=",
"LIGOTimeGPS",
",",
"*",
"*",
"kwargs",
")",
":",
"# find dataset",
"dataset",
"=",
"io_hdf5",
".",
"find_dataset",
"(",
"h5f",
",",
"path",
"=",
"path",
")",
"seg... | Read a `SegmentList` object from an HDF5 file or group. | [
"Read",
"a",
"SegmentList",
"object",
"from",
"an",
"HDF5",
"file",
"or",
"group",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/hdf5.py#L155-L170 | train | 211,355 |
gwpy/gwpy | gwpy/segments/io/hdf5.py | read_hdf5_dict | def read_hdf5_dict(h5f, names=None, path=None, on_missing='error', **kwargs):
"""Read a `DataQualityDict` from an HDF5 file
"""
if path:
h5f = h5f[path]
# allow alternative keyword argument name (FIXME)
if names is None:
names = kwargs.pop('flags', None)
# try and get list of names automatically
if names is None:
try:
names = find_flag_groups(h5f, strict=True)
except KeyError:
names = None
if not names:
raise ValueError("Failed to automatically parse available flag "
"names from HDF5, please give a list of names "
"to read via the ``names=`` keyword")
# read data
out = DataQualityDict()
for name in names:
try:
out[name] = read_hdf5_flag(h5f, name, **kwargs)
except KeyError as exc:
if on_missing == 'ignore':
pass
elif on_missing == 'warn':
warnings.warn(str(exc))
else:
raise ValueError('no H5Group found for flag '
'{0!r}'.format(name))
return out | python | def read_hdf5_dict(h5f, names=None, path=None, on_missing='error', **kwargs):
"""Read a `DataQualityDict` from an HDF5 file
"""
if path:
h5f = h5f[path]
# allow alternative keyword argument name (FIXME)
if names is None:
names = kwargs.pop('flags', None)
# try and get list of names automatically
if names is None:
try:
names = find_flag_groups(h5f, strict=True)
except KeyError:
names = None
if not names:
raise ValueError("Failed to automatically parse available flag "
"names from HDF5, please give a list of names "
"to read via the ``names=`` keyword")
# read data
out = DataQualityDict()
for name in names:
try:
out[name] = read_hdf5_flag(h5f, name, **kwargs)
except KeyError as exc:
if on_missing == 'ignore':
pass
elif on_missing == 'warn':
warnings.warn(str(exc))
else:
raise ValueError('no H5Group found for flag '
'{0!r}'.format(name))
return out | [
"def",
"read_hdf5_dict",
"(",
"h5f",
",",
"names",
"=",
"None",
",",
"path",
"=",
"None",
",",
"on_missing",
"=",
"'error'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"path",
":",
"h5f",
"=",
"h5f",
"[",
"path",
"]",
"# allow alternative keyword argument ... | Read a `DataQualityDict` from an HDF5 file | [
"Read",
"a",
"DataQualityDict",
"from",
"an",
"HDF5",
"file"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/hdf5.py#L174-L209 | train | 211,356 |
gwpy/gwpy | gwpy/segments/io/hdf5.py | write_hdf5_flag_group | def write_hdf5_flag_group(flag, h5group, **kwargs):
"""Write a `DataQualityFlag` into the given HDF5 group
"""
# write segmentlists
flag.active.write(h5group, 'active', **kwargs)
kwargs['append'] = True
flag.known.write(h5group, 'known', **kwargs)
# store metadata
for attr in ['name', 'label', 'category', 'description', 'isgood',
'padding']:
value = getattr(flag, attr)
if value is None:
continue
elif isinstance(value, Quantity):
h5group.attrs[attr] = value.value
elif isinstance(value, UnitBase):
h5group.attrs[attr] = str(value)
else:
h5group.attrs[attr] = value
return h5group | python | def write_hdf5_flag_group(flag, h5group, **kwargs):
"""Write a `DataQualityFlag` into the given HDF5 group
"""
# write segmentlists
flag.active.write(h5group, 'active', **kwargs)
kwargs['append'] = True
flag.known.write(h5group, 'known', **kwargs)
# store metadata
for attr in ['name', 'label', 'category', 'description', 'isgood',
'padding']:
value = getattr(flag, attr)
if value is None:
continue
elif isinstance(value, Quantity):
h5group.attrs[attr] = value.value
elif isinstance(value, UnitBase):
h5group.attrs[attr] = str(value)
else:
h5group.attrs[attr] = value
return h5group | [
"def",
"write_hdf5_flag_group",
"(",
"flag",
",",
"h5group",
",",
"*",
"*",
"kwargs",
")",
":",
"# write segmentlists",
"flag",
".",
"active",
".",
"write",
"(",
"h5group",
",",
"'active'",
",",
"*",
"*",
"kwargs",
")",
"kwargs",
"[",
"'append'",
"]",
"=... | Write a `DataQualityFlag` into the given HDF5 group | [
"Write",
"a",
"DataQualityFlag",
"into",
"the",
"given",
"HDF5",
"group"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/hdf5.py#L214-L235 | train | 211,357 |
gwpy/gwpy | gwpy/segments/io/hdf5.py | write_hdf5_dict | def write_hdf5_dict(flags, output, path=None, append=False, overwrite=False,
**kwargs):
"""Write this `DataQualityFlag` to a `h5py.Group`.
This allows writing to an HDF5-format file.
Parameters
----------
output : `str`, :class:`h5py.Group`
path to new output file, or open h5py `Group` to write to.
path : `str`
the HDF5 group path in which to write a new group for this flag
**kwargs
other keyword arguments passed to :meth:`h5py.Group.create_dataset`
Returns
-------
dqfgroup : :class:`h5py.Group`
HDF group containing these data. This group contains 'active'
and 'known' datasets, and metadata attrs.
See also
--------
astropy.io
for details on acceptable keyword arguments when writing a
:class:`~astropy.table.Table` to HDF5
"""
if path:
try:
parent = output[path]
except KeyError:
parent = output.create_group(path)
else:
parent = output
for name in flags:
# handle existing group
if name in parent:
if not (overwrite and append):
raise IOError("Group '%s' already exists, give ``append=True, "
"overwrite=True`` to overwrite it"
% os.path.join(parent.name, name))
del parent[name]
# create group
group = parent.create_group(name)
# write flag
write_hdf5_flag_group(flags[name], group, **kwargs) | python | def write_hdf5_dict(flags, output, path=None, append=False, overwrite=False,
**kwargs):
"""Write this `DataQualityFlag` to a `h5py.Group`.
This allows writing to an HDF5-format file.
Parameters
----------
output : `str`, :class:`h5py.Group`
path to new output file, or open h5py `Group` to write to.
path : `str`
the HDF5 group path in which to write a new group for this flag
**kwargs
other keyword arguments passed to :meth:`h5py.Group.create_dataset`
Returns
-------
dqfgroup : :class:`h5py.Group`
HDF group containing these data. This group contains 'active'
and 'known' datasets, and metadata attrs.
See also
--------
astropy.io
for details on acceptable keyword arguments when writing a
:class:`~astropy.table.Table` to HDF5
"""
if path:
try:
parent = output[path]
except KeyError:
parent = output.create_group(path)
else:
parent = output
for name in flags:
# handle existing group
if name in parent:
if not (overwrite and append):
raise IOError("Group '%s' already exists, give ``append=True, "
"overwrite=True`` to overwrite it"
% os.path.join(parent.name, name))
del parent[name]
# create group
group = parent.create_group(name)
# write flag
write_hdf5_flag_group(flags[name], group, **kwargs) | [
"def",
"write_hdf5_dict",
"(",
"flags",
",",
"output",
",",
"path",
"=",
"None",
",",
"append",
"=",
"False",
",",
"overwrite",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"path",
":",
"try",
":",
"parent",
"=",
"output",
"[",
"path",
"]... | Write this `DataQualityFlag` to a `h5py.Group`.
This allows writing to an HDF5-format file.
Parameters
----------
output : `str`, :class:`h5py.Group`
path to new output file, or open h5py `Group` to write to.
path : `str`
the HDF5 group path in which to write a new group for this flag
**kwargs
other keyword arguments passed to :meth:`h5py.Group.create_dataset`
Returns
-------
dqfgroup : :class:`h5py.Group`
HDF group containing these data. This group contains 'active'
and 'known' datasets, and metadata attrs.
See also
--------
astropy.io
for details on acceptable keyword arguments when writing a
:class:`~astropy.table.Table` to HDF5 | [
"Write",
"this",
"DataQualityFlag",
"to",
"a",
"h5py",
".",
"Group",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/hdf5.py#L239-L287 | train | 211,358 |
gwpy/gwpy | gwpy/plot/tex.py | float_to_latex | def float_to_latex(x, format="%.2g"): # pylint: disable=redefined-builtin
# pylint: disable=anomalous-backslash-in-string
r"""Convert a floating point number to a latex representation.
In particular, scientific notation is handled gracefully: e -> 10^
Parameters
----------
x : `float`
the number to represent
format : `str`, optional
the output string format
Returns
-------
tex : `str`
a TeX representation of the input
Examples
--------
>>> from gwpy.plot.tex import float_to_latex
>>> float_to_latex(1)
'1'
>>> float_to_latex(2000)
'2\times 10^{3}'
>>> float_to_latex(100)
'10^{2}'
>>> float_to_latex(-500)
r'-5\!\!\times\!\!10^{2}'
"""
if x == 0.:
return '0'
base_str = format % x
if "e" not in base_str:
return base_str
mantissa, exponent = base_str.split("e")
if float(mantissa).is_integer():
mantissa = int(float(mantissa))
exponent = exponent.lstrip("0+")
if exponent.startswith('-0'):
exponent = '-' + exponent[2:]
if float(mantissa) == 1.0:
return r"10^{%s}" % exponent
return r"%s\!\!\times\!\!10^{%s}" % (mantissa, exponent) | python | def float_to_latex(x, format="%.2g"): # pylint: disable=redefined-builtin
# pylint: disable=anomalous-backslash-in-string
r"""Convert a floating point number to a latex representation.
In particular, scientific notation is handled gracefully: e -> 10^
Parameters
----------
x : `float`
the number to represent
format : `str`, optional
the output string format
Returns
-------
tex : `str`
a TeX representation of the input
Examples
--------
>>> from gwpy.plot.tex import float_to_latex
>>> float_to_latex(1)
'1'
>>> float_to_latex(2000)
'2\times 10^{3}'
>>> float_to_latex(100)
'10^{2}'
>>> float_to_latex(-500)
r'-5\!\!\times\!\!10^{2}'
"""
if x == 0.:
return '0'
base_str = format % x
if "e" not in base_str:
return base_str
mantissa, exponent = base_str.split("e")
if float(mantissa).is_integer():
mantissa = int(float(mantissa))
exponent = exponent.lstrip("0+")
if exponent.startswith('-0'):
exponent = '-' + exponent[2:]
if float(mantissa) == 1.0:
return r"10^{%s}" % exponent
return r"%s\!\!\times\!\!10^{%s}" % (mantissa, exponent) | [
"def",
"float_to_latex",
"(",
"x",
",",
"format",
"=",
"\"%.2g\"",
")",
":",
"# pylint: disable=redefined-builtin",
"# pylint: disable=anomalous-backslash-in-string",
"if",
"x",
"==",
"0.",
":",
"return",
"'0'",
"base_str",
"=",
"format",
"%",
"x",
"if",
"\"e\"",
... | r"""Convert a floating point number to a latex representation.
In particular, scientific notation is handled gracefully: e -> 10^
Parameters
----------
x : `float`
the number to represent
format : `str`, optional
the output string format
Returns
-------
tex : `str`
a TeX representation of the input
Examples
--------
>>> from gwpy.plot.tex import float_to_latex
>>> float_to_latex(1)
'1'
>>> float_to_latex(2000)
'2\times 10^{3}'
>>> float_to_latex(100)
'10^{2}'
>>> float_to_latex(-500)
r'-5\!\!\times\!\!10^{2}' | [
"r",
"Convert",
"a",
"floating",
"point",
"number",
"to",
"a",
"latex",
"representation",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/tex.py#L65-L109 | train | 211,359 |
gwpy/gwpy | gwpy/plot/tex.py | label_to_latex | def label_to_latex(text):
# pylint: disable=anomalous-backslash-in-string
r"""Convert text into a latex-passable representation.
This method just escapes the following reserved LaTeX characters:
% \ _ ~ &, whilst trying to avoid doubly-escaping already escaped
characters
Parameters
----------
text : `str`
input text to convert
Returns
-------
tex : `str`
a modified version of the input text with all unescaped reserved
latex characters escaped
Examples
--------
>>> from gwpy.plot.tex import label_to_latex
>>> label_to_latex('normal text')
'normal text'
>>> label_to_latex('$1 + 2 = 3$')
'$1 + 2 = 3$'
>>> label_to_latex('H1:ABC-DEF_GHI')
'H1:ABC-DEF\\_GHI'
>>> label_to_latex('H1:ABC-DEF\_GHI')
'H1:ABC-DEF\\_GHI'
"""
if text is None:
return ''
out = []
x = None
# loop over matches in reverse order and replace
for m in re_latex_control.finditer(text):
a, b = m.span()
char = m.group()[0]
out.append(text[x:a])
out.append(r'\%s' % char)
x = b
if not x: # no match
return text
# append prefix and return joined components
out.append(text[b:])
return ''.join(out) | python | def label_to_latex(text):
# pylint: disable=anomalous-backslash-in-string
r"""Convert text into a latex-passable representation.
This method just escapes the following reserved LaTeX characters:
% \ _ ~ &, whilst trying to avoid doubly-escaping already escaped
characters
Parameters
----------
text : `str`
input text to convert
Returns
-------
tex : `str`
a modified version of the input text with all unescaped reserved
latex characters escaped
Examples
--------
>>> from gwpy.plot.tex import label_to_latex
>>> label_to_latex('normal text')
'normal text'
>>> label_to_latex('$1 + 2 = 3$')
'$1 + 2 = 3$'
>>> label_to_latex('H1:ABC-DEF_GHI')
'H1:ABC-DEF\\_GHI'
>>> label_to_latex('H1:ABC-DEF\_GHI')
'H1:ABC-DEF\\_GHI'
"""
if text is None:
return ''
out = []
x = None
# loop over matches in reverse order and replace
for m in re_latex_control.finditer(text):
a, b = m.span()
char = m.group()[0]
out.append(text[x:a])
out.append(r'\%s' % char)
x = b
if not x: # no match
return text
# append prefix and return joined components
out.append(text[b:])
return ''.join(out) | [
"def",
"label_to_latex",
"(",
"text",
")",
":",
"# pylint: disable=anomalous-backslash-in-string",
"if",
"text",
"is",
"None",
":",
"return",
"''",
"out",
"=",
"[",
"]",
"x",
"=",
"None",
"# loop over matches in reverse order and replace",
"for",
"m",
"in",
"re_late... | r"""Convert text into a latex-passable representation.
This method just escapes the following reserved LaTeX characters:
% \ _ ~ &, whilst trying to avoid doubly-escaping already escaped
characters
Parameters
----------
text : `str`
input text to convert
Returns
-------
tex : `str`
a modified version of the input text with all unescaped reserved
latex characters escaped
Examples
--------
>>> from gwpy.plot.tex import label_to_latex
>>> label_to_latex('normal text')
'normal text'
>>> label_to_latex('$1 + 2 = 3$')
'$1 + 2 = 3$'
>>> label_to_latex('H1:ABC-DEF_GHI')
'H1:ABC-DEF\\_GHI'
>>> label_to_latex('H1:ABC-DEF\_GHI')
'H1:ABC-DEF\\_GHI' | [
"r",
"Convert",
"text",
"into",
"a",
"latex",
"-",
"passable",
"representation",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/tex.py#L112-L158 | train | 211,360 |
def preformat_cache(cache, start=None, end=None):
    """Preprocess a `list` of file paths for reading.

    - read the cache from the file (if necessary)
    - sieve the cache to only include data we need

    Parameters
    ----------
    cache : `list`, `str`
        List of file paths, or path to a LAL-format cache file on disk.

    start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
        GPS start time of required data, defaults to start of data found;
        any input parseable by `~gwpy.time.to_gps` is fine.

    end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
        GPS end time of required data, defaults to end of data found;
        any input parseable by `~gwpy.time.to_gps` is fine.

    Returns
    -------
    modcache : `list`
        A parsed, sieved list of paths based on the input arguments.
    """
    # a file path (or open file) was given: parse, sort, and sieve in one go
    if isinstance(cache, FILE_LIKE + string_types):
        return read_cache(cache, sort=file_segment,
                          segment=Segment(start, end))

    # otherwise copy the input so we never mutate the caller's list
    cache = type(cache)(cache)

    # sort by GPS segment; if any entry cannot be parsed the sieve below
    # would fail too, so hand the (unsorted) copy straight back
    try:
        cache.sort(key=file_segment)
    except ValueError:
        return cache

    # fill in missing bounds from the earliest/latest files, then sieve
    if start is None:
        start = file_segment(cache[0])[0]
    if end is None:
        end = file_segment(cache[-1])[-1]
    return sieve(cache, segment=Segment(start, end))
"""Preprocess a `list` of file paths for reading.
- read the cache from the file (if necessary)
- sieve the cache to only include data we need
Parameters
----------
cache : `list`, `str`
List of file paths, or path to a LAL-format cache file on disk.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS start time of required data, defaults to start of data found;
any input parseable by `~gwpy.time.to_gps` is fine.
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS end time of required data, defaults to end of data found;
any input parseable by `~gwpy.time.to_gps` is fine.
Returns
-------
modcache : `list`
A parsed, sieved list of paths based on the input arguments.
"""
# open cache file
if isinstance(cache, FILE_LIKE + string_types):
return read_cache(cache, sort=file_segment,
segment=Segment(start, end))
# format existing cache file
cache = type(cache)(cache) # copy cache
# sort cache
try:
cache.sort(key=file_segment) # sort
except ValueError:
# if this failed, then the sieving will also fail, but lets proceed
# anyway, since the user didn't actually ask us to do this (but
# its a very good idea)
return cache
# sieve cache
if start is None: # start time of earliest file
start = file_segment(cache[0])[0]
if end is None: # end time of latest file
end = file_segment(cache[-1])[-1]
return sieve(cache, segment=Segment(start, end)) | [
"def",
"preformat_cache",
"(",
"cache",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
")",
":",
"# open cache file",
"if",
"isinstance",
"(",
"cache",
",",
"FILE_LIKE",
"+",
"string_types",
")",
":",
"return",
"read_cache",
"(",
"cache",
",",
"sort",... | Preprocess a `list` of file paths for reading.
- read the cache from the file (if necessary)
- sieve the cache to only include data we need
Parameters
----------
cache : `list`, `str`
List of file paths, or path to a LAL-format cache file on disk.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS start time of required data, defaults to start of data found;
any input parseable by `~gwpy.time.to_gps` is fine.
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS end time of required data, defaults to end of data found;
any input parseable by `~gwpy.time.to_gps` is fine.
Returns
-------
modcache : `list`
A parsed, sieved list of paths based on the input arguments. | [
"Preprocess",
"a",
"list",
"of",
"file",
"paths",
"for",
"reading",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/cache.py#L30-L76 | train | 211,361 |
def progress_bar(**kwargs):
    """Create a `tqdm.tqdm` progress bar

    This is just a thin wrapper around `tqdm.tqdm` to set some updated defaults
    """
    # merge caller keywords over our defaults
    bar = tqdm(**dict({'desc': 'Processing',
                       'file': sys.stdout,
                       'bar_format': TQDM_BAR_FORMAT}, **kwargs))
    # trim the trailing ': ' that tqdm appends to the description
    if not bar.disable:
        bar.desc = bar.desc.rstrip(': ')
        bar.refresh()
    return bar
"""Create a `tqdm.tqdm` progress bar
This is just a thin wrapper around `tqdm.tqdm` to set some updated defaults
"""
tqdm_kw = {
'desc': 'Processing',
'file': sys.stdout,
'bar_format': TQDM_BAR_FORMAT,
}
tqdm_kw.update(kwargs)
pbar = tqdm(**tqdm_kw)
if not pbar.disable:
pbar.desc = pbar.desc.rstrip(': ')
pbar.refresh()
return pbar | [
"def",
"progress_bar",
"(",
"*",
"*",
"kwargs",
")",
":",
"tqdm_kw",
"=",
"{",
"'desc'",
":",
"'Processing'",
",",
"'file'",
":",
"sys",
".",
"stdout",
",",
"'bar_format'",
":",
"TQDM_BAR_FORMAT",
",",
"}",
"tqdm_kw",
".",
"update",
"(",
"kwargs",
")",
... | Create a `tqdm.tqdm` progress bar
This is just a thin wrapper around `tqdm.tqdm` to set some updated defaults | [
"Create",
"a",
"tqdm",
".",
"tqdm",
"progress",
"bar"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/utils/progress.py#L31-L46 | train | 211,362 |
def num_taps(sample_rate, transitionwidth, gpass, gstop):
    """Returns the number of taps for an FIR filter with the given shape

    Parameters
    ----------
    sample_rate : `float`
        sampling rate of target data

    transitionwidth : `float`
        the width (in the same units as `sample_rate` of the transition
        from stop-band to pass-band

    gpass : `float`
        the maximum loss in the passband (dB)

    gstop : `float`
        the minimum attenuation in the stopband (dB)

    Returns
    -------
    numtaps : `int`
        the number of taps for an FIR filter

    Notes
    -----
    Credit: http://dsp.stackexchange.com/a/31077/8223
    """
    # convert dB specifications to linear ripple/attenuation
    attenuation = 10 ** (-gstop / 10.)
    ripple = 10 ** (-gpass / 10.)
    return int(2/3. * log10(1 / (10 * ripple * attenuation)) *
               sample_rate / transitionwidth)
"""Returns the number of taps for an FIR filter with the given shape
Parameters
----------
sample_rate : `float`
sampling rate of target data
transitionwidth : `float`
the width (in the same units as `sample_rate` of the transition
from stop-band to pass-band
gpass : `float`
the maximum loss in the passband (dB)
gstop : `float`
the minimum attenuation in the stopband (dB)
Returns
-------
numtaps : `int`
the number of taps for an FIR filter
Notes
-----
Credit: http://dsp.stackexchange.com/a/31077/8223
"""
gpass = 10 ** (-gpass / 10.)
gstop = 10 ** (-gstop / 10.)
return int(2/3. * log10(1 / (10 * gpass * gstop)) *
sample_rate / transitionwidth) | [
"def",
"num_taps",
"(",
"sample_rate",
",",
"transitionwidth",
",",
"gpass",
",",
"gstop",
")",
":",
"gpass",
"=",
"10",
"**",
"(",
"-",
"gpass",
"/",
"10.",
")",
"gstop",
"=",
"10",
"**",
"(",
"-",
"gstop",
"/",
"10.",
")",
"return",
"int",
"(",
... | Returns the number of taps for an FIR filter with the given shape
Parameters
----------
sample_rate : `float`
sampling rate of target data
transitionwidth : `float`
the width (in the same units as `sample_rate` of the transition
from stop-band to pass-band
gpass : `float`
the maximum loss in the passband (dB)
gstop : `float`
the minimum attenuation in the stopband (dB)
Returns
-------
numtaps : `int`
the number of taps for an FIR filter
Notes
-----
Credit: http://dsp.stackexchange.com/a/31077/8223 | [
"Returns",
"the",
"number",
"of",
"taps",
"for",
"an",
"FIR",
"filter",
"with",
"the",
"given",
"shape"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/filter_design.py#L102-L132 | train | 211,363 |
def is_zpk(zpktup):
    """Determine whether the given tuple is a ZPK-format filter definition

    Returns
    -------
    iszpk : `bool`
        `True` if the ``zpktup`` looks like a ZPK-format filter definition,
        otherwise `False`
    """
    # must be a 3-tuple (or 3-list) to qualify at all
    if not isinstance(zpktup, (tuple, list)) or len(zpktup) != 3:
        return False
    zeros, poles, gain = zpktup
    array_types = (list, tuple, numpy.ndarray)
    # zeros and poles are array-like, gain is a float scalar
    return (isinstance(zeros, array_types) and
            isinstance(poles, array_types) and
            isinstance(gain, float))
"""Determin whether the given tuple is a ZPK-format filter definition
Returns
-------
iszpk : `bool`
`True` if the ``zpktup`` looks like a ZPK-format filter definition,
otherwise `False`
"""
return (
isinstance(zpktup, (tuple, list)) and
len(zpktup) == 3 and
isinstance(zpktup[0], (list, tuple, numpy.ndarray)) and
isinstance(zpktup[1], (list, tuple, numpy.ndarray)) and
isinstance(zpktup[2], float)) | [
"def",
"is_zpk",
"(",
"zpktup",
")",
":",
"return",
"(",
"isinstance",
"(",
"zpktup",
",",
"(",
"tuple",
",",
"list",
")",
")",
"and",
"len",
"(",
"zpktup",
")",
"==",
"3",
"and",
"isinstance",
"(",
"zpktup",
"[",
"0",
"]",
",",
"(",
"list",
",",... | Determin whether the given tuple is a ZPK-format filter definition
Returns
-------
iszpk : `bool`
`True` if the ``zpktup`` looks like a ZPK-format filter definition,
otherwise `False` | [
"Determin",
"whether",
"the",
"given",
"tuple",
"is",
"a",
"ZPK",
"-",
"format",
"filter",
"definition"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/filter_design.py#L135-L149 | train | 211,364 |
def truncate_transfer(transfer, ncorner=None):
    """Smoothly zero the edges of a frequency domain transfer function

    Parameters
    ----------
    transfer : `numpy.ndarray`
        transfer function to start from, must have at least ten samples

    ncorner : `int`, optional
        number of extra samples to zero off at low frequency, default: `None`

    Returns
    -------
    out : `numpy.ndarray`
        the smoothly truncated transfer function

    Notes
    -----
    By default, the input transfer function will have five samples tapered
    off at the left and right boundaries.  If `ncorner` is not `None`, then
    `ncorner` extra samples will be zeroed on the left as a hard highpass
    filter.

    See :func:`~gwpy.signal.window.planck` for more information.
    """
    size = transfer.size
    lowcut = ncorner or 0
    trunc = transfer.copy()
    # hard highpass: zero everything below the corner sample
    trunc[:lowcut] = 0
    # smooth Planck taper (5 samples each side) over the remaining band
    trunc[lowcut:] *= planck(size - lowcut, nleft=5, nright=5)
    return trunc
"""Smoothly zero the edges of a frequency domain transfer function
Parameters
----------
transfer : `numpy.ndarray`
transfer function to start from, must have at least ten samples
ncorner : `int`, optional
number of extra samples to zero off at low frequency, default: `None`
Returns
-------
out : `numpy.ndarray`
the smoothly truncated transfer function
Notes
-----
By default, the input transfer function will have five samples tapered
off at the left and right boundaries. If `ncorner` is not `None`, then
`ncorner` extra samples will be zeroed on the left as a hard highpass
filter.
See :func:`~gwpy.signal.window.planck` for more information.
"""
nsamp = transfer.size
ncorner = ncorner if ncorner else 0
out = transfer.copy()
out[0:ncorner] = 0
out[ncorner:nsamp] *= planck(nsamp-ncorner, nleft=5, nright=5)
return out | [
"def",
"truncate_transfer",
"(",
"transfer",
",",
"ncorner",
"=",
"None",
")",
":",
"nsamp",
"=",
"transfer",
".",
"size",
"ncorner",
"=",
"ncorner",
"if",
"ncorner",
"else",
"0",
"out",
"=",
"transfer",
".",
"copy",
"(",
")",
"out",
"[",
"0",
":",
"... | Smoothly zero the edges of a frequency domain transfer function
Parameters
----------
transfer : `numpy.ndarray`
transfer function to start from, must have at least ten samples
ncorner : `int`, optional
number of extra samples to zero off at low frequency, default: `None`
Returns
-------
out : `numpy.ndarray`
the smoothly truncated transfer function
Notes
-----
By default, the input transfer function will have five samples tapered
off at the left and right boundaries. If `ncorner` is not `None`, then
`ncorner` extra samples will be zeroed on the left as a hard highpass
filter.
See :func:`~gwpy.signal.window.planck` for more information. | [
"Smoothly",
"zero",
"the",
"edges",
"of",
"a",
"frequency",
"domain",
"transfer",
"function"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/filter_design.py#L152-L182 | train | 211,365 |
def truncate_impulse(impulse, ntaps, window='hanning'):
    """Smoothly truncate a time domain impulse response

    Parameters
    ----------
    impulse : `numpy.ndarray`
        the impulse response to start from

    ntaps : `int`
        number of taps in the final filter

    window : `str`, `numpy.ndarray`, optional
        window function to truncate with, default: ``'hanning'``
        see :func:`scipy.signal.get_window` for details on acceptable formats

    Returns
    -------
    out : `numpy.ndarray`
        the smoothly truncated impulse response
    """
    half = int(ntaps / 2)
    taper = signal.get_window(window, ntaps)
    out = impulse.copy()
    size = out.size
    # taper each edge of the response with the matching half of the window
    out[:half] *= taper[half:ntaps]
    out[size - half:] *= taper[:half]
    # zero everything in between the tapered edges
    out[half:size - half] = 0
    return out
"""Smoothly truncate a time domain impulse response
Parameters
----------
impulse : `numpy.ndarray`
the impulse response to start from
ntaps : `int`
number of taps in the final filter
window : `str`, `numpy.ndarray`, optional
window function to truncate with, default: ``'hanning'``
see :func:`scipy.signal.get_window` for details on acceptable formats
Returns
-------
out : `numpy.ndarray`
the smoothly truncated impulse response
"""
out = impulse.copy()
trunc_start = int(ntaps / 2)
trunc_stop = out.size - trunc_start
window = signal.get_window(window, ntaps)
out[0:trunc_start] *= window[trunc_start:ntaps]
out[trunc_stop:out.size] *= window[0:trunc_start]
out[trunc_start:trunc_stop] = 0
return out | [
"def",
"truncate_impulse",
"(",
"impulse",
",",
"ntaps",
",",
"window",
"=",
"'hanning'",
")",
":",
"out",
"=",
"impulse",
".",
"copy",
"(",
")",
"trunc_start",
"=",
"int",
"(",
"ntaps",
"/",
"2",
")",
"trunc_stop",
"=",
"out",
".",
"size",
"-",
"tru... | Smoothly truncate a time domain impulse response
Parameters
----------
impulse : `numpy.ndarray`
the impulse response to start from
ntaps : `int`
number of taps in the final filter
window : `str`, `numpy.ndarray`, optional
window function to truncate with, default: ``'hanning'``
see :func:`scipy.signal.get_window` for details on acceptable formats
Returns
-------
out : `numpy.ndarray`
the smoothly truncated impulse response | [
"Smoothly",
"truncate",
"a",
"time",
"domain",
"impulse",
"response"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/filter_design.py#L185-L212 | train | 211,366 |
def fir_from_transfer(transfer, ntaps, window='hanning', ncorner=None):
    """Design a Type II FIR filter given an arbitrary transfer function

    Parameters
    ----------
    transfer : `numpy.ndarray`
        transfer function to start from, must have at least ten samples

    ntaps : `int`
        number of taps in the final filter, must be an even number

    window : `str`, `numpy.ndarray`, optional
        window function to truncate with, default: ``'hanning'``
        see :func:`scipy.signal.get_window` for details on acceptable formats

    ncorner : `int`, optional
        number of extra samples to zero off at low frequency, default: `None`

    Returns
    -------
    out : `numpy.ndarray`
        A time domain FIR filter of length `ntaps`

    Notes
    -----
    The final FIR filter will use `~numpy.fft.rfft` FFT normalisation.

    If `ncorner` is not `None`, then `ncorner` extra samples will be zeroed
    on the left as a hard highpass filter.

    See Also
    --------
    scipy.signal.remez
        an alternative FIR filter design using the Remez exchange algorithm
    """
    # condition the transfer function: hard highpass below ncorner,
    # then smoothly taper the band edges
    transfer = truncate_transfer(transfer, ncorner=ncorner)
    # invert to the time domain and truncate to ntaps with the window
    impulse = truncate_impulse(npfft.irfft(transfer), ntaps=ntaps,
                               window=window)
    # wrap around to make the filter causal, keeping the first ntaps samples
    return numpy.roll(impulse, int(ntaps / 2 - 1))[:ntaps]
"""Design a Type II FIR filter given an arbitrary transfer function
Parameters
----------
transfer : `numpy.ndarray`
transfer function to start from, must have at least ten samples
ntaps : `int`
number of taps in the final filter, must be an even number
window : `str`, `numpy.ndarray`, optional
window function to truncate with, default: ``'hanning'``
see :func:`scipy.signal.get_window` for details on acceptable formats
ncorner : `int`, optional
number of extra samples to zero off at low frequency, default: `None`
Returns
-------
out : `numpy.ndarray`
A time domain FIR filter of length `ntaps`
Notes
-----
The final FIR filter will use `~numpy.fft.rfft` FFT normalisation.
If `ncorner` is not `None`, then `ncorner` extra samples will be zeroed
on the left as a hard highpass filter.
See Also
--------
scipy.signal.remez
an alternative FIR filter design using the Remez exchange algorithm
"""
# truncate and highpass the transfer function
transfer = truncate_transfer(transfer, ncorner=ncorner)
# compute and truncate the impulse response
impulse = npfft.irfft(transfer)
impulse = truncate_impulse(impulse, ntaps=ntaps, window=window)
# wrap around and normalise to construct the filter
out = numpy.roll(impulse, int(ntaps/2 - 1))[0:ntaps]
return out | [
"def",
"fir_from_transfer",
"(",
"transfer",
",",
"ntaps",
",",
"window",
"=",
"'hanning'",
",",
"ncorner",
"=",
"None",
")",
":",
"# truncate and highpass the transfer function",
"transfer",
"=",
"truncate_transfer",
"(",
"transfer",
",",
"ncorner",
"=",
"ncorner",... | Design a Type II FIR filter given an arbitrary transfer function
Parameters
----------
transfer : `numpy.ndarray`
transfer function to start from, must have at least ten samples
ntaps : `int`
number of taps in the final filter, must be an even number
window : `str`, `numpy.ndarray`, optional
window function to truncate with, default: ``'hanning'``
see :func:`scipy.signal.get_window` for details on acceptable formats
ncorner : `int`, optional
number of extra samples to zero off at low frequency, default: `None`
Returns
-------
out : `numpy.ndarray`
A time domain FIR filter of length `ntaps`
Notes
-----
The final FIR filter will use `~numpy.fft.rfft` FFT normalisation.
If `ncorner` is not `None`, then `ncorner` extra samples will be zeroed
on the left as a hard highpass filter.
See Also
--------
scipy.signal.remez
an alternative FIR filter design using the Remez exchange algorithm | [
"Design",
"a",
"Type",
"II",
"FIR",
"filter",
"given",
"an",
"arbitrary",
"transfer",
"function"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/filter_design.py#L215-L257 | train | 211,367 |
def bilinear_zpk(zeros, poles, gain, fs=1.0, unit='Hz'):
    """Convert an analogue ZPK filter to digital using a bilinear transform

    Parameters
    ----------
    zeros : array-like
        list of zeros

    poles : array-like
        list of poles

    gain : `float`
        filter gain

    fs : `float`, `~astropy.units.Quantity`
        sampling rate at which to evaluate bilinear transform, default: 1.

    unit : `str`, `~astropy.units.Unit`
        unit of inputs, one or 'Hz' or 'rad/s', default: ``'Hz'``

    Returns
    -------
    zpk : `tuple`
        digital version of input zpk
    """
    # NOTE(review): dtype=float rejects complex zeros/poles — confirm that
    # callers only ever pass real-valued roots
    zeros = numpy.array(zeros, dtype=float, copy=False)
    zeros = zeros[numpy.isfinite(zeros)]  # drop zeros at infinity
    poles = numpy.array(poles, dtype=float, copy=False)

    # convert from Hz to rad/s if needed
    unit = Unit(unit)
    if unit == Unit('Hz'):
        zeros *= -2 * pi
        poles *= -2 * pi
    elif unit != Unit('rad/s'):
        raise ValueError("zpk can only be given with unit='Hz' "
                         "or 'rad/s'")

    # apply the bilinear transform to map s-plane roots into the z-plane
    fs = 2 * Quantity(fs, 'Hz').value
    zd = (1 + zeros / fs) / (1 - zeros / fs)
    pd = (1 + poles / fs) / (1 - poles / fs)
    # pad the zeros with -1 so the digital filter has equal numbers of each
    zd = numpy.concatenate((
        zd, -numpy.ones(len(pd) - len(zd)),
    ))
    kd = gain * numpy.prod(fs - zeros) / numpy.prod(fs - poles)
    return zd, pd, kd
"""Convert an analogue ZPK filter to digital using a bilinear transform
Parameters
----------
zeros : array-like
list of zeros
poles : array-like
list of poles
gain : `float`
filter gain
fs : `float`, `~astropy.units.Quantity`
sampling rate at which to evaluate bilinear transform, default: 1.
unit : `str`, `~astropy.units.Unit`
unit of inputs, one or 'Hz' or 'rad/s', default: ``'Hz'``
Returns
-------
zpk : `tuple`
digital version of input zpk
"""
zeros = numpy.array(zeros, dtype=float, copy=False)
zeros = zeros[numpy.isfinite(zeros)]
poles = numpy.array(poles, dtype=float, copy=False)
gain = gain
# convert from Hz to rad/s if needed
unit = Unit(unit)
if unit == Unit('Hz'):
zeros *= -2 * pi
poles *= -2 * pi
elif unit != Unit('rad/s'):
raise ValueError("zpk can only be given with unit='Hz' "
"or 'rad/s'")
# convert to Z-domain via bilinear transform
fs = 2 * Quantity(fs, 'Hz').value
dpoles = (1 + poles/fs) / (1 - poles/fs)
dzeros = (1 + zeros/fs) / (1 - zeros/fs)
dzeros = numpy.concatenate((
dzeros, -numpy.ones(len(dpoles) - len(dzeros)),
))
dgain = gain * numpy.prod(fs - zeros)/numpy.prod(fs - poles)
return dzeros, dpoles, dgain | [
"def",
"bilinear_zpk",
"(",
"zeros",
",",
"poles",
",",
"gain",
",",
"fs",
"=",
"1.0",
",",
"unit",
"=",
"'Hz'",
")",
":",
"zeros",
"=",
"numpy",
".",
"array",
"(",
"zeros",
",",
"dtype",
"=",
"float",
",",
"copy",
"=",
"False",
")",
"zeros",
"="... | Convert an analogue ZPK filter to digital using a bilinear transform
Parameters
----------
zeros : array-like
list of zeros
poles : array-like
list of poles
gain : `float`
filter gain
fs : `float`, `~astropy.units.Quantity`
sampling rate at which to evaluate bilinear transform, default: 1.
unit : `str`, `~astropy.units.Unit`
unit of inputs, one or 'Hz' or 'rad/s', default: ``'Hz'``
Returns
-------
zpk : `tuple`
digital version of input zpk | [
"Convert",
"an",
"analogue",
"ZPK",
"filter",
"to",
"digital",
"using",
"a",
"bilinear",
"transform"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/filter_design.py#L260-L307 | train | 211,368 |
def parse_filter(args, analog=False, sample_rate=None):
    """Parse arbitrary input args into a TF or ZPK filter definition

    Parameters
    ----------
    args : `tuple`, `~scipy.signal.lti`
        filter definition, normally just captured positional ``*args``
        from a function call

    analog : `bool`, optional
        `True` if filter definition has analogue coefficients

    sample_rate : `float`, optional
        sampling frequency at which to convert analogue filter to digital
        via bilinear transform, required if ``analog=True``

    Returns
    -------
    ftype : `str`
        either ``'ba'`` or ``'zpk'``

    filt : `tuple`
        the filter components for the returned `ftype`, either a 2-tuple
        for with transfer function components, or a 3-tuple for ZPK
    """
    if analog and not sample_rate:
        raise ValueError("Must give sample_rate frequency to convert "
                         "analog filter to digital")

    # unwrap a packed definition, i.e. ((z, p, k),) or (lti,)
    if isinstance(args, tuple) and len(args) == 1:
        args, = args

    # FIR filter: a single 1-D array of taps
    if isinstance(args, numpy.ndarray) and args.ndim == 1:
        taps = args
        if analog:
            return 'ba', signal.bilinear(taps, [1.])
        return 'ba', (taps, [1.])

    # IIR filter: lti object, SOS matrix, or (b, a) / (z, p, k) tuple
    if isinstance(args, LinearTimeInvariant):
        lti = args
    elif (isinstance(args, numpy.ndarray) and
          args.ndim == 2 and args.shape[1] == 6):
        lti = signal.lti(*signal.sos2zpk(args))
    else:
        lti = signal.lti(*args)

    # normalise to zpk representation
    try:
        lti = lti.to_zpk()
    except AttributeError:  # scipy < 0.18, doesn't matter
        pass

    # digitise analogue coefficients if requested
    if analog:
        return 'zpk', bilinear_zpk(lti.zeros, lti.poles, lti.gain,
                                   fs=sample_rate)
    return 'zpk', (lti.zeros, lti.poles, lti.gain)
"""Parse arbitrary input args into a TF or ZPK filter definition
Parameters
----------
args : `tuple`, `~scipy.signal.lti`
filter definition, normally just captured positional ``*args``
from a function call
analog : `bool`, optional
`True` if filter definition has analogue coefficients
sample_rate : `float`, optional
sampling frequency at which to convert analogue filter to digital
via bilinear transform, required if ``analog=True``
Returns
-------
ftype : `str`
either ``'ba'`` or ``'zpk'``
filt : `tuple`
the filter components for the returned `ftype`, either a 2-tuple
for with transfer function components, or a 3-tuple for ZPK
"""
if analog and not sample_rate:
raise ValueError("Must give sample_rate frequency to convert "
"analog filter to digital")
# unpack filter
if isinstance(args, tuple) and len(args) == 1:
# either packed defintion ((z, p, k)) or simple definition (lti,)
args = args[0]
# parse FIR filter
if isinstance(args, numpy.ndarray) and args.ndim == 1: # fir
b, a = args, [1.]
if analog:
return 'ba', signal.bilinear(b, a)
return 'ba', (b, a)
# parse IIR filter
if isinstance(args, LinearTimeInvariant):
lti = args
elif (isinstance(args, numpy.ndarray) and
args.ndim == 2 and args.shape[1] == 6):
lti = signal.lti(*signal.sos2zpk(args))
else:
lti = signal.lti(*args)
# convert to zpk format
try:
lti = lti.to_zpk()
except AttributeError: # scipy < 0.18, doesn't matter
pass
# convert to digital components
if analog:
return 'zpk', bilinear_zpk(lti.zeros, lti.poles, lti.gain,
fs=sample_rate)
# return zpk
return 'zpk', (lti.zeros, lti.poles, lti.gain) | [
"def",
"parse_filter",
"(",
"args",
",",
"analog",
"=",
"False",
",",
"sample_rate",
"=",
"None",
")",
":",
"if",
"analog",
"and",
"not",
"sample_rate",
":",
"raise",
"ValueError",
"(",
"\"Must give sample_rate frequency to convert \"",
"\"analog filter to digital\"",... | Parse arbitrary input args into a TF or ZPK filter definition
Parameters
----------
args : `tuple`, `~scipy.signal.lti`
filter definition, normally just captured positional ``*args``
from a function call
analog : `bool`, optional
`True` if filter definition has analogue coefficients
sample_rate : `float`, optional
sampling frequency at which to convert analogue filter to digital
via bilinear transform, required if ``analog=True``
Returns
-------
ftype : `str`
either ``'ba'`` or ``'zpk'``
filt : `tuple`
the filter components for the returned `ftype`, either a 2-tuple
for with transfer function components, or a 3-tuple for ZPK | [
"Parse",
"arbitrary",
"input",
"args",
"into",
"a",
"TF",
"or",
"ZPK",
"filter",
"definition"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/filter_design.py#L310-L370 | train | 211,369 |
def lowpass(frequency, sample_rate, fstop=None, gpass=2, gstop=30, type='iir',
            **kwargs):
    """Design a low-pass filter for the given cutoff frequency

    Parameters
    ----------
    frequency : `float`
        corner frequency of low-pass filter (Hertz)

    sample_rate : `float`
        sampling rate of target data (Hertz)

    fstop : `float`, optional
        edge-frequency of stop-band (Hertz)

    gpass : `float`, optional, default: 2
        the maximum loss in the passband (dB)

    gstop : `float`, optional, default: 30
        the minimum attenuation in the stopband (dB)

    type : `str`, optional, default: ``'iir'``
        the filter type, either ``'iir'`` or ``'fir'``

    **kwargs
        other keyword arguments are passed directly to
        :func:`~scipy.signal.iirdesign` or :func:`~scipy.signal.firwin`

    Returns
    -------
    filter
        the formatted filter. the output format for an IIR filter depends
        on the input arguments, default is a tuple of `(zeros, poles, gain)`

    Notes
    -----
    By default a digital filter is returned, meaning the zeros and poles
    are given in the Z-domain in units of radians/sample.

    Examples
    --------
    To create a low-pass filter at 1000 Hz for 4096 Hz-sampled data:

    >>> from gwpy.signal.filter_design import lowpass
    >>> lp = lowpass(1000, 4096)

    To view the filter, you can use the `~gwpy.plot.BodePlot`:

    >>> from gwpy.plot import BodePlot
    >>> plot = BodePlot(lp, sample_rate=4096)
    >>> plot.show()
    """
    sample_rate = _as_float(sample_rate)
    frequency = _as_float(frequency)
    # default stop-band edge: 1.5x the corner frequency, capped at Nyquist
    if fstop is None:
        fstop = min(frequency * 1.5, sample_rate / 2.)
    # dispatch to the appropriate designer for the requested filter type
    if type == 'iir':
        return _design_iir(frequency, fstop, sample_rate, gpass, gstop,
                           **kwargs)
    return _design_fir(frequency, fstop, sample_rate, gpass, gstop, **kwargs)
**kwargs):
"""Design a low-pass filter for the given cutoff frequency
Parameters
----------
frequency : `float`
corner frequency of low-pass filter (Hertz)
sample_rate : `float`
sampling rate of target data (Hertz)
fstop : `float`, optional
edge-frequency of stop-band (Hertz)
gpass : `float`, optional, default: 2
the maximum loss in the passband (dB)
gstop : `float`, optional, default: 30
the minimum attenuation in the stopband (dB)
type : `str`, optional, default: ``'iir'``
the filter type, either ``'iir'`` or ``'fir'``
**kwargs
other keyword arguments are passed directly to
:func:`~scipy.signal.iirdesign` or :func:`~scipy.signal.firwin`
Returns
-------
filter
the formatted filter. the output format for an IIR filter depends
on the input arguments, default is a tuple of `(zeros, poles, gain)`
Notes
-----
By default a digital filter is returned, meaning the zeros and poles
are given in the Z-domain in units of radians/sample.
Examples
--------
To create a low-pass filter at 1000 Hz for 4096 Hz-sampled data:
>>> from gwpy.signal.filter_design import lowpass
>>> lp = lowpass(1000, 4096)
To view the filter, you can use the `~gwpy.plot.BodePlot`:
>>> from gwpy.plot import BodePlot
>>> plot = BodePlot(lp, sample_rate=4096)
>>> plot.show()
"""
sample_rate = _as_float(sample_rate)
frequency = _as_float(frequency)
if fstop is None:
fstop = min(frequency * 1.5, sample_rate/2.)
if type == 'iir':
return _design_iir(frequency, fstop, sample_rate, gpass, gstop,
**kwargs)
return _design_fir(frequency, fstop, sample_rate, gpass, gstop, **kwargs) | [
"def",
"lowpass",
"(",
"frequency",
",",
"sample_rate",
",",
"fstop",
"=",
"None",
",",
"gpass",
"=",
"2",
",",
"gstop",
"=",
"30",
",",
"type",
"=",
"'iir'",
",",
"*",
"*",
"kwargs",
")",
":",
"sample_rate",
"=",
"_as_float",
"(",
"sample_rate",
")"... | Design a low-pass filter for the given cutoff frequency
Parameters
----------
frequency : `float`
corner frequency of low-pass filter (Hertz)
sample_rate : `float`
sampling rate of target data (Hertz)
fstop : `float`, optional
edge-frequency of stop-band (Hertz)
gpass : `float`, optional, default: 2
the maximum loss in the passband (dB)
gstop : `float`, optional, default: 30
the minimum attenuation in the stopband (dB)
type : `str`, optional, default: ``'iir'``
the filter type, either ``'iir'`` or ``'fir'``
**kwargs
other keyword arguments are passed directly to
:func:`~scipy.signal.iirdesign` or :func:`~scipy.signal.firwin`
Returns
-------
filter
the formatted filter. the output format for an IIR filter depends
on the input arguments, default is a tuple of `(zeros, poles, gain)`
Notes
-----
By default a digital filter is returned, meaning the zeros and poles
are given in the Z-domain in units of radians/sample.
Examples
--------
To create a low-pass filter at 1000 Hz for 4096 Hz-sampled data:
>>> from gwpy.signal.filter_design import lowpass
>>> lp = lowpass(1000, 4096)
To view the filter, you can use the `~gwpy.plot.BodePlot`:
>>> from gwpy.plot import BodePlot
>>> plot = BodePlot(lp, sample_rate=4096)
>>> plot.show() | [
"Design",
"a",
"low",
"-",
"pass",
"filter",
"for",
"the",
"given",
"cutoff",
"frequency"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/filter_design.py#L375-L434 | train | 211,370 |
gwpy/gwpy | gwpy/signal/filter_design.py | highpass | def highpass(frequency, sample_rate, fstop=None, gpass=2, gstop=30, type='iir',
**kwargs):
"""Design a high-pass filter for the given cutoff frequency
Parameters
----------
frequency : `float`
corner frequency of high-pass filter
sample_rate : `float`
sampling rate of target data
fstop : `float`, optional
edge-frequency of stop-band
gpass : `float`, optional, default: 2
the maximum loss in the passband (dB)
gstop : `float`, optional, default: 30
the minimum attenuation in the stopband (dB)
type : `str`, optional, default: ``'iir'``
the filter type, either ``'iir'`` or ``'fir'``
**kwargs
other keyword arguments are passed directly to
:func:`~scipy.signal.iirdesign` or :func:`~scipy.signal.firwin`
Returns
-------
filter
the formatted filter. the output format for an IIR filter depends
on the input arguments, default is a tuple of `(zeros, poles, gain)`
Notes
-----
By default a digital filter is returned, meaning the zeros and poles
are given in the Z-domain in units of radians/sample.
Examples
--------
To create a high-pass filter at 100 Hz for 4096 Hz-sampled data:
>>> from gwpy.signal.filter_design import highpass
>>> hp = highpass(100, 4096)
To view the filter, you can use the `~gwpy.plot.BodePlot`:
>>> from gwpy.plot import BodePlot
>>> plot = BodePlot(hp, sample_rate=4096)
>>> plot.show()
"""
sample_rate = _as_float(sample_rate)
frequency = _as_float(frequency)
if fstop is None:
fstop = frequency * 2/3.
if type == 'iir':
return _design_iir(frequency, fstop, sample_rate, gpass, gstop,
**kwargs)
return _design_fir(frequency, fstop, sample_rate, gpass, gstop,
**kwargs) | python | def highpass(frequency, sample_rate, fstop=None, gpass=2, gstop=30, type='iir',
**kwargs):
"""Design a high-pass filter for the given cutoff frequency
Parameters
----------
frequency : `float`
corner frequency of high-pass filter
sample_rate : `float`
sampling rate of target data
fstop : `float`, optional
edge-frequency of stop-band
gpass : `float`, optional, default: 2
the maximum loss in the passband (dB)
gstop : `float`, optional, default: 30
the minimum attenuation in the stopband (dB)
type : `str`, optional, default: ``'iir'``
the filter type, either ``'iir'`` or ``'fir'``
**kwargs
other keyword arguments are passed directly to
:func:`~scipy.signal.iirdesign` or :func:`~scipy.signal.firwin`
Returns
-------
filter
the formatted filter. the output format for an IIR filter depends
on the input arguments, default is a tuple of `(zeros, poles, gain)`
Notes
-----
By default a digital filter is returned, meaning the zeros and poles
are given in the Z-domain in units of radians/sample.
Examples
--------
To create a high-pass filter at 100 Hz for 4096 Hz-sampled data:
>>> from gwpy.signal.filter_design import highpass
>>> hp = highpass(100, 4096)
To view the filter, you can use the `~gwpy.plot.BodePlot`:
>>> from gwpy.plot import BodePlot
>>> plot = BodePlot(hp, sample_rate=4096)
>>> plot.show()
"""
sample_rate = _as_float(sample_rate)
frequency = _as_float(frequency)
if fstop is None:
fstop = frequency * 2/3.
if type == 'iir':
return _design_iir(frequency, fstop, sample_rate, gpass, gstop,
**kwargs)
return _design_fir(frequency, fstop, sample_rate, gpass, gstop,
**kwargs) | [
"def",
"highpass",
"(",
"frequency",
",",
"sample_rate",
",",
"fstop",
"=",
"None",
",",
"gpass",
"=",
"2",
",",
"gstop",
"=",
"30",
",",
"type",
"=",
"'iir'",
",",
"*",
"*",
"kwargs",
")",
":",
"sample_rate",
"=",
"_as_float",
"(",
"sample_rate",
")... | Design a high-pass filter for the given cutoff frequency
Parameters
----------
frequency : `float`
corner frequency of high-pass filter
sample_rate : `float`
sampling rate of target data
fstop : `float`, optional
edge-frequency of stop-band
gpass : `float`, optional, default: 2
the maximum loss in the passband (dB)
gstop : `float`, optional, default: 30
the minimum attenuation in the stopband (dB)
type : `str`, optional, default: ``'iir'``
the filter type, either ``'iir'`` or ``'fir'``
**kwargs
other keyword arguments are passed directly to
:func:`~scipy.signal.iirdesign` or :func:`~scipy.signal.firwin`
Returns
-------
filter
the formatted filter. the output format for an IIR filter depends
on the input arguments, default is a tuple of `(zeros, poles, gain)`
Notes
-----
By default a digital filter is returned, meaning the zeros and poles
are given in the Z-domain in units of radians/sample.
Examples
--------
To create a high-pass filter at 100 Hz for 4096 Hz-sampled data:
>>> from gwpy.signal.filter_design import highpass
>>> hp = highpass(100, 4096)
To view the filter, you can use the `~gwpy.plot.BodePlot`:
>>> from gwpy.plot import BodePlot
>>> plot = BodePlot(hp, sample_rate=4096)
>>> plot.show() | [
"Design",
"a",
"high",
"-",
"pass",
"filter",
"for",
"the",
"given",
"cutoff",
"frequency"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/filter_design.py#L437-L497 | train | 211,371 |
gwpy/gwpy | gwpy/signal/filter_design.py | bandpass | def bandpass(flow, fhigh, sample_rate, fstop=None, gpass=2, gstop=30,
type='iir', **kwargs):
"""Design a band-pass filter for the given cutoff frequencies
Parameters
----------
flow : `float`
lower corner frequency of pass band
fhigh : `float`
upper corner frequency of pass band
sample_rate : `float`
sampling rate of target data
fstop : `tuple` of `float`, optional
`(low, high)` edge-frequencies of stop band
gpass : `float`, optional, default: 2
the maximum loss in the passband (dB)
gstop : `float`, optional, default: 30
the minimum attenuation in the stopband (dB)
type : `str`, optional, default: ``'iir'``
the filter type, either ``'iir'`` or ``'fir'``
**kwargs
other keyword arguments are passed directly to
:func:`~scipy.signal.iirdesign` or :func:`~scipy.signal.firwin`
Returns
-------
filter
the formatted filter. the output format for an IIR filter depends
on the input arguments, default is a tuple of `(zeros, poles, gain)`
Notes
-----
By default a digital filter is returned, meaning the zeros and poles
are given in the Z-domain in units of radians/sample.
Examples
--------
To create a band-pass filter for 100-1000 Hz for 4096 Hz-sampled data:
>>> from gwpy.signal.filter_design import bandpass
>>> bp = bandpass(100, 1000, 4096)
To view the filter, you can use the `~gwpy.plot.BodePlot`:
>>> from gwpy.plot import BodePlot
>>> plot = BodePlot(bp, sample_rate=4096)
>>> plot.show()
"""
sample_rate = _as_float(sample_rate)
flow = _as_float(flow)
fhigh = _as_float(fhigh)
if fstop is None:
fstop = (flow * 2/3.,
min(fhigh * 1.5, sample_rate/2.))
fstop = (_as_float(fstop[0]), _as_float(fstop[1]))
if type == 'iir':
return _design_iir((flow, fhigh), fstop, sample_rate, gpass, gstop,
**kwargs)
return _design_fir((flow, fhigh), fstop, sample_rate, gpass, gstop,
pass_zero=False, **kwargs) | python | def bandpass(flow, fhigh, sample_rate, fstop=None, gpass=2, gstop=30,
type='iir', **kwargs):
"""Design a band-pass filter for the given cutoff frequencies
Parameters
----------
flow : `float`
lower corner frequency of pass band
fhigh : `float`
upper corner frequency of pass band
sample_rate : `float`
sampling rate of target data
fstop : `tuple` of `float`, optional
`(low, high)` edge-frequencies of stop band
gpass : `float`, optional, default: 2
the maximum loss in the passband (dB)
gstop : `float`, optional, default: 30
the minimum attenuation in the stopband (dB)
type : `str`, optional, default: ``'iir'``
the filter type, either ``'iir'`` or ``'fir'``
**kwargs
other keyword arguments are passed directly to
:func:`~scipy.signal.iirdesign` or :func:`~scipy.signal.firwin`
Returns
-------
filter
the formatted filter. the output format for an IIR filter depends
on the input arguments, default is a tuple of `(zeros, poles, gain)`
Notes
-----
By default a digital filter is returned, meaning the zeros and poles
are given in the Z-domain in units of radians/sample.
Examples
--------
To create a band-pass filter for 100-1000 Hz for 4096 Hz-sampled data:
>>> from gwpy.signal.filter_design import bandpass
>>> bp = bandpass(100, 1000, 4096)
To view the filter, you can use the `~gwpy.plot.BodePlot`:
>>> from gwpy.plot import BodePlot
>>> plot = BodePlot(bp, sample_rate=4096)
>>> plot.show()
"""
sample_rate = _as_float(sample_rate)
flow = _as_float(flow)
fhigh = _as_float(fhigh)
if fstop is None:
fstop = (flow * 2/3.,
min(fhigh * 1.5, sample_rate/2.))
fstop = (_as_float(fstop[0]), _as_float(fstop[1]))
if type == 'iir':
return _design_iir((flow, fhigh), fstop, sample_rate, gpass, gstop,
**kwargs)
return _design_fir((flow, fhigh), fstop, sample_rate, gpass, gstop,
pass_zero=False, **kwargs) | [
"def",
"bandpass",
"(",
"flow",
",",
"fhigh",
",",
"sample_rate",
",",
"fstop",
"=",
"None",
",",
"gpass",
"=",
"2",
",",
"gstop",
"=",
"30",
",",
"type",
"=",
"'iir'",
",",
"*",
"*",
"kwargs",
")",
":",
"sample_rate",
"=",
"_as_float",
"(",
"sampl... | Design a band-pass filter for the given cutoff frequencies
Parameters
----------
flow : `float`
lower corner frequency of pass band
fhigh : `float`
upper corner frequency of pass band
sample_rate : `float`
sampling rate of target data
fstop : `tuple` of `float`, optional
`(low, high)` edge-frequencies of stop band
gpass : `float`, optional, default: 2
the maximum loss in the passband (dB)
gstop : `float`, optional, default: 30
the minimum attenuation in the stopband (dB)
type : `str`, optional, default: ``'iir'``
the filter type, either ``'iir'`` or ``'fir'``
**kwargs
other keyword arguments are passed directly to
:func:`~scipy.signal.iirdesign` or :func:`~scipy.signal.firwin`
Returns
-------
filter
the formatted filter. the output format for an IIR filter depends
on the input arguments, default is a tuple of `(zeros, poles, gain)`
Notes
-----
By default a digital filter is returned, meaning the zeros and poles
are given in the Z-domain in units of radians/sample.
Examples
--------
To create a band-pass filter for 100-1000 Hz for 4096 Hz-sampled data:
>>> from gwpy.signal.filter_design import bandpass
>>> bp = bandpass(100, 1000, 4096)
To view the filter, you can use the `~gwpy.plot.BodePlot`:
>>> from gwpy.plot import BodePlot
>>> plot = BodePlot(bp, sample_rate=4096)
>>> plot.show() | [
"Design",
"a",
"band",
"-",
"pass",
"filter",
"for",
"the",
"given",
"cutoff",
"frequencies"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/filter_design.py#L500-L566 | train | 211,372 |
gwpy/gwpy | gwpy/signal/filter_design.py | notch | def notch(frequency, sample_rate, type='iir', **kwargs):
"""Design a ZPK notch filter for the given frequency and sampling rate
Parameters
----------
frequency : `float`, `~astropy.units.Quantity`
frequency (default in Hertz) at which to apply the notch
sample_rate : `float`, `~astropy.units.Quantity`
number of samples per second for `TimeSeries` to which this notch
filter will be applied
type : `str`, optional, default: 'iir'
type of filter to apply, currently only 'iir' is supported
**kwargs
other keyword arguments to pass to `scipy.signal.iirdesign`
Returns
-------
zpk : `tuple` of `complex` or `float`
the filter components in digital zero-pole-gain format
See Also
--------
scipy.signal.iirdesign
for details on the IIR filter design method
Notes
-----
By default a digital filter is returned, meaning the zeros and poles
are given in the Z-domain in units of radians/sample.
Examples
--------
To create a low-pass filter at 1000 Hz for 4096 Hz-sampled data:
>>> from gwpy.signal.filter_design import notch
>>> n = notch(100, 4096)
To view the filter, you can use the `~gwpy.plot.BodePlot`:
>>> from gwpy.plot import BodePlot
>>> plot = BodePlot(n, sample_rate=4096)
>>> plot.show()
"""
frequency = Quantity(frequency, 'Hz').value
sample_rate = Quantity(sample_rate, 'Hz').value
nyq = 0.5 * sample_rate
df = 1.0 # pylint: disable=invalid-name
df2 = 0.1
low1 = (frequency - df)/nyq
high1 = (frequency + df)/nyq
low2 = (frequency - df2)/nyq
high2 = (frequency + df2)/nyq
if type == 'iir':
kwargs.setdefault('gpass', 1)
kwargs.setdefault('gstop', 10)
kwargs.setdefault('ftype', 'ellip')
return signal.iirdesign([low1, high1], [low2, high2], output='zpk',
**kwargs)
else:
raise NotImplementedError("Generating %r notch filters has not been "
"implemented yet" % type) | python | def notch(frequency, sample_rate, type='iir', **kwargs):
"""Design a ZPK notch filter for the given frequency and sampling rate
Parameters
----------
frequency : `float`, `~astropy.units.Quantity`
frequency (default in Hertz) at which to apply the notch
sample_rate : `float`, `~astropy.units.Quantity`
number of samples per second for `TimeSeries` to which this notch
filter will be applied
type : `str`, optional, default: 'iir'
type of filter to apply, currently only 'iir' is supported
**kwargs
other keyword arguments to pass to `scipy.signal.iirdesign`
Returns
-------
zpk : `tuple` of `complex` or `float`
the filter components in digital zero-pole-gain format
See Also
--------
scipy.signal.iirdesign
for details on the IIR filter design method
Notes
-----
By default a digital filter is returned, meaning the zeros and poles
are given in the Z-domain in units of radians/sample.
Examples
--------
To create a low-pass filter at 1000 Hz for 4096 Hz-sampled data:
>>> from gwpy.signal.filter_design import notch
>>> n = notch(100, 4096)
To view the filter, you can use the `~gwpy.plot.BodePlot`:
>>> from gwpy.plot import BodePlot
>>> plot = BodePlot(n, sample_rate=4096)
>>> plot.show()
"""
frequency = Quantity(frequency, 'Hz').value
sample_rate = Quantity(sample_rate, 'Hz').value
nyq = 0.5 * sample_rate
df = 1.0 # pylint: disable=invalid-name
df2 = 0.1
low1 = (frequency - df)/nyq
high1 = (frequency + df)/nyq
low2 = (frequency - df2)/nyq
high2 = (frequency + df2)/nyq
if type == 'iir':
kwargs.setdefault('gpass', 1)
kwargs.setdefault('gstop', 10)
kwargs.setdefault('ftype', 'ellip')
return signal.iirdesign([low1, high1], [low2, high2], output='zpk',
**kwargs)
else:
raise NotImplementedError("Generating %r notch filters has not been "
"implemented yet" % type) | [
"def",
"notch",
"(",
"frequency",
",",
"sample_rate",
",",
"type",
"=",
"'iir'",
",",
"*",
"*",
"kwargs",
")",
":",
"frequency",
"=",
"Quantity",
"(",
"frequency",
",",
"'Hz'",
")",
".",
"value",
"sample_rate",
"=",
"Quantity",
"(",
"sample_rate",
",",
... | Design a ZPK notch filter for the given frequency and sampling rate
Parameters
----------
frequency : `float`, `~astropy.units.Quantity`
frequency (default in Hertz) at which to apply the notch
sample_rate : `float`, `~astropy.units.Quantity`
number of samples per second for `TimeSeries` to which this notch
filter will be applied
type : `str`, optional, default: 'iir'
type of filter to apply, currently only 'iir' is supported
**kwargs
other keyword arguments to pass to `scipy.signal.iirdesign`
Returns
-------
zpk : `tuple` of `complex` or `float`
the filter components in digital zero-pole-gain format
See Also
--------
scipy.signal.iirdesign
for details on the IIR filter design method
Notes
-----
By default a digital filter is returned, meaning the zeros and poles
are given in the Z-domain in units of radians/sample.
Examples
--------
To create a low-pass filter at 1000 Hz for 4096 Hz-sampled data:
>>> from gwpy.signal.filter_design import notch
>>> n = notch(100, 4096)
To view the filter, you can use the `~gwpy.plot.BodePlot`:
>>> from gwpy.plot import BodePlot
>>> plot = BodePlot(n, sample_rate=4096)
>>> plot.show() | [
"Design",
"a",
"ZPK",
"notch",
"filter",
"for",
"the",
"given",
"frequency",
"and",
"sampling",
"rate"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/filter_design.py#L569-L629 | train | 211,373 |
gwpy/gwpy | gwpy/signal/spectral/_scipy.py | welch | def welch(timeseries, segmentlength, noverlap=None, **kwargs):
"""Calculate a PSD of this `TimeSeries` using Welch's method.
"""
# calculate PSD
freqs, psd_ = scipy.signal.welch(
timeseries.value,
noverlap=noverlap,
fs=timeseries.sample_rate.decompose().value,
nperseg=segmentlength,
**kwargs
)
# generate FrequencySeries and return
unit = scale_timeseries_unit(
timeseries.unit,
kwargs.get('scaling', 'density'),
)
return FrequencySeries(
psd_,
unit=unit,
frequencies=freqs,
name=timeseries.name,
epoch=timeseries.epoch,
channel=timeseries.channel,
) | python | def welch(timeseries, segmentlength, noverlap=None, **kwargs):
"""Calculate a PSD of this `TimeSeries` using Welch's method.
"""
# calculate PSD
freqs, psd_ = scipy.signal.welch(
timeseries.value,
noverlap=noverlap,
fs=timeseries.sample_rate.decompose().value,
nperseg=segmentlength,
**kwargs
)
# generate FrequencySeries and return
unit = scale_timeseries_unit(
timeseries.unit,
kwargs.get('scaling', 'density'),
)
return FrequencySeries(
psd_,
unit=unit,
frequencies=freqs,
name=timeseries.name,
epoch=timeseries.epoch,
channel=timeseries.channel,
) | [
"def",
"welch",
"(",
"timeseries",
",",
"segmentlength",
",",
"noverlap",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# calculate PSD",
"freqs",
",",
"psd_",
"=",
"scipy",
".",
"signal",
".",
"welch",
"(",
"timeseries",
".",
"value",
",",
"noverlap"... | Calculate a PSD of this `TimeSeries` using Welch's method. | [
"Calculate",
"a",
"PSD",
"of",
"this",
"TimeSeries",
"using",
"Welch",
"s",
"method",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_scipy.py#L41-L64 | train | 211,374 |
gwpy/gwpy | gwpy/signal/spectral/_scipy.py | bartlett | def bartlett(timeseries, segmentlength, **kwargs):
"""Calculate a PSD using Bartlett's method
"""
kwargs.pop('noverlap', None)
return welch(timeseries, segmentlength, noverlap=0, **kwargs) | python | def bartlett(timeseries, segmentlength, **kwargs):
"""Calculate a PSD using Bartlett's method
"""
kwargs.pop('noverlap', None)
return welch(timeseries, segmentlength, noverlap=0, **kwargs) | [
"def",
"bartlett",
"(",
"timeseries",
",",
"segmentlength",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"pop",
"(",
"'noverlap'",
",",
"None",
")",
"return",
"welch",
"(",
"timeseries",
",",
"segmentlength",
",",
"noverlap",
"=",
"0",
",",
"*",
"... | Calculate a PSD using Bartlett's method | [
"Calculate",
"a",
"PSD",
"using",
"Bartlett",
"s",
"method"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_scipy.py#L67-L71 | train | 211,375 |
gwpy/gwpy | gwpy/signal/spectral/_scipy.py | median | def median(timeseries, segmentlength, **kwargs):
"""Calculate a PSD using Welch's method with a median average
"""
if scipy_version <= '1.1.9999':
raise ValueError(
"median average PSD estimation requires scipy >= 1.2.0",
)
kwargs.setdefault('average', 'median')
return welch(timeseries, segmentlength, **kwargs) | python | def median(timeseries, segmentlength, **kwargs):
"""Calculate a PSD using Welch's method with a median average
"""
if scipy_version <= '1.1.9999':
raise ValueError(
"median average PSD estimation requires scipy >= 1.2.0",
)
kwargs.setdefault('average', 'median')
return welch(timeseries, segmentlength, **kwargs) | [
"def",
"median",
"(",
"timeseries",
",",
"segmentlength",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"scipy_version",
"<=",
"'1.1.9999'",
":",
"raise",
"ValueError",
"(",
"\"median average PSD estimation requires scipy >= 1.2.0\"",
",",
")",
"kwargs",
".",
"setdefault... | Calculate a PSD using Welch's method with a median average | [
"Calculate",
"a",
"PSD",
"using",
"Welch",
"s",
"method",
"with",
"a",
"median",
"average"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_scipy.py#L74-L82 | train | 211,376 |
gwpy/gwpy | gwpy/signal/spectral/_scipy.py | rayleigh | def rayleigh(timeseries, segmentlength, noverlap=0):
"""Calculate a Rayleigh statistic spectrum
Parameters
----------
timeseries : `~gwpy.timeseries.TimeSeries`
input `TimeSeries` data.
segmentlength : `int`
number of samples in single average.
noverlap : `int`
number of samples to overlap between segments, defaults to 50%.
Returns
-------
spectrum : `~gwpy.frequencyseries.FrequencySeries`
average power `FrequencySeries`
"""
stepsize = segmentlength - noverlap
if noverlap:
numsegs = 1 + int((timeseries.size - segmentlength) / float(noverlap))
else:
numsegs = int(timeseries.size // segmentlength)
tmpdata = numpy.ndarray((numsegs, int(segmentlength//2 + 1)))
for i in range(numsegs):
tmpdata[i, :] = welch(
timeseries[i*stepsize:i*stepsize+segmentlength],
segmentlength)
std = tmpdata.std(axis=0)
mean = tmpdata.mean(axis=0)
return FrequencySeries(std/mean, unit='', copy=False, f0=0,
epoch=timeseries.epoch,
df=timeseries.sample_rate.value/segmentlength,
channel=timeseries.channel,
name='Rayleigh spectrum of %s' % timeseries.name) | python | def rayleigh(timeseries, segmentlength, noverlap=0):
"""Calculate a Rayleigh statistic spectrum
Parameters
----------
timeseries : `~gwpy.timeseries.TimeSeries`
input `TimeSeries` data.
segmentlength : `int`
number of samples in single average.
noverlap : `int`
number of samples to overlap between segments, defaults to 50%.
Returns
-------
spectrum : `~gwpy.frequencyseries.FrequencySeries`
average power `FrequencySeries`
"""
stepsize = segmentlength - noverlap
if noverlap:
numsegs = 1 + int((timeseries.size - segmentlength) / float(noverlap))
else:
numsegs = int(timeseries.size // segmentlength)
tmpdata = numpy.ndarray((numsegs, int(segmentlength//2 + 1)))
for i in range(numsegs):
tmpdata[i, :] = welch(
timeseries[i*stepsize:i*stepsize+segmentlength],
segmentlength)
std = tmpdata.std(axis=0)
mean = tmpdata.mean(axis=0)
return FrequencySeries(std/mean, unit='', copy=False, f0=0,
epoch=timeseries.epoch,
df=timeseries.sample_rate.value/segmentlength,
channel=timeseries.channel,
name='Rayleigh spectrum of %s' % timeseries.name) | [
"def",
"rayleigh",
"(",
"timeseries",
",",
"segmentlength",
",",
"noverlap",
"=",
"0",
")",
":",
"stepsize",
"=",
"segmentlength",
"-",
"noverlap",
"if",
"noverlap",
":",
"numsegs",
"=",
"1",
"+",
"int",
"(",
"(",
"timeseries",
".",
"size",
"-",
"segment... | Calculate a Rayleigh statistic spectrum
Parameters
----------
timeseries : `~gwpy.timeseries.TimeSeries`
input `TimeSeries` data.
segmentlength : `int`
number of samples in single average.
noverlap : `int`
number of samples to overlap between segments, defaults to 50%.
Returns
-------
spectrum : `~gwpy.frequencyseries.FrequencySeries`
average power `FrequencySeries` | [
"Calculate",
"a",
"Rayleigh",
"statistic",
"spectrum"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_scipy.py#L95-L130 | train | 211,377 |
gwpy/gwpy | gwpy/signal/spectral/_scipy.py | csd | def csd(timeseries, other, segmentlength, noverlap=None, **kwargs):
"""Calculate the CSD of two `TimeSeries` using Welch's method
Parameters
----------
timeseries : `~gwpy.timeseries.TimeSeries`
time-series of data
other : `~gwpy.timeseries.TimeSeries`
time-series of data
segmentlength : `int`
number of samples in single average.
noverlap : `int`
number of samples to overlap between segments, defaults to 50%.
**kwargs
other keyword arguments are passed to :meth:`scipy.signal.csd`
Returns
-------
spectrum : `~gwpy.frequencyseries.FrequencySeries`
average power `FrequencySeries`
See also
--------
scipy.signal.csd
"""
# calculate CSD
try:
freqs, csd_ = scipy.signal.csd(
timeseries.value, other.value, noverlap=noverlap,
fs=timeseries.sample_rate.decompose().value,
nperseg=segmentlength, **kwargs)
except AttributeError as exc:
exc.args = ('{}, scipy>=0.16 is required'.format(str(exc)),)
raise
# generate FrequencySeries and return
unit = scale_timeseries_unit(timeseries.unit,
kwargs.get('scaling', 'density'))
return FrequencySeries(
csd_, unit=unit, frequencies=freqs,
name=str(timeseries.name)+'---'+str(other.name),
epoch=timeseries.epoch, channel=timeseries.channel) | python | def csd(timeseries, other, segmentlength, noverlap=None, **kwargs):
"""Calculate the CSD of two `TimeSeries` using Welch's method
Parameters
----------
timeseries : `~gwpy.timeseries.TimeSeries`
time-series of data
other : `~gwpy.timeseries.TimeSeries`
time-series of data
segmentlength : `int`
number of samples in single average.
noverlap : `int`
number of samples to overlap between segments, defaults to 50%.
**kwargs
other keyword arguments are passed to :meth:`scipy.signal.csd`
Returns
-------
spectrum : `~gwpy.frequencyseries.FrequencySeries`
average power `FrequencySeries`
See also
--------
scipy.signal.csd
"""
# calculate CSD
try:
freqs, csd_ = scipy.signal.csd(
timeseries.value, other.value, noverlap=noverlap,
fs=timeseries.sample_rate.decompose().value,
nperseg=segmentlength, **kwargs)
except AttributeError as exc:
exc.args = ('{}, scipy>=0.16 is required'.format(str(exc)),)
raise
# generate FrequencySeries and return
unit = scale_timeseries_unit(timeseries.unit,
kwargs.get('scaling', 'density'))
return FrequencySeries(
csd_, unit=unit, frequencies=freqs,
name=str(timeseries.name)+'---'+str(other.name),
epoch=timeseries.epoch, channel=timeseries.channel) | [
"def",
"csd",
"(",
"timeseries",
",",
"other",
",",
"segmentlength",
",",
"noverlap",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# calculate CSD",
"try",
":",
"freqs",
",",
"csd_",
"=",
"scipy",
".",
"signal",
".",
"csd",
"(",
"timeseries",
".",
... | Calculate the CSD of two `TimeSeries` using Welch's method
Parameters
----------
timeseries : `~gwpy.timeseries.TimeSeries`
time-series of data
other : `~gwpy.timeseries.TimeSeries`
time-series of data
segmentlength : `int`
number of samples in single average.
noverlap : `int`
number of samples to overlap between segments, defaults to 50%.
**kwargs
other keyword arguments are passed to :meth:`scipy.signal.csd`
Returns
-------
spectrum : `~gwpy.frequencyseries.FrequencySeries`
average power `FrequencySeries`
See also
--------
scipy.signal.csd | [
"Calculate",
"the",
"CSD",
"of",
"two",
"TimeSeries",
"using",
"Welch",
"s",
"method"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_scipy.py#L133-L178 | train | 211,378 |
gwpy/gwpy | gwpy/timeseries/core.py | TimeSeriesBase.duration | def duration(self):
"""Duration of this series in seconds
:type: `~astropy.units.Quantity` scalar
"""
return units.Quantity(self.span[1] - self.span[0], self.xunit,
dtype=float) | python | def duration(self):
"""Duration of this series in seconds
:type: `~astropy.units.Quantity` scalar
"""
return units.Quantity(self.span[1] - self.span[0], self.xunit,
dtype=float) | [
"def",
"duration",
"(",
"self",
")",
":",
"return",
"units",
".",
"Quantity",
"(",
"self",
".",
"span",
"[",
"1",
"]",
"-",
"self",
".",
"span",
"[",
"0",
"]",
",",
"self",
".",
"xunit",
",",
"dtype",
"=",
"float",
")"
] | Duration of this series in seconds
:type: `~astropy.units.Quantity` scalar | [
"Duration",
"of",
"this",
"series",
"in",
"seconds"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/core.py#L256-L262 | train | 211,379 |
gwpy/gwpy | gwpy/timeseries/core.py | TimeSeriesBase.read | def read(cls, source, *args, **kwargs):
"""Read data into a `TimeSeries`
Arguments and keywords depend on the output format, see the
online documentation for full details for each format, the parameters
below are common to most formats.
Parameters
----------
source : `str`, `list`
Source of data, any of the following:
- `str` path of single data file,
- `str` path of LAL-format cache file,
- `list` of paths.
name : `str`, `~gwpy.detector.Channel`
the name of the channel to read, or a `Channel` object.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS start time of required data, defaults to start of data found;
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS end time of required data, defaults to end of data found;
any input parseable by `~gwpy.time.to_gps` is fine
format : `str`, optional
source format identifier. If not given, the format will be
detected if possible. See below for list of acceptable
formats.
nproc : `int`, optional
number of parallel processes to use, serial process by
default.
pad : `float`, optional
value with which to fill gaps in the source data,
by default gaps will result in a `ValueError`.
Notes
-----"""
from .io.core import read as timeseries_reader
return timeseries_reader(cls, source, *args, **kwargs) | python | def read(cls, source, *args, **kwargs):
"""Read data into a `TimeSeries`
Arguments and keywords depend on the output format, see the
online documentation for full details for each format, the parameters
below are common to most formats.
Parameters
----------
source : `str`, `list`
Source of data, any of the following:
- `str` path of single data file,
- `str` path of LAL-format cache file,
- `list` of paths.
name : `str`, `~gwpy.detector.Channel`
the name of the channel to read, or a `Channel` object.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS start time of required data, defaults to start of data found;
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS end time of required data, defaults to end of data found;
any input parseable by `~gwpy.time.to_gps` is fine
format : `str`, optional
source format identifier. If not given, the format will be
detected if possible. See below for list of acceptable
formats.
nproc : `int`, optional
number of parallel processes to use, serial process by
default.
pad : `float`, optional
value with which to fill gaps in the source data,
by default gaps will result in a `ValueError`.
Notes
-----"""
from .io.core import read as timeseries_reader
return timeseries_reader(cls, source, *args, **kwargs) | [
"def",
"read",
"(",
"cls",
",",
"source",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
".",
"io",
".",
"core",
"import",
"read",
"as",
"timeseries_reader",
"return",
"timeseries_reader",
"(",
"cls",
",",
"source",
",",
"*",
"args",
",... | Read data into a `TimeSeries`
Arguments and keywords depend on the output format, see the
online documentation for full details for each format, the parameters
below are common to most formats.
Parameters
----------
source : `str`, `list`
Source of data, any of the following:
- `str` path of single data file,
- `str` path of LAL-format cache file,
- `list` of paths.
name : `str`, `~gwpy.detector.Channel`
the name of the channel to read, or a `Channel` object.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS start time of required data, defaults to start of data found;
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS end time of required data, defaults to end of data found;
any input parseable by `~gwpy.time.to_gps` is fine
format : `str`, optional
source format identifier. If not given, the format will be
detected if possible. See below for list of acceptable
formats.
nproc : `int`, optional
number of parallel processes to use, serial process by
default.
pad : `float`, optional
value with which to fill gaps in the source data,
by default gaps will result in a `ValueError`.
Notes
----- | [
"Read",
"data",
"into",
"a",
"TimeSeries"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/core.py#L267-L310 | train | 211,380 |
gwpy/gwpy | gwpy/timeseries/core.py | TimeSeriesBase.fetch | def fetch(cls, channel, start, end, host=None, port=None, verbose=False,
connection=None, verify=False, pad=None, allow_tape=None,
scaled=None, type=None, dtype=None):
"""Fetch data from NDS
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the data channel for which to query
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS end time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
host : `str`, optional
URL of NDS server to use, if blank will try any server
(in a relatively sensible order) to get the data
port : `int`, optional
port number for NDS server query, must be given with `host`
verify : `bool`, optional, default: `False`
check channels exist in database before asking for data
scaled : `bool`, optional
apply slope and bias calibration to ADC data, for non-ADC data
this option has no effect
connection : `nds2.connection`, optional
open NDS connection to use
verbose : `bool`, optional
print verbose output about NDS progress, useful for debugging;
if ``verbose`` is specified as a string, this defines the
prefix for the progress meter
type : `int`, optional
NDS2 channel type integer
dtype : `type`, `numpy.dtype`, `str`, optional
identifier for desired output data type
"""
return cls.DictClass.fetch(
[channel], start, end, host=host, port=port, verbose=verbose,
connection=connection, verify=verify, pad=pad, scaled=scaled,
allow_tape=allow_tape, type=type, dtype=dtype)[str(channel)] | python | def fetch(cls, channel, start, end, host=None, port=None, verbose=False,
connection=None, verify=False, pad=None, allow_tape=None,
scaled=None, type=None, dtype=None):
"""Fetch data from NDS
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the data channel for which to query
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS end time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
host : `str`, optional
URL of NDS server to use, if blank will try any server
(in a relatively sensible order) to get the data
port : `int`, optional
port number for NDS server query, must be given with `host`
verify : `bool`, optional, default: `False`
check channels exist in database before asking for data
scaled : `bool`, optional
apply slope and bias calibration to ADC data, for non-ADC data
this option has no effect
connection : `nds2.connection`, optional
open NDS connection to use
verbose : `bool`, optional
print verbose output about NDS progress, useful for debugging;
if ``verbose`` is specified as a string, this defines the
prefix for the progress meter
type : `int`, optional
NDS2 channel type integer
dtype : `type`, `numpy.dtype`, `str`, optional
identifier for desired output data type
"""
return cls.DictClass.fetch(
[channel], start, end, host=host, port=port, verbose=verbose,
connection=connection, verify=verify, pad=pad, scaled=scaled,
allow_tape=allow_tape, type=type, dtype=dtype)[str(channel)] | [
"def",
"fetch",
"(",
"cls",
",",
"channel",
",",
"start",
",",
"end",
",",
"host",
"=",
"None",
",",
"port",
"=",
"None",
",",
"verbose",
"=",
"False",
",",
"connection",
"=",
"None",
",",
"verify",
"=",
"False",
",",
"pad",
"=",
"None",
",",
"al... | Fetch data from NDS
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the data channel for which to query
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS end time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
host : `str`, optional
URL of NDS server to use, if blank will try any server
(in a relatively sensible order) to get the data
port : `int`, optional
port number for NDS server query, must be given with `host`
verify : `bool`, optional, default: `False`
check channels exist in database before asking for data
scaled : `bool`, optional
apply slope and bias calibration to ADC data, for non-ADC data
this option has no effect
connection : `nds2.connection`, optional
open NDS connection to use
verbose : `bool`, optional
print verbose output about NDS progress, useful for debugging;
if ``verbose`` is specified as a string, this defines the
prefix for the progress meter
type : `int`, optional
NDS2 channel type integer
dtype : `type`, `numpy.dtype`, `str`, optional
identifier for desired output data type | [
"Fetch",
"data",
"from",
"NDS"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/core.py#L330-L379 | train | 211,381 |
gwpy/gwpy | gwpy/timeseries/core.py | TimeSeriesBase.fetch_open_data | def fetch_open_data(cls, ifo, start, end, sample_rate=4096,
tag=None, version=None,
format='hdf5', host=GWOSC_DEFAULT_HOST,
verbose=False, cache=None, **kwargs):
"""Fetch open-access data from the LIGO Open Science Center
Parameters
----------
ifo : `str`
the two-character prefix of the IFO in which you are interested,
e.g. `'L1'`
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS start time of required data, defaults to start of data found;
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS end time of required data, defaults to end of data found;
any input parseable by `~gwpy.time.to_gps` is fine
sample_rate : `float`, optional,
the sample rate of desired data; most data are stored
by LOSC at 4096 Hz, however there may be event-related
data releases with a 16384 Hz rate, default: `4096`
tag : `str`, optional
file tag, e.g. ``'CLN'`` to select cleaned data, or ``'C00'``
for 'raw' calibrated data.
version : `int`, optional
version of files to download, defaults to highest discovered
version
format : `str`, optional
the data format to download and parse, default: ``'h5py'``
- ``'hdf5'``
- ``'gwf'`` - requires |LDAStools.frameCPP|_
host : `str`, optional
HTTP host name of LOSC server to access
verbose : `bool`, optional, default: `False`
print verbose output while fetching data
cache : `bool`, optional
save/read a local copy of the remote URL, default: `False`;
useful if the same remote data are to be accessed multiple times.
Set `GWPY_CACHE=1` in the environment to auto-cache.
**kwargs
any other keyword arguments are passed to the `TimeSeries.read`
method that parses the file that was downloaded
Examples
--------
>>> from gwpy.timeseries import (TimeSeries, StateVector)
>>> print(TimeSeries.fetch_open_data('H1', 1126259446, 1126259478))
TimeSeries([ 2.17704028e-19, 2.08763900e-19, 2.39681183e-19,
..., 3.55365541e-20, 6.33533516e-20,
7.58121195e-20]
unit: Unit(dimensionless),
t0: 1126259446.0 s,
dt: 0.000244140625 s,
name: Strain,
channel: None)
>>> print(StateVector.fetch_open_data('H1', 1126259446, 1126259478))
StateVector([127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127]
unit: Unit(dimensionless),
t0: 1126259446.0 s,
dt: 1.0 s,
name: Data quality,
channel: None,
bits: Bits(0: data present
1: passes cbc CAT1 test
2: passes cbc CAT2 test
3: passes cbc CAT3 test
4: passes burst CAT1 test
5: passes burst CAT2 test
6: passes burst CAT3 test,
channel=None,
epoch=1126259446.0))
For the `StateVector`, the naming of the bits will be
``format``-dependent, because they are recorded differently by LOSC
in different formats.
For events published in O2 and later, LOSC typically provides
multiple data sets containing the original (``'C00'``) and cleaned
(``'CLN'``) data.
To select both data sets and plot a comparison, for example:
>>> orig = TimeSeries.fetch_open_data('H1', 1187008870, 1187008896,
... tag='C00')
>>> cln = TimeSeries.fetch_open_data('H1', 1187008870, 1187008896,
... tag='CLN')
>>> origasd = orig.asd(fftlength=4, overlap=2)
>>> clnasd = cln.asd(fftlength=4, overlap=2)
>>> plot = origasd.plot(label='Un-cleaned')
>>> ax = plot.gca()
>>> ax.plot(clnasd, label='Cleaned')
>>> ax.set_xlim(10, 1400)
>>> ax.set_ylim(1e-24, 1e-20)
>>> ax.legend()
>>> plot.show()
Notes
-----
`StateVector` data are not available in ``txt.gz`` format.
"""
from .io.losc import fetch_losc_data
return fetch_losc_data(ifo, start, end, sample_rate=sample_rate,
tag=tag, version=version, format=format,
verbose=verbose, cache=cache,
host=host, cls=cls, **kwargs) | python | def fetch_open_data(cls, ifo, start, end, sample_rate=4096,
tag=None, version=None,
format='hdf5', host=GWOSC_DEFAULT_HOST,
verbose=False, cache=None, **kwargs):
"""Fetch open-access data from the LIGO Open Science Center
Parameters
----------
ifo : `str`
the two-character prefix of the IFO in which you are interested,
e.g. `'L1'`
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS start time of required data, defaults to start of data found;
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS end time of required data, defaults to end of data found;
any input parseable by `~gwpy.time.to_gps` is fine
sample_rate : `float`, optional,
the sample rate of desired data; most data are stored
by LOSC at 4096 Hz, however there may be event-related
data releases with a 16384 Hz rate, default: `4096`
tag : `str`, optional
file tag, e.g. ``'CLN'`` to select cleaned data, or ``'C00'``
for 'raw' calibrated data.
version : `int`, optional
version of files to download, defaults to highest discovered
version
format : `str`, optional
the data format to download and parse, default: ``'h5py'``
- ``'hdf5'``
- ``'gwf'`` - requires |LDAStools.frameCPP|_
host : `str`, optional
HTTP host name of LOSC server to access
verbose : `bool`, optional, default: `False`
print verbose output while fetching data
cache : `bool`, optional
save/read a local copy of the remote URL, default: `False`;
useful if the same remote data are to be accessed multiple times.
Set `GWPY_CACHE=1` in the environment to auto-cache.
**kwargs
any other keyword arguments are passed to the `TimeSeries.read`
method that parses the file that was downloaded
Examples
--------
>>> from gwpy.timeseries import (TimeSeries, StateVector)
>>> print(TimeSeries.fetch_open_data('H1', 1126259446, 1126259478))
TimeSeries([ 2.17704028e-19, 2.08763900e-19, 2.39681183e-19,
..., 3.55365541e-20, 6.33533516e-20,
7.58121195e-20]
unit: Unit(dimensionless),
t0: 1126259446.0 s,
dt: 0.000244140625 s,
name: Strain,
channel: None)
>>> print(StateVector.fetch_open_data('H1', 1126259446, 1126259478))
StateVector([127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127]
unit: Unit(dimensionless),
t0: 1126259446.0 s,
dt: 1.0 s,
name: Data quality,
channel: None,
bits: Bits(0: data present
1: passes cbc CAT1 test
2: passes cbc CAT2 test
3: passes cbc CAT3 test
4: passes burst CAT1 test
5: passes burst CAT2 test
6: passes burst CAT3 test,
channel=None,
epoch=1126259446.0))
For the `StateVector`, the naming of the bits will be
``format``-dependent, because they are recorded differently by LOSC
in different formats.
For events published in O2 and later, LOSC typically provides
multiple data sets containing the original (``'C00'``) and cleaned
(``'CLN'``) data.
To select both data sets and plot a comparison, for example:
>>> orig = TimeSeries.fetch_open_data('H1', 1187008870, 1187008896,
... tag='C00')
>>> cln = TimeSeries.fetch_open_data('H1', 1187008870, 1187008896,
... tag='CLN')
>>> origasd = orig.asd(fftlength=4, overlap=2)
>>> clnasd = cln.asd(fftlength=4, overlap=2)
>>> plot = origasd.plot(label='Un-cleaned')
>>> ax = plot.gca()
>>> ax.plot(clnasd, label='Cleaned')
>>> ax.set_xlim(10, 1400)
>>> ax.set_ylim(1e-24, 1e-20)
>>> ax.legend()
>>> plot.show()
Notes
-----
`StateVector` data are not available in ``txt.gz`` format.
"""
from .io.losc import fetch_losc_data
return fetch_losc_data(ifo, start, end, sample_rate=sample_rate,
tag=tag, version=version, format=format,
verbose=verbose, cache=cache,
host=host, cls=cls, **kwargs) | [
"def",
"fetch_open_data",
"(",
"cls",
",",
"ifo",
",",
"start",
",",
"end",
",",
"sample_rate",
"=",
"4096",
",",
"tag",
"=",
"None",
",",
"version",
"=",
"None",
",",
"format",
"=",
"'hdf5'",
",",
"host",
"=",
"GWOSC_DEFAULT_HOST",
",",
"verbose",
"="... | Fetch open-access data from the LIGO Open Science Center
Parameters
----------
ifo : `str`
the two-character prefix of the IFO in which you are interested,
e.g. `'L1'`
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS start time of required data, defaults to start of data found;
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS end time of required data, defaults to end of data found;
any input parseable by `~gwpy.time.to_gps` is fine
sample_rate : `float`, optional,
the sample rate of desired data; most data are stored
by LOSC at 4096 Hz, however there may be event-related
data releases with a 16384 Hz rate, default: `4096`
tag : `str`, optional
file tag, e.g. ``'CLN'`` to select cleaned data, or ``'C00'``
for 'raw' calibrated data.
version : `int`, optional
version of files to download, defaults to highest discovered
version
format : `str`, optional
the data format to download and parse, default: ``'h5py'``
- ``'hdf5'``
- ``'gwf'`` - requires |LDAStools.frameCPP|_
host : `str`, optional
HTTP host name of LOSC server to access
verbose : `bool`, optional, default: `False`
print verbose output while fetching data
cache : `bool`, optional
save/read a local copy of the remote URL, default: `False`;
useful if the same remote data are to be accessed multiple times.
Set `GWPY_CACHE=1` in the environment to auto-cache.
**kwargs
any other keyword arguments are passed to the `TimeSeries.read`
method that parses the file that was downloaded
Examples
--------
>>> from gwpy.timeseries import (TimeSeries, StateVector)
>>> print(TimeSeries.fetch_open_data('H1', 1126259446, 1126259478))
TimeSeries([ 2.17704028e-19, 2.08763900e-19, 2.39681183e-19,
..., 3.55365541e-20, 6.33533516e-20,
7.58121195e-20]
unit: Unit(dimensionless),
t0: 1126259446.0 s,
dt: 0.000244140625 s,
name: Strain,
channel: None)
>>> print(StateVector.fetch_open_data('H1', 1126259446, 1126259478))
StateVector([127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127]
unit: Unit(dimensionless),
t0: 1126259446.0 s,
dt: 1.0 s,
name: Data quality,
channel: None,
bits: Bits(0: data present
1: passes cbc CAT1 test
2: passes cbc CAT2 test
3: passes cbc CAT3 test
4: passes burst CAT1 test
5: passes burst CAT2 test
6: passes burst CAT3 test,
channel=None,
epoch=1126259446.0))
For the `StateVector`, the naming of the bits will be
``format``-dependent, because they are recorded differently by LOSC
in different formats.
For events published in O2 and later, LOSC typically provides
multiple data sets containing the original (``'C00'``) and cleaned
(``'CLN'``) data.
To select both data sets and plot a comparison, for example:
>>> orig = TimeSeries.fetch_open_data('H1', 1187008870, 1187008896,
... tag='C00')
>>> cln = TimeSeries.fetch_open_data('H1', 1187008870, 1187008896,
... tag='CLN')
>>> origasd = orig.asd(fftlength=4, overlap=2)
>>> clnasd = cln.asd(fftlength=4, overlap=2)
>>> plot = origasd.plot(label='Un-cleaned')
>>> ax = plot.gca()
>>> ax.plot(clnasd, label='Cleaned')
>>> ax.set_xlim(10, 1400)
>>> ax.set_ylim(1e-24, 1e-20)
>>> ax.legend()
>>> plot.show()
Notes
-----
`StateVector` data are not available in ``txt.gz`` format. | [
"Fetch",
"open",
"-",
"access",
"data",
"from",
"the",
"LIGO",
"Open",
"Science",
"Center"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/core.py#L382-L498 | train | 211,382 |
gwpy/gwpy | gwpy/timeseries/core.py | TimeSeriesBase.find | def find(cls, channel, start, end, frametype=None, pad=None,
scaled=None, dtype=None, nproc=1, verbose=False, **readargs):
"""Find and read data from frames for a channel
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the name of the channel to read, or a `Channel` object.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS end time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
frametype : `str`, optional
name of frametype in which this channel is stored, will search
for containing frame types if necessary
pad : `float`, optional
value with which to fill gaps in the source data,
by default gaps will result in a `ValueError`.
scaled : `bool`, optional
apply slope and bias calibration to ADC data, for non-ADC data
this option has no effect.
nproc : `int`, optional, default: `1`
number of parallel processes to use, serial process by
default.
dtype : `numpy.dtype`, `str`, `type`, or `dict`
numeric data type for returned data, e.g. `numpy.float`, or
`dict` of (`channel`, `dtype`) pairs
allow_tape : `bool`, optional, default: `True`
allow reading from frame files on (slow) magnetic tape
verbose : `bool`, optional
print verbose output about read progress, if ``verbose``
is specified as a string, this defines the prefix for the
progress meter
**readargs
any other keyword arguments to be passed to `.read()`
"""
return cls.DictClass.find(
[channel], start, end,
frametype=frametype,
verbose=verbose,
pad=pad,
scaled=scaled,
dtype=dtype,
nproc=nproc,
**readargs
)[str(channel)] | python | def find(cls, channel, start, end, frametype=None, pad=None,
scaled=None, dtype=None, nproc=1, verbose=False, **readargs):
"""Find and read data from frames for a channel
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the name of the channel to read, or a `Channel` object.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS end time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
frametype : `str`, optional
name of frametype in which this channel is stored, will search
for containing frame types if necessary
pad : `float`, optional
value with which to fill gaps in the source data,
by default gaps will result in a `ValueError`.
scaled : `bool`, optional
apply slope and bias calibration to ADC data, for non-ADC data
this option has no effect.
nproc : `int`, optional, default: `1`
number of parallel processes to use, serial process by
default.
dtype : `numpy.dtype`, `str`, `type`, or `dict`
numeric data type for returned data, e.g. `numpy.float`, or
`dict` of (`channel`, `dtype`) pairs
allow_tape : `bool`, optional, default: `True`
allow reading from frame files on (slow) magnetic tape
verbose : `bool`, optional
print verbose output about read progress, if ``verbose``
is specified as a string, this defines the prefix for the
progress meter
**readargs
any other keyword arguments to be passed to `.read()`
"""
return cls.DictClass.find(
[channel], start, end,
frametype=frametype,
verbose=verbose,
pad=pad,
scaled=scaled,
dtype=dtype,
nproc=nproc,
**readargs
)[str(channel)] | [
"def",
"find",
"(",
"cls",
",",
"channel",
",",
"start",
",",
"end",
",",
"frametype",
"=",
"None",
",",
"pad",
"=",
"None",
",",
"scaled",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"nproc",
"=",
"1",
",",
"verbose",
"=",
"False",
",",
"*",
... | Find and read data from frames for a channel
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the name of the channel to read, or a `Channel` object.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS end time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
frametype : `str`, optional
name of frametype in which this channel is stored, will search
for containing frame types if necessary
pad : `float`, optional
value with which to fill gaps in the source data,
by default gaps will result in a `ValueError`.
scaled : `bool`, optional
apply slope and bias calibration to ADC data, for non-ADC data
this option has no effect.
nproc : `int`, optional, default: `1`
number of parallel processes to use, serial process by
default.
dtype : `numpy.dtype`, `str`, `type`, or `dict`
numeric data type for returned data, e.g. `numpy.float`, or
`dict` of (`channel`, `dtype`) pairs
allow_tape : `bool`, optional, default: `True`
allow reading from frame files on (slow) magnetic tape
verbose : `bool`, optional
print verbose output about read progress, if ``verbose``
is specified as a string, this defines the prefix for the
progress meter
**readargs
any other keyword arguments to be passed to `.read()` | [
"Find",
"and",
"read",
"data",
"from",
"frames",
"for",
"a",
"channel"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/core.py#L501-L558 | train | 211,383 |
gwpy/gwpy | gwpy/timeseries/core.py | TimeSeriesBase.plot | def plot(self, method='plot', figsize=(12, 4), xscale='auto-gps',
**kwargs):
"""Plot the data for this timeseries
Returns
-------
figure : `~matplotlib.figure.Figure`
the newly created figure, with populated Axes.
See Also
--------
matplotlib.pyplot.figure
for documentation of keyword arguments used to create the
figure
matplotlib.figure.Figure.add_subplot
for documentation of keyword arguments used to create the
axes
matplotlib.axes.Axes.plot
for documentation of keyword arguments used in rendering the data
"""
kwargs.update(figsize=figsize, xscale=xscale)
return super(TimeSeriesBase, self).plot(method=method, **kwargs) | python | def plot(self, method='plot', figsize=(12, 4), xscale='auto-gps',
**kwargs):
"""Plot the data for this timeseries
Returns
-------
figure : `~matplotlib.figure.Figure`
the newly created figure, with populated Axes.
See Also
--------
matplotlib.pyplot.figure
for documentation of keyword arguments used to create the
figure
matplotlib.figure.Figure.add_subplot
for documentation of keyword arguments used to create the
axes
matplotlib.axes.Axes.plot
for documentation of keyword arguments used in rendering the data
"""
kwargs.update(figsize=figsize, xscale=xscale)
return super(TimeSeriesBase, self).plot(method=method, **kwargs) | [
"def",
"plot",
"(",
"self",
",",
"method",
"=",
"'plot'",
",",
"figsize",
"=",
"(",
"12",
",",
"4",
")",
",",
"xscale",
"=",
"'auto-gps'",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"update",
"(",
"figsize",
"=",
"figsize",
",",
"xscale",
... | Plot the data for this timeseries
Returns
-------
figure : `~matplotlib.figure.Figure`
the newly created figure, with populated Axes.
See Also
--------
matplotlib.pyplot.figure
for documentation of keyword arguments used to create the
figure
matplotlib.figure.Figure.add_subplot
for documentation of keyword arguments used to create the
axes
matplotlib.axes.Axes.plot
for documentation of keyword arguments used in rendering the data | [
"Plot",
"the",
"data",
"for",
"this",
"timeseries"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/core.py#L627-L648 | train | 211,384 |
gwpy/gwpy | gwpy/timeseries/core.py | TimeSeriesBase.from_nds2_buffer | def from_nds2_buffer(cls, buffer_, scaled=None, copy=True, **metadata):
"""Construct a new series from an `nds2.buffer` object
**Requires:** |nds2|_
Parameters
----------
buffer_ : `nds2.buffer`
the input NDS2-client buffer to read
scaled : `bool`, optional
apply slope and bias calibration to ADC data, for non-ADC data
this option has no effect
copy : `bool`, optional
if `True`, copy the contained data array to new to a new array
**metadata
any other metadata keyword arguments to pass to the `TimeSeries`
constructor
Returns
-------
timeseries : `TimeSeries`
a new `TimeSeries` containing the data from the `nds2.buffer`,
and the appropriate metadata
"""
# get Channel from buffer
channel = Channel.from_nds2(buffer_.channel)
# set default metadata
metadata.setdefault('channel', channel)
metadata.setdefault('epoch', LIGOTimeGPS(buffer_.gps_seconds,
buffer_.gps_nanoseconds))
metadata.setdefault('sample_rate', channel.sample_rate)
metadata.setdefault('unit', channel.unit)
metadata.setdefault('name', buffer_.name)
# unwrap data
scaled = _dynamic_scaled(scaled, channel.name)
slope = buffer_.signal_slope
offset = buffer_.signal_offset
null_scaling = slope == 1. and offset == 0.
if scaled and not null_scaling:
data = buffer_.data.copy() * slope + offset
copy = False
else:
data = buffer_.data
# construct new TimeSeries-like object
return cls(data, copy=copy, **metadata) | python | def from_nds2_buffer(cls, buffer_, scaled=None, copy=True, **metadata):
"""Construct a new series from an `nds2.buffer` object
**Requires:** |nds2|_
Parameters
----------
buffer_ : `nds2.buffer`
the input NDS2-client buffer to read
scaled : `bool`, optional
apply slope and bias calibration to ADC data, for non-ADC data
this option has no effect
copy : `bool`, optional
if `True`, copy the contained data array to new to a new array
**metadata
any other metadata keyword arguments to pass to the `TimeSeries`
constructor
Returns
-------
timeseries : `TimeSeries`
a new `TimeSeries` containing the data from the `nds2.buffer`,
and the appropriate metadata
"""
# get Channel from buffer
channel = Channel.from_nds2(buffer_.channel)
# set default metadata
metadata.setdefault('channel', channel)
metadata.setdefault('epoch', LIGOTimeGPS(buffer_.gps_seconds,
buffer_.gps_nanoseconds))
metadata.setdefault('sample_rate', channel.sample_rate)
metadata.setdefault('unit', channel.unit)
metadata.setdefault('name', buffer_.name)
# unwrap data
scaled = _dynamic_scaled(scaled, channel.name)
slope = buffer_.signal_slope
offset = buffer_.signal_offset
null_scaling = slope == 1. and offset == 0.
if scaled and not null_scaling:
data = buffer_.data.copy() * slope + offset
copy = False
else:
data = buffer_.data
# construct new TimeSeries-like object
return cls(data, copy=copy, **metadata) | [
"def",
"from_nds2_buffer",
"(",
"cls",
",",
"buffer_",
",",
"scaled",
"=",
"None",
",",
"copy",
"=",
"True",
",",
"*",
"*",
"metadata",
")",
":",
"# get Channel from buffer",
"channel",
"=",
"Channel",
".",
"from_nds2",
"(",
"buffer_",
".",
"channel",
")",... | Construct a new series from an `nds2.buffer` object
**Requires:** |nds2|_
Parameters
----------
buffer_ : `nds2.buffer`
the input NDS2-client buffer to read
scaled : `bool`, optional
apply slope and bias calibration to ADC data, for non-ADC data
this option has no effect
copy : `bool`, optional
if `True`, copy the contained data array to new to a new array
**metadata
any other metadata keyword arguments to pass to the `TimeSeries`
constructor
Returns
-------
timeseries : `TimeSeries`
a new `TimeSeries` containing the data from the `nds2.buffer`,
and the appropriate metadata | [
"Construct",
"a",
"new",
"series",
"from",
"an",
"nds2",
".",
"buffer",
"object"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/core.py#L651-L701 | train | 211,385 |
gwpy/gwpy | gwpy/timeseries/core.py | TimeSeriesBase.from_lal | def from_lal(cls, lalts, copy=True):
"""Generate a new TimeSeries from a LAL TimeSeries of any type.
"""
from ..utils.lal import from_lal_unit
try:
unit = from_lal_unit(lalts.sampleUnits)
except (TypeError, ValueError) as exc:
warnings.warn("%s, defaulting to 'dimensionless'" % str(exc))
unit = None
channel = Channel(lalts.name, sample_rate=1/lalts.deltaT, unit=unit,
dtype=lalts.data.data.dtype)
out = cls(lalts.data.data, channel=channel, t0=lalts.epoch,
dt=lalts.deltaT, unit=unit, name=lalts.name, copy=False)
if copy:
return out.copy()
return out | python | def from_lal(cls, lalts, copy=True):
"""Generate a new TimeSeries from a LAL TimeSeries of any type.
"""
from ..utils.lal import from_lal_unit
try:
unit = from_lal_unit(lalts.sampleUnits)
except (TypeError, ValueError) as exc:
warnings.warn("%s, defaulting to 'dimensionless'" % str(exc))
unit = None
channel = Channel(lalts.name, sample_rate=1/lalts.deltaT, unit=unit,
dtype=lalts.data.data.dtype)
out = cls(lalts.data.data, channel=channel, t0=lalts.epoch,
dt=lalts.deltaT, unit=unit, name=lalts.name, copy=False)
if copy:
return out.copy()
return out | [
"def",
"from_lal",
"(",
"cls",
",",
"lalts",
",",
"copy",
"=",
"True",
")",
":",
"from",
".",
".",
"utils",
".",
"lal",
"import",
"from_lal_unit",
"try",
":",
"unit",
"=",
"from_lal_unit",
"(",
"lalts",
".",
"sampleUnits",
")",
"except",
"(",
"TypeErro... | Generate a new TimeSeries from a LAL TimeSeries of any type. | [
"Generate",
"a",
"new",
"TimeSeries",
"from",
"a",
"LAL",
"TimeSeries",
"of",
"any",
"type",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/core.py#L704-L719 | train | 211,386 |
gwpy/gwpy | gwpy/timeseries/core.py | TimeSeriesBase.to_lal | def to_lal(self):
"""Convert this `TimeSeries` into a LAL TimeSeries.
"""
import lal
from ..utils.lal import (find_typed_function, to_lal_unit)
# map unit
try:
unit = to_lal_unit(self.unit)
except ValueError as e:
warnings.warn("%s, defaulting to lal.DimensionlessUnit" % str(e))
unit = lal.DimensionlessUnit
# create TimeSeries
create = find_typed_function(self.dtype, 'Create', 'TimeSeries')
lalts = create(self.name, lal.LIGOTimeGPS(self.epoch.gps), 0,
self.dt.value, unit, self.shape[0])
lalts.data.data = self.value
return lalts | python | def to_lal(self):
"""Convert this `TimeSeries` into a LAL TimeSeries.
"""
import lal
from ..utils.lal import (find_typed_function, to_lal_unit)
# map unit
try:
unit = to_lal_unit(self.unit)
except ValueError as e:
warnings.warn("%s, defaulting to lal.DimensionlessUnit" % str(e))
unit = lal.DimensionlessUnit
# create TimeSeries
create = find_typed_function(self.dtype, 'Create', 'TimeSeries')
lalts = create(self.name, lal.LIGOTimeGPS(self.epoch.gps), 0,
self.dt.value, unit, self.shape[0])
lalts.data.data = self.value
return lalts | [
"def",
"to_lal",
"(",
"self",
")",
":",
"import",
"lal",
"from",
".",
".",
"utils",
".",
"lal",
"import",
"(",
"find_typed_function",
",",
"to_lal_unit",
")",
"# map unit",
"try",
":",
"unit",
"=",
"to_lal_unit",
"(",
"self",
".",
"unit",
")",
"except",
... | Convert this `TimeSeries` into a LAL TimeSeries. | [
"Convert",
"this",
"TimeSeries",
"into",
"a",
"LAL",
"TimeSeries",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/core.py#L721-L739 | train | 211,387 |
gwpy/gwpy | gwpy/timeseries/core.py | TimeSeriesBase.from_pycbc | def from_pycbc(cls, pycbcseries, copy=True):
"""Convert a `pycbc.types.timeseries.TimeSeries` into a `TimeSeries`
Parameters
----------
pycbcseries : `pycbc.types.timeseries.TimeSeries`
the input PyCBC `~pycbc.types.timeseries.TimeSeries` array
copy : `bool`, optional, default: `True`
if `True`, copy these data to a new array
Returns
-------
timeseries : `TimeSeries`
a GWpy version of the input timeseries
"""
return cls(pycbcseries.data, t0=pycbcseries.start_time,
dt=pycbcseries.delta_t, copy=copy) | python | def from_pycbc(cls, pycbcseries, copy=True):
"""Convert a `pycbc.types.timeseries.TimeSeries` into a `TimeSeries`
Parameters
----------
pycbcseries : `pycbc.types.timeseries.TimeSeries`
the input PyCBC `~pycbc.types.timeseries.TimeSeries` array
copy : `bool`, optional, default: `True`
if `True`, copy these data to a new array
Returns
-------
timeseries : `TimeSeries`
a GWpy version of the input timeseries
"""
return cls(pycbcseries.data, t0=pycbcseries.start_time,
dt=pycbcseries.delta_t, copy=copy) | [
"def",
"from_pycbc",
"(",
"cls",
",",
"pycbcseries",
",",
"copy",
"=",
"True",
")",
":",
"return",
"cls",
"(",
"pycbcseries",
".",
"data",
",",
"t0",
"=",
"pycbcseries",
".",
"start_time",
",",
"dt",
"=",
"pycbcseries",
".",
"delta_t",
",",
"copy",
"="... | Convert a `pycbc.types.timeseries.TimeSeries` into a `TimeSeries`
Parameters
----------
pycbcseries : `pycbc.types.timeseries.TimeSeries`
the input PyCBC `~pycbc.types.timeseries.TimeSeries` array
copy : `bool`, optional, default: `True`
if `True`, copy these data to a new array
Returns
-------
timeseries : `TimeSeries`
a GWpy version of the input timeseries | [
"Convert",
"a",
"pycbc",
".",
"types",
".",
"timeseries",
".",
"TimeSeries",
"into",
"a",
"TimeSeries"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/core.py#L742-L759 | train | 211,388 |
gwpy/gwpy | gwpy/timeseries/core.py | TimeSeriesBase.to_pycbc | def to_pycbc(self, copy=True):
"""Convert this `TimeSeries` into a PyCBC
`~pycbc.types.timeseries.TimeSeries`
Parameters
----------
copy : `bool`, optional, default: `True`
if `True`, copy these data to a new array
Returns
-------
timeseries : `~pycbc.types.timeseries.TimeSeries`
a PyCBC representation of this `TimeSeries`
"""
from pycbc import types
return types.TimeSeries(self.value,
delta_t=self.dt.to('s').value,
epoch=self.epoch.gps, copy=copy) | python | def to_pycbc(self, copy=True):
"""Convert this `TimeSeries` into a PyCBC
`~pycbc.types.timeseries.TimeSeries`
Parameters
----------
copy : `bool`, optional, default: `True`
if `True`, copy these data to a new array
Returns
-------
timeseries : `~pycbc.types.timeseries.TimeSeries`
a PyCBC representation of this `TimeSeries`
"""
from pycbc import types
return types.TimeSeries(self.value,
delta_t=self.dt.to('s').value,
epoch=self.epoch.gps, copy=copy) | [
"def",
"to_pycbc",
"(",
"self",
",",
"copy",
"=",
"True",
")",
":",
"from",
"pycbc",
"import",
"types",
"return",
"types",
".",
"TimeSeries",
"(",
"self",
".",
"value",
",",
"delta_t",
"=",
"self",
".",
"dt",
".",
"to",
"(",
"'s'",
")",
".",
"value... | Convert this `TimeSeries` into a PyCBC
`~pycbc.types.timeseries.TimeSeries`
Parameters
----------
copy : `bool`, optional, default: `True`
if `True`, copy these data to a new array
Returns
-------
timeseries : `~pycbc.types.timeseries.TimeSeries`
a PyCBC representation of this `TimeSeries` | [
"Convert",
"this",
"TimeSeries",
"into",
"a",
"PyCBC",
"~pycbc",
".",
"types",
".",
"timeseries",
".",
"TimeSeries"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/core.py#L761-L778 | train | 211,389 |
gwpy/gwpy | gwpy/timeseries/core.py | TimeSeriesBaseList.coalesce | def coalesce(self):
"""Merge contiguous elements of this list into single objects
This method implicitly sorts and potentially shortens this list.
"""
self.sort(key=lambda ts: ts.t0.value)
i = j = 0
N = len(self)
while j < N:
this = self[j]
j += 1
if j < N and this.is_contiguous(self[j]) == 1:
while j < N and this.is_contiguous(self[j]):
try:
this = self[i] = this.append(self[j])
except ValueError as exc:
if 'cannot resize this array' in str(exc):
this = this.copy()
this = self[i] = this.append(self[j])
else:
raise
j += 1
else:
self[i] = this
i += 1
del self[i:]
return self | python | def coalesce(self):
"""Merge contiguous elements of this list into single objects
This method implicitly sorts and potentially shortens this list.
"""
self.sort(key=lambda ts: ts.t0.value)
i = j = 0
N = len(self)
while j < N:
this = self[j]
j += 1
if j < N and this.is_contiguous(self[j]) == 1:
while j < N and this.is_contiguous(self[j]):
try:
this = self[i] = this.append(self[j])
except ValueError as exc:
if 'cannot resize this array' in str(exc):
this = this.copy()
this = self[i] = this.append(self[j])
else:
raise
j += 1
else:
self[i] = this
i += 1
del self[i:]
return self | [
"def",
"coalesce",
"(",
"self",
")",
":",
"self",
".",
"sort",
"(",
"key",
"=",
"lambda",
"ts",
":",
"ts",
".",
"t0",
".",
"value",
")",
"i",
"=",
"j",
"=",
"0",
"N",
"=",
"len",
"(",
"self",
")",
"while",
"j",
"<",
"N",
":",
"this",
"=",
... | Merge contiguous elements of this list into single objects
This method implicitly sorts and potentially shortens this list. | [
"Merge",
"contiguous",
"elements",
"of",
"this",
"list",
"into",
"single",
"objects"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/core.py#L1557-L1583 | train | 211,390 |
gwpy/gwpy | gwpy/timeseries/core.py | TimeSeriesBaseList.join | def join(self, pad=None, gap=None):
"""Concatenate all of the elements of this list into a single object
Parameters
----------
pad : `float`, optional, default: `0.0`
value with which to pad gaps
gap : `str`, optional, default: `'raise'`
what to do if there are gaps in the data, one of
- ``'raise'`` - raise a `ValueError`
- ``'ignore'`` - remove gap and join data
- ``'pad'`` - pad gap with zeros
If `pad` is given and is not `None`, the default is ``'pad'``,
otherwise ``'raise'``.
Returns
-------
series : `gwpy.types.TimeSeriesBase` subclass
a single series containing all data from each entry in this list
See Also
--------
TimeSeries.append
for details on how the individual series are concatenated together
"""
if not self:
return self.EntryClass(numpy.empty((0,) * self.EntryClass._ndim))
self.sort(key=lambda t: t.epoch.gps)
out = self[0].copy()
for series in self[1:]:
out.append(series, gap=gap, pad=pad)
return out | python | def join(self, pad=None, gap=None):
"""Concatenate all of the elements of this list into a single object
Parameters
----------
pad : `float`, optional, default: `0.0`
value with which to pad gaps
gap : `str`, optional, default: `'raise'`
what to do if there are gaps in the data, one of
- ``'raise'`` - raise a `ValueError`
- ``'ignore'`` - remove gap and join data
- ``'pad'`` - pad gap with zeros
If `pad` is given and is not `None`, the default is ``'pad'``,
otherwise ``'raise'``.
Returns
-------
series : `gwpy.types.TimeSeriesBase` subclass
a single series containing all data from each entry in this list
See Also
--------
TimeSeries.append
for details on how the individual series are concatenated together
"""
if not self:
return self.EntryClass(numpy.empty((0,) * self.EntryClass._ndim))
self.sort(key=lambda t: t.epoch.gps)
out = self[0].copy()
for series in self[1:]:
out.append(series, gap=gap, pad=pad)
return out | [
"def",
"join",
"(",
"self",
",",
"pad",
"=",
"None",
",",
"gap",
"=",
"None",
")",
":",
"if",
"not",
"self",
":",
"return",
"self",
".",
"EntryClass",
"(",
"numpy",
".",
"empty",
"(",
"(",
"0",
",",
")",
"*",
"self",
".",
"EntryClass",
".",
"_n... | Concatenate all of the elements of this list into a single object
Parameters
----------
pad : `float`, optional, default: `0.0`
value with which to pad gaps
gap : `str`, optional, default: `'raise'`
what to do if there are gaps in the data, one of
- ``'raise'`` - raise a `ValueError`
- ``'ignore'`` - remove gap and join data
- ``'pad'`` - pad gap with zeros
If `pad` is given and is not `None`, the default is ``'pad'``,
otherwise ``'raise'``.
Returns
-------
series : `gwpy.types.TimeSeriesBase` subclass
a single series containing all data from each entry in this list
See Also
--------
TimeSeries.append
for details on how the individual series are concatenated together | [
"Concatenate",
"all",
"of",
"the",
"elements",
"of",
"this",
"list",
"into",
"a",
"single",
"object"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/core.py#L1585-L1619 | train | 211,391 |
gwpy/gwpy | gwpy/timeseries/core.py | TimeSeriesBaseList.copy | def copy(self):
"""Return a copy of this list with each element copied to new memory
"""
out = type(self)()
for series in self:
out.append(series.copy())
return out | python | def copy(self):
"""Return a copy of this list with each element copied to new memory
"""
out = type(self)()
for series in self:
out.append(series.copy())
return out | [
"def",
"copy",
"(",
"self",
")",
":",
"out",
"=",
"type",
"(",
"self",
")",
"(",
")",
"for",
"series",
"in",
"self",
":",
"out",
".",
"append",
"(",
"series",
".",
"copy",
"(",
")",
")",
"return",
"out"
] | Return a copy of this list with each element copied to new memory | [
"Return",
"a",
"copy",
"of",
"this",
"list",
"with",
"each",
"element",
"copied",
"to",
"new",
"memory"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/core.py#L1630-L1636 | train | 211,392 |
gwpy/gwpy | gwpy/utils/lal.py | to_lal_type_str | def to_lal_type_str(pytype):
"""Convert the input python type to a LAL type string
Examples
--------
To convert a python type:
>>> from gwpy.utils.lal import to_lal_type_str
>>> to_lal_type_str(float)
'REAL8'
To convert a `numpy.dtype`:
>>> import numpy
>>> to_lal_type_str(numpy.dtype('uint32'))
'UINT4'
To convert a LAL type code:
>>> to_lal_type_str(11)
'REAL8'
Raises
------
KeyError
if the input doesn't map to a LAL type string
"""
# noop
if pytype in LAL_TYPE_FROM_STR:
return pytype
# convert type code
if pytype in LAL_TYPE_STR:
return LAL_TYPE_STR[pytype]
# convert python type
try:
dtype = numpy.dtype(pytype)
return LAL_TYPE_STR_FROM_NUMPY[dtype.type]
except (TypeError, KeyError):
raise ValueError("Failed to map {!r} to LAL type string") | python | def to_lal_type_str(pytype):
"""Convert the input python type to a LAL type string
Examples
--------
To convert a python type:
>>> from gwpy.utils.lal import to_lal_type_str
>>> to_lal_type_str(float)
'REAL8'
To convert a `numpy.dtype`:
>>> import numpy
>>> to_lal_type_str(numpy.dtype('uint32'))
'UINT4'
To convert a LAL type code:
>>> to_lal_type_str(11)
'REAL8'
Raises
------
KeyError
if the input doesn't map to a LAL type string
"""
# noop
if pytype in LAL_TYPE_FROM_STR:
return pytype
# convert type code
if pytype in LAL_TYPE_STR:
return LAL_TYPE_STR[pytype]
# convert python type
try:
dtype = numpy.dtype(pytype)
return LAL_TYPE_STR_FROM_NUMPY[dtype.type]
except (TypeError, KeyError):
raise ValueError("Failed to map {!r} to LAL type string") | [
"def",
"to_lal_type_str",
"(",
"pytype",
")",
":",
"# noop",
"if",
"pytype",
"in",
"LAL_TYPE_FROM_STR",
":",
"return",
"pytype",
"# convert type code",
"if",
"pytype",
"in",
"LAL_TYPE_STR",
":",
"return",
"LAL_TYPE_STR",
"[",
"pytype",
"]",
"# convert python type",
... | Convert the input python type to a LAL type string
Examples
--------
To convert a python type:
>>> from gwpy.utils.lal import to_lal_type_str
>>> to_lal_type_str(float)
'REAL8'
To convert a `numpy.dtype`:
>>> import numpy
>>> to_lal_type_str(numpy.dtype('uint32'))
'UINT4'
To convert a LAL type code:
>>> to_lal_type_str(11)
'REAL8'
Raises
------
KeyError
if the input doesn't map to a LAL type string | [
"Convert",
"the",
"input",
"python",
"type",
"to",
"a",
"LAL",
"type",
"string"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/utils/lal.py#L79-L119 | train | 211,393 |
gwpy/gwpy | gwpy/utils/lal.py | find_typed_function | def find_typed_function(pytype, prefix, suffix, module=lal):
"""Returns the lal method for the correct type
Parameters
----------
pytype : `type`, `numpy.dtype`
the python type, or dtype, to map
prefix : `str`
the function name prefix (before the type tag)
suffix : `str`
the function name suffix (after the type tag)
Raises
------
AttributeError
if the function is not found
Examples
--------
>>> from gwpy.utils.lal import find_typed_function
>>> find_typed_function(float, 'Create', 'Sequence')
<built-in function CreateREAL8Sequence>
"""
laltype = to_lal_type_str(pytype)
return getattr(module, '{0}{1}{2}'.format(prefix, laltype, suffix)) | python | def find_typed_function(pytype, prefix, suffix, module=lal):
"""Returns the lal method for the correct type
Parameters
----------
pytype : `type`, `numpy.dtype`
the python type, or dtype, to map
prefix : `str`
the function name prefix (before the type tag)
suffix : `str`
the function name suffix (after the type tag)
Raises
------
AttributeError
if the function is not found
Examples
--------
>>> from gwpy.utils.lal import find_typed_function
>>> find_typed_function(float, 'Create', 'Sequence')
<built-in function CreateREAL8Sequence>
"""
laltype = to_lal_type_str(pytype)
return getattr(module, '{0}{1}{2}'.format(prefix, laltype, suffix)) | [
"def",
"find_typed_function",
"(",
"pytype",
",",
"prefix",
",",
"suffix",
",",
"module",
"=",
"lal",
")",
":",
"laltype",
"=",
"to_lal_type_str",
"(",
"pytype",
")",
"return",
"getattr",
"(",
"module",
",",
"'{0}{1}{2}'",
".",
"format",
"(",
"prefix",
","... | Returns the lal method for the correct type
Parameters
----------
pytype : `type`, `numpy.dtype`
the python type, or dtype, to map
prefix : `str`
the function name prefix (before the type tag)
suffix : `str`
the function name suffix (after the type tag)
Raises
------
AttributeError
if the function is not found
Examples
--------
>>> from gwpy.utils.lal import find_typed_function
>>> find_typed_function(float, 'Create', 'Sequence')
<built-in function CreateREAL8Sequence> | [
"Returns",
"the",
"lal",
"method",
"for",
"the",
"correct",
"type"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/utils/lal.py#L122-L148 | train | 211,394 |
gwpy/gwpy | gwpy/utils/lal.py | to_lal_unit | def to_lal_unit(aunit):
"""Convert the input unit into a `LALUnit`
For example::
>>> u = to_lal_unit('m**2 / kg ** 4')
>>> print(u)
m^2 kg^-4
Parameters
----------
aunit : `~astropy.units.Unit`, `str`
the input unit
Returns
-------
unit : `LALUnit`
the LALUnit representation of the input
Raises
------
ValueError
if LAL doesn't understand the base units for the input
"""
if isinstance(aunit, string_types):
aunit = units.Unit(aunit)
aunit = aunit.decompose()
lunit = lal.Unit()
for base, power in zip(aunit.bases, aunit.powers):
# try this base
try:
lalbase = LAL_UNIT_FROM_ASTROPY[base]
except KeyError:
lalbase = None
# otherwise loop through the equivalent bases
for eqbase in base.find_equivalent_units():
try:
lalbase = LAL_UNIT_FROM_ASTROPY[eqbase]
except KeyError:
continue
# if we didn't find anything, raise an exception
if lalbase is None:
raise ValueError("LAL has no unit corresponding to %r" % base)
lunit *= lalbase ** power
return lunit | python | def to_lal_unit(aunit):
"""Convert the input unit into a `LALUnit`
For example::
>>> u = to_lal_unit('m**2 / kg ** 4')
>>> print(u)
m^2 kg^-4
Parameters
----------
aunit : `~astropy.units.Unit`, `str`
the input unit
Returns
-------
unit : `LALUnit`
the LALUnit representation of the input
Raises
------
ValueError
if LAL doesn't understand the base units for the input
"""
if isinstance(aunit, string_types):
aunit = units.Unit(aunit)
aunit = aunit.decompose()
lunit = lal.Unit()
for base, power in zip(aunit.bases, aunit.powers):
# try this base
try:
lalbase = LAL_UNIT_FROM_ASTROPY[base]
except KeyError:
lalbase = None
# otherwise loop through the equivalent bases
for eqbase in base.find_equivalent_units():
try:
lalbase = LAL_UNIT_FROM_ASTROPY[eqbase]
except KeyError:
continue
# if we didn't find anything, raise an exception
if lalbase is None:
raise ValueError("LAL has no unit corresponding to %r" % base)
lunit *= lalbase ** power
return lunit | [
"def",
"to_lal_unit",
"(",
"aunit",
")",
":",
"if",
"isinstance",
"(",
"aunit",
",",
"string_types",
")",
":",
"aunit",
"=",
"units",
".",
"Unit",
"(",
"aunit",
")",
"aunit",
"=",
"aunit",
".",
"decompose",
"(",
")",
"lunit",
"=",
"lal",
".",
"Unit",... | Convert the input unit into a `LALUnit`
For example::
>>> u = to_lal_unit('m**2 / kg ** 4')
>>> print(u)
m^2 kg^-4
Parameters
----------
aunit : `~astropy.units.Unit`, `str`
the input unit
Returns
-------
unit : `LALUnit`
the LALUnit representation of the input
Raises
------
ValueError
if LAL doesn't understand the base units for the input | [
"Convert",
"the",
"input",
"unit",
"into",
"a",
"LALUnit"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/utils/lal.py#L165-L209 | train | 211,395 |
gwpy/gwpy | gwpy/utils/lal.py | from_lal_unit | def from_lal_unit(lunit):
"""Convert a LALUnit` into a `~astropy.units.Unit`
Parameters
----------
lunit : `lal.Unit`
the input unit
Returns
-------
unit : `~astropy.units.Unit`
the Astropy representation of the input
Raises
------
TypeError
if ``lunit`` cannot be converted to `lal.Unit`
ValueError
if Astropy doesn't understand the base units for the input
"""
return reduce(operator.mul, (
units.Unit(str(LAL_UNIT_INDEX[i])) ** exp for
i, exp in enumerate(lunit.unitNumerator))) | python | def from_lal_unit(lunit):
"""Convert a LALUnit` into a `~astropy.units.Unit`
Parameters
----------
lunit : `lal.Unit`
the input unit
Returns
-------
unit : `~astropy.units.Unit`
the Astropy representation of the input
Raises
------
TypeError
if ``lunit`` cannot be converted to `lal.Unit`
ValueError
if Astropy doesn't understand the base units for the input
"""
return reduce(operator.mul, (
units.Unit(str(LAL_UNIT_INDEX[i])) ** exp for
i, exp in enumerate(lunit.unitNumerator))) | [
"def",
"from_lal_unit",
"(",
"lunit",
")",
":",
"return",
"reduce",
"(",
"operator",
".",
"mul",
",",
"(",
"units",
".",
"Unit",
"(",
"str",
"(",
"LAL_UNIT_INDEX",
"[",
"i",
"]",
")",
")",
"**",
"exp",
"for",
"i",
",",
"exp",
"in",
"enumerate",
"("... | Convert a LALUnit` into a `~astropy.units.Unit`
Parameters
----------
lunit : `lal.Unit`
the input unit
Returns
-------
unit : `~astropy.units.Unit`
the Astropy representation of the input
Raises
------
TypeError
if ``lunit`` cannot be converted to `lal.Unit`
ValueError
if Astropy doesn't understand the base units for the input | [
"Convert",
"a",
"LALUnit",
"into",
"a",
"~astropy",
".",
"units",
".",
"Unit"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/utils/lal.py#L212-L234 | train | 211,396 |
gwpy/gwpy | gwpy/utils/lal.py | to_lal_ligotimegps | def to_lal_ligotimegps(gps):
"""Convert the given GPS time to a `lal.LIGOTimeGPS` object
Parameters
----------
gps : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
input GPS time, can be anything parsable by :meth:`~gwpy.time.to_gps`
Returns
-------
ligotimegps : `lal.LIGOTimeGPS`
a SWIG-LAL `~lal.LIGOTimeGPS` representation of the given GPS time
"""
gps = to_gps(gps)
return lal.LIGOTimeGPS(gps.gpsSeconds, gps.gpsNanoSeconds) | python | def to_lal_ligotimegps(gps):
"""Convert the given GPS time to a `lal.LIGOTimeGPS` object
Parameters
----------
gps : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
input GPS time, can be anything parsable by :meth:`~gwpy.time.to_gps`
Returns
-------
ligotimegps : `lal.LIGOTimeGPS`
a SWIG-LAL `~lal.LIGOTimeGPS` representation of the given GPS time
"""
gps = to_gps(gps)
return lal.LIGOTimeGPS(gps.gpsSeconds, gps.gpsNanoSeconds) | [
"def",
"to_lal_ligotimegps",
"(",
"gps",
")",
":",
"gps",
"=",
"to_gps",
"(",
"gps",
")",
"return",
"lal",
".",
"LIGOTimeGPS",
"(",
"gps",
".",
"gpsSeconds",
",",
"gps",
".",
"gpsNanoSeconds",
")"
] | Convert the given GPS time to a `lal.LIGOTimeGPS` object
Parameters
----------
gps : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
input GPS time, can be anything parsable by :meth:`~gwpy.time.to_gps`
Returns
-------
ligotimegps : `lal.LIGOTimeGPS`
a SWIG-LAL `~lal.LIGOTimeGPS` representation of the given GPS time | [
"Convert",
"the",
"given",
"GPS",
"time",
"to",
"a",
"lal",
".",
"LIGOTimeGPS",
"object"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/utils/lal.py#L237-L251 | train | 211,397 |
gwpy/gwpy | gwpy/table/io/ligolw.py | _get_property_columns | def _get_property_columns(tabletype, columns):
"""Returns list of GPS columns required to read gpsproperties for a table
Examples
--------
>>> _get_property_columns(lsctables.SnglBurstTable, ['peak'])
['peak_time', 'peak_time_ns']
"""
from ligo.lw.lsctables import gpsproperty as GpsProperty
# get properties for row object
rowvars = vars(tabletype.RowType)
# build list of real column names for fancy properties
extracols = {}
for key in columns:
prop = rowvars[key]
if isinstance(prop, GpsProperty):
extracols[key] = (prop.s_name, prop.ns_name)
return extracols | python | def _get_property_columns(tabletype, columns):
"""Returns list of GPS columns required to read gpsproperties for a table
Examples
--------
>>> _get_property_columns(lsctables.SnglBurstTable, ['peak'])
['peak_time', 'peak_time_ns']
"""
from ligo.lw.lsctables import gpsproperty as GpsProperty
# get properties for row object
rowvars = vars(tabletype.RowType)
# build list of real column names for fancy properties
extracols = {}
for key in columns:
prop = rowvars[key]
if isinstance(prop, GpsProperty):
extracols[key] = (prop.s_name, prop.ns_name)
return extracols | [
"def",
"_get_property_columns",
"(",
"tabletype",
",",
"columns",
")",
":",
"from",
"ligo",
".",
"lw",
".",
"lsctables",
"import",
"gpsproperty",
"as",
"GpsProperty",
"# get properties for row object",
"rowvars",
"=",
"vars",
"(",
"tabletype",
".",
"RowType",
")",... | Returns list of GPS columns required to read gpsproperties for a table
Examples
--------
>>> _get_property_columns(lsctables.SnglBurstTable, ['peak'])
['peak_time', 'peak_time_ns'] | [
"Returns",
"list",
"of",
"GPS",
"columns",
"required",
"to",
"read",
"gpsproperties",
"for",
"a",
"table"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/ligolw.py#L79-L96 | train | 211,398 |
gwpy/gwpy | gwpy/table/io/ligolw.py | _get_column_dtype | def _get_column_dtype(llwcol):
"""Get the data type of a LIGO_LW `Column`
Parameters
----------
llwcol : :class:`~ligo.lw.table.Column`, `numpy.ndarray`, iterable
a LIGO_LW column, a numpy array, or an iterable
Returns
-------
dtype : `type`, None
the object data type for values in the given column, `None` is
returned if ``llwcol`` is a `numpy.ndarray` with `numpy.object_`
dtype, or no data type can be parsed (e.g. empty list)
"""
try: # maybe its a numpy array already!
dtype = llwcol.dtype
if dtype is numpy.dtype('O'): # don't convert
raise AttributeError
return dtype
except AttributeError: # dang
try: # ligo.lw.table.Column
llwtype = llwcol.parentNode.validcolumns[llwcol.Name]
except AttributeError: # not a column
try:
return type(llwcol[0])
except IndexError:
return None
else: # map column type str to python type
from ligo.lw.types import (ToPyType, ToNumPyType)
try:
return ToNumPyType[llwtype]
except KeyError:
return ToPyType[llwtype] | python | def _get_column_dtype(llwcol):
"""Get the data type of a LIGO_LW `Column`
Parameters
----------
llwcol : :class:`~ligo.lw.table.Column`, `numpy.ndarray`, iterable
a LIGO_LW column, a numpy array, or an iterable
Returns
-------
dtype : `type`, None
the object data type for values in the given column, `None` is
returned if ``llwcol`` is a `numpy.ndarray` with `numpy.object_`
dtype, or no data type can be parsed (e.g. empty list)
"""
try: # maybe its a numpy array already!
dtype = llwcol.dtype
if dtype is numpy.dtype('O'): # don't convert
raise AttributeError
return dtype
except AttributeError: # dang
try: # ligo.lw.table.Column
llwtype = llwcol.parentNode.validcolumns[llwcol.Name]
except AttributeError: # not a column
try:
return type(llwcol[0])
except IndexError:
return None
else: # map column type str to python type
from ligo.lw.types import (ToPyType, ToNumPyType)
try:
return ToNumPyType[llwtype]
except KeyError:
return ToPyType[llwtype] | [
"def",
"_get_column_dtype",
"(",
"llwcol",
")",
":",
"try",
":",
"# maybe its a numpy array already!",
"dtype",
"=",
"llwcol",
".",
"dtype",
"if",
"dtype",
"is",
"numpy",
".",
"dtype",
"(",
"'O'",
")",
":",
"# don't convert",
"raise",
"AttributeError",
"return",... | Get the data type of a LIGO_LW `Column`
Parameters
----------
llwcol : :class:`~ligo.lw.table.Column`, `numpy.ndarray`, iterable
a LIGO_LW column, a numpy array, or an iterable
Returns
-------
dtype : `type`, None
the object data type for values in the given column, `None` is
returned if ``llwcol`` is a `numpy.ndarray` with `numpy.object_`
dtype, or no data type can be parsed (e.g. empty list) | [
"Get",
"the",
"data",
"type",
"of",
"a",
"LIGO_LW",
"Column"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/ligolw.py#L231-L264 | train | 211,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.