Columns (name : dtype):

code : string
signature : string
docstring : string
loss_without_docstring : float64
loss_with_docstring : float64
factor : float64
from ligo.lw.lsctables import use_in

class _ContentHandler(parent):
    # pylint: disable=too-few-public-methods
    def __init__(self, document):
        super(_ContentHandler, self).__init__(document, filter_func)

return use_in(_ContentHandler)
def build_content_handler(parent, filter_func)
Build a `~xml.sax.handler.ContentHandler` with a given filter
7.30243
8.146453
0.896394
from ligo.lw.ligolw import Document
from ligo.lw import types
from ligo.lw.lsctables import use_in
from ligo.lw.utils import (load_url, ligolw_add)

# mock ToPyType to link to numpy dtypes
topytype = types.ToPyType.copy()
for key in types.ToPyType:
    if key in types.ToNumPyType:
        ...

def read_ligolw(source, contenthandler=LIGOLWContentHandler, **kwargs)

Read one or more LIGO_LW format files

Parameters
----------
source : `str`, `file`
    the open file or file path to read
contenthandler : `~xml.sax.handler.ContentHandler`, optional
    content handler used to parse document
verbose : `bool`, optional
    be verbose when reading file...
5.24396
5.085704
1.031118
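A minimal usage sketch for the record above; the file name is illustrative and the content handler import mirrors the one used in the signature.

# hypothetical example: read a LIGO_LW XML document from disk
from ligo.lw.ligolw import LIGOLWContentHandler
from gwpy.io.ligolw import read_ligolw

xmldoc = read_ligolw('events.xml', contenthandler=LIGOLWContentHandler)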
def decorator(func_):
    # pylint: disable=missing-docstring
    @wraps(func_)
    def decorated_func(source, *args, **kwargs):
        # pylint: disable=missing-docstring
        from ligo.lw.ligolw import Document
        from glue.ligolw.ligolw import Document as GlueDocument
        ...

def with_read_ligolw(func=None, contenthandler=None)

Decorate a LIGO_LW-reading function to open a filepath if needed

``func`` should be written to presume a
:class:`~ligo.lw.ligolw.Document` as the first positional argument
2.900886
2.825028
1.026852
from ligo.lw.ligolw import Document
from ligo.lw import (table, lsctables)

# get ilwdchar_compat to pass to read_ligolw()
if Document.__module__.startswith("glue"):
    kwargs["ilwdchar_compat"] = True

# get content handler to read only this table (if given)
if tablename is not None:
    ...

def read_table(source, tablename=None, columns=None, contenthandler=None, **kwargs)

Read a :class:`~ligo.lw.table.Table` from one or more LIGO_LW files

Parameters
----------
source : `Document`, `file`, `str`, `CacheEntry`, `list`
    object representing one or more files. One of

    - a LIGO_LW :class:`~ligo.lw.ligolw.Document`
    - an open `file`
    - a `str` pointing...
4.760512
4.410399
1.079383
from ligo.lw.ligolw import (Document, LIGOLWContentHandler)
from ligo.lw.lsctables import use_in
from ligo.lw.utils import (load_filename, load_fileobj)

use_in(kwargs.setdefault('contenthandler', LIGOLWContentHandler))

try:  # try and load existing file
    if isinstance(fobj, string_type...

def open_xmldoc(fobj, **kwargs)

Try and open an existing LIGO_LW-format file, or create a new Document

Parameters
----------
fobj : `str`, `file`
    file path or open file object to read
**kwargs
    other keyword arguments to pass to
    :func:`~ligo.lw.utils.load_filename`, or
    :func:`~ligo.lw.utils.load_fileob...
6.873064
5.32017
1.291888
from ligo.lw.ligolw import LIGO_LW
try:
    from glue.ligolw.ligolw import LIGO_LW as LIGO_LW2
except ImportError:
    ligolw_types = (LIGO_LW,)
else:
    ligolw_types = (LIGO_LW, LIGO_LW2)

if isinstance(xmldoc, ligolw_types):
    return xmldoc
else:
    for node in xml...
def get_ligolw_element(xmldoc)
Find an existing <LIGO_LW> element in this XML Document
3.041781
2.534585
1.20011
from ligo.lw.ligolw import LIGO_LW
from ligo.lw import lsctables

# find or create LIGO_LW tag
try:
    llw = get_ligolw_element(xmldoc)
except ValueError:
    llw = LIGO_LW()
    xmldoc.appendChild(llw)

for table in tables:
    try:  # append new data to existing table
        ...

def write_tables_to_document(xmldoc, tables, overwrite=False)

Write the given LIGO_LW table into a :class:`Document`

Parameters
----------
xmldoc : :class:`~ligo.lw.ligolw.Document`
    the document to write into
tables : `list` of :class:`~ligo.lw.table.Table`
    the set of tables to write
overwrite : `bool`, optional, default: `False`
    if ...
4.896252
4.035317
1.21335
from ligo.lw.ligolw import (Document, LIGO_LW, LIGOLWContentHandler)
from ligo.lw import utils as ligolw_utils

# allow writing directly to XML
if isinstance(target, (Document, LIGO_LW)):
    xmldoc = target
# open existing document, if possible
elif append:
    xmldoc = open_xmldoc...

def write_tables(target, tables, append=False, overwrite=False, **kwargs)

Write a LIGO_LW table to file

Parameters
----------
target : `str`, `file`, :class:`~ligo.lw.ligolw.Document`
    the file or document to write into
tables : `list`, `tuple` of :class:`~ligo.lw.table.Table`
    the tables to write
append : `bool`, optional, default: `False`
    if `T...
4.369182
3.855838
1.133134
# pylint: disable=line-too-long
# noqa: E501
try:
    from ligo.lw.ligolw import (Document, Stream)
except ImportError:  # no python-ligo-lw
    from glue.ligolw.ligolw import Document, Stream

# read file object
if isinstance(source, Document):
    xmldoc = source
else:
    ...

def list_tables(source)

List the names of all tables in this file(s)

Parameters
----------
source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`, `list`
    one or more open files, file paths, or LIGO_LW `Document`s

Examples
--------
>>> from gwpy.io.ligolw import list_tables
>>> print(list_tables('H1-LDA...
7.195781
6.210495
1.158649
from ligo.lw.types import (
    ToNumPyType as numpytypes,
    ToPyType as pytypes,
)

# if nothing to do...
if val is None or colname not in cls.validcolumns:
    return val

llwtype = cls.validcolumns[colname]

# don't mess with formatted IlwdChar
if llwtype == 'ilwd:char'...

def to_table_type(val, cls, colname)

Cast a value to the correct type for inclusion in a LIGO_LW table

This method returns the input unmodified if a type mapping for
``colname`` isn't found.

Parameters
----------
val : `object`
    The input object to convert, of any type
cls : `type`, subclass of :class:`~ligo.lw.table.Tabl...
10.940473
7.859859
1.391943
# pylint: disable=unused-argument
if fileobj is not None:
    loc = fileobj.tell()
    fileobj.seek(0)
    try:
        line1 = fileobj.readline().lower()
        line2 = fileobj.readline().lower()
        try:
            return (line1.startswith(XML_SIGNATURE) and
                    ...
def is_ligolw(origin, filepath, fileobj, *args, **kwargs)
Identify a file object as LIGO_LW-format XML
2.766265
2.684192
1.030576
# pylint: disable=unused-argument
if fileobj is not None:
    loc = fileobj.tell()
    fileobj.seek(0)
    try:
        sig = fileobj.read(5).lower()
        return sig == XML_SIGNATURE
    finally:
        fileobj.seek(loc)
elif filepath is not None:
    return filepath...
def is_xml(origin, filepath, fileobj, *args, **kwargs)
Identify a file object as XML (any format)
3.0001
2.942163
1.019692
# read data
kwargs.setdefault('array_type', TimeSeries)
series = read_hdf5_array(h5f, path=path, **kwargs)
# crop if needed
if start is not None or end is not None:
    return series.crop(start, end)
return series
def read_hdf5_timeseries(h5f, path=None, start=None, end=None, **kwargs)
Read a `TimeSeries` from HDF5
3.468158
3.816686
0.908683
# find group from which to read
if group:
    h5g = h5f[group]
else:
    h5g = h5f
# find list of names to read
if names is None:
    names = [key for key in h5g if _is_timeseries_dataset(h5g[key])]
# read names
out = kwargs.pop('dict_type', TimeSeriesDict)()
kwargs.se...
def read_hdf5_dict(h5f, names=None, group=None, **kwargs)
Read a `TimeSeriesDict` from HDF5
3.747495
3.474671
1.078518
# create group if needed
if group and group not in h5f:
    h5g = h5f.create_group(group)
elif group:
    h5g = h5f[group]
else:
    h5g = h5f
# write each timeseries
kwargs.setdefault('format', 'hdf5')
for key, series in tsdict.items():
    series.write(h5g, path=str(k...

def write_hdf5_dict(tsdict, h5f, group=None, **kwargs)

Write a `TimeSeriesBaseDict` to HDF5

Each series in the dict is written as a dataset in the group
2.682446
2.710033
0.98982
from pycbc.psd import welch as pycbc_welch

# default to 'standard' welch
kwargs.setdefault('avg_method', 'mean')

# get scheme
if scheme is None:
    scheme = null_context()

# generate pycbc FrequencySeries with scheme
with scheme:
    pycbc_fseries = pycbc_welch(timeseries.to_pycbc(copy=...

def welch(timeseries, segmentlength, noverlap=None, scheme=None, **kwargs)

Calculate a PSD using Welch's method with a mean average

Parameters
----------
timeseries : `~gwpy.timeseries.TimeSeries`
    input `TimeSeries` data.
segmentlength : `int`
    number of samples in single average.
noverlap : `int`
    number of samples to overlap between segments, def...
5.212617
5.113805
1.019323
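For context, a mean-averaged Welch PSD can also be sketched with scipy directly; this is not the pycbc code path used in the record, and the values are illustrative.

import numpy
from scipy.signal import welch as scipy_welch

fs = 1024  # sample rate (Hz)
x = numpy.random.normal(size=8 * fs)  # 8 seconds of white noise
# mean-averaged Welch PSD: 1-second segments, 50% overlap
freqs, psd = scipy_welch(x, fs=fs, nperseg=fs, noverlap=fs // 2,
                         average='mean')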
idx = int((n//10 % 10 != 1) * (n % 10 < 4) * n % 10)
return '{}{}'.format(n, "tsnrhtdd"[idx::4])

def _ordinal(n)

Returns the ordinal string for a given integer

See https://stackoverflow.com/a/20007730/1307974

Parameters
----------
n : `int`
    the number to convert to ordinal

Examples
--------
>>> _ordinal(11)
'11th'
>>> _ordinal(102)
'102nd'
4.774713
6.588858
0.724665
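The suffix trick above is self-contained and easy to verify; here is a runnable check with the logic copied from the record.

def _ordinal(n):
    # pick 'st'/'nd'/'rd' only for 1-3 outside the teens, else 'th'
    idx = int((n//10 % 10 != 1) * (n % 10 < 4) * n % 10)
    return '{}{}'.format(n, "tsnrhtdd"[idx::4])

assert _ordinal(3) == '3rd'
assert _ordinal(11) == '11th'
assert _ordinal(102) == '102nd'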
if isinstance(operand, string_types):
    if operand == 'mean':
        operand = self.mean(axis=0)
    elif operand == 'median':
        operand = self.median(axis=0)
    else:
        raise ValueError("operand %r unrecognised, please give a "
                         ...

def ratio(self, operand)

Calculate the ratio of this `Spectrogram` against a reference

Parameters
----------
operand : `str`, `FrequencySeries`, `Quantity`
    a `~gwpy.frequencyseries.FrequencySeries` or
    `~astropy.units.Quantity` to weight against, or one of

    - ``'mean'`` : weight again...
3.825184
3.853513
0.992649
if 'imshow' in kwargs:
    warnings.warn('the imshow keyword for Spectrogram.plot was '
                  'removed, please pass method=\'imshow\' instead',
                  DeprecationWarning)
    kwargs.setdefault('method', 'imshow' if kwargs.pop('imshow') else ...

def plot(self, figsize=(12, 6), xscale='auto-gps', **kwargs)

Plot the data for this `Spectrogram`

Parameters
----------
**kwargs
    all keyword arguments are passed along to underlying
    functions, see below for references

Returns
-------
plot : `~gwpy.plot.Plot`
    the `Plot` containing the data
...
4.721219
4.851197
0.973207
data = numpy.vstack([s.value for s in spectra])
spec1 = list(spectra)[0]
if not all(s.f0 == spec1.f0 for s in spectra):
    raise ValueError("Cannot stack spectra with different f0")
if not all(s.df == spec1.df for s in spectra):
    raise ValueError("Cannot stac...

def from_spectra(cls, *spectra, **kwargs)

Build a new `Spectrogram` from a list of spectra.

Parameters
----------
*spectra
    any number of `~gwpy.frequencyseries.FrequencySeries` series
dt : `float`, `~astropy.units.Quantity`, optional
    stride between given spectra

Returns
-------
S...
2.856874
2.724212
1.048698
out = scipy.percentile(self.value, percentile, axis=0)
if self.name is not None:
    name = '{}: {} percentile'.format(self.name, _ordinal(percentile))
else:
    name = None
return FrequencySeries(out, epoch=self.epoch, channel=self.channel,
                       ...

def percentile(self, percentile)

Calculate a given spectral percentile for this `Spectrogram`.

Parameters
----------
percentile : `float`
    percentile (0 - 100) of the bins to compute

Returns
-------
spectrum : `~gwpy.frequencyseries.FrequencySeries`
    the given percentile `Frequenc...
4.953671
4.642341
1.067063
from ..frequencyseries import SpectralVariance
return SpectralVariance.from_spectrogram(
    self, bins=bins, low=low, high=high, nbins=nbins, log=log,
    norm=norm, density=density)

def variance(self, bins=None, low=None, high=None, nbins=500, log=False, norm=False, density=False)

Calculate the `SpectralVariance` of this `Spectrogram`.

Parameters
----------
bins : `~numpy.ndarray`, optional, default `None`
    array of histogram bin edges, including the rightmost edge
low : `float`, optional, default: `None`
    left edge of lowest amplitude bin, ...
3.748318
3.73703
1.003021
if low is not None:
    low = units.Quantity(low, self._default_yunit)
if high is not None:
    high = units.Quantity(high, self._default_yunit)
# check low frequency
if low is not None and low == self.f0:
    low = None
elif low is not None and l...

def crop_frequencies(self, low=None, high=None, copy=False)

Crop this `Spectrogram` to the specified frequencies

Parameters
----------
low : `float`
    lower frequency bound for cropped `Spectrogram`
high : `float`
    upper frequency bound for cropped `Spectrogram`
copy : `bool`
    if `False` return a view of t...
2.283895
2.415763
0.945414
for ax in axes:
    for aset in ('collections', 'images'):
        try:
            return getattr(ax, aset)[-1]
        except (AttributeError, IndexError):
            continue
raise ValueError("Cannot determine mappable layer on any axes "
                 "for this colorbar")

def find_mappable(*axes)

Find the most recently added mappable layer in the given axes

Parameters
----------
*axes : `~matplotlib.axes.Axes`
    one or more axes to search for a mappable
6.467548
6.67251
0.969283
@wraps(identifier)
def decorated_func(origin, filepath, fileobj, *args, **kwargs):
    # pylint: disable=missing-docstring
    try:
        filepath = file_list(filepath)[0]
    except ValueError:
        if filepath is None:
            try:
                files = file_list(ar...

def identify_with_list(identifier)

Decorate an I/O identifier to handle a list of files as input

This function tries to resolve a single file path as a `str`
from any file-like or collection-of-file-likes to pass to the
underlying identifier for comparison.
3.333165
2.969064
1.122632
ctx = None
if isinstance(source, FILE_LIKE):
    fileobj = source
    filepath = source.name if hasattr(source, 'name') else None
else:
    filepath = source
    try:
        ctx = get_readable_fileobj(filepath, encoding='binary')
        fileobj = ctx.__enter__()  # pylint: dis...
def get_read_format(cls, source, args, kwargs)
Determine the read format for a given input source
3.157178
3.137054
1.006415
fsamp, arr = wavfile.read(fobj, **kwargs)
return TimeSeries(arr, sample_rate=fsamp)

def read(fobj, **kwargs)

Read a WAV file into a `TimeSeries`

Parameters
----------
fobj : `file`, `str`
    open file-like object or filename to read from
**kwargs
    all keyword arguments are passed onto :func:`scipy.io.wavfile.read`

See also
--------
scipy.io.wavfile.read
    for details on how the...
6.504201
11.444007
0.56835
fsamp = int(series.sample_rate.decompose().value)
if scale is None:
    scale = 1 / numpy.abs(series.value).max()
data = (series.value * scale).astype('float32')
return wavfile.write(output, fsamp, data)

def write(series, output, scale=None)

Write a `TimeSeries` to a WAV file

Parameters
----------
series : `TimeSeries`
    the series to write
output : `file`, `str`
    the file object or filename to write to
scale : `float`, optional
    the factor to apply to scale the data to (-1.0, 1.0),
    pass `scale=1` to not a...
4.174592
4.801814
0.869378
# pylint: disable=unused-argument
if origin == 'read' and fileobj is not None:
    loc = fileobj.tell()
    fileobj.seek(0)
    try:
        riff, _, fmt = struct.unpack('<4sI4s', fileobj.read(12))
        if isinstance(riff, bytes):
            riff = riff.decode('utf-8')
            ...

def is_wav(origin, filepath, fileobj, *args, **kwargs)

Identify a file as WAV

See `astropy.io.registry` for details on how this function is used.
2.454288
2.639162
0.92995
# remove any surrounding quotes
value = QUOTE_REGEX.sub('', value)
try:  # attempt `float()` conversion
    return float(value)
except ValueError:  # just return the input
    return value
def _float_or_str(value)
Internal method to attempt `float(value)` handling a `ValueError`
7.352807
5.956891
1.234336
# noqa
# parse definition into parts (skipping null tokens)
parts = list(generate_tokens(StringIO(definition.strip()).readline))
while parts[-1][0] in (token.ENDMARKER, token.NEWLINE):
    parts = parts[:-1]
# parse simple definition: e.g: snr > 5
if len(parts) == 3:
    a, b, c = par...

def parse_column_filter(definition)

Parse a `str` of the form 'column>50'

Parameters
----------
definition : `str`
    a column filter definition of the form ``<name><operator><threshold>``
    or ``<threshold><operator><name><operator><threshold>``, e.g.
    ``frequency >= 10``, or ``50 < snr < 100``

Returns
-------
...
2.868086
3.119291
0.919467
# noqa: E501
fltrs = []
for def_ in _flatten(definitions):
    if is_filter_tuple(def_):
        fltrs.append(def_)
    else:
        for splitdef in DELIM_REGEX.split(def_)[::2]:
            fltrs.extend(parse_column_filter(splitdef))
return fltrs

def parse_column_filters(*definitions)

Parse multiple compound column filter definitions

Examples
--------
>>> parse_column_filters('snr > 10', 'frequency < 1000')
[('snr', <function operator.gt>, 10.), ('frequency', <function operator.lt>, 1000.)]
>>> parse_column_filters('snr > 10 && frequency < 1000')
[('snr', <function operator....
4.312211
4.624378
0.932495
if isinstance(container, string_types):
    container = [container]
for elem in container:
    if isinstance(elem, string_types) or is_filter_tuple(elem):
        yield elem
    else:
        for elem2 in _flatten(elem):
            yield elem2
def _flatten(container)
Flatten arbitrary nested list of filters into a 1-D list
2.873858
2.52672
1.137387
return isinstance(tup, (tuple, list)) and (
    len(tup) == 3 and
    isinstance(tup[0], string_types) and
    callable(tup[1]))
def is_filter_tuple(tup)
Return whether a `tuple` matches the format for a column filter
2.961611
3.029035
0.977741
keep = numpy.ones(len(table), dtype=bool)
for name, op_func, operand in parse_column_filters(*column_filters):
    col = table[name].view(numpy.ndarray)
    keep &= op_func(col, operand)
return table[keep]

def filter_table(table, *column_filters)

Apply one or more column slice filters to a `Table`

Multiple column filters can be given, and will be applied
concurrently

Parameters
----------
table : `~astropy.table.Table`
    the table to filter
column_filter : `str`, `tuple`
    a column slice filter definition, in one of two fo...
3.658295
4.834524
0.756702
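A hedged sketch of the record above applied to an in-memory table; the import path for filter_table is an assumption, not confirmed by the record.

from astropy.table import Table
from gwpy.table.filter import filter_table  # assumed module path

t = Table(rows=[(8.0, 100.0), (12.0, 500.0)], names=('snr', 'frequency'))
strong = filter_table(t, 'snr > 10')  # keeps only the second row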
dataset = io_hdf5.find_dataset(source, path=path)
attrs = dict(dataset.attrs)
# unpickle channel object
try:
    attrs['channel'] = _unpickle_channel(attrs['channel'])
except KeyError:  # no channel stored
    pass
# unpack byte strings for python3
for key in attrs:
    if i...

def read_hdf5_array(source, path=None, array_type=Array)

Read an `Array` from the given HDF5 object

Parameters
----------
source : `str`, :class:`h5py.HLObject`
    path to HDF file on disk, or open `h5py.HLObject`.
path : `str`
    path in HDF hierarchy of dataset.
array_type : `type`
    desired return type
3.787569
4.581798
0.826656
try:
    return pickle.loads(raw)
except (ValueError, pickle.UnpicklingError, EOFError, TypeError,
        IndexError) as exc:
    # maybe not pickled
    if isinstance(raw, bytes):
        raw = raw.decode('utf-8')
    try:  # test if this is a valid channel name
        Channe...
def _unpickle_channel(raw)
Try and unpickle a channel with sensible error handling
4.565801
4.348591
1.049949
if (value is None or
        (isinstance(value, Index) and value.regular)):
    raise IgnoredAttribute

# map type to something HDF5 can handle
for typekey, func in ATTR_TYPE_MAP.items():
    if issubclass(type(value), typekey):
        return func(value)
return value
def _format_metadata_attribute(value)
Format a value for writing to HDF5 as a `h5py.Dataset` attribute
8.77141
7.493371
1.170556
for attr in ('unit',) + array._metadata_slots:
    # format attribute
    try:
        value = _format_metadata_attribute(
            getattr(array, '_%s' % attr, None))
    except IgnoredAttribute:
        continue
    # store attribute
    try:
        dataset.attrs[attr...
def write_array_metadata(dataset, array)
Write metadata for ``array`` into the `h5py.Dataset`
4.695037
4.528395
1.0368
if path is None:
    path = array.name
if path is None:
    raise ValueError("Cannot determine HDF5 path for %s, "
                     "please set ``name`` attribute, or pass ``path=`` "
                     "keyword when writing" % type(array).__name__)
# create dataset
dset = io...

def write_hdf5_array(array, h5g, path=None, attrs=None, append=False, overwrite=False, compression='gzip', **kwargs)

Write the ``array`` to an `h5py.Dataset`

Parameters
----------
array : `gwpy.types.Array`
    the data object to write
h5g : `str`, `h5py.Group`
    a file path to write to, or an `h5py.Group` in which
    to create a new dataset
path : `str`, optional
    the path inside the grou...
3.872767
4.369248
0.886369
attrs = {}
# loop through named axes
for i, axis in zip(range(series.ndim), ('x', 'y')):
    # find property names
    unit = '{}unit'.format(axis)
    origin = '{}0'.format(axis)
    delta = 'd{}'.format(axis)
    # store attributes
    aunit = getattr(series, unit)
    at...

def format_index_array_attrs(series)

Format metadata attributes for an indexed array

This function is used to provide the necessary metadata to meet
the (proposed) LIGO Common Data Format specification for series data
in HDF5.
4.269448
4.565547
0.935145
if attrs is None:
    attrs = format_index_array_attrs(series)
return write_hdf5_array(series, output, path=path, attrs=attrs, **kwargs)

def write_hdf5_series(series, output, path=None, attrs=None, **kwargs)

Write a Series to HDF5.

See :func:`write_hdf5_array` for details of arguments and keywords.
4.071559
3.77552
1.07841
def from_hdf5(*args, **kwargs):
    kwargs.setdefault('array_type', array_type)
    return read_hdf5_array(*args, **kwargs)

io_registry.register_reader(format, array_type, from_hdf5)
if issubclass(array_type, Series):
    io_registry.register_writer(format, array_type, write_hdf5_...

def register_hdf5_array_io(array_type, format='hdf5', identify=True)

Register read() and write() methods for the HDF5 format
2.312174
2.426806
0.952764
if colors:
    return itertools.cycle(colors)
try:
    return itertools.cycle(p["color"] for p in rcParams["axes.prop_cycle"])
except KeyError:  # matplotlib < 1.5
    return itertools.cycle(rcParams["axes.color_cycle"])
def color_cycle(colors=None)
An infinite iterator of the given (or default) colors
3.307276
3.315237
0.997599
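On modern matplotlib the fallback above reduces to cycling the prop-cycle colours; a standalone sketch:

import itertools
from matplotlib import rcParams

colors = itertools.cycle(p["color"] for p in rcParams["axes.prop_cycle"])
first = next(colors)  # '#1f77b4' with the default style sheet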
verbose = kwargs.pop('verbose', False)

# parse input as a list of files
try:  # try and map to a list of file-like objects
    files = file_list(source)
except ValueError:  # otherwise treat as single file
    files = [source]
    path = None  # to pass to get_read_format()
else:
    ...

def read_multi(flatten, cls, source, *args, **kwargs)

Read sources into a `cls` with multiprocessing

This method should be called by `cls.read` and uses the `nproc`
keyword to enable and handle pool-based multiprocessing of
multiple source files, using `flatten` to combine the chunked
data into a single object of the correct type.

Parameters
----...
4.734307
4.864501
0.973236
# read from filename
if isinstance(fobj, string_types):
    with open(fobj, 'r') as fobj2:
        return read_json_flag(fobj2)

# read from open file
txt = fobj.read()
if isinstance(txt, bytes):
    txt = txt.decode('utf-8')
data = json.loads(txt)

# format flag
name = ...
def read_json_flag(fobj)
Read a `DataQualityFlag` from a segments-web.ligo.org JSON file
5.170742
4.842279
1.067832
# write to filename
if isinstance(fobj, string_types):
    with open(fobj, 'w') as fobj2:
        return write_json_flag(flag, fobj2, **kwargs)

# build json packet
data = {}
data['ifo'] = flag.ifo
data['name'] = flag.tag
data['version'] = flag.version
data['active'] = flag....

def write_json_flag(flag, fobj, **kwargs)

Write a `DataQualityFlag` to a JSON file

Parameters
----------
flag : `DataQualityFlag`
    data to write
fobj : `str`, `file`
    target file (or filename) to write
**kwargs
    other keyword arguments to pass to :func:`json.dump`

See also
--------
json.dump
    for ...
4.000914
4.410175
0.907201
segmentlist = type(segmentlist)(segmentlist).coalesce()
idx = column.argsort()
contains = numpy.zeros(column.shape[0], dtype=bool)
j = 0
try:
    segstart, segend = segmentlist[j]
except IndexError:  # no segments, return all False
    return contains
i = 0
while i < contain...

def in_segmentlist(column, segmentlist)

Return the index of values lying inside the given segmentlist

A `~gwpy.segments.Segment` represents a semi-open interval,
so for any segment `[a, b)`, a value `x` is 'in' the segment if
a <= x < b
4.096488
4.294812
0.953822
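A usage sketch for the semi-open interval convention described above, assuming gwpy.segments is importable:

import numpy
from gwpy.segments import Segment, SegmentList

times = numpy.array([1.0, 2.5, 5.0, 9.0])
segs = SegmentList([Segment(0, 3), Segment(8, 10)])
mask = in_segmentlist(times, segs)  # [True, True, False, True]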
if domain != 'py':
    return None

modname = info['module']
fullname = info['fullname']

submod = sys.modules.get(modname)
if submod is None:
    return None

obj = submod
for part in fullname.split('.'):
    try:
        obj = getattr(obj, part)
    except:
        ...

def linkcode_resolve(domain, info)

Determine the URL corresponding to Python object

This code is stolen with thanks from the scipy team.
2.143233
2.078006
1.03139
from ..frequencyseries import FrequencySeries
if nfft is None:
    nfft = self.size
dft = npfft.rfft(self.value, n=nfft) / nfft
dft[1:] *= 2.0
new = FrequencySeries(dft, epoch=self.epoch, unit=self.unit,
                      name=self.name, channel=self....

def fft(self, nfft=None)

Compute the one-dimensional discrete Fourier transform of this
`TimeSeries`.

Parameters
----------
nfft : `int`, optional
    length of the desired Fourier transform, input will be
    cropped or padded to match the desired length. If nfft is
    not given, the l...
3.426427
3.341255
1.025491
from gwpy.spectrogram import Spectrogram

# format lengths
if fftlength is None:
    fftlength = self.duration
if isinstance(fftlength, units.Quantity):
    fftlength = fftlength.value
nfft = int((fftlength * self.sample_rate).decompose().value)
no...

def average_fft(self, fftlength=None, overlap=0, window=None)

Compute the averaged one-dimensional DFT of this `TimeSeries`.

This method computes a number of FFTs of duration ``fftlength``
and ``overlap`` (both given in seconds), and returns the mean
average. This method is analogous to the Welch average method
for power spectra.

Paramete...
3.472322
3.531277
0.983305
# get method
method_func = spectral.get_method(method)

# calculate PSD using UI method
return spectral.psd(self, method_func, fftlength=fftlength,
                    overlap=overlap, window=window, **kwargs)

def psd(self, fftlength=None, overlap=None, window='hann', method=DEFAULT_FFT_METHOD, **kwargs)

Calculate the PSD `FrequencySeries` for this `TimeSeries`

Parameters
----------
fftlength : `float`
    number of seconds in single FFT, defaults to a single FFT
    covering the full duration
overlap : `float`, optional
    number of seconds of overlap between ...
5.320234
7.745251
0.686903
return self.psd(method=method, fftlength=fftlength, overlap=overlap,
                window=window, **kwargs) ** (1/2.)

def asd(self, fftlength=None, overlap=None, window='hann', method=DEFAULT_FFT_METHOD, **kwargs)

Calculate the ASD `FrequencySeries` of this `TimeSeries`

Parameters
----------
fftlength : `float`
    number of seconds in single FFT, defaults to a single FFT
    covering the full duration
overlap : `float`, optional
    number of seconds of overlap between F...
3.88923
5.445631
0.714193
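A hedged end-to-end sketch of the psd/asd pair above using public LIGO data; the channel and GPS times are illustrative and fetching requires network access.

from gwpy.timeseries import TimeSeries

data = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
asd = data.asd(fftlength=4, overlap=2)  # sqrt of the 4-second Welch PSD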
return spectral.psd(
    (self, other),
    spectral.csd,
    fftlength=fftlength,
    overlap=overlap,
    window=window,
    **kwargs
)

def csd(self, other, fftlength=None, overlap=None, window='hann', **kwargs)

Calculate the CSD `FrequencySeries` for two `TimeSeries`

Parameters
----------
other : `TimeSeries`
    the second `TimeSeries` in this CSD calculation
fftlength : `float`
    number of seconds in single FFT, defaults to a single FFT
    covering the full durati...
4.266375
7.109475
0.600097
# get method
method_func = spectral.get_method(method)

# calculate PSD using UI method
return spectral.average_spectrogram(
    self,
    method_func,
    stride,
    fftlength=fftlength,
    overlap=overlap,
    window=window,
    ...

def spectrogram(self, stride, fftlength=None, overlap=None, window='hann', method=DEFAULT_FFT_METHOD, nproc=1, **kwargs)

Calculate the average power spectrogram of this `TimeSeries`
using the specified average spectrum method.

Each time-bin of the output `Spectrogram` is calculated by taking
a chunk of the `TimeSeries` in the segment
`[t - overlap/2., t + stride + overlap/2.)` and calculating the ...
5.591817
6.966973
0.802618
# set kwargs for periodogram()
kwargs.setdefault('fs', self.sample_rate.to('Hz').value)
# run
return spectral.spectrogram(self, signal.periodogram,
                            fftlength=fftlength, overlap=overlap,
                            window=window, **kwar...

def spectrogram2(self, fftlength, overlap=None, window='hann', **kwargs)

Calculate the non-averaged power `Spectrogram` of this `TimeSeries`

Parameters
----------
fftlength : `float`
    number of seconds in single FFT.
overlap : `float`, optional
    number of seconds of overlap between FFTs, defaults to the
    recommended overlap ...
5.276002
5.891247
0.895566
from ..spectrogram import Spectrogram
try:
    from scipy.signal import spectrogram
except ImportError:
    raise ImportError("Must have scipy>=0.16 to utilize "
                      "this method.")
# format lengths
if isinstance(fftlength, unit...

def fftgram(self, fftlength, overlap=None, window='hann', **kwargs)

Calculate the Fourier-gram of this `TimeSeries`.

At every ``stride``, a single, complex FFT is calculated.

Parameters
----------
fftlength : `float`
    number of seconds in single FFT.
overlap : `float`, optional
    number of seconds of overlap between FFTs,
    ...
3.805285
4.083977
0.93176
specgram = self.spectrogram(stride, fftlength=fftlength,
                            overlap=overlap, method=method,
                            window=window, nproc=nproc) ** (1/2.)
if filter:
    specgram = specgram.filter(*filter)
return specgram.variance(bins...

def spectral_variance(self, stride, fftlength=None, overlap=None, method=DEFAULT_FFT_METHOD, window='hann', nproc=1, filter=None, bins=None, low=None, high=None, nbins=500, log=False, norm=False, density=False)

Calculate the `SpectralVariance` of this `TimeSeries`.

Parameters
----------
stride : `float`
    number of seconds in single PSD (column of spectrogram)
fftlength : `float`
    number of seconds in single FFT
method : `str`, optional
    FFT-averaging ...
2.257056
3.09524
0.729202
return spectral.psd(
    self,
    spectral.rayleigh,
    fftlength=fftlength,
    overlap=overlap,
)

def rayleigh_spectrum(self, fftlength=None, overlap=None)

Calculate the Rayleigh `FrequencySeries` for this `TimeSeries`.

The Rayleigh statistic is calculated as the ratio of the standard
deviation and the mean of a number of periodograms.

Parameters
----------
fftlength : `float`
    number of seconds in single FFT, defaults ...
5.305702
9.572266
0.554279
specgram = spectral.average_spectrogram(
    self,
    spectral.rayleigh,
    stride,
    fftlength=fftlength,
    overlap=overlap,
    nproc=nproc,
    **kwargs
)
specgram.override_unit('')
return specgram

def rayleigh_spectrogram(self, stride, fftlength=None, overlap=0, nproc=1, **kwargs)

Calculate the Rayleigh statistic spectrogram of this `TimeSeries`

Parameters
----------
stride : `float`
    number of seconds in single PSD (column of spectrogram).
fftlength : `float`
    number of seconds in single FFT.
overlap : `float`, optional
    ...
4.461141
6.643525
0.671502
return spectral.average_spectrogram(
    (self, other),
    spectral.csd,
    stride,
    fftlength=fftlength,
    overlap=overlap,
    window=window,
    nproc=nproc,
    **kwargs
)

def csd_spectrogram(self, other, stride, fftlength=None, overlap=0, window='hann', nproc=1, **kwargs)

Calculate the cross spectral density spectrogram of this
`TimeSeries` with 'other'.

Parameters
----------
other : `~gwpy.timeseries.TimeSeries`
    second time-series for cross spectral density calculation
stride : `float`
    number of seconds in single PSD ...
3.155043
4.817698
0.654886
# design filter
filt = filter_design.highpass(frequency, self.sample_rate,
                              fstop=fstop, gpass=gpass, gstop=gstop,
                              analog=False, type=type, **kwargs)
# apply filter
return self.filter(*filt, filtfilt=filt...

def highpass(self, frequency, gpass=2, gstop=30, fstop=None, type='iir', filtfilt=True, **kwargs)

Filter this `TimeSeries` with a high-pass filter.

Parameters
----------
frequency : `float`
    high-pass corner frequency
gpass : `float`
    the maximum loss in the passband (dB).
gstop : `float`
    the minimum attenuation in the stopband (dB).
...
3.629555
5.488215
0.661336
# design filter
filt = filter_design.bandpass(flow, fhigh, self.sample_rate,
                              fstop=fstop, gpass=gpass, gstop=gstop,
                              analog=False, type=type, **kwargs)
# apply filter
return self.filter(*filt, filtfilt=fi...

def bandpass(self, flow, fhigh, gpass=2, gstop=30, fstop=None, type='iir', filtfilt=True, **kwargs)

Filter this `TimeSeries` with a band-pass filter.

Parameters
----------
flow : `float`
    lower corner frequency of pass band
fhigh : `float`
    upper corner frequency of pass band
gpass : `float`
    the maximum loss in the passband (dB).
gs...
3.458731
4.963628
0.696815
if n is None and ftype == 'iir':
    n = 8
elif n is None:
    n = 60
if isinstance(rate, units.Quantity):
    rate = rate.value
factor = (self.sample_rate.value / rate)
# NOTE: use math.isclose when python >= 3.5
if numpy.isclose(factor,...

def resample(self, rate, window='hamming', ftype='fir', n=None)

Resample this Series to a new rate

Parameters
----------
rate : `float`
    rate to which to resample this `Series`
window : `str`, `numpy.ndarray`, optional
    window function to apply to signal in the Fourier domain,
    see :func:`scipy.signal.get_window` fo...
4.353776
4.450081
0.978359
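A minimal sketch of the record above, downsampling white noise by a factor of four with the default FIR filter:

import numpy
from gwpy.timeseries import TimeSeries

ts = TimeSeries(numpy.random.normal(size=4096), sample_rate=4096)
ts_low = ts.resample(1024)  # 4096 Hz -> 1024 Hz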
return self.filter(zeros, poles, gain, analog=analog, **kwargs)
def zpk(self, zeros, poles, gain, analog=True, **kwargs)
Filter this `TimeSeries` by applying a zero-pole-gain filter

Parameters
----------
zeros : `array-like`
    list of zero frequencies (in Hertz)
poles : `array-like`
    list of pole frequencies (in Hertz)
gain : `float`
    DC gain of filter
an...
3.352439
11.510907
0.29124
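A hedged sketch of the zpk record: a one-zero, two-pole filter specified in Hertz, with values that are purely illustrative.

from gwpy.timeseries import TimeSeries

ts = TimeSeries(range(4096), sample_rate=4096)
filtered = ts.zpk(zeros=[100], poles=[1, 1], gain=1e-2)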
from matplotlib import mlab
from ..frequencyseries import FrequencySeries

# check sampling rates
if self.sample_rate.to('Hertz') != other.sample_rate.to('Hertz'):
    sampling = min(self.sample_rate.value, other.sample_rate.value)
    # resample higher rate serie...

def coherence(self, other, fftlength=None, overlap=None, window='hann', **kwargs)

Calculate the frequency-coherence between this `TimeSeries` and
another.

Parameters
----------
other : `TimeSeries`
    `TimeSeries` signal to calculate coherence with
fftlength : `float`, optional
    number of seconds in single FFT, defaults to a single FFT ...
2.897224
2.764402
1.048047
# shifting self backwards is the same as forwards
dt = abs(dt)
# crop inputs
self_ = self.crop(self.span[0], self.span[1] - dt)
other = self.crop(self.span[0] + dt, self.span[1])
return self_.coherence(other, fftlength=fftlength,
                       ov...

def auto_coherence(self, dt, fftlength=None, overlap=None, window='hann', **kwargs)

Calculate the frequency-coherence between this `TimeSeries` and a
time-shifted copy of itself.

The standard :meth:`TimeSeries.coherence` is calculated between
the input `TimeSeries` and a :meth:`cropped <TimeSeries.crop>`
copy of itself. Since the cropped version will be shorter, the ...
4.461641
5.145082
0.867166
from ..spectrogram.coherence import from_timeseries
return from_timeseries(self, other, stride, fftlength=fftlength,
                       overlap=overlap, window=window,
                       nproc=nproc)

def coherence_spectrogram(self, other, stride, fftlength=None, overlap=None, window='hann', nproc=1)

Calculate the coherence spectrogram between this `TimeSeries` and
other.

Parameters
----------
other : `TimeSeries`
    the second `TimeSeries` in this CSD calculation
stride : `float`
    number of seconds in single PSD (column of spectrogram)
fftlengt...
3.040777
3.79353
0.801569
stridesamp = int(stride * self.sample_rate.value)
nsteps = int(self.size // stridesamp)
# stride through TimeSeries, recording RMS
data = numpy.zeros(nsteps)
for step in range(nsteps):
    # find step TimeSeries
    idx = int(stridesamp * step)
    ...

def rms(self, stride=1)

Calculate the root-mean-square value of this `TimeSeries`
once per stride.

Parameters
----------
stride : `float`
    stride (seconds) between RMS calculations

Returns
-------
rms : `TimeSeries`
    a new `TimeSeries` containing the RMS value wit...
4.647561
4.613097
1.007471
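A quick numerical check of the rms record: a unit-amplitude sinusoid should give roughly 1/sqrt(2) per stride.

import numpy
from gwpy.timeseries import TimeSeries

t = numpy.arange(0, 4, 1. / 1024)
ts = TimeSeries(numpy.sin(2 * numpy.pi * 50 * t), sample_rate=1024)
rms = ts.rms(stride=1)  # four values, each close to 0.707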
stridesamp = int(stride * self.sample_rate.value)
nsteps = int(self.size // stridesamp)
# stride through the TimeSeries and mix with a local oscillator,
# taking the average over each stride
out = type(self)(numpy.zeros(nsteps, dtype=complex))
out.__array_finaliz...

def demodulate(self, f, stride=1, exp=False, deg=True)

Compute the average magnitude and phase of this `TimeSeries`
once per stride at a given frequency.

Parameters
----------
f : `float`
    frequency (Hz) at which to demodulate the signal
stride : `float`, optional
    stride (seconds) between calculations, defaul...
4.73429
4.574961
1.034826
# check window properties
if side not in ('left', 'right', 'leftright'):
    raise ValueError("side must be one of 'left', 'right', "
                     "or 'leftright'")
out = self.copy()
# identify the second stationary point away from each boundary,
...

def taper(self, side='leftright')

Taper the ends of this `TimeSeries` smoothly to zero.

Parameters
----------
side : `str`, optional
    the side of the `TimeSeries` to taper, must be one of
    `'left'`, `'right'`, or `'leftright'`

Returns
-------
out : `TimeSeries`
    a copy o...
3.831065
3.244447
1.180807
# compute the ASD
fftlength = fftlength if fftlength else _fft_length_default(self.dt)
if asd is None:
    asd = self.asd(fftlength, overlap=overlap, method=method,
                   window=window, **kwargs)
asd = asd.interpolate(1./self.duration.decompose().val...

def whiten(self, fftlength=None, overlap=0, method=DEFAULT_FFT_METHOD, window='hanning', detrend='constant', asd=None, fduration=2, highpass=None, **kwargs)

Whiten this `TimeSeries` using inverse spectrum truncation

Parameters
----------
fftlength : `float`, optional
    FFT integration length (in seconds) for ASD estimation,
    default: choose based on sample rate
overlap : `float`, optional
    number of seconds ...
5.261531
4.265552
1.233494
try:
    from scipy.signal import find_peaks
except ImportError as exc:
    exc.args = ("Must have scipy>=1.1.0 to utilize this method.",)
    raise

# Find points to gate based on a threshold
data = self.whiten(**whiten_kwargs) if whiten else self
...

def gate(self, tzero=1.0, tpad=0.5, whiten=True, threshold=50., cluster_window=0.5, **whiten_kwargs)

Removes high amplitude peaks from data using inverse Planck window.

Points will be discovered automatically using a provided threshold
and clustered within a provided time window.

Parameters
----------
tzero : `int`, optional
    half-width time duration in which the tim...
4.274475
4.359374
0.980525
pad = int(numpy.ceil(fir.size/2))
nfft = min(8*fir.size, self.size)
# condition the input data
in_ = self.copy()
window = signal.get_window(window, fir.size)
in_.value[:pad] *= window[:pad]
in_.value[-pad:] *= window[-pad:]
# if FFT length is long...

def convolve(self, fir, window='hanning')

Convolve this `TimeSeries` with an FIR filter using the
overlap-save method

Parameters
----------
fir : `numpy.ndarray`
    the time domain filter to convolve with
window : `str`, optional
    window function to apply to boundaries, default: ``'hanning'``
...
2.931277
2.803582
1.045547
self.is_compatible(mfilter)
# condition data
if whiten is True:
    fftlength = asd_kw.pop('fftlength', _fft_length_default(self.dt))
    overlap = asd_kw.pop('overlap', None)
    if overlap is None:
        overlap = re...

def correlate(self, mfilter, window='hanning', detrend='linear', whiten=False, wduration=2, highpass=None, **asd_kw)

Cross-correlate this `TimeSeries` with another signal

Parameters
----------
mfilter : `TimeSeries`
    the time domain signal to correlate with
window : `str`, optional
    window function to apply to timeseries prior to FFT,
    default: ``'hanning'``
...
4.29108
3.835301
1.118838
data = signal.detrend(self.value, type=detrend).view(type(self))
data.__metadata_finalize__(self)
data._unit = self.unit
return data

def detrend(self, detrend='constant')

Remove the trend from this `TimeSeries`

This method just wraps :func:`scipy.signal.detrend` to return
an object of the same type as the input.

Parameters
----------
detrend : `str`, optional
    the type of detrending.

Returns
-------
detrended ...
7.325825
13.733705
0.533419
zpk = filter_design.notch(frequency, self.sample_rate.value,
                          type=type, **kwargs)
return self.filter(*zpk, filtfilt=filtfilt)

def notch(self, frequency, type='iir', filtfilt=True, **kwargs)

Notch out a frequency in this `TimeSeries`.

Parameters
----------
frequency : `float`, `~astropy.units.Quantity`
    frequency (default in Hertz) at which to apply the notch
type : `str`, optional
    type of filter to apply, currently only 'iir' is supported
*...
4.593302
9.281566
0.494884
qscan, _ = qtransform.q_scan(self, mismatch=mismatch, qrange=qrange,
                             frange=frange, **kwargs)
qgram = qscan.table(snrthresh=snrthresh)
return qgram

def q_gram(self, qrange=qtransform.DEFAULT_QRANGE, frange=qtransform.DEFAULT_FRANGE, mismatch=qtransform.DEFAULT_MISMATCH, snrthresh=5.5, **kwargs)

Scan a `TimeSeries` using the multi-Q transform and return an
`EventTable` of the most significant tiles

Parameters
----------
qrange : `tuple` of `float`, optional
    `(low, high)` range of Qs to scan
frange : `tuple` of `float`, optional
    `(low, high)` ran...
4.054126
4.442862
0.912503
# delete current value if given None
if value is None:
    return delattr(self, key)

_key = "_{}".format(key)
index = "{[0]}index".format(axis)
unit = "{[0]}unit".format(axis)

# convert float to Quantity
if not isinstance(value, Quantity):
    ...

def _update_index(self, axis, key, value)

Update the current axis index based on a given key or value

This is an internal method designed to set the origin or step for
an index, whilst updating existing Index arrays as appropriate

Examples
--------
>>> self._update_index("x0", 0)
>>> self._update_index("dx", 0)
...
3.515366
3.885486
0.904743
axis = key[0]
origin = "{}0".format(axis)
delta = "d{}".format(axis)

if index is None:
    return delattr(self, key)

if not isinstance(index, Index):
    try:
        unit = index.unit
    except AttributeError:
        unit = geta...
def _set_index(self, key, index)
Set a new index array for this series
3.375111
3.350677
1.007292
try:
    return self._x0
except AttributeError:
    self._x0 = Quantity(0, self.xunit)
    return self._x0

def x0(self)

X-axis coordinate of the first data point

:type: `~astropy.units.Quantity` scalar
3.766933
3.359772
1.121187
try:
    return self._dx
except AttributeError:
    try:
        self._xindex
    except AttributeError:
        self._dx = Quantity(1, self.xunit)
    else:
        if not self.xindex.regular:
            raise AttributeError("Thi...

def dx(self)

X-axis sample separation

:type: `~astropy.units.Quantity` scalar
4.014585
3.729476
1.076447
try:
    return self._xindex
except AttributeError:
    self._xindex = Index.define(self.x0, self.dx, self.shape[0])
    return self._xindex

def xindex(self)

Positions of the data on the x-axis

:type: `~astropy.units.Quantity` array
4.169078
4.798732
0.868787
try:
    return self._dx.unit
except AttributeError:
    try:
        return self._x0.unit
    except AttributeError:
        return self._default_xunit

def xunit(self)

Unit of x-axis index

:type: `~astropy.units.Unit`
5.320044
4.801516
1.107992
from ..plot import Plot
from ..plot.text import default_unit_label

# correct for log scales and zeros
if kwargs.get('xscale') == 'log' and self.x0.value == 0:
    kwargs.setdefault('xlim', (self.dx.value, self.xspan[1]))

# make plot
plot = Plot(self, me...

def plot(self, method='plot', **kwargs)

Plot the data for this series

Returns
-------
figure : `~matplotlib.figure.Figure`
    the newly created figure, with populated Axes.

See Also
--------
matplotlib.pyplot.figure
    for documentation of keyword arguments used to create the fig...
6.377798
6.972721
0.914679
kwargs.setdefault('linestyle', kwargs.pop('where', 'steps-post'))
data = self.append(self.value[-1:], inplace=False)
return data.plot(**kwargs)
def step(self, **kwargs)
Create a step plot of this series
10.864863
8.668324
1.253398
self.x0 = self.x0 + Quantity(delta, self.xunit)
def shift(self, delta)
Shift this `Series` forward on the X-axis by ``delta``

This modifies the series in-place.

Parameters
----------
delta : `float`, `~astropy.units.Quantity`, `str`
    The amount by which to shift (in x-axis units if `float`),
    give a negative value to shift backwards ...
9.859048
10.354621
0.95214
x = Quantity(x, self.xindex.unit).value
try:
    idx = (self.xindex.value == x).nonzero()[0][0]
except IndexError as e:
    e.args = ("Value %r not found in array index" % x,)
    raise
return self[idx]

def value_at(self, x)

Return the value of this `Series` at the given `xindex` value

Parameters
----------
x : `float`, `~astropy.units.Quantity`
    the `xindex` value at which to search

Returns
-------
y : `~astropy.units.Quantity`
    the value of this Series at the given `...
4.452055
4.744074
0.938446
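A sketch of the exact-match lookup described above on a regularly sampled series:

from gwpy.timeseries import TimeSeries

ts = TimeSeries([1, 2, 3, 4], dt=0.5)  # x-index: 0.0, 0.5, 1.0, 1.5
y = ts.value_at(1.0)  # third sample, returned as a Quantity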
out = super(Array, self).diff(n=n, axis=axis)
try:
    out.x0 = self.x0 + self.dx * n
except AttributeError:  # irregular xindex
    out.x0 = self.xindex[n]
return out

def diff(self, n=1, axis=-1)

Calculate the n-th order discrete difference along given axis.

The first order difference is given by ``out[n] = a[n+1] - a[n]``
along the given axis, higher order differences are calculated by
using `diff` recursively.

Parameters
----------
n : int, optional
    ...
4.747847
7.633544
0.621971
self.is_compatible(other)
if isinstance(other, type(self)):
    if abs(float(self.xspan[1] - other.xspan[0])) < tol:
        return 1
    elif abs(float(other.xspan[1] - self.xspan[0])) < tol:
        return -1
    return 0
elif type(other) in [li...

def is_contiguous(self, other, tol=1/2.**18)

Check whether other is contiguous with self.

Parameters
----------
other : `Series`, `numpy.ndarray`
    another series of the same type to test for contiguity
tol : `float`, optional
    the numerical tolerance of the test

Returns
-------
1
    ...
2.663657
3.120688
0.853548
if isinstance(other, type(self)):
    # check step size, if possible
    try:
        if not self.dx == other.dx:
            raise ValueError("%s sample sizes do not match: "
                             "%s vs %s." % (type(self).__name__,
                             ...

def is_compatible(self, other)

Check whether this series and other have compatible metadata

This method tests that the `sample size <Series.dx>`, and the
`~Series.unit` match.
3.325328
3.052759
1.089286
out = other.append(self, inplace=False, gap=gap, pad=pad, resize=resize)
if inplace:
    self.resize(out.shape, refcheck=False)
    self[:] = out[:]
    self.x0 = out.x0.copy()
    del out
    return self
return out

def prepend(self, other, inplace=True, pad=None, gap=None, resize=True)

Connect another series onto the start of the current one.

Parameters
----------
other : `Series`
    another series of the same type as this one
inplace : `bool`, optional
    perform operation in-place, modifying current series,
    otherwise copy data and retu...
4.027052
5.091743
0.790899
return self.append(other, inplace=inplace, resize=False)
def update(self, other, inplace=True)
Update this series by appending new data from an other and dropping
the same amount of data off the start.

This is a convenience method that just calls `~Series.append` with
`resize=False`.
9.144273
6.329958
1.444602
x0, x1 = self.xspan
xtype = type(x0)
if isinstance(start, Quantity):
    start = start.to(self.xunit).value
if isinstance(end, Quantity):
    end = end.to(self.xunit).value

# pin early starts to time-series start
if start == x0:
    start...

def crop(self, start=None, end=None, copy=False)

Crop this series to the given x-axis extent.

Parameters
----------
start : `float`, optional
    lower limit of x-axis to crop to, defaults to current `~Series.x0`
end : `float`, optional
    upper limit of x-axis to crop to, defaults to current series end
...
2.621276
2.653542
0.98784
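A sketch of crop semantics on a series spanning [0, 8) seconds:

from gwpy.timeseries import TimeSeries

ts = TimeSeries(range(8), dt=1)
middle = ts.crop(2, 6)  # keeps the samples with 2 <= t < 6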
# format arguments
kwargs.setdefault('mode', 'constant')
if isinstance(pad_width, int):
    pad_width = (pad_width,)

# form pad and view to this type
new = numpy.pad(self, pad_width, **kwargs).view(type(self))

# numpy.pad has stripped all metadata, so cop...

def pad(self, pad_width, **kwargs)

Pad this series to a new size

Parameters
----------
pad_width : `int`, pair of `ints`
    number of samples by which to pad each end of the array.
    Single int to pad both ends by the same amount, or
    (before, after) `tuple` to give uneven padding
**kwargs
    ...
7.385843
8.818339
0.837555
# check Series compatibility
self.is_compatible(other)
if (self.xunit == second) and (other.xspan[0] < self.xspan[0]):
    other = other.crop(start=self.xspan[0])
if (self.xunit == second) and (other.xspan[1] > self.xspan[1]):
    other = other.crop(end=self.xspa...

def inject(self, other)

Add two compatible `Series` along their shared x-axis values.

Parameters
----------
other : `Series`
    a `Series` whose xindex intersects with `self.xindex`

Returns
-------
out : `Series`
    the sum of `self` and `other` along their shared x-axis valu...
4.636046
3.372339
1.374727
if urlparse(url).netloc.startswith('geosegdb.'):  # only DB2 server
    return cls.query_segdb
return cls.query_dqsegdb

def _select_query_method(cls, url)

Select the correct query method based on the URL

Works for `DataQualityFlag` and `DataQualityDict`
27.597717
28.351383
0.973417
# user passed SegmentList
if len(args) == 1 and isinstance(args[0], SegmentList):
    return args[0]

# otherwise unpack two arguments as a segment
if len(args) == 1:
    args = args[0]

# if not two arguments, panic
try:
    start, end = args
except ValueError as exc:
    ...

def _parse_query_segments(args, func)

Parse *args for query_dqsegdb() or query_segdb()

Returns a SegmentList in all cases
5.494829
5.232364
1.050162
query_ = _select_query_method(
    cls, kwargs.get('url', DEFAULT_SEGMENT_SERVER))
return query_(flag, *args, **kwargs)

def query(cls, flag, *args, **kwargs)

Query for segments of a given flag

This method intelligently selects the `~DataQualityFlag.query_segdb`
or the `~DataQualityFlag.query_dqsegdb` methods based on the
``url`` kwarg given.

Parameters
----------
flag : `str`
    The name of the flag for which to que...
10.703208
5.982172
1.789184
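A hedged usage sketch for the query record; the flag name and GPS interval are illustrative, and the call contacts a remote segment database.

from gwpy.segments import DataQualityFlag

flag = DataQualityFlag.query('H1:DMT-ANALYSIS_READY:1',
                             1126259446, 1126259478)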
warnings.warn("query_segdb is deprecated and will be removed in a " "future release", DeprecationWarning) # parse arguments qsegs = _parse_query_segments(args, cls.query_segdb) # process query try: flags = DataQualityDict.query_segdb([...
def query_segdb(cls, flag, *args, **kwargs)
Query the initial LIGO segment database for the given flag Parameters ---------- flag : `str` The name of the flag for which to query *args Either, two `float`-like numbers indicating the GPS [start, stop) interval, or a `SegmentList` def...
3.493557
3.422475
1.020769