Dataset column schema (for string and list columns, Min/Max give lengths; for `idx`, the value range):

| Column           | Type                      | Min | Max   |
|------------------|---------------------------|-----|-------|
| repo             | string                    | 7   | 55    |
| path             | string                    | 4   | 127   |
| func_name        | string                    | 1   | 88    |
| original_string  | string                    | 75  | 19.8k |
| language         | string (1 distinct value) |     |       |
| code             | string                    | 75  | 19.8k |
| code_tokens      | list                      | 20  | 707   |
| docstring        | string                    | 3   | 17.3k |
| docstring_tokens | list                      | 3   | 222   |
| sha              | string                    | 40  | 40    |
| url              | string                    | 87  | 242   |
| partition        | string (1 distinct value) |     |       |
| idx              | int64                     | 0   | 252k  |
gwpy/gwpy
gwpy/io/ligolw.py
build_content_handler
def build_content_handler(parent, filter_func):
    """Build a `~xml.sax.handler.ContentHandler` with a given filter
    """
    from ligo.lw.lsctables import use_in

    class _ContentHandler(parent):
        # pylint: disable=too-few-public-methods
        def __init__(self, document):
            super(_ContentHandler, self).__init__(document, filter_func)

    return use_in(_ContentHandler)
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/ligolw.py#L194-L204
train
211,500
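A minimal sketch of how this factory might be used, assuming a parent such as `ligo.lw.ligolw.PartialLIGOLWContentHandler` whose constructor accepts `(document, element_filter)`; the filter and file name are illustrative only:

```python
from ligo.lw.ligolw import PartialLIGOLWContentHandler

def tables_only(name, attrs):
    # element filter: receives each element's tag name and attributes
    return name == 'Table'

handler = build_content_handler(PartialLIGOLWContentHandler, tables_only)
xmldoc = read_ligolw('events.xml', contenthandler=handler)  # placeholder file
```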
gwpy/gwpy
gwpy/io/ligolw.py
read_ligolw
def read_ligolw(source, contenthandler=LIGOLWContentHandler, **kwargs):
    """Read one or more LIGO_LW format files

    Parameters
    ----------
    source : `str`, `file`
        the open file or file path to read

    contenthandler : `~xml.sax.handler.ContentHandler`, optional
        content handler used to parse document

    verbose : `bool`, optional
        be verbose when reading files, default: `False`

    Returns
    -------
    xmldoc : :class:`~ligo.lw.ligolw.Document`
        the document object as parsed from the file(s)
    """
    from ligo.lw.ligolw import Document
    from ligo.lw import types
    from ligo.lw.lsctables import use_in
    from ligo.lw.utils import (load_url, ligolw_add)

    # mock ToPyType to link to numpy dtypes
    topytype = types.ToPyType.copy()
    for key in types.ToPyType:
        if key in types.ToNumPyType:
            types.ToPyType[key] = numpy.dtype(types.ToNumPyType[key]).type

    contenthandler = use_in(contenthandler)

    # read one or more files into a single Document
    source = file_list(source)
    try:
        if len(source) == 1:
            return load_url(
                source[0],
                contenthandler=contenthandler,
                **kwargs
            )
        return ligolw_add.ligolw_add(
            Document(),
            source,
            contenthandler=contenthandler,
            **kwargs
        )
    except LigolwElementError as exc:
        # failed to read with ligo.lw,
        # try again with glue.ligolw (ilwdchar_compat)
        if LIGO_LW_COMPAT_ERROR.search(str(exc)):
            try:
                return read_ligolw(
                    source,
                    contenthandler=contenthandler,
                    ilwdchar_compat=True,
                    **kwargs
                )
            except Exception:  # if fails for any reason, use original error
                pass
        raise
    finally:  # replace ToPyType
        types.ToPyType = topytype
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/ligolw.py#L210-L272
train
211,501
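Illustrative calls (file names are placeholders): a single source goes through `load_url`, while a list of sources is merged into one `Document` via `ligolw_add`:

```python
# one file -> one Document
xmldoc = read_ligolw('H1-EVENTS.xml.gz', verbose=True)

# several files -> a single merged Document
merged = read_ligolw(['part-0.xml', 'part-1.xml'])
```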
gwpy/gwpy
gwpy/io/ligolw.py
with_read_ligolw
def with_read_ligolw(func=None, contenthandler=None):
    """Decorate a LIGO_LW-reading function to open a filepath if needed

    ``func`` should be written to presume a
    :class:`~ligo.lw.ligolw.Document` as the first positional argument
    """
    def decorator(func_):
        # pylint: disable=missing-docstring
        @wraps(func_)
        def decorated_func(source, *args, **kwargs):
            # pylint: disable=missing-docstring
            from ligo.lw.ligolw import Document
            from glue.ligolw.ligolw import Document as GlueDocument
            if not isinstance(source, (Document, GlueDocument)):
                read_kw = {
                    'contenthandler': kwargs.pop('contenthandler',
                                                 contenthandler),
                    'verbose': kwargs.pop('verbose', False),
                }
                return func_(read_ligolw(source, **read_kw), *args, **kwargs)
            return func_(source, *args, **kwargs)

        return decorated_func

    if func is not None:
        return decorator(func)
    return decorator
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/ligolw.py#L275-L301
train
211,502
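A short sketch of the decorator in use; `count_children` is a hypothetical function that expects a parsed `Document` as its first argument:

```python
from ligo.lw.ligolw import LIGOLWContentHandler

@with_read_ligolw(contenthandler=LIGOLWContentHandler)
def count_children(xmldoc):
    # receives a Document whether the caller passed one or a file path
    return len(xmldoc.childNodes)

n = count_children('events.xml')  # the path is parsed via read_ligolw first
```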
gwpy/gwpy
gwpy/io/ligolw.py
open_xmldoc
def open_xmldoc(fobj, **kwargs):
    """Try and open an existing LIGO_LW-format file, or create a new Document

    Parameters
    ----------
    fobj : `str`, `file`
        file path or open file object to read

    **kwargs
        other keyword arguments to pass to
        :func:`~ligo.lw.utils.load_filename`, or
        :func:`~ligo.lw.utils.load_fileobj` as appropriate

    Returns
    -------
    xmldoc : :class:`~ligo.lw.ligolw.Document`
        either the `Document` as parsed from an existing file, or a new,
        empty `Document`
    """
    from ligo.lw.ligolw import (Document, LIGOLWContentHandler)
    from ligo.lw.lsctables import use_in
    from ligo.lw.utils import (load_filename, load_fileobj)

    use_in(kwargs.setdefault('contenthandler', LIGOLWContentHandler))
    try:  # try and load existing file
        if isinstance(fobj, string_types):
            return load_filename(fobj, **kwargs)
        if isinstance(fobj, FILE_LIKE):
            return load_fileobj(fobj, **kwargs)[0]
    except (OSError, IOError):  # or just create a new Document
        return Document()
    except LigolwElementError as exc:
        if LIGO_LW_COMPAT_ERROR.search(str(exc)):
            try:
                return open_xmldoc(fobj, ilwdchar_compat=True, **kwargs)
            except Exception:  # for any reason, raise original
                pass
        raise
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/ligolw.py#L392-L430
train
211,503
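Because a missing file yields a fresh empty `Document` while parse errors still raise, this function suits create-or-update workflows; a brief sketch (the path is a placeholder):

```python
xmldoc = open_xmldoc('results.xml')  # parsed if the file exists
print(len(xmldoc.childNodes))        # 0 for a brand-new empty Document
```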
gwpy/gwpy
gwpy/io/ligolw.py
write_tables
def write_tables(target, tables, append=False, overwrite=False, **kwargs):
    """Write a LIGO_LW table to file

    Parameters
    ----------
    target : `str`, `file`, :class:`~ligo.lw.ligolw.Document`
        the file or document to write into

    tables : `list`, `tuple` of :class:`~ligo.lw.table.Table`
        the tables to write

    append : `bool`, optional, default: `False`
        if `True`, append to an existing file/table, otherwise `overwrite`

    overwrite : `bool`, optional, default: `False`
        if `True`, delete an existing instance of the table type, otherwise
        append new rows

    **kwargs
        other keyword arguments to pass to
        :func:`~ligo.lw.utils.load_filename`, or
        :func:`~ligo.lw.utils.load_fileobj` as appropriate
    """
    from ligo.lw.ligolw import (Document, LIGO_LW, LIGOLWContentHandler)
    from ligo.lw import utils as ligolw_utils

    # allow writing directly to XML
    if isinstance(target, (Document, LIGO_LW)):
        xmldoc = target
    # open existing document, if possible
    elif append:
        xmldoc = open_xmldoc(
            target, contenthandler=kwargs.pop('contenthandler',
                                              LIGOLWContentHandler))
    # fail on existing document and not overwriting
    elif (not overwrite and isinstance(target, string_types)
          and os.path.isfile(target)):
        raise IOError("File exists: {}".format(target))
    else:  # or create a new document
        xmldoc = Document()

    # convert table to format
    write_tables_to_document(xmldoc, tables, overwrite=overwrite)

    # write file
    if isinstance(target, string_types):
        kwargs.setdefault('gz', target.endswith('.gz'))
        ligolw_utils.write_filename(xmldoc, target, **kwargs)
    elif isinstance(target, FILE_LIKE):
        kwargs.setdefault('gz', target.name.endswith('.gz'))
        ligolw_utils.write_fileobj(xmldoc, target, **kwargs)
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/ligolw.py#L498-L548
train
211,504
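A hypothetical create-then-append round trip with an empty table (file name illustrative):

```python
from ligo.lw.lsctables import New, SnglBurstTable

table = New(SnglBurstTable)
write_tables('events.xml', [table])               # create the file
write_tables('events.xml', [table], append=True)  # re-open and append
```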
gwpy/gwpy
gwpy/io/ligolw.py
to_table_type
def to_table_type(val, cls, colname):
    """Cast a value to the correct type for inclusion in a LIGO_LW table

    This method returns the input unmodified if a type mapping for
    ``colname`` isn't found.

    Parameters
    ----------
    val : `object`
        The input object to convert, of any type

    cls : `type`, subclass of :class:`~ligo.lw.table.Table`
        the table class to map against

    colname : `str`
        The name of the mapping column

    Returns
    -------
    obj : `object`
        The input ``val`` cast to the correct type

    Examples
    --------
    >>> from gwpy.io.ligolw import to_table_type as to_ligolw_type
    >>> from ligo.lw.lsctables import SnglBurstTable
    >>> print(to_ligolw_type(1.0, SnglBurstTable, 'central_freq'))
    1.0

    ID integers are converted to fancy ILWD objects

    >>> print(to_ligolw_type(1, SnglBurstTable, 'process_id'))
    sngl_burst:process_id:1

    Formatted fancy ILWD objects are left untouched:

    >>> from ligo.lw.ilwd import ilwdchar
    >>> pid = ilwdchar('process:process_id:0')
    >>> print(to_ligolw_type(pid, SnglBurstTable, 'process_id'))
    process:process_id:0
    """
    from ligo.lw.types import (
        ToNumPyType as numpytypes,
        ToPyType as pytypes,
    )

    # if nothing to do...
    if val is None or colname not in cls.validcolumns:
        return val

    llwtype = cls.validcolumns[colname]

    # don't mess with formatted IlwdChar
    if llwtype == 'ilwd:char':
        return _to_ilwd(val, cls.tableName, colname,
                        ilwdchar_compat=_is_glue_ligolw_object(cls))
    # otherwise map to numpy or python types
    try:
        return numpy.typeDict[numpytypes[llwtype]](val)
    except KeyError:
        return pytypes[llwtype](val)
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/ligolw.py#L592-L652
train
211,505
gwpy/gwpy
gwpy/io/ligolw.py
is_ligolw
def is_ligolw(origin, filepath, fileobj, *args, **kwargs):
    """Identify a file object as LIGO_LW-format XML
    """
    # pylint: disable=unused-argument
    if fileobj is not None:
        loc = fileobj.tell()
        fileobj.seek(0)
        try:
            line1 = fileobj.readline().lower()
            line2 = fileobj.readline().lower()
            try:
                return (line1.startswith(XML_SIGNATURE)
                        and line2.startswith((LIGOLW_SIGNATURE,
                                              LIGOLW_ELEMENT)))
            except TypeError:  # bytes vs str
                return (line1.startswith(XML_SIGNATURE.decode('utf-8'))
                        and line2.startswith(
                            (LIGOLW_SIGNATURE.decode('utf-8'),
                             LIGOLW_ELEMENT.decode('utf-8'))))
        finally:
            fileobj.seek(loc)

    try:
        from ligo.lw.ligolw import Element
    except ImportError:
        return False
    try:
        from glue.ligolw.ligolw import Element as GlueElement
    except ImportError:
        element_types = (Element,)
    else:
        element_types = (Element, GlueElement)
    return len(args) > 0 and isinstance(args[0], element_types)
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/ligolw.py#L672-L701
train
211,506
gwpy/gwpy
gwpy/timeseries/io/hdf5.py
read_hdf5_timeseries
def read_hdf5_timeseries(h5f, path=None, start=None, end=None, **kwargs):
    """Read a `TimeSeries` from HDF5
    """
    # read data
    kwargs.setdefault('array_type', TimeSeries)
    series = read_hdf5_array(h5f, path=path, **kwargs)
    # crop if needed
    if start is not None or end is not None:
        return series.crop(start, end)
    return series
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/hdf5.py#L37-L46
train
211,507
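A minimal sketch, assuming an HDF5 file opened with `h5py` that contains the named dataset (the path and GPS times are placeholders):

```python
import h5py

with h5py.File('strain.h5', 'r') as h5f:
    ts = read_hdf5_timeseries(h5f, path='L1:STRAIN',
                              start=1126259460, end=1126259464)
```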
gwpy/gwpy
gwpy/timeseries/io/hdf5.py
read_hdf5_dict
def read_hdf5_dict(h5f, names=None, group=None, **kwargs):
    """Read a `TimeSeriesDict` from HDF5
    """
    # find group from which to read
    if group:
        h5g = h5f[group]
    else:
        h5g = h5f
    # find list of names to read
    if names is None:
        names = [key for key in h5g if _is_timeseries_dataset(h5g[key])]
    # read names
    out = kwargs.pop('dict_type', TimeSeriesDict)()
    kwargs.setdefault('array_type', out.EntryClass)
    for name in names:
        out[name] = read_hdf5_timeseries(h5g[name], **kwargs)
    return out
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/hdf5.py#L56-L75
train
211,508
gwpy/gwpy
gwpy/timeseries/io/hdf5.py
write_hdf5_dict
def write_hdf5_dict(tsdict, h5f, group=None, **kwargs):
    """Write a `TimeSeriesBaseDict` to HDF5

    Each series in the dict is written as a dataset in the group
    """
    # create group if needed
    if group and group not in h5f:
        h5g = h5f.create_group(group)
    elif group:
        h5g = h5f[group]
    else:
        h5g = h5f
    # write each timeseries
    kwargs.setdefault('format', 'hdf5')
    for key, series in tsdict.items():
        series.write(h5g, path=str(key), **kwargs)
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/hdf5.py#L94-L110
train
211,509
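A sketch of a dict round trip through a named HDF5 group; `tsdict` is an assumed `TimeSeriesDict` of channel-name to `TimeSeries` entries:

```python
import h5py

with h5py.File('channels.h5', 'w') as h5f:
    write_hdf5_dict(tsdict, h5f, group='raw')

with h5py.File('channels.h5', 'r') as h5f:
    back = read_hdf5_dict(h5f, group='raw')
```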
gwpy/gwpy
gwpy/signal/spectral/_pycbc.py
welch
def welch(timeseries, segmentlength, noverlap=None, scheme=None, **kwargs):
    """Calculate a PSD using Welch's method with a mean average

    Parameters
    ----------
    timeseries : `~gwpy.timeseries.TimeSeries`
        input `TimeSeries` data.

    segmentlength : `int`
        number of samples in single average.

    noverlap : `int`
        number of samples to overlap between segments, defaults to 50%.

    scheme : `pycbc.scheme.Scheme`, optional
        processing scheme in which to execute FFT, default: `None`

    **kwargs
        other keyword arguments to pass to :func:`pycbc.psd.welch`

    Returns
    -------
    spectrum : `~gwpy.frequencyseries.FrequencySeries`
        average power `FrequencySeries`

    See also
    --------
    pycbc.psd.welch
    """
    from pycbc.psd import welch as pycbc_welch

    # default to 'standard' welch
    kwargs.setdefault('avg_method', 'mean')

    # apply the documented 50% default overlap (otherwise the
    # seg_stride arithmetic below fails on noverlap=None)
    if noverlap is None:
        noverlap = segmentlength // 2

    # get scheme
    if scheme is None:
        scheme = null_context()

    # generate pycbc FrequencySeries
    with scheme:
        pycbc_fseries = pycbc_welch(timeseries.to_pycbc(copy=False),
                                    seg_len=segmentlength,
                                    seg_stride=segmentlength-noverlap,
                                    **kwargs)

    # return GWpy FrequencySeries
    fseries = FrequencySeries.from_pycbc(pycbc_fseries, copy=False)
    fseries.name = timeseries.name
    fseries.override_unit(scale_timeseries_unit(
        timeseries.unit, scaling='density'))
    return fseries
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_pycbc.py#L34-L84
train
211,510
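A hypothetical PSD computation on an assumed `TimeSeries` named `data`, using 4-second FFT segments and the documented 50% default overlap:

```python
seglen = int(4 * data.sample_rate.value)
psd = welch(data, seglen)                          # mean average
medpsd = welch(data, seglen, avg_method='median')  # median average instead
```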
gwpy/gwpy
gwpy/spectrogram/spectrogram.py
_ordinal
def _ordinal(n):
    """Returns the ordinal string for a given integer

    See https://stackoverflow.com/a/20007730/1307974

    Parameters
    ----------
    n : `int`
        the number to convert to ordinal

    Examples
    --------
    >>> _ordinal(11)
    '11th'
    >>> _ordinal(102)
    '102nd'
    """
    idx = int((n//10 % 10 != 1) * (n % 10 < 4) * n % 10)
    return '{}{}'.format(n, "tsnrhtdd"[idx::4])
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/spectrogram/spectrogram.py#L44-L62
train
211,511
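The packed string is worth unpacking: `"tsnrhtdd"` interleaves the four English suffixes so that `[idx::4]` selects one of them, while `idx` evaluates to 0 for teens and for trailing digits of 4 or more:

```python
# every fourth character from offsets 0-3 gives the four suffixes
print(["tsnrhtdd"[i::4] for i in range(4)])  # ['th', 'st', 'nd', 'rd']

assert [_ordinal(n) for n in (1, 2, 3, 4, 11, 13, 21)] == \
    ['1st', '2nd', '3rd', '4th', '11th', '13th', '21st']
```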
gwpy/gwpy
gwpy/spectrogram/spectrogram.py
Spectrogram.ratio
def ratio(self, operand):
    """Calculate the ratio of this `Spectrogram` against a reference

    Parameters
    ----------
    operand : `str`, `FrequencySeries`, `Quantity`
        a `~gwpy.frequencyseries.FrequencySeries` or
        `~astropy.units.Quantity` to weight against, or one of

        - ``'mean'`` : weight against the mean of each spectrum
          in this Spectrogram
        - ``'median'`` : weight against the median of each spectrum
          in this Spectrogram

    Returns
    -------
    spectrogram : `Spectrogram`
        a new `Spectrogram`

    Raises
    ------
    ValueError
        if ``operand`` is given as a `str` that isn't supported
    """
    if isinstance(operand, string_types):
        if operand == 'mean':
            operand = self.mean(axis=0)
        elif operand == 'median':
            operand = self.median(axis=0)
        else:
            raise ValueError("operand %r unrecognised, please give a "
                             "Quantity or one of: 'mean', 'median'"
                             % operand)
    out = self / operand
    return out
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/spectrogram/spectrogram.py#L288-L322
train
211,512
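Illustrative calls on an assumed spectrogram `sg` and reference `FrequencySeries` named `ref`:

```python
norm = sg.ratio('median')  # weight by the per-frequency median
rel = sg.ratio(ref)        # weight by an explicit reference spectrum
```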
gwpy/gwpy
gwpy/spectrogram/spectrogram.py
Spectrogram.plot
def plot(self, figsize=(12, 6), xscale='auto-gps', **kwargs):
    """Plot the data for this `Spectrogram`

    Parameters
    ----------
    **kwargs
        all keyword arguments are passed along to underlying
        functions, see below for references

    Returns
    -------
    plot : `~gwpy.plot.Plot`
        the `Plot` containing the data

    See Also
    --------
    matplotlib.pyplot.figure
        for documentation of keyword arguments used to create the
        figure
    matplotlib.figure.Figure.add_subplot
        for documentation of keyword arguments used to create the
        axes
    gwpy.plot.Axes.imshow or gwpy.plot.Axes.pcolormesh
        for documentation of keyword arguments used in rendering the
        `Spectrogram` data
    """
    if 'imshow' in kwargs:
        warnings.warn('the imshow keyword for Spectrogram.plot was '
                      'removed, please pass method=\'imshow\' instead',
                      DeprecationWarning)
        kwargs.setdefault('method',
                          'imshow' if kwargs.pop('imshow') else 'pcolormesh')
    kwargs.update(figsize=figsize, xscale=xscale)
    return super(Spectrogram, self).plot(**kwargs)
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/spectrogram/spectrogram.py#L324-L357
train
211,513
gwpy/gwpy
gwpy/spectrogram/spectrogram.py
Spectrogram.from_spectra
def from_spectra(cls, *spectra, **kwargs):
    """Build a new `Spectrogram` from a list of spectra.

    Parameters
    ----------
    *spectra
        any number of `~gwpy.frequencyseries.FrequencySeries` series

    dt : `float`, `~astropy.units.Quantity`, optional
        stride between given spectra

    Returns
    -------
    Spectrogram
        a new `Spectrogram` from a vertical stacking of the spectra

        The new object takes the metadata from the first given
        `~gwpy.frequencyseries.FrequencySeries` if not given explicitly

    Notes
    -----
    Each `~gwpy.frequencyseries.FrequencySeries` passed to this
    constructor must be the same length.
    """
    data = numpy.vstack([s.value for s in spectra])
    spec1 = list(spectra)[0]
    if not all(s.f0 == spec1.f0 for s in spectra):
        raise ValueError("Cannot stack spectra with different f0")
    if not all(s.df == spec1.df for s in spectra):
        raise ValueError("Cannot stack spectra with different df")
    kwargs.setdefault('name', spec1.name)
    kwargs.setdefault('channel', spec1.channel)
    kwargs.setdefault('epoch', spec1.epoch)
    kwargs.setdefault('f0', spec1.f0)
    kwargs.setdefault('df', spec1.df)
    kwargs.setdefault('unit', spec1.unit)
    if not ('dt' in kwargs or 'times' in kwargs):
        try:
            kwargs.setdefault('dt', spectra[1].epoch.gps - spec1.epoch.gps)
        except (AttributeError, IndexError):
            raise ValueError("Cannot determine dt (time-spacing) for "
                             "Spectrogram from inputs")
    return Spectrogram(data, **kwargs)
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/spectrogram/spectrogram.py#L360-L400
train
211,514
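A sketch, assuming `psds` is a list of equal-length `FrequencySeries` (one per 4-second stride):

```python
sg = Spectrogram.from_spectra(*psds, dt=4)
```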
gwpy/gwpy
gwpy/spectrogram/spectrogram.py
Spectrogram.percentile
def percentile(self, percentile):
    """Calculate a given spectral percentile for this `Spectrogram`.

    Parameters
    ----------
    percentile : `float`
        percentile (0 - 100) of the bins to compute

    Returns
    -------
    spectrum : `~gwpy.frequencyseries.FrequencySeries`
        the given percentile `FrequencySeries` calculated from this
        `SpectralVariance`
    """
    out = scipy.percentile(self.value, percentile, axis=0)
    if self.name is not None:
        name = '{}: {} percentile'.format(self.name, _ordinal(percentile))
    else:
        name = None
    return FrequencySeries(out, epoch=self.epoch, channel=self.channel,
                           name=name, f0=self.f0, df=self.df,
                           frequencies=(hasattr(self, '_frequencies')
                                        and self.frequencies or None))
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/spectrogram/spectrogram.py#L402-L424
train
211,515
gwpy/gwpy
gwpy/spectrogram/spectrogram.py
Spectrogram.variance
def variance(self, bins=None, low=None, high=None, nbins=500, log=False,
             norm=False, density=False):
    """Calculate the `SpectralVariance` of this `Spectrogram`.

    Parameters
    ----------
    bins : `~numpy.ndarray`, optional, default `None`
        array of histogram bin edges, including the rightmost edge

    low : `float`, optional, default: `None`
        left edge of lowest amplitude bin, only read if ``bins`` is not
        given

    high : `float`, optional, default: `None`
        right edge of highest amplitude bin, only read if ``bins`` is not
        given

    nbins : `int`, optional, default: `500`
        number of bins to generate, only read if ``bins`` is not given

    log : `bool`, optional, default: `False`
        calculate amplitude bins over a logarithmic scale, only read if
        ``bins`` is not given

    norm : `bool`, optional, default: `False`
        normalise bin counts to a unit sum

    density : `bool`, optional, default: `False`
        normalise bin counts to a unit integral

    Returns
    -------
    specvar : `SpectralVariance`
        2D-array of spectral frequency-amplitude counts

    See Also
    --------
    :func:`numpy.histogram`
        for details on specifying bins and weights
    """
    from ..frequencyseries import SpectralVariance
    return SpectralVariance.from_spectrogram(
        self, bins=bins, low=low, high=high, nbins=nbins, log=log,
        norm=norm, density=density)
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/spectrogram/spectrogram.py#L499-L537
train
211,516
gwpy/gwpy
gwpy/spectrogram/spectrogram.py
Spectrogram.crop_frequencies
def crop_frequencies(self, low=None, high=None, copy=False):
    """Crop this `Spectrogram` to the specified frequencies

    Parameters
    ----------
    low : `float`
        lower frequency bound for cropped `Spectrogram`

    high : `float`
        upper frequency bound for cropped `Spectrogram`

    copy : `bool`
        if `False` return a view of the original data, otherwise create
        a fresh memory copy

    Returns
    -------
    spec : `Spectrogram`
        A new `Spectrogram` with a subset of data from the frequency
        axis
    """
    if low is not None:
        low = units.Quantity(low, self._default_yunit)
    if high is not None:
        high = units.Quantity(high, self._default_yunit)
    # check low frequency
    if low is not None and low == self.f0:
        low = None
    elif low is not None and low < self.f0:
        warnings.warn('Spectrogram.crop_frequencies given low frequency '
                      'cutoff below f0 of the input Spectrogram. Low '
                      'frequency crop will have no effect.')
    # check high frequency
    if high is not None and high.value == self.band[1]:
        high = None
    elif high is not None and high.value > self.band[1]:
        warnings.warn('Spectrogram.crop_frequencies given high frequency '
                      'cutoff above cutoff of the input Spectrogram. High '
                      'frequency crop will have no effect.')
    # find low index
    if low is None:
        idx0 = None
    else:
        idx0 = int(float(low.value - self.f0.value) // self.df.value)
    # find high index
    if high is None:
        idx1 = None
    else:
        idx1 = int(float(high.value - self.f0.value) // self.df.value)
    # crop
    if copy:
        return self[:, idx0:idx1].copy()
    return self[:, idx0:idx1]
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/spectrogram/spectrogram.py#L541-L591
train
211,517
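Illustrative use on an assumed spectrogram `sg`, keeping only the 30-500 Hz band:

```python
band = sg.crop_frequencies(low=30, high=500, copy=True)  # copy, not a view
```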
gwpy/gwpy
gwpy/plot/colorbar.py
find_mappable
def find_mappable(*axes):
    """Find the most recently added mappable layer in the given axes

    Parameters
    ----------
    *axes : `~matplotlib.axes.Axes`
        one or more axes to search for a mappable
    """
    for ax in axes:
        for aset in ('collections', 'images'):
            try:
                return getattr(ax, aset)[-1]
            except (AttributeError, IndexError):
                continue
    raise ValueError("Cannot determine mappable layer on any axes "
                     "for this colorbar")
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/colorbar.py#L99-L114
train
211,518
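A minimal matplotlib demonstration: `imshow` appends to `ax.images`, which this helper searches after `ax.collections`:

```python
import numpy
from matplotlib import pyplot as plt

fig, ax = plt.subplots()
ax.imshow(numpy.random.rand(8, 8))
fig.colorbar(find_mappable(ax), ax=ax)
```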
gwpy/gwpy
gwpy/io/registry.py
get_read_format
def get_read_format(cls, source, args, kwargs):
    """Determine the read format for a given input source
    """
    ctx = None
    if isinstance(source, FILE_LIKE):
        fileobj = source
        filepath = source.name if hasattr(source, 'name') else None
    else:
        filepath = source
        try:
            ctx = get_readable_fileobj(filepath, encoding='binary')
            fileobj = ctx.__enter__()  # pylint: disable=no-member
        except IOError:
            raise
        except Exception:  # pylint: disable=broad-except
            fileobj = None
    try:
        return get_format('read', cls, filepath, fileobj, args, kwargs)
    finally:
        if ctx is not None:
            ctx.__exit__(*sys.exc_info())
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/registry.py#L78-L98
train
211,519
gwpy/gwpy
gwpy/timeseries/io/wav.py
read
def read(fobj, **kwargs):
    """Read a WAV file into a `TimeSeries`

    Parameters
    ----------
    fobj : `file`, `str`
        open file-like object or filename to read from

    **kwargs
        all keyword arguments are passed onto :func:`scipy.io.wavfile.read`

    See also
    --------
    scipy.io.wavfile.read
        for details on how the WAV file is actually read

    Examples
    --------
    >>> from gwpy.timeseries import TimeSeries
    >>> t = TimeSeries.read('test.wav')
    """
    fsamp, arr = wavfile.read(fobj, **kwargs)
    return TimeSeries(arr, sample_rate=fsamp)
python
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/wav.py#L35-L57
train
211,520
gwpy/gwpy
gwpy/timeseries/io/wav.py
write
def write(series, output, scale=None):
    """Write a `TimeSeries` to a WAV file

    Parameters
    ----------
    series : `TimeSeries`
        the series to write

    output : `file`, `str`
        the file object or filename to write to

    scale : `float`, optional
        the factor to apply to scale the data to (-1.0, 1.0),
        pass `scale=1` to not apply any scale, otherwise
        the data will be auto-scaled

    See also
    --------
    scipy.io.wavfile.write
        for details on how the WAV file is actually written

    Examples
    --------
    >>> from gwpy.timeseries import TimeSeries
    >>> t = TimeSeries([1, 2, 3, 4, 5])
    >>> t.write('test.wav')
    """
    fsamp = int(series.sample_rate.decompose().value)
    if scale is None:
        scale = 1 / numpy.abs(series.value).max()
    data = (series.value * scale).astype('float32')
    return wavfile.write(output, fsamp, data)
python
def write(series, output, scale=None):
    """Write a `TimeSeries` to a WAV file

    Parameters
    ----------
    series : `TimeSeries`
        the series to write

    output : `file`, `str`
        the file object or filename to write to

    scale : `float`, optional
        the factor to apply to scale the data to (-1.0, 1.0),
        pass `scale=1` to not apply any scale, otherwise
        the data will be auto-scaled

    See also
    --------
    scipy.io.wavfile.write
        for details on how the WAV file is actually written

    Examples
    --------
    >>> from gwpy.timeseries import TimeSeries
    >>> t = TimeSeries([1, 2, 3, 4, 5])
    >>> t.write('test.wav')
    """
    fsamp = int(series.sample_rate.decompose().value)
    if scale is None:
        scale = 1 / numpy.abs(series.value).max()
    data = (series.value * scale).astype('float32')
    return wavfile.write(output, fsamp, data)
[ "def", "write", "(", "series", ",", "output", ",", "scale", "=", "None", ")", ":", "fsamp", "=", "int", "(", "series", ".", "sample_rate", ".", "decompose", "(", ")", ".", "value", ")", "if", "scale", "is", "None", ":", "scale", "=", "1", "/", "n...
Write a `TimeSeries` to a WAV file

Parameters
----------
series : `TimeSeries`
    the series to write

output : `file`, `str`
    the file object or filename to write to

scale : `float`, optional
    the factor to apply to scale the data to (-1.0, 1.0),
    pass `scale=1` to not apply any scale, otherwise
    the data will be auto-scaled

See also
--------
scipy.io.wavfile.write
    for details on how the WAV file is actually written

Examples
--------
>>> from gwpy.timeseries import TimeSeries
>>> t = TimeSeries([1, 2, 3, 4, 5])
>>> t.write('test.wav')
[ "Write", "a", "TimeSeries", "to", "a", "WAV", "file" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/wav.py#L60-L91
train
211,521
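The two WAV routines above are the ``read``/``write`` pair registered for `TimeSeries`; a minimal round-trip sketch (the filename and sine-wave data are placeholders, and the auto-scaling on write normalises amplitudes to (-1.0, 1.0) unless ``scale=1`` is given):

import numpy
from gwpy.timeseries import TimeSeries

# placeholder data: one second of a 100 Hz sine wave sampled at 4096 Hz
data = numpy.sin(2 * numpy.pi * 100 * numpy.arange(4096) / 4096.)
ts = TimeSeries(data, sample_rate=4096)

ts.write('example.wav')               # auto-scaled to (-1.0, 1.0)
ts2 = TimeSeries.read('example.wav')  # sample rate recovered from the header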
gwpy/gwpy
gwpy/timeseries/io/wav.py
is_wav
def is_wav(origin, filepath, fileobj, *args, **kwargs):
    """Identify a file as WAV

    See `astropy.io.registry` for details on how this function is used.
    """
    # pylint: disable=unused-argument
    if origin == 'read' and fileobj is not None:
        loc = fileobj.tell()
        fileobj.seek(0)
        try:
            riff, _, fmt = struct.unpack('<4sI4s', fileobj.read(12))
            if isinstance(riff, bytes):
                riff = riff.decode('utf-8')
                fmt = fmt.decode('utf-8')
            return riff == WAV_SIGNATURE[0] and fmt == WAV_SIGNATURE[1]
        except (UnicodeDecodeError, struct.error):
            return False
        finally:
            fileobj.seek(loc)
    elif filepath is not None:
        return filepath.endswith(('.wav', '.wave'))
    else:
        try:
            wave.open(args[0])
        except (wave.Error, AttributeError):
            return False
        else:
            return True
python
def is_wav(origin, filepath, fileobj, *args, **kwargs):
    """Identify a file as WAV

    See `astropy.io.registry` for details on how this function is used.
    """
    # pylint: disable=unused-argument
    if origin == 'read' and fileobj is not None:
        loc = fileobj.tell()
        fileobj.seek(0)
        try:
            riff, _, fmt = struct.unpack('<4sI4s', fileobj.read(12))
            if isinstance(riff, bytes):
                riff = riff.decode('utf-8')
                fmt = fmt.decode('utf-8')
            return riff == WAV_SIGNATURE[0] and fmt == WAV_SIGNATURE[1]
        except (UnicodeDecodeError, struct.error):
            return False
        finally:
            fileobj.seek(loc)
    elif filepath is not None:
        return filepath.endswith(('.wav', '.wave'))
    else:
        try:
            wave.open(args[0])
        except (wave.Error, AttributeError):
            return False
        else:
            return True
[ "def", "is_wav", "(", "origin", ",", "filepath", ",", "fileobj", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=unused-argument", "if", "origin", "==", "'read'", "and", "fileobj", "is", "not", "None", ":", "loc", "=", "fileobj", ...
Identify a file as WAV

See `astropy.io.registry` for details on how this function is used.
[ "Identify", "a", "file", "as", "WAV" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/wav.py#L94-L121
train
211,522
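For reference, the 12-byte header check above can be reproduced standalone; this sketch assumes ``WAV_SIGNATURE = ('RIFF', 'WAVE')``, which is what the (unshown) module-level constant presumably holds:

import struct

WAV_SIGNATURE = ('RIFF', 'WAVE')  # assumed value of the module constant

def looks_like_wav(path):
    # a RIFF/WAVE file starts 'RIFF', <4-byte chunk size>, 'WAVE'
    with open(path, 'rb') as fobj:
        header = fobj.read(12)
    try:
        riff, _, fmt = struct.unpack('<4sI4s', header)
    except struct.error:  # fewer than 12 bytes in the file
        return False
    return (riff.decode('utf-8', 'replace') == WAV_SIGNATURE[0]
            and fmt.decode('utf-8', 'replace') == WAV_SIGNATURE[1])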
gwpy/gwpy
gwpy/table/filter.py
parse_column_filter
def parse_column_filter(definition):
    """Parse a `str` of the form 'column>50'

    Parameters
    ----------
    definition : `str`
        a column filter definition of the form ``<name><operator><threshold>``
        or ``<threshold><operator><name><operator><threshold>``, e.g.
        ``frequency >= 10``, or ``50 < snr < 100``

    Returns
    -------
    filters : `list` of `tuple`
        a `list` of filter 3-`tuple`s, where each `tuple` contains the
        following elements:

        - ``column`` (`str`) - the name of the column on which to operate
        - ``operator`` (`callable`) - the operator to call when evaluating
          the filter
        - ``operand`` (`anything`) - the argument to the operator function

    Raises
    ------
    ValueError
        if the filter definition cannot be parsed

    KeyError
        if any parsed operator string cannot be mapped to a function from
        the `operator` module

    Notes
    -----
    Strings that contain non-alphanumeric characters (e.g. hyphen `-`)
    should be quoted inside the filter definition, to prevent such
    characters being interpreted as operators, e.g. ``channel = X1:TEST``
    should always be passed as ``channel = "X1:TEST"``.

    Examples
    --------
    >>> parse_column_filter("frequency>10")
    [('frequency', <function operator.gt>, 10.)]
    >>> parse_column_filter("50 < snr < 100")
    [('snr', <function operator.gt>, 50.), ('snr', <function operator.lt>, 100.)]
    >>> parse_column_filter('channel = "H1:TEST"')
    [('channel', <function operator.eq>, 'H1:TEST')]
    """  # noqa
    # parse definition into parts (skipping null tokens)
    parts = list(generate_tokens(StringIO(definition.strip()).readline))
    while parts[-1][0] in (token.ENDMARKER, token.NEWLINE):
        parts = parts[:-1]

    # parse simple definition: e.g: snr > 5
    if len(parts) == 3:
        a, b, c = parts  # pylint: disable=invalid-name
        if a[0] in [token.NAME, token.STRING]:  # e.g. snr > 5
            name = QUOTE_REGEX.sub('', a[1])
            oprtr = OPERATORS[b[1]]
            value = _float_or_str(c[1])
            return [(name, oprtr, value)]
        elif c[0] in [token.NAME, token.STRING]:  # reversed, e.g. 5 < snr
            name = QUOTE_REGEX.sub('', c[1])
            oprtr = OPERATORS_INV[b[1]]
            value = _float_or_str(a[1])
            return [(name, oprtr, value)]

    # parse between definition: e.g: 5 < snr < 10
    elif len(parts) == 5:
        a, b, c, d, e = list(zip(*parts))[1]  # pylint: disable=invalid-name
        name = QUOTE_REGEX.sub('', c)
        return [(name, OPERATORS_INV[b], _float_or_str(a)),
                (name, OPERATORS[d], _float_or_str(e))]

    raise ValueError("Cannot parse filter definition from %r" % definition)
python
def parse_column_filter(definition):
    """Parse a `str` of the form 'column>50'

    Parameters
    ----------
    definition : `str`
        a column filter definition of the form ``<name><operator><threshold>``
        or ``<threshold><operator><name><operator><threshold>``, e.g.
        ``frequency >= 10``, or ``50 < snr < 100``

    Returns
    -------
    filters : `list` of `tuple`
        a `list` of filter 3-`tuple`s, where each `tuple` contains the
        following elements:

        - ``column`` (`str`) - the name of the column on which to operate
        - ``operator`` (`callable`) - the operator to call when evaluating
          the filter
        - ``operand`` (`anything`) - the argument to the operator function

    Raises
    ------
    ValueError
        if the filter definition cannot be parsed

    KeyError
        if any parsed operator string cannot be mapped to a function from
        the `operator` module

    Notes
    -----
    Strings that contain non-alphanumeric characters (e.g. hyphen `-`)
    should be quoted inside the filter definition, to prevent such
    characters being interpreted as operators, e.g. ``channel = X1:TEST``
    should always be passed as ``channel = "X1:TEST"``.

    Examples
    --------
    >>> parse_column_filter("frequency>10")
    [('frequency', <function operator.gt>, 10.)]
    >>> parse_column_filter("50 < snr < 100")
    [('snr', <function operator.gt>, 50.), ('snr', <function operator.lt>, 100.)]
    >>> parse_column_filter('channel = "H1:TEST"')
    [('channel', <function operator.eq>, 'H1:TEST')]
    """  # noqa
    # parse definition into parts (skipping null tokens)
    parts = list(generate_tokens(StringIO(definition.strip()).readline))
    while parts[-1][0] in (token.ENDMARKER, token.NEWLINE):
        parts = parts[:-1]

    # parse simple definition: e.g: snr > 5
    if len(parts) == 3:
        a, b, c = parts  # pylint: disable=invalid-name
        if a[0] in [token.NAME, token.STRING]:  # e.g. snr > 5
            name = QUOTE_REGEX.sub('', a[1])
            oprtr = OPERATORS[b[1]]
            value = _float_or_str(c[1])
            return [(name, oprtr, value)]
        elif c[0] in [token.NAME, token.STRING]:  # reversed, e.g. 5 < snr
            name = QUOTE_REGEX.sub('', c[1])
            oprtr = OPERATORS_INV[b[1]]
            value = _float_or_str(a[1])
            return [(name, oprtr, value)]

    # parse between definition: e.g: 5 < snr < 10
    elif len(parts) == 5:
        a, b, c, d, e = list(zip(*parts))[1]  # pylint: disable=invalid-name
        name = QUOTE_REGEX.sub('', c)
        return [(name, OPERATORS_INV[b], _float_or_str(a)),
                (name, OPERATORS[d], _float_or_str(e))]

    raise ValueError("Cannot parse filter definition from %r" % definition)
[ "def", "parse_column_filter", "(", "definition", ")", ":", "# noqa", "# parse definition into parts (skipping null tokens)", "parts", "=", "list", "(", "generate_tokens", "(", "StringIO", "(", "definition", ".", "strip", "(", ")", ")", ".", "readline", ")", ")", "...
Parse a `str` of the form 'column>50'

Parameters
----------
definition : `str`
    a column filter definition of the form ``<name><operator><threshold>``
    or ``<threshold><operator><name><operator><threshold>``, e.g.
    ``frequency >= 10``, or ``50 < snr < 100``

Returns
-------
filters : `list` of `tuple`
    a `list` of filter 3-`tuple`s, where each `tuple` contains the
    following elements:

    - ``column`` (`str`) - the name of the column on which to operate
    - ``operator`` (`callable`) - the operator to call when evaluating
      the filter
    - ``operand`` (`anything`) - the argument to the operator function

Raises
------
ValueError
    if the filter definition cannot be parsed

KeyError
    if any parsed operator string cannot be mapped to a function from
    the `operator` module

Notes
-----
Strings that contain non-alphanumeric characters (e.g. hyphen `-`)
should be quoted inside the filter definition, to prevent such
characters being interpreted as operators, e.g. ``channel = X1:TEST``
should always be passed as ``channel = "X1:TEST"``.

Examples
--------
>>> parse_column_filter("frequency>10")
[('frequency', <function operator.gt>, 10.)]
>>> parse_column_filter("50 < snr < 100")
[('snr', <function operator.gt>, 50.), ('snr', <function operator.lt>, 100.)]
>>> parse_column_filter('channel = "H1:TEST"')
[('channel', <function operator.eq>, 'H1:TEST')]
[ "Parse", "a", "str", "of", "the", "form", "column", ">", "50" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/filter.py#L99-L171
train
211,523
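A quick interactive check of the parser output (a sketch; the 3-tuples follow the ``(column, operator, operand)`` format documented above):

import operator
from gwpy.table.filter import parse_column_filter

assert parse_column_filter('snr > 5') == [('snr', operator.gt, 5.0)]
# a 'between' definition expands to two filters on the same column
assert parse_column_filter('50 < snr < 100') == [
    ('snr', operator.gt, 50.0),
    ('snr', operator.lt, 100.0),
]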
gwpy/gwpy
gwpy/table/filter.py
parse_column_filters
def parse_column_filters(*definitions):
    """Parse multiple compound column filter definitions

    Examples
    --------
    >>> parse_column_filters('snr > 10', 'frequency < 1000')
    [('snr', <function operator.gt>, 10.), ('frequency', <function operator.lt>, 1000.)]
    >>> parse_column_filters('snr > 10 && frequency < 1000')
    [('snr', <function operator.gt>, 10.), ('frequency', <function operator.lt>, 1000.)]
    """  # noqa: E501
    fltrs = []
    for def_ in _flatten(definitions):
        if is_filter_tuple(def_):
            fltrs.append(def_)
        else:
            for splitdef in DELIM_REGEX.split(def_)[::2]:
                fltrs.extend(parse_column_filter(splitdef))
    return fltrs
python
def parse_column_filters(*definitions):
    """Parse multiple compound column filter definitions

    Examples
    --------
    >>> parse_column_filters('snr > 10', 'frequency < 1000')
    [('snr', <function operator.gt>, 10.), ('frequency', <function operator.lt>, 1000.)]
    >>> parse_column_filters('snr > 10 && frequency < 1000')
    [('snr', <function operator.gt>, 10.), ('frequency', <function operator.lt>, 1000.)]
    """  # noqa: E501
    fltrs = []
    for def_ in _flatten(definitions):
        if is_filter_tuple(def_):
            fltrs.append(def_)
        else:
            for splitdef in DELIM_REGEX.split(def_)[::2]:
                fltrs.extend(parse_column_filter(splitdef))
    return fltrs
[ "def", "parse_column_filters", "(", "*", "definitions", ")", ":", "# noqa: E501", "fltrs", "=", "[", "]", "for", "def_", "in", "_flatten", "(", "definitions", ")", ":", "if", "is_filter_tuple", "(", "def_", ")", ":", "fltrs", ".", "append", "(", "def_", ...
Parse multiple compound column filter definitions

Examples
--------
>>> parse_column_filters('snr > 10', 'frequency < 1000')
[('snr', <function operator.gt>, 10.), ('frequency', <function operator.lt>, 1000.)]
>>> parse_column_filters('snr > 10 && frequency < 1000')
[('snr', <function operator.gt>, 10.), ('frequency', <function operator.lt>, 1000.)]
[ "Parse", "multiple", "compound", "column", "filter", "definitions" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/filter.py#L174-L191
train
211,524
gwpy/gwpy
gwpy/table/filter.py
_flatten
def _flatten(container):
    """Flatten arbitrary nested list of filters into a 1-D list
    """
    if isinstance(container, string_types):
        container = [container]
    for elem in container:
        if isinstance(elem, string_types) or is_filter_tuple(elem):
            yield elem
        else:
            for elem2 in _flatten(elem):
                yield elem2
python
def _flatten(container):
    """Flatten arbitrary nested list of filters into a 1-D list
    """
    if isinstance(container, string_types):
        container = [container]
    for elem in container:
        if isinstance(elem, string_types) or is_filter_tuple(elem):
            yield elem
        else:
            for elem2 in _flatten(elem):
                yield elem2
[ "def", "_flatten", "(", "container", ")", ":", "if", "isinstance", "(", "container", ",", "string_types", ")", ":", "container", "=", "[", "container", "]", "for", "elem", "in", "container", ":", "if", "isinstance", "(", "elem", ",", "string_types", ")", ...
Flatten arbitrary nested list of filters into a 1-D list
[ "Flatten", "arbitrary", "nested", "list", "of", "filters", "into", "a", "1", "-", "D", "list" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/filter.py#L194-L204
train
211,525
gwpy/gwpy
gwpy/table/filter.py
is_filter_tuple
def is_filter_tuple(tup):
    """Return whether a `tuple` matches the format for a column filter
    """
    return isinstance(tup, (tuple, list)) and (
        len(tup) == 3 and
        isinstance(tup[0], string_types) and
        callable(tup[1]))
python
def is_filter_tuple(tup):
    """Return whether a `tuple` matches the format for a column filter
    """
    return isinstance(tup, (tuple, list)) and (
        len(tup) == 3 and
        isinstance(tup[0], string_types) and
        callable(tup[1]))
[ "def", "is_filter_tuple", "(", "tup", ")", ":", "return", "isinstance", "(", "tup", ",", "(", "tuple", ",", "list", ")", ")", "and", "(", "len", "(", "tup", ")", "==", "3", "and", "isinstance", "(", "tup", "[", "0", "]", ",", "string_types", ")", ...
Return whether a `tuple` matches the format for a column filter
[ "Return", "whether", "a", "tuple", "matches", "the", "format", "for", "a", "column", "filter" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/filter.py#L207-L213
train
211,526
gwpy/gwpy
gwpy/table/filter.py
filter_table
def filter_table(table, *column_filters):
    """Apply one or more column slice filters to a `Table`

    Multiple column filters can be given, and will be applied
    concurrently

    Parameters
    ----------
    table : `~astropy.table.Table`
        the table to filter

    column_filter : `str`, `tuple`
        a column slice filter definition, in one of two formats:

        - `str` - e.g. ``'snr > 10'``
        - `tuple` - ``(<column>, <operator>, <operand>)``, e.g.
          ``('snr', operator.gt, 10)``

        multiple filters can be given and will be applied in order

    Returns
    -------
    table : `~astropy.table.Table`
        a view of the input table with only those rows matching the filters

    Examples
    --------
    >>> filter(my_table, 'snr>10', 'frequency<1000')

    custom operations can be defined using filter tuple definitions:

    >>> from gwpy.table.filters import in_segmentlist
    >>> filter(my_table, ('time', in_segmentlist, segs))
    """
    keep = numpy.ones(len(table), dtype=bool)
    for name, op_func, operand in parse_column_filters(*column_filters):
        col = table[name].view(numpy.ndarray)
        keep &= op_func(col, operand)
    return table[keep]
python
def filter_table(table, *column_filters):
    """Apply one or more column slice filters to a `Table`

    Multiple column filters can be given, and will be applied
    concurrently

    Parameters
    ----------
    table : `~astropy.table.Table`
        the table to filter

    column_filter : `str`, `tuple`
        a column slice filter definition, in one of two formats:

        - `str` - e.g. ``'snr > 10'``
        - `tuple` - ``(<column>, <operator>, <operand>)``, e.g.
          ``('snr', operator.gt, 10)``

        multiple filters can be given and will be applied in order

    Returns
    -------
    table : `~astropy.table.Table`
        a view of the input table with only those rows matching the filters

    Examples
    --------
    >>> filter(my_table, 'snr>10', 'frequency<1000')

    custom operations can be defined using filter tuple definitions:

    >>> from gwpy.table.filters import in_segmentlist
    >>> filter(my_table, ('time', in_segmentlist, segs))
    """
    keep = numpy.ones(len(table), dtype=bool)
    for name, op_func, operand in parse_column_filters(*column_filters):
        col = table[name].view(numpy.ndarray)
        keep &= op_func(col, operand)
    return table[keep]
[ "def", "filter_table", "(", "table", ",", "*", "column_filters", ")", ":", "keep", "=", "numpy", ".", "ones", "(", "len", "(", "table", ")", ",", "dtype", "=", "bool", ")", "for", "name", ",", "op_func", ",", "operand", "in", "parse_column_filters", "(...
Apply one or more column slice filters to a `Table`

Multiple column filters can be given, and will be applied
concurrently

Parameters
----------
table : `~astropy.table.Table`
    the table to filter

column_filter : `str`, `tuple`
    a column slice filter definition, in one of two formats:

    - `str` - e.g. ``'snr > 10'``
    - `tuple` - ``(<column>, <operator>, <operand>)``, e.g.
      ``('snr', operator.gt, 10)``

    multiple filters can be given and will be applied in order

Returns
-------
table : `~astropy.table.Table`
    a view of the input table with only those rows matching the filters

Examples
--------
>>> filter(my_table, 'snr>10', 'frequency<1000')

custom operations can be defined using filter tuple definitions:

>>> from gwpy.table.filters import in_segmentlist
>>> filter(my_table, ('time', in_segmentlist, segs))
[ "Apply", "one", "or", "more", "column", "slice", "filters", "to", "a", "Table" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/filter.py#L218-L256
train
211,527
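Putting the helpers in this module together, `filter_table` parses each definition and applies it to a plain `numpy` view of the named column; a small usage sketch (the event values are made up):

import numpy
from astropy.table import Table
from gwpy.table.filter import filter_table

events = Table({'snr': numpy.array([3., 8., 15., 40.]),
                'frequency': numpy.array([60., 120., 500., 2000.])})

# string filters and '&&'-joined compounds are parsed automatically
loud = filter_table(events, 'snr > 10 && frequency < 1000')
print(len(loud))  # -> 1 (only the snr=15, frequency=500 row survives)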
gwpy/gwpy
gwpy/types/io/hdf5.py
read_hdf5_array
def read_hdf5_array(source, path=None, array_type=Array):
    """Read an `Array` from the given HDF5 object

    Parameters
    ----------
    source : `str`, :class:`h5py.HLObject`
        path to HDF file on disk, or open `h5py.HLObject`.

    path : `str`
        path in HDF hierarchy of dataset.

    array_type : `type`
        desired return type
    """
    dataset = io_hdf5.find_dataset(source, path=path)
    attrs = dict(dataset.attrs)
    # unpickle channel object
    try:
        attrs['channel'] = _unpickle_channel(attrs['channel'])
    except KeyError:  # no channel stored
        pass
    # unpack byte strings for python3
    for key in attrs:
        if isinstance(attrs[key], bytes):
            attrs[key] = attrs[key].decode('utf-8')
    return array_type(dataset[()], **attrs)
python
def read_hdf5_array(source, path=None, array_type=Array):
    """Read an `Array` from the given HDF5 object

    Parameters
    ----------
    source : `str`, :class:`h5py.HLObject`
        path to HDF file on disk, or open `h5py.HLObject`.

    path : `str`
        path in HDF hierarchy of dataset.

    array_type : `type`
        desired return type
    """
    dataset = io_hdf5.find_dataset(source, path=path)
    attrs = dict(dataset.attrs)
    # unpickle channel object
    try:
        attrs['channel'] = _unpickle_channel(attrs['channel'])
    except KeyError:  # no channel stored
        pass
    # unpack byte strings for python3
    for key in attrs:
        if isinstance(attrs[key], bytes):
            attrs[key] = attrs[key].decode('utf-8')
    return array_type(dataset[()], **attrs)
[ "def", "read_hdf5_array", "(", "source", ",", "path", "=", "None", ",", "array_type", "=", "Array", ")", ":", "dataset", "=", "io_hdf5", ".", "find_dataset", "(", "source", ",", "path", "=", "path", ")", "attrs", "=", "dict", "(", "dataset", ".", "attr...
Read an `Array` from the given HDF5 object

Parameters
----------
source : `str`, :class:`h5py.HLObject`
    path to HDF file on disk, or open `h5py.HLObject`.

path : `str`
    path in HDF hierarchy of dataset.

array_type : `type`
    desired return type
[ "Read", "an", "Array", "from", "the", "given", "HDF5", "object" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/io/hdf5.py#L47-L72
train
211,528
gwpy/gwpy
gwpy/types/io/hdf5.py
_unpickle_channel
def _unpickle_channel(raw):
    """Try and unpickle a channel with sensible error handling
    """
    try:
        return pickle.loads(raw)
    except (ValueError, pickle.UnpicklingError, EOFError, TypeError,
            IndexError) as exc:
        # maybe not pickled
        if isinstance(raw, bytes):
            raw = raw.decode('utf-8')
        try:  # test if this is a valid channel name
            Channel.MATCH.match(raw)
        except ValueError:
            raise exc
        return raw
python
def _unpickle_channel(raw):
    """Try and unpickle a channel with sensible error handling
    """
    try:
        return pickle.loads(raw)
    except (ValueError, pickle.UnpicklingError, EOFError, TypeError,
            IndexError) as exc:
        # maybe not pickled
        if isinstance(raw, bytes):
            raw = raw.decode('utf-8')
        try:  # test if this is a valid channel name
            Channel.MATCH.match(raw)
        except ValueError:
            raise exc
        return raw
[ "def", "_unpickle_channel", "(", "raw", ")", ":", "try", ":", "return", "pickle", ".", "loads", "(", "raw", ")", "except", "(", "ValueError", ",", "pickle", ".", "UnpicklingError", ",", "EOFError", ",", "TypeError", ",", "IndexError", ")", "as", "exc", "...
Try and unpickle a channel with sensible error handling
[ "Try", "and", "unpickle", "a", "channel", "with", "sensible", "error", "handling" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/io/hdf5.py#L75-L89
train
211,529
gwpy/gwpy
gwpy/types/io/hdf5.py
_format_metadata_attribute
def _format_metadata_attribute(value):
    """Format a value for writing to HDF5 as a `h5py.Dataset` attribute
    """
    if (value is None or (isinstance(value, Index) and value.regular)):
        raise IgnoredAttribute

    # map type to something HDF5 can handle
    for typekey, func in ATTR_TYPE_MAP.items():
        if issubclass(type(value), typekey):
            return func(value)
    return value
python
def _format_metadata_attribute(value):
    """Format a value for writing to HDF5 as a `h5py.Dataset` attribute
    """
    if (value is None or (isinstance(value, Index) and value.regular)):
        raise IgnoredAttribute

    # map type to something HDF5 can handle
    for typekey, func in ATTR_TYPE_MAP.items():
        if issubclass(type(value), typekey):
            return func(value)
    return value
[ "def", "_format_metadata_attribute", "(", "value", ")", ":", "if", "(", "value", "is", "None", "or", "(", "isinstance", "(", "value", ",", "Index", ")", "and", "value", ".", "regular", ")", ")", ":", "raise", "IgnoredAttribute", "# map type to something HDF5 c...
Format a value for writing to HDF5 as a `h5py.Dataset` attribute
[ "Format", "a", "value", "for", "writing", "to", "HDF5", "as", "a", "h5py", ".", "Dataset", "attribute" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/io/hdf5.py#L100-L111
train
211,530
gwpy/gwpy
gwpy/types/io/hdf5.py
write_array_metadata
def write_array_metadata(dataset, array):
    """Write metadata for ``array`` into the `h5py.Dataset`
    """
    for attr in ('unit',) + array._metadata_slots:
        # format attribute
        try:
            value = _format_metadata_attribute(
                getattr(array, '_%s' % attr, None))
        except IgnoredAttribute:
            continue

        # store attribute
        try:
            dataset.attrs[attr] = value
        except (TypeError, ValueError, RuntimeError) as exc:
            # note the trailing comma: exc.args must be a tuple
            exc.args = ("Failed to store {} ({}) for {}: {}".format(
                attr, type(value).__name__, type(array).__name__,
                str(exc)),)
            raise
python
def write_array_metadata(dataset, array):
    """Write metadata for ``array`` into the `h5py.Dataset`
    """
    for attr in ('unit',) + array._metadata_slots:
        # format attribute
        try:
            value = _format_metadata_attribute(
                getattr(array, '_%s' % attr, None))
        except IgnoredAttribute:
            continue

        # store attribute
        try:
            dataset.attrs[attr] = value
        except (TypeError, ValueError, RuntimeError) as exc:
            # note the trailing comma: exc.args must be a tuple
            exc.args = ("Failed to store {} ({}) for {}: {}".format(
                attr, type(value).__name__, type(array).__name__,
                str(exc)),)
            raise
[ "def", "write_array_metadata", "(", "dataset", ",", "array", ")", ":", "for", "attr", "in", "(", "'unit'", ",", ")", "+", "array", ".", "_metadata_slots", ":", "# format attribute", "try", ":", "value", "=", "_format_metadata_attribute", "(", "getattr", "(", ...
Write metadata for ``array`` into the `h5py.Dataset`
[ "Write", "metadata", "for", "array", "into", "the", "h5py", ".", "Dataset" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/io/hdf5.py#L114-L131
train
211,531
gwpy/gwpy
gwpy/types/io/hdf5.py
write_hdf5_array
def write_hdf5_array(array, h5g, path=None, attrs=None,
                     append=False, overwrite=False,
                     compression='gzip', **kwargs):
    """Write the ``array`` to an `h5py.Dataset`

    Parameters
    ----------
    array : `gwpy.types.Array`
        the data object to write

    h5g : `str`, `h5py.Group`
        a file path to write to, or an `h5py.Group` in which
        to create a new dataset

    path : `str`, optional
        the path inside the group at which to create the new dataset,
        defaults to ``array.name``

    attrs : `dict`, optional
        extra metadata to write into `h5py.Dataset.attrs`, on top
        of the default metadata

    append : `bool`, default: `False`
        if `True`, write new dataset to existing file, otherwise an
        exception will be raised if the output file exists (only used if
        ``f`` is `str`)

    overwrite : `bool`, default: `False`
        if `True`, overwrite an existing dataset in an existing file,
        otherwise an exception will be raised if a dataset exists with
        the given name (only used if ``f`` is `str`)

    compression : `str`, `int`, optional
        compression option to pass to :meth:`h5py.Group.create_dataset`

    **kwargs
        other keyword arguments for :meth:`h5py.Group.create_dataset`

    Returns
    -------
    datasets : `h5py.Dataset`
        the newly created dataset
    """
    if path is None:
        path = array.name
    if path is None:
        raise ValueError("Cannot determine HDF5 path for %s, "
                         "please set ``name`` attribute, or pass ``path=`` "
                         "keyword when writing" % type(array).__name__)

    # create dataset
    dset = io_hdf5.create_dataset(h5g, path, overwrite=overwrite,
                                  data=array.value, compression=compression,
                                  **kwargs)

    # write default metadata
    write_array_metadata(dset, array)

    # allow caller to specify their own metadata dict
    if attrs:
        for key in attrs:
            dset.attrs[key] = attrs[key]

    return dset
python
def write_hdf5_array(array, h5g, path=None, attrs=None,
                     append=False, overwrite=False,
                     compression='gzip', **kwargs):
    """Write the ``array`` to an `h5py.Dataset`

    Parameters
    ----------
    array : `gwpy.types.Array`
        the data object to write

    h5g : `str`, `h5py.Group`
        a file path to write to, or an `h5py.Group` in which
        to create a new dataset

    path : `str`, optional
        the path inside the group at which to create the new dataset,
        defaults to ``array.name``

    attrs : `dict`, optional
        extra metadata to write into `h5py.Dataset.attrs`, on top
        of the default metadata

    append : `bool`, default: `False`
        if `True`, write new dataset to existing file, otherwise an
        exception will be raised if the output file exists (only used if
        ``f`` is `str`)

    overwrite : `bool`, default: `False`
        if `True`, overwrite an existing dataset in an existing file,
        otherwise an exception will be raised if a dataset exists with
        the given name (only used if ``f`` is `str`)

    compression : `str`, `int`, optional
        compression option to pass to :meth:`h5py.Group.create_dataset`

    **kwargs
        other keyword arguments for :meth:`h5py.Group.create_dataset`

    Returns
    -------
    datasets : `h5py.Dataset`
        the newly created dataset
    """
    if path is None:
        path = array.name
    if path is None:
        raise ValueError("Cannot determine HDF5 path for %s, "
                         "please set ``name`` attribute, or pass ``path=`` "
                         "keyword when writing" % type(array).__name__)

    # create dataset
    dset = io_hdf5.create_dataset(h5g, path, overwrite=overwrite,
                                  data=array.value, compression=compression,
                                  **kwargs)

    # write default metadata
    write_array_metadata(dset, array)

    # allow caller to specify their own metadata dict
    if attrs:
        for key in attrs:
            dset.attrs[key] = attrs[key]

    return dset
[ "def", "write_hdf5_array", "(", "array", ",", "h5g", ",", "path", "=", "None", ",", "attrs", "=", "None", ",", "append", "=", "False", ",", "overwrite", "=", "False", ",", "compression", "=", "'gzip'", ",", "*", "*", "kwargs", ")", ":", "if", "path",...
Write the ``array`` to an `h5py.Dataset`

Parameters
----------
array : `gwpy.types.Array`
    the data object to write

h5g : `str`, `h5py.Group`
    a file path to write to, or an `h5py.Group` in which
    to create a new dataset

path : `str`, optional
    the path inside the group at which to create the new dataset,
    defaults to ``array.name``

attrs : `dict`, optional
    extra metadata to write into `h5py.Dataset.attrs`, on top
    of the default metadata

append : `bool`, default: `False`
    if `True`, write new dataset to existing file, otherwise an
    exception will be raised if the output file exists (only used if
    ``f`` is `str`)

overwrite : `bool`, default: `False`
    if `True`, overwrite an existing dataset in an existing file,
    otherwise an exception will be raised if a dataset exists with
    the given name (only used if ``f`` is `str`)

compression : `str`, `int`, optional
    compression option to pass to :meth:`h5py.Group.create_dataset`

**kwargs
    other keyword arguments for :meth:`h5py.Group.create_dataset`

Returns
-------
datasets : `h5py.Dataset`
    the newly created dataset
[ "Write", "the", "array", "to", "an", "h5py", ".", "Dataset" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/io/hdf5.py#L135-L198
train
211,532
gwpy/gwpy
gwpy/types/io/hdf5.py
format_index_array_attrs
def format_index_array_attrs(series):
    """Format metadata attributes for an indexed array

    This function is used to provide the necessary metadata to meet
    the (proposed) LIGO Common Data Format specification for series data
    in HDF5.
    """
    attrs = {}
    # loop through named axes
    for i, axis in zip(range(series.ndim), ('x', 'y')):
        # find property names
        unit = '{}unit'.format(axis)
        origin = '{}0'.format(axis)
        delta = 'd{}'.format(axis)
        # store attributes
        aunit = getattr(series, unit)
        attrs.update({
            unit: str(aunit),
            origin: getattr(series, origin).to(aunit).value,
            delta: getattr(series, delta).to(aunit).value,
        })
    return attrs
python
def format_index_array_attrs(series):
    """Format metadata attributes for an indexed array

    This function is used to provide the necessary metadata to meet
    the (proposed) LIGO Common Data Format specification for series data
    in HDF5.
    """
    attrs = {}
    # loop through named axes
    for i, axis in zip(range(series.ndim), ('x', 'y')):
        # find property names
        unit = '{}unit'.format(axis)
        origin = '{}0'.format(axis)
        delta = 'd{}'.format(axis)
        # store attributes
        aunit = getattr(series, unit)
        attrs.update({
            unit: str(aunit),
            origin: getattr(series, origin).to(aunit).value,
            delta: getattr(series, delta).to(aunit).value,
        })
    return attrs
[ "def", "format_index_array_attrs", "(", "series", ")", ":", "attrs", "=", "{", "}", "# loop through named axes", "for", "i", ",", "axis", "in", "zip", "(", "range", "(", "series", ".", "ndim", ")", ",", "(", "'x'", ",", "'y'", ")", ")", ":", "# find pr...
Format metadata attributes for an indexed array

This function is used to provide the necessary metadata to meet
the (proposed) LIGO Common Data Format specification for series data
in HDF5.
[ "Format", "metadata", "attributes", "for", "and", "indexed", "array" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/io/hdf5.py#L201-L223
train
211,533
gwpy/gwpy
gwpy/types/io/hdf5.py
write_hdf5_series
def write_hdf5_series(series, output, path=None, attrs=None, **kwargs):
    """Write a Series to HDF5.

    See :func:`write_hdf5_array` for details of arguments and keywords.
    """
    if attrs is None:
        attrs = format_index_array_attrs(series)
    return write_hdf5_array(series, output, path=path, attrs=attrs, **kwargs)
python
def write_hdf5_series(series, output, path=None, attrs=None, **kwargs):
    """Write a Series to HDF5.

    See :func:`write_hdf5_array` for details of arguments and keywords.
    """
    if attrs is None:
        attrs = format_index_array_attrs(series)
    return write_hdf5_array(series, output, path=path, attrs=attrs, **kwargs)
[ "def", "write_hdf5_series", "(", "series", ",", "output", ",", "path", "=", "None", ",", "attrs", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "attrs", "is", "None", ":", "attrs", "=", "format_index_array_attrs", "(", "series", ")", "return", ...
Write a Series to HDF5.

See :func:`write_hdf5_array` for details of arguments and keywords.
[ "Write", "a", "Series", "to", "HDF5", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/io/hdf5.py#L226-L233
train
211,534
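As a usage sketch of the reader/writer pair in this module, a series and its indexed-axis metadata can be round-tripped through HDF5 via the registered ``read``/``write`` methods (filenames are placeholders, and the dispatch through ``format='hdf5'`` is assumed from the registrations above):

from gwpy.timeseries import TimeSeries

ts = TimeSeries([1., 2., 3.], sample_rate=16384, name='X1:TEST')
ts.write('data.h5', format='hdf5')  # -> write_hdf5_series
ts2 = TimeSeries.read('data.h5', 'X1:TEST', format='hdf5')  # -> read_hdf5_array
assert str(ts2.name) == 'X1:TEST'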
gwpy/gwpy
gwpy/io/mp.py
read_multi
def read_multi(flatten, cls, source, *args, **kwargs):
    """Read sources into a `cls` with multiprocessing

    This method should be called by `cls.read` and uses the `nproc`
    keyword to enable and handle pool-based multiprocessing of
    multiple source files, using `flatten` to combine the chunked data
    into a single object of the correct type.

    Parameters
    ----------
    flatten : `callable`
        a method to take a list of ``cls`` instances, and combine them
        into a single ``cls`` instance

    cls : `type`
        the object type to read

    source : `str`, `list` of `str`, ...
        the input data source, which can take many different forms

    *args
        positional arguments to pass to the reader

    **kwargs
        keyword arguments to pass to the reader
    """
    verbose = kwargs.pop('verbose', False)

    # parse input as a list of files
    try:  # try and map to a list of file-like objects
        files = file_list(source)
    except ValueError:  # otherwise treat as single file
        files = [source]
        path = None  # to pass to get_read_format()
    else:
        path = files[0] if files else None

    # determine input format (so we don't have to do it multiple times)
    if kwargs.get('format', None) is None:
        kwargs['format'] = get_read_format(cls, path, (source,) + args,
                                           kwargs)

    # calculate maximum number of processes
    nproc = min(kwargs.pop('nproc', 1), len(files))

    # define multiprocessing method
    def _read_single_file(fobj):
        try:
            return fobj, io_read(cls, fobj, *args, **kwargs)
        # pylint: disable=broad-except,redefine-in-handler
        except Exception as exc:
            if nproc == 1:
                raise
            if isinstance(exc, SAXException):  # SAXExceptions don't pickle
                return fobj, exc.getException()  # pylint: disable=no-member
            return fobj, exc

    # format verbosity
    if verbose is True:
        verbose = 'Reading ({})'.format(kwargs['format'])

    # read files
    output = mp_utils.multiprocess_with_queues(
        nproc, _read_single_file, files, verbose=verbose, unit='files')

    # raise exceptions (from multiprocessing, single process raises inline)
    for fobj, exc in output:
        if isinstance(exc, Exception):
            exc.args = ('Failed to read %s: %s' % (fobj, str(exc)),)
            raise exc

    # return combined object
    _, out = zip(*output)
    return flatten(out)
python
def read_multi(flatten, cls, source, *args, **kwargs):
    """Read sources into a `cls` with multiprocessing

    This method should be called by `cls.read` and uses the `nproc`
    keyword to enable and handle pool-based multiprocessing of
    multiple source files, using `flatten` to combine the chunked data
    into a single object of the correct type.

    Parameters
    ----------
    flatten : `callable`
        a method to take a list of ``cls`` instances, and combine them
        into a single ``cls`` instance

    cls : `type`
        the object type to read

    source : `str`, `list` of `str`, ...
        the input data source, which can take many different forms

    *args
        positional arguments to pass to the reader

    **kwargs
        keyword arguments to pass to the reader
    """
    verbose = kwargs.pop('verbose', False)

    # parse input as a list of files
    try:  # try and map to a list of file-like objects
        files = file_list(source)
    except ValueError:  # otherwise treat as single file
        files = [source]
        path = None  # to pass to get_read_format()
    else:
        path = files[0] if files else None

    # determine input format (so we don't have to do it multiple times)
    if kwargs.get('format', None) is None:
        kwargs['format'] = get_read_format(cls, path, (source,) + args,
                                           kwargs)

    # calculate maximum number of processes
    nproc = min(kwargs.pop('nproc', 1), len(files))

    # define multiprocessing method
    def _read_single_file(fobj):
        try:
            return fobj, io_read(cls, fobj, *args, **kwargs)
        # pylint: disable=broad-except,redefine-in-handler
        except Exception as exc:
            if nproc == 1:
                raise
            if isinstance(exc, SAXException):  # SAXExceptions don't pickle
                return fobj, exc.getException()  # pylint: disable=no-member
            return fobj, exc

    # format verbosity
    if verbose is True:
        verbose = 'Reading ({})'.format(kwargs['format'])

    # read files
    output = mp_utils.multiprocess_with_queues(
        nproc, _read_single_file, files, verbose=verbose, unit='files')

    # raise exceptions (from multiprocessing, single process raises inline)
    for fobj, exc in output:
        if isinstance(exc, Exception):
            exc.args = ('Failed to read %s: %s' % (fobj, str(exc)),)
            raise exc

    # return combined object
    _, out = zip(*output)
    return flatten(out)
[ "def", "read_multi", "(", "flatten", ",", "cls", ",", "source", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "verbose", "=", "kwargs", ".", "pop", "(", "'verbose'", ",", "False", ")", "# parse input as a list of files", "try", ":", "# try and map t...
Read sources into a `cls` with multiprocessing

This method should be called by `cls.read` and uses the `nproc`
keyword to enable and handle pool-based multiprocessing of
multiple source files, using `flatten` to combine the chunked data
into a single object of the correct type.

Parameters
----------
flatten : `callable`
    a method to take a list of ``cls`` instances, and combine them
    into a single ``cls`` instance

cls : `type`
    the object type to read

source : `str`, `list` of `str`, ...
    the input data source, which can take many different forms

*args
    positional arguments to pass to the reader

**kwargs
    keyword arguments to pass to the reader
[ "Read", "sources", "into", "a", "cls", "with", "multiprocessing" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/mp.py#L34-L106
train
211,535
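`read_multi` is the machinery behind the ``nproc`` keyword on the public ``.read`` classmethods; from the caller's side it looks like the sketch below (file and channel names are placeholders):

from gwpy.timeseries import TimeSeries

# four files are read in up to two worker processes, then the chunks
# are flattened back into a single contiguous TimeSeries
data = TimeSeries.read(
    ['a.gwf', 'b.gwf', 'c.gwf', 'd.gwf'],  # placeholder file names
    'L1:PLACEHOLDER-CHANNEL',
    nproc=2,
)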
gwpy/gwpy
gwpy/segments/io/json.py
read_json_flag
def read_json_flag(fobj):
    """Read a `DataQualityFlag` from a segments-web.ligo.org JSON file
    """
    # read from filename
    if isinstance(fobj, string_types):
        with open(fobj, 'r') as fobj2:
            return read_json_flag(fobj2)

    # read from open file
    txt = fobj.read()
    if isinstance(txt, bytes):
        txt = txt.decode('utf-8')
    data = json.loads(txt)

    # format flag
    name = '{ifo}:{name}:{version}'.format(**data)
    out = DataQualityFlag(name, active=data['active'],
                          known=data['known'])

    # parse 'metadata'
    try:
        out.description = data['metadata'].get('flag_description', None)
    except KeyError:  # no metadata available, but that's ok
        pass
    else:
        out.isgood = not data['metadata'].get(
            'active_indicates_ifo_badness', False)

    return out
python
def read_json_flag(fobj):
    """Read a `DataQualityFlag` from a segments-web.ligo.org JSON file
    """
    # read from filename
    if isinstance(fobj, string_types):
        with open(fobj, 'r') as fobj2:
            return read_json_flag(fobj2)

    # read from open file
    txt = fobj.read()
    if isinstance(txt, bytes):
        txt = txt.decode('utf-8')
    data = json.loads(txt)

    # format flag
    name = '{ifo}:{name}:{version}'.format(**data)
    out = DataQualityFlag(name, active=data['active'],
                          known=data['known'])

    # parse 'metadata'
    try:
        out.description = data['metadata'].get('flag_description', None)
    except KeyError:  # no metadata available, but that's ok
        pass
    else:
        out.isgood = not data['metadata'].get(
            'active_indicates_ifo_badness', False)

    return out
[ "def", "read_json_flag", "(", "fobj", ")", ":", "# read from filename", "if", "isinstance", "(", "fobj", ",", "string_types", ")", ":", "with", "open", "(", "fobj", ",", "'r'", ")", "as", "fobj2", ":", "return", "read_json_flag", "(", "fobj2", ")", "# read...
Read a `DataQualityFlag` from a segments-web.ligo.org JSON file
[ "Read", "a", "DataQualityFlag", "from", "a", "segments", "-", "web", ".", "ligo", ".", "org", "JSON", "file" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/json.py#L37-L65
train
211,536
gwpy/gwpy
gwpy/segments/io/json.py
write_json_flag
def write_json_flag(flag, fobj, **kwargs):
    """Write a `DataQualityFlag` to a JSON file

    Parameters
    ----------
    flag : `DataQualityFlag`
        data to write

    fobj : `str`, `file`
        target file (or filename) to write

    **kwargs
        other keyword arguments to pass to :func:`json.dump`

    See also
    --------
    json.dump
        for details on acceptable keyword arguments
    """
    # write to filename
    if isinstance(fobj, string_types):
        with open(fobj, 'w') as fobj2:
            return write_json_flag(flag, fobj2, **kwargs)

    # build json packet
    data = {}
    data['ifo'] = flag.ifo
    data['name'] = flag.tag
    data['version'] = flag.version
    data['active'] = flag.active
    data['known'] = flag.known
    data['metadata'] = {}
    data['metadata']['active_indicates_ifo_badness'] = not flag.isgood
    data['metadata']['flag_description'] = flag.description

    # write
    json.dump(data, fobj, **kwargs)
python
def write_json_flag(flag, fobj, **kwargs):
    """Write a `DataQualityFlag` to a JSON file

    Parameters
    ----------
    flag : `DataQualityFlag`
        data to write

    fobj : `str`, `file`
        target file (or filename) to write

    **kwargs
        other keyword arguments to pass to :func:`json.dump`

    See also
    --------
    json.dump
        for details on acceptable keyword arguments
    """
    # write to filename
    if isinstance(fobj, string_types):
        with open(fobj, 'w') as fobj2:
            return write_json_flag(flag, fobj2, **kwargs)

    # build json packet
    data = {}
    data['ifo'] = flag.ifo
    data['name'] = flag.tag
    data['version'] = flag.version
    data['active'] = flag.active
    data['known'] = flag.known
    data['metadata'] = {}
    data['metadata']['active_indicates_ifo_badness'] = not flag.isgood
    data['metadata']['flag_description'] = flag.description

    # write
    json.dump(data, fobj, **kwargs)
[ "def", "write_json_flag", "(", "flag", ",", "fobj", ",", "*", "*", "kwargs", ")", ":", "# write to filename", "if", "isinstance", "(", "fobj", ",", "string_types", ")", ":", "with", "open", "(", "fobj", ",", "'w'", ")", "as", "fobj2", ":", "return", "w...
Write a `DataQualityFlag` to a JSON file

Parameters
----------
flag : `DataQualityFlag`
    data to write

fobj : `str`, `file`
    target file (or filename) to write

**kwargs
    other keyword arguments to pass to :func:`json.dump`

See also
--------
json.dump
    for details on acceptable keyword arguments
[ "Write", "a", "DataQualityFlag", "to", "a", "JSON", "file" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/json.py#L70-L106
train
211,537
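The two JSON functions round-trip through the packet layout built above; a minimal sketch (the flag contents are made up, and dispatch through ``format='json'`` is assumed from the registrations):

from gwpy.segments import DataQualityFlag

flag = DataQualityFlag('X1:TEST-FLAG:1',
                       known=[(0, 10)], active=[(2, 5)])
flag.write('flag.json', format='json')  # -> write_json_flag
flag2 = DataQualityFlag.read('flag.json', format='json')  # -> read_json_flag
assert flag2.name == 'X1:TEST-FLAG:1'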
gwpy/gwpy
gwpy/table/filters.py
in_segmentlist
def in_segmentlist(column, segmentlist):
    """Return the index of values lying inside the given segmentlist

    A `~gwpy.segments.Segment` represents a semi-open interval,
    so for any segment `[a, b)`, a value `x` is 'in' the segment if
    a <= x < b
    """
    segmentlist = type(segmentlist)(segmentlist).coalesce()
    idx = column.argsort()
    contains = numpy.zeros(column.shape[0], dtype=bool)
    j = 0
    try:
        segstart, segend = segmentlist[j]
    except IndexError:  # no segments, return all False
        return contains
    i = 0
    while i < contains.shape[0]:
        # extract time for this index
        x = idx[i]  # <- index in original column
        time = column[x]
        # if before start, move to next value
        if time < segstart:
            i += 1
            continue
        # if after end, find the next segment and check value again
        if time >= segend:
            j += 1
            try:
                segstart, segend = segmentlist[j]
                continue
            except IndexError:
                break
        # otherwise value must be in this segment
        contains[x] = True
        i += 1
    return contains
python
def in_segmentlist(column, segmentlist):
    """Return the index of values lying inside the given segmentlist

    A `~gwpy.segments.Segment` represents a semi-open interval,
    so for any segment `[a, b)`, a value `x` is 'in' the segment if
    a <= x < b
    """
    segmentlist = type(segmentlist)(segmentlist).coalesce()
    idx = column.argsort()
    contains = numpy.zeros(column.shape[0], dtype=bool)
    j = 0
    try:
        segstart, segend = segmentlist[j]
    except IndexError:  # no segments, return all False
        return contains
    i = 0
    while i < contains.shape[0]:
        # extract time for this index
        x = idx[i]  # <- index in original column
        time = column[x]
        # if before start, move to next value
        if time < segstart:
            i += 1
            continue
        # if after end, find the next segment and check value again
        if time >= segend:
            j += 1
            try:
                segstart, segend = segmentlist[j]
                continue
            except IndexError:
                break
        # otherwise value must be in this segment
        contains[x] = True
        i += 1
    return contains
[ "def", "in_segmentlist", "(", "column", ",", "segmentlist", ")", ":", "segmentlist", "=", "type", "(", "segmentlist", ")", "(", "segmentlist", ")", ".", "coalesce", "(", ")", "idx", "=", "column", ".", "argsort", "(", ")", "contains", "=", "numpy", ".", ...
Return the index of values lying inside the given segmentlist

A `~gwpy.segments.Segment` represents a semi-open interval,
so for any segment `[a, b)`, a value `x` is 'in' the segment if
a <= x < b
[ "Return", "the", "index", "of", "values", "lying", "inside", "the", "given", "segmentlist" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/filters.py#L37-L73
train
211,538
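Because `in_segmentlist` is a callable of the ``(column, operand)`` form, it slots directly into the filter tuples from ``gwpy.table.filter``; the semi-open ``[a, b)`` convention means a value equal to a segment's end is excluded:

import numpy
from gwpy.segments import Segment, SegmentList
from gwpy.table.filters import in_segmentlist

times = numpy.array([1., 2., 5., 9.])
segs = SegmentList([Segment(2, 5), Segment(8, 10)])
print(in_segmentlist(times, segs))  # -> [False  True False  True]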
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.fft
def fft(self, nfft=None):
    """Compute the one-dimensional discrete Fourier transform of
    this `TimeSeries`.

    Parameters
    ----------
    nfft : `int`, optional
        length of the desired Fourier transform, input will be
        cropped or padded to match the desired length.
        If nfft is not given, the length of the `TimeSeries`
        will be used

    Returns
    -------
    out : `~gwpy.frequencyseries.FrequencySeries`
        the normalised, complex-valued FFT `FrequencySeries`.

    See Also
    --------
    :mod:`scipy.fftpack` for the definition of the DFT and conventions
    used.

    Notes
    -----
    This method, in contrast to the :func:`numpy.fft.rfft` method
    it calls, applies the necessary normalisation such that the
    amplitude of the output `~gwpy.frequencyseries.FrequencySeries` is
    correct.
    """
    from ..frequencyseries import FrequencySeries
    if nfft is None:
        nfft = self.size
    dft = npfft.rfft(self.value, n=nfft) / nfft
    dft[1:] *= 2.0
    new = FrequencySeries(dft, epoch=self.epoch, unit=self.unit,
                          name=self.name, channel=self.channel)
    try:
        new.frequencies = npfft.rfftfreq(nfft, d=self.dx.value)
    except AttributeError:
        new.frequencies = numpy.arange(new.size) / (nfft * self.dx.value)
    return new
python
def fft(self, nfft=None):
    """Compute the one-dimensional discrete Fourier transform of
    this `TimeSeries`.

    Parameters
    ----------
    nfft : `int`, optional
        length of the desired Fourier transform, input will be
        cropped or padded to match the desired length.
        If nfft is not given, the length of the `TimeSeries`
        will be used

    Returns
    -------
    out : `~gwpy.frequencyseries.FrequencySeries`
        the normalised, complex-valued FFT `FrequencySeries`.

    See Also
    --------
    :mod:`scipy.fftpack` for the definition of the DFT and conventions
    used.

    Notes
    -----
    This method, in contrast to the :func:`numpy.fft.rfft` method
    it calls, applies the necessary normalisation such that the
    amplitude of the output `~gwpy.frequencyseries.FrequencySeries` is
    correct.
    """
    from ..frequencyseries import FrequencySeries
    if nfft is None:
        nfft = self.size
    dft = npfft.rfft(self.value, n=nfft) / nfft
    dft[1:] *= 2.0
    new = FrequencySeries(dft, epoch=self.epoch, unit=self.unit,
                          name=self.name, channel=self.channel)
    try:
        new.frequencies = npfft.rfftfreq(nfft, d=self.dx.value)
    except AttributeError:
        new.frequencies = numpy.arange(new.size) / (nfft * self.dx.value)
    return new
[ "def", "fft", "(", "self", ",", "nfft", "=", "None", ")", ":", "from", ".", ".", "frequencyseries", "import", "FrequencySeries", "if", "nfft", "is", "None", ":", "nfft", "=", "self", ".", "size", "dft", "=", "npfft", ".", "rfft", "(", "self", ".", ...
Compute the one-dimensional discrete Fourier transform of
this `TimeSeries`.

Parameters
----------
nfft : `int`, optional
    length of the desired Fourier transform, input will be
    cropped or padded to match the desired length.
    If nfft is not given, the length of the `TimeSeries`
    will be used

Returns
-------
out : `~gwpy.frequencyseries.FrequencySeries`
    the normalised, complex-valued FFT `FrequencySeries`.

See Also
--------
:mod:`scipy.fftpack` for the definition of the DFT and conventions
used.

Notes
-----
This method, in contrast to the :func:`numpy.fft.rfft` method
it calls, applies the necessary normalisation such that the
amplitude of the output `~gwpy.frequencyseries.FrequencySeries` is
correct.
[ "Compute", "the", "one", "-", "dimensional", "discrete", "Fourier", "transform", "of", "this", "TimeSeries", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L133-L173
train
211,539
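The normalisation described in the *Notes* (divide by ``nfft``, then double every non-DC bin) means a unit-amplitude sinusoid comes back with amplitude ~1 in its frequency bin; a quick check:

import numpy
from gwpy.timeseries import TimeSeries

# unit-amplitude 8 Hz sine wave, sampled at 128 Hz for one second
t = numpy.arange(128) / 128.
ts = TimeSeries(numpy.sin(2 * numpy.pi * 8 * t), sample_rate=128)
fs = ts.fft()
print(numpy.abs(fs.value).max())  # ~1.0, located at the 8 Hz bin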
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.average_fft
def average_fft(self, fftlength=None, overlap=0, window=None):
    """Compute the averaged one-dimensional DFT of this `TimeSeries`.

    This method computes a number of FFTs of duration ``fftlength``
    and ``overlap`` (both given in seconds), and returns the mean
    average. This method is analogous to the Welch average method
    for power spectra.

    Parameters
    ----------
    fftlength : `float`
        number of seconds in single FFT, defaults to using the whole
        `TimeSeries`

    overlap : `float`, optional
        number of seconds of overlap between FFTs, defaults to the
        recommended overlap for the given window (if given), or 0

    window : `str`, `numpy.ndarray`, optional
        window function to apply to timeseries prior to FFT,
        see :func:`scipy.signal.get_window` for details on acceptable
        formats

    Returns
    -------
    out : complex-valued `~gwpy.frequencyseries.FrequencySeries`
        the transformed output, with populated frequencies array
        metadata

    See Also
    --------
    :mod:`scipy.fftpack` for the definition of the DFT and conventions
    used.
    """
    from gwpy.spectrogram import Spectrogram

    # format lengths
    if fftlength is None:
        fftlength = self.duration
    if isinstance(fftlength, units.Quantity):
        fftlength = fftlength.value
    nfft = int((fftlength * self.sample_rate).decompose().value)
    noverlap = int((overlap * self.sample_rate).decompose().value)

    navg = divmod(self.size-noverlap, (nfft-noverlap))[0]

    # format window
    if window is None:
        window = 'boxcar'
    if isinstance(window, (str, tuple)):
        win = signal.get_window(window, nfft)
    else:
        win = numpy.asarray(window)
        if len(win.shape) != 1:
            raise ValueError('window must be 1-D')
        elif win.shape[0] != nfft:
            raise ValueError('Window is the wrong size.')
    win = win.astype(self.dtype)
    scaling = 1. / numpy.absolute(win).mean()

    if nfft % 2:
        nfreqs = (nfft + 1) // 2
    else:
        nfreqs = nfft // 2 + 1
    # note: built-in complex, rather than the deprecated numpy.complex
    ffts = Spectrogram(numpy.zeros((navg, nfreqs), dtype=complex),
                       channel=self.channel, epoch=self.epoch, f0=0,
                       df=1 / fftlength, dt=1, copy=True)

    # stride through TimeSeries, recording FFTs as columns of Spectrogram
    idx = 0
    for i in range(navg):
        # find step TimeSeries
        idx_end = idx + nfft
        if idx_end > self.size:
            continue
        stepseries = self[idx:idx_end].detrend() * win
        # calculate FFT, weight, and stack
        fft_ = stepseries.fft(nfft=nfft) * scaling
        ffts.value[i, :] = fft_.value
        idx += (nfft - noverlap)
    mean = ffts.mean(0)
    mean.name = self.name
    mean.epoch = self.epoch
    mean.channel = self.channel
    return mean
python
def average_fft(self, fftlength=None, overlap=0, window=None):
    """Compute the averaged one-dimensional DFT of this `TimeSeries`.

    This method computes a number of FFTs of duration ``fftlength``
    and ``overlap`` (both given in seconds), and returns the mean
    average. This method is analogous to the Welch average method
    for power spectra.

    Parameters
    ----------
    fftlength : `float`
        number of seconds in single FFT, defaults to using the whole
        `TimeSeries`

    overlap : `float`, optional
        number of seconds of overlap between FFTs, defaults to the
        recommended overlap for the given window (if given), or 0

    window : `str`, `numpy.ndarray`, optional
        window function to apply to timeseries prior to FFT,
        see :func:`scipy.signal.get_window` for details on acceptable
        formats

    Returns
    -------
    out : complex-valued `~gwpy.frequencyseries.FrequencySeries`
        the transformed output, with populated frequencies array
        metadata

    See Also
    --------
    :mod:`scipy.fftpack` for the definition of the DFT and conventions
    used.
    """
    from gwpy.spectrogram import Spectrogram

    # format lengths
    if fftlength is None:
        fftlength = self.duration
    if isinstance(fftlength, units.Quantity):
        fftlength = fftlength.value
    nfft = int((fftlength * self.sample_rate).decompose().value)
    noverlap = int((overlap * self.sample_rate).decompose().value)

    navg = divmod(self.size-noverlap, (nfft-noverlap))[0]

    # format window
    if window is None:
        window = 'boxcar'
    if isinstance(window, (str, tuple)):
        win = signal.get_window(window, nfft)
    else:
        win = numpy.asarray(window)
        if len(win.shape) != 1:
            raise ValueError('window must be 1-D')
        elif win.shape[0] != nfft:
            raise ValueError('Window is the wrong size.')
    win = win.astype(self.dtype)
    scaling = 1. / numpy.absolute(win).mean()

    if nfft % 2:
        nfreqs = (nfft + 1) // 2
    else:
        nfreqs = nfft // 2 + 1
    # note: built-in complex, rather than the deprecated numpy.complex
    ffts = Spectrogram(numpy.zeros((navg, nfreqs), dtype=complex),
                       channel=self.channel, epoch=self.epoch, f0=0,
                       df=1 / fftlength, dt=1, copy=True)

    # stride through TimeSeries, recording FFTs as columns of Spectrogram
    idx = 0
    for i in range(navg):
        # find step TimeSeries
        idx_end = idx + nfft
        if idx_end > self.size:
            continue
        stepseries = self[idx:idx_end].detrend() * win
        # calculate FFT, weight, and stack
        fft_ = stepseries.fft(nfft=nfft) * scaling
        ffts.value[i, :] = fft_.value
        idx += (nfft - noverlap)
    mean = ffts.mean(0)
    mean.name = self.name
    mean.epoch = self.epoch
    mean.channel = self.channel
    return mean
[ "def", "average_fft", "(", "self", ",", "fftlength", "=", "None", ",", "overlap", "=", "0", ",", "window", "=", "None", ")", ":", "from", "gwpy", ".", "spectrogram", "import", "Spectrogram", "# format lengths", "if", "fftlength", "is", "None", ":", "fftlen...
Compute the averaged one-dimensional DFT of this `TimeSeries`.

This method computes a number of FFTs of duration ``fftlength``
and ``overlap`` (both given in seconds), and returns the mean
average. This method is analogous to the Welch average method
for power spectra.

Parameters
----------
fftlength : `float`
    number of seconds in single FFT, defaults to using the whole
    `TimeSeries`

overlap : `float`, optional
    number of seconds of overlap between FFTs, defaults to the
    recommended overlap for the given window (if given), or 0

window : `str`, `numpy.ndarray`, optional
    window function to apply to timeseries prior to FFT,
    see :func:`scipy.signal.get_window` for details on acceptable
    formats

Returns
-------
out : complex-valued `~gwpy.frequencyseries.FrequencySeries`
    the transformed output, with populated frequencies array
    metadata

See Also
--------
:mod:`scipy.fftpack` for the definition of the DFT and conventions
used.
[ "Compute", "the", "averaged", "one", "-", "dimensional", "DFT", "of", "this", "TimeSeries", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L175-L257
train
211,540
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.psd
def psd(self, fftlength=None, overlap=None, window='hann',
        method=DEFAULT_FFT_METHOD, **kwargs):
    """Calculate the PSD `FrequencySeries` for this `TimeSeries`

    Parameters
    ----------
    fftlength : `float`
        number of seconds in single FFT, defaults to a single FFT
        covering the full duration

    overlap : `float`, optional
        number of seconds of overlap between FFTs, defaults to the
        recommended overlap for the given window (if given), or 0

    window : `str`, `numpy.ndarray`, optional
        window function to apply to timeseries prior to FFT,
        see :func:`scipy.signal.get_window` for details on acceptable
        formats

    method : `str`, optional
        FFT-averaging method, see *Notes* for more details

    **kwargs
        other keyword arguments are passed to the underlying
        PSD-generation method

    Returns
    -------
    psd : `~gwpy.frequencyseries.FrequencySeries`
        a data series containing the PSD.

    Notes
    -----
    The accepted ``method`` arguments are:

    - ``'bartlett'`` : a mean average of non-overlapping periodograms
    - ``'median'`` : a median average of overlapping periodograms
    - ``'welch'`` : a mean average of overlapping periodograms
    """
    # get method
    method_func = spectral.get_method(method)

    # calculate PSD using UI method
    return spectral.psd(self, method_func, fftlength=fftlength,
                        overlap=overlap, window=window, **kwargs)
python
def psd(self, fftlength=None, overlap=None, window='hann', method=DEFAULT_FFT_METHOD, **kwargs): """Calculate the PSD `FrequencySeries` for this `TimeSeries` Parameters ---------- fftlength : `float` number of seconds in single FFT, defaults to a single FFT covering the full duration overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats method : `str`, optional FFT-averaging method, see *Notes* for more details **kwargs other keyword arguments are passed to the underlying PSD-generation method Returns ------- psd : `~gwpy.frequencyseries.FrequencySeries` a data series containing the PSD. Notes ----- The accepted ``method`` arguments are: - ``'bartlett'`` : a mean average of non-overlapping periodograms - ``'median'`` : a median average of overlapping periodograms - ``'welch'`` : a mean average of overlapping periodograms """ # get method method_func = spectral.get_method(method) # calculate PSD using UI method return spectral.psd(self, method_func, fftlength=fftlength, overlap=overlap, window=window, **kwargs)
[ "def", "psd", "(", "self", ",", "fftlength", "=", "None", ",", "overlap", "=", "None", ",", "window", "=", "'hann'", ",", "method", "=", "DEFAULT_FFT_METHOD", ",", "*", "*", "kwargs", ")", ":", "# get method", "method_func", "=", "spectral", ".", "get_me...
Calculate the PSD `FrequencySeries` for this `TimeSeries` Parameters ---------- fftlength : `float` number of seconds in single FFT, defaults to a single FFT covering the full duration overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats method : `str`, optional FFT-averaging method, see *Notes* for more details **kwargs other keyword arguments are passed to the underlying PSD-generation method Returns ------- psd : `~gwpy.frequencyseries.FrequencySeries` a data series containing the PSD. Notes ----- The accepted ``method`` arguments are: - ``'bartlett'`` : a mean average of non-overlapping periodograms - ``'median'`` : a median average of overlapping periodograms - ``'welch'`` : a mean average of overlapping periodograms
[ "Calculate", "the", "PSD", "FrequencySeries", "for", "this", "TimeSeries" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L259-L303
train
211,541
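A short sketch of the `psd` record above, reusing the `data` series fetched in the first sketch; the ``method`` value is one of the three averages listed in the docstring:

>>> psd = data.psd(fftlength=4, overlap=2, window='hann', method='median')

Passing no ``fftlength`` would instead compute a single FFT over the full duration.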
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.asd
def asd(self, fftlength=None, overlap=None, window='hann',
        method=DEFAULT_FFT_METHOD, **kwargs):
    """Calculate the ASD `FrequencySeries` of this `TimeSeries`

    Parameters
    ----------
    fftlength : `float`
        number of seconds in single FFT, defaults to a single FFT
        covering the full duration

    overlap : `float`, optional
        number of seconds of overlap between FFTs, defaults to the
        recommended overlap for the given window (if given), or 0

    window : `str`, `numpy.ndarray`, optional
        window function to apply to timeseries prior to FFT,
        see :func:`scipy.signal.get_window` for details on acceptable
        formats

    method : `str`, optional
        FFT-averaging method, see *Notes* for more details

    Returns
    -------
    asd : `~gwpy.frequencyseries.FrequencySeries`
        a data series containing the ASD.

    See also
    --------
    TimeSeries.psd

    Notes
    -----
    The accepted ``method`` arguments are:

    - ``'bartlett'`` : a mean average of non-overlapping periodograms
    - ``'median'`` : a median average of overlapping periodograms
    - ``'welch'`` : a mean average of overlapping periodograms
    """
    return self.psd(method=method, fftlength=fftlength,
                    overlap=overlap, window=window, **kwargs) ** (1/2.)
python
def asd(self, fftlength=None, overlap=None, window='hann',
        method=DEFAULT_FFT_METHOD, **kwargs):
    """Calculate the ASD `FrequencySeries` of this `TimeSeries`

    Parameters
    ----------
    fftlength : `float`
        number of seconds in single FFT, defaults to a single FFT
        covering the full duration

    overlap : `float`, optional
        number of seconds of overlap between FFTs, defaults to the
        recommended overlap for the given window (if given), or 0

    window : `str`, `numpy.ndarray`, optional
        window function to apply to timeseries prior to FFT,
        see :func:`scipy.signal.get_window` for details on acceptable
        formats

    method : `str`, optional
        FFT-averaging method, see *Notes* for more details

    Returns
    -------
    asd : `~gwpy.frequencyseries.FrequencySeries`
        a data series containing the ASD.

    See also
    --------
    TimeSeries.psd

    Notes
    -----
    The accepted ``method`` arguments are:

    - ``'bartlett'`` : a mean average of non-overlapping periodograms
    - ``'median'`` : a median average of overlapping periodograms
    - ``'welch'`` : a mean average of overlapping periodograms
    """
    return self.psd(method=method, fftlength=fftlength,
                    overlap=overlap, window=window, **kwargs) ** (1/2.)
[ "def", "asd", "(", "self", ",", "fftlength", "=", "None", ",", "overlap", "=", "None", ",", "window", "=", "'hann'", ",", "method", "=", "DEFAULT_FFT_METHOD", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "psd", "(", "method", "=", "metho...
Calculate the ASD `FrequencySeries` of this `TimeSeries` Parameters ---------- fftlength : `float` number of seconds in single FFT, defaults to a single FFT covering the full duration overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats method : `str`, optional FFT-averaging method, see *Notes* for more details Returns ------- asd : `~gwpy.frequencyseries.FrequencySeries` a data series containing the ASD. See also -------- TimeSeries.psd Notes ----- The accepted ``method`` arguments are: - ``'bartlett'`` : a mean average of non-overlapping periodograms - ``'median'`` : a median average of overlapping periodograms - ``'welch'`` : a mean average of overlapping periodograms
[ "Calculate", "the", "ASD", "FrequencySeries", "of", "this", "TimeSeries" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L305-L345
train
211,542
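A sketch of the `asd` record above, again reusing `data`; by construction the result equals the square root of the corresponding PSD:

>>> asd = data.asd(fftlength=4, overlap=2)
>>> # numerically identical to data.psd(fftlength=4, overlap=2) ** 0.5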
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.csd
def csd(self, other, fftlength=None, overlap=None, window='hann', **kwargs): """Calculate the CSD `FrequencySeries` for two `TimeSeries` Parameters ---------- other : `TimeSeries` the second `TimeSeries` in this CSD calculation fftlength : `float` number of seconds in single FFT, defaults to a single FFT covering the full duration overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats Returns ------- csd : `~gwpy.frequencyseries.FrequencySeries` a data series containing the CSD. """ return spectral.psd( (self, other), spectral.csd, fftlength=fftlength, overlap=overlap, window=window, **kwargs )
python
def csd(self, other, fftlength=None, overlap=None, window='hann', **kwargs): """Calculate the CSD `FrequencySeries` for two `TimeSeries` Parameters ---------- other : `TimeSeries` the second `TimeSeries` in this CSD calculation fftlength : `float` number of seconds in single FFT, defaults to a single FFT covering the full duration overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats Returns ------- csd : `~gwpy.frequencyseries.FrequencySeries` a data series containing the CSD. """ return spectral.psd( (self, other), spectral.csd, fftlength=fftlength, overlap=overlap, window=window, **kwargs )
[ "def", "csd", "(", "self", ",", "other", ",", "fftlength", "=", "None", ",", "overlap", "=", "None", ",", "window", "=", "'hann'", ",", "*", "*", "kwargs", ")", ":", "return", "spectral", ".", "psd", "(", "(", "self", ",", "other", ")", ",", "spe...
Calculate the CSD `FrequencySeries` for two `TimeSeries` Parameters ---------- other : `TimeSeries` the second `TimeSeries` in this CSD calculation fftlength : `float` number of seconds in single FFT, defaults to a single FFT covering the full duration overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats Returns ------- csd : `~gwpy.frequencyseries.FrequencySeries` a data series containing the CSD.
[ "Calculate", "the", "CSD", "FrequencySeries", "for", "two", "TimeSeries" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L347-L381
train
211,543
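A sketch of the `csd` record above; it needs a second series, so we fetch the L1 strain for the same (illustrative) GPS interval:

>>> other = TimeSeries.fetch_open_data('L1', 1126259446, 1126259478)
>>> csd = data.csd(other, fftlength=4, overlap=2)

The output is complex-valued, since the cross spectral density carries relative phase information between the two inputs.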
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.spectrogram
def spectrogram(self, stride, fftlength=None, overlap=None, window='hann',
                method=DEFAULT_FFT_METHOD, nproc=1, **kwargs):
    """Calculate the average power spectrogram of this `TimeSeries`
    using the specified average spectrum method.

    Each time-bin of the output `Spectrogram` is calculated by taking
    a chunk of the `TimeSeries` in the segment
    `[t - overlap/2., t + stride + overlap/2.)` and calculating the
    :meth:`~gwpy.timeseries.TimeSeries.psd` of those data.

    As a result, each time-bin is calculated using `stride + overlap`
    seconds of data.

    Parameters
    ----------
    stride : `float`
        number of seconds in single PSD (column of spectrogram).

    fftlength : `float`
        number of seconds in single FFT.

    overlap : `float`, optional
        number of seconds of overlap between FFTs, defaults to the
        recommended overlap for the given window (if given), or 0

    window : `str`, `numpy.ndarray`, optional
        window function to apply to timeseries prior to FFT,
        see :func:`scipy.signal.get_window` for details on acceptable
        formats

    method : `str`, optional
        FFT-averaging method, see *Notes* for more details

    nproc : `int`
        number of CPUs to use in parallel processing of FFTs

    Returns
    -------
    spectrogram : `~gwpy.spectrogram.Spectrogram`
        time-frequency power spectrogram as generated from the
        input time-series.

    Notes
    -----
    The accepted ``method`` arguments are:

    - ``'bartlett'`` : a mean average of non-overlapping periodograms
    - ``'median'`` : a median average of overlapping periodograms
    - ``'welch'`` : a mean average of overlapping periodograms
    """
    # get method
    method_func = spectral.get_method(method)

    # calculate PSD using UI method, forwarding ``nproc`` so that the
    # documented parallel option actually takes effect
    return spectral.average_spectrogram(
        self,
        method_func,
        stride,
        fftlength=fftlength,
        overlap=overlap,
        window=window,
        nproc=nproc,
        **kwargs
    )
python
def spectrogram(self, stride, fftlength=None, overlap=None, window='hann',
                method=DEFAULT_FFT_METHOD, nproc=1, **kwargs):
    """Calculate the average power spectrogram of this `TimeSeries`
    using the specified average spectrum method.

    Each time-bin of the output `Spectrogram` is calculated by taking
    a chunk of the `TimeSeries` in the segment
    `[t - overlap/2., t + stride + overlap/2.)` and calculating the
    :meth:`~gwpy.timeseries.TimeSeries.psd` of those data.

    As a result, each time-bin is calculated using `stride + overlap`
    seconds of data.

    Parameters
    ----------
    stride : `float`
        number of seconds in single PSD (column of spectrogram).

    fftlength : `float`
        number of seconds in single FFT.

    overlap : `float`, optional
        number of seconds of overlap between FFTs, defaults to the
        recommended overlap for the given window (if given), or 0

    window : `str`, `numpy.ndarray`, optional
        window function to apply to timeseries prior to FFT,
        see :func:`scipy.signal.get_window` for details on acceptable
        formats

    method : `str`, optional
        FFT-averaging method, see *Notes* for more details

    nproc : `int`
        number of CPUs to use in parallel processing of FFTs

    Returns
    -------
    spectrogram : `~gwpy.spectrogram.Spectrogram`
        time-frequency power spectrogram as generated from the
        input time-series.

    Notes
    -----
    The accepted ``method`` arguments are:

    - ``'bartlett'`` : a mean average of non-overlapping periodograms
    - ``'median'`` : a median average of overlapping periodograms
    - ``'welch'`` : a mean average of overlapping periodograms
    """
    # get method
    method_func = spectral.get_method(method)

    # calculate PSD using UI method, forwarding ``nproc`` so that the
    # documented parallel option actually takes effect
    return spectral.average_spectrogram(
        self,
        method_func,
        stride,
        fftlength=fftlength,
        overlap=overlap,
        window=window,
        nproc=nproc,
        **kwargs
    )
[ "def", "spectrogram", "(", "self", ",", "stride", ",", "fftlength", "=", "None", ",", "overlap", "=", "None", ",", "window", "=", "'hann'", ",", "method", "=", "DEFAULT_FFT_METHOD", ",", "nproc", "=", "1", ",", "*", "*", "kwargs", ")", ":", "# get meth...
Calculate the average power spectrogram of this `TimeSeries` using the specified average spectrum method. Each time-bin of the output `Spectrogram` is calculated by taking a chunk of the `TimeSeries` in the segment `[t - overlap/2., t + stride + overlap/2.)` and calculating the :meth:`~gwpy.timeseries.TimeSeries.psd` of those data. As a result, each time-bin is calculated using `stride + overlap` seconds of data. Parameters ---------- stride : `float` number of seconds in single PSD (column of spectrogram). fftlength : `float` number of seconds in single FFT. overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats method : `str`, optional FFT-averaging method, see *Notes* for more details nproc : `int` number of CPUs to use in parallel processing of FFTs Returns ------- spectrogram : `~gwpy.spectrogram.Spectrogram` time-frequency power spectrogram as generated from the input time-series. Notes ----- The accepted ``method`` arguments are: - ``'bartlett'`` : a mean average of non-overlapping periodograms - ``'median'`` : a median average of overlapping periodograms - ``'welch'`` : a mean average of overlapping periodograms
[ "Calculate", "the", "average", "power", "spectrogram", "of", "this", "TimeSeries", "using", "the", "specified", "average", "spectrum", "method", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L383-L445
train
211,544
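A sketch of the `spectrogram` record above; the stride and FFT parameters are illustrative, chosen so that each 8-second column averages several overlapping 4-second FFTs:

>>> sg = data.spectrogram(8, fftlength=4, overlap=2, method='welch')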
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.spectrogram2
def spectrogram2(self, fftlength, overlap=None, window='hann', **kwargs):
    """Calculate the non-averaged power `Spectrogram` of this `TimeSeries`

    Parameters
    ----------
    fftlength : `float`
        number of seconds in single FFT.

    overlap : `float`, optional
        number of seconds of overlap between FFTs, defaults to the
        recommended overlap for the given window (if given), or 0

    window : `str`, `numpy.ndarray`, optional
        window function to apply to timeseries prior to FFT,
        see :func:`scipy.signal.get_window` for details on acceptable
        formats

    scaling : [ 'density' | 'spectrum' ], optional
        selects between computing the power spectral density ('density')
        where the `Spectrogram` has units of V**2/Hz if the input is
        measured in V and computing the power spectrum ('spectrum')
        where the `Spectrogram` has units of V**2 if the input is
        measured in V. Defaults to 'density'.

    **kwargs
        other parameters to be passed to `scipy.signal.periodogram` for
        each column of the `Spectrogram`

    Returns
    -------
    spectrogram : `~gwpy.spectrogram.Spectrogram`
        a power `Spectrogram` with `1/fftlength` frequency resolution and
        (fftlength - overlap) time resolution.

    See also
    --------
    scipy.signal.periodogram
        for documentation on the Fourier methods used in this calculation

    Notes
    -----
    This method calculates overlapping periodograms for all possible
    chunks of data entirely contained within the span of the input
    `TimeSeries`, then normalises the power in overlapping chunks using
    a triangular window centred on that chunk which most overlaps the
    given `Spectrogram` time sample.
    """
    # set kwargs for periodogram()
    kwargs.setdefault('fs', self.sample_rate.to('Hz').value)
    # run
    return spectral.spectrogram(self, signal.periodogram,
                                fftlength=fftlength, overlap=overlap,
                                window=window, **kwargs)
python
def spectrogram2(self, fftlength, overlap=None, window='hann', **kwargs):
    """Calculate the non-averaged power `Spectrogram` of this `TimeSeries`

    Parameters
    ----------
    fftlength : `float`
        number of seconds in single FFT.

    overlap : `float`, optional
        number of seconds of overlap between FFTs, defaults to the
        recommended overlap for the given window (if given), or 0

    window : `str`, `numpy.ndarray`, optional
        window function to apply to timeseries prior to FFT,
        see :func:`scipy.signal.get_window` for details on acceptable
        formats

    scaling : [ 'density' | 'spectrum' ], optional
        selects between computing the power spectral density ('density')
        where the `Spectrogram` has units of V**2/Hz if the input is
        measured in V and computing the power spectrum ('spectrum')
        where the `Spectrogram` has units of V**2 if the input is
        measured in V. Defaults to 'density'.

    **kwargs
        other parameters to be passed to `scipy.signal.periodogram` for
        each column of the `Spectrogram`

    Returns
    -------
    spectrogram : `~gwpy.spectrogram.Spectrogram`
        a power `Spectrogram` with `1/fftlength` frequency resolution and
        (fftlength - overlap) time resolution.

    See also
    --------
    scipy.signal.periodogram
        for documentation on the Fourier methods used in this calculation

    Notes
    -----
    This method calculates overlapping periodograms for all possible
    chunks of data entirely contained within the span of the input
    `TimeSeries`, then normalises the power in overlapping chunks using
    a triangular window centred on that chunk which most overlaps the
    given `Spectrogram` time sample.
    """
    # set kwargs for periodogram()
    kwargs.setdefault('fs', self.sample_rate.to('Hz').value)
    # run
    return spectral.spectrogram(self, signal.periodogram,
                                fftlength=fftlength, overlap=overlap,
                                window=window, **kwargs)
[ "def", "spectrogram2", "(", "self", ",", "fftlength", ",", "overlap", "=", "None", ",", "window", "=", "'hann'", ",", "*", "*", "kwargs", ")", ":", "# set kwargs for periodogram()", "kwargs", ".", "setdefault", "(", "'fs'", ",", "self", ".", "sample_rate", ...
Calculate the non-averaged power `Spectrogram` of this `TimeSeries` Parameters ---------- fftlength : `float` number of seconds in single FFT. overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats scaling : [ 'density' | 'spectrum' ], optional selects between computing the power spectral density ('density') where the `Spectrogram` has units of V**2/Hz if the input is measured in V and computing the power spectrum ('spectrum') where the `Spectrogram` has units of V**2 if the input is measured in V. Defaults to 'density'. **kwargs other parameters to be passed to `scipy.signal.periodogram` for each column of the `Spectrogram` Returns ------- spectrogram : `~gwpy.spectrogram.Spectrogram` a power `Spectrogram` with `1/fftlength` frequency resolution and (fftlength - overlap) time resolution. See also -------- scipy.signal.periodogram for documentation on the Fourier methods used in this calculation Notes ----- This method calculates overlapping periodograms for all possible chunks of data entirely contained within the span of the input `TimeSeries`, then normalises the power in overlapping chunks using a triangular window centred on that chunk which most overlaps the given `Spectrogram` time sample.
[ "Calculate", "the", "non", "-", "averaged", "power", "Spectrogram", "of", "this", "TimeSeries" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L447-L499
train
211,545
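A sketch of the `spectrogram2` record above; with ``fftlength=1`` and ``overlap=0.5`` the output has 1 Hz frequency resolution and 0.5 s time resolution, as the docstring describes:

>>> sg2 = data.spectrogram2(fftlength=1, overlap=0.5, scaling='density')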
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.fftgram
def fftgram(self, fftlength, overlap=None, window='hann', **kwargs):
    """Calculate the Fourier-gram of this `TimeSeries`.

    A single, complex FFT of duration ``fftlength`` is calculated at
    each step of ``fftlength - overlap`` seconds.

    Parameters
    ----------
    fftlength : `float`
        number of seconds in single FFT.

    overlap : `float`, optional
        number of seconds of overlap between FFTs, defaults to the
        `scipy.signal.spectrogram` default of ``nperseg // 8`` samples

    window : `str`, `numpy.ndarray`, optional
        window function to apply to timeseries prior to FFT,
        see :func:`scipy.signal.get_window` for details on acceptable
        formats

    Returns
    -------
    out : `~gwpy.spectrogram.Spectrogram`
        a Fourier-gram: the complex FFTs of this `TimeSeries` stacked
        as a time-frequency array
    """
    from ..spectrogram import Spectrogram
    try:
        from scipy.signal import spectrogram
    except ImportError:
        raise ImportError("Must have scipy>=0.16 to utilize "
                          "this method.")
    # format lengths
    if isinstance(fftlength, units.Quantity):
        fftlength = fftlength.value
    nfft = int((fftlength * self.sample_rate).decompose().value)
    if not overlap:
        # use scipy.signal.spectrogram noverlap default (nperseg // 8)
        noverlap = nfft // 8
    else:
        noverlap = int((overlap * self.sample_rate).decompose().value)
    # generate output spectrogram
    [frequencies, times, sxx] = spectrogram(self, fs=self.sample_rate.value,
                                            window=window,
                                            nperseg=nfft,
                                            noverlap=noverlap,
                                            mode='complex',
                                            **kwargs)
    return Spectrogram(sxx.T, name=self.name, unit=self.unit,
                       xindex=self.t0.value + times,
                       yindex=frequencies)
python
def fftgram(self, fftlength, overlap=None, window='hann', **kwargs):
    """Calculate the Fourier-gram of this `TimeSeries`.

    A single, complex FFT of duration ``fftlength`` is calculated at
    each step of ``fftlength - overlap`` seconds.

    Parameters
    ----------
    fftlength : `float`
        number of seconds in single FFT.

    overlap : `float`, optional
        number of seconds of overlap between FFTs, defaults to the
        `scipy.signal.spectrogram` default of ``nperseg // 8`` samples

    window : `str`, `numpy.ndarray`, optional
        window function to apply to timeseries prior to FFT,
        see :func:`scipy.signal.get_window` for details on acceptable
        formats

    Returns
    -------
    out : `~gwpy.spectrogram.Spectrogram`
        a Fourier-gram: the complex FFTs of this `TimeSeries` stacked
        as a time-frequency array
    """
    from ..spectrogram import Spectrogram
    try:
        from scipy.signal import spectrogram
    except ImportError:
        raise ImportError("Must have scipy>=0.16 to utilize "
                          "this method.")
    # format lengths
    if isinstance(fftlength, units.Quantity):
        fftlength = fftlength.value
    nfft = int((fftlength * self.sample_rate).decompose().value)
    if not overlap:
        # use scipy.signal.spectrogram noverlap default (nperseg // 8)
        noverlap = nfft // 8
    else:
        noverlap = int((overlap * self.sample_rate).decompose().value)
    # generate output spectrogram
    [frequencies, times, sxx] = spectrogram(self, fs=self.sample_rate.value,
                                            window=window,
                                            nperseg=nfft,
                                            noverlap=noverlap,
                                            mode='complex',
                                            **kwargs)
    return Spectrogram(sxx.T, name=self.name, unit=self.unit,
                       xindex=self.t0.value + times,
                       yindex=frequencies)
[ "def", "fftgram", "(", "self", ",", "fftlength", ",", "overlap", "=", "None", ",", "window", "=", "'hann'", ",", "*", "*", "kwargs", ")", ":", "from", ".", ".", "spectrogram", "import", "Spectrogram", "try", ":", "from", "scipy", ".", "signal", "import...
Calculate the Fourier-gram of this `TimeSeries`. A single, complex FFT of duration ``fftlength`` is calculated at each step of ``fftlength - overlap`` seconds. Parameters ---------- fftlength : `float` number of seconds in single FFT. overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the `scipy.signal.spectrogram` default of ``nperseg // 8`` samples window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats Returns ------- out : `~gwpy.spectrogram.Spectrogram` a Fourier-gram: the complex FFTs of this `TimeSeries` stacked as a time-frequency array
[ "Calculate", "the", "Fourier", "-", "gram", "of", "this", "TimeSeries", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L501-L554
train
211,546
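A sketch of the `fftgram` record above; unlike `spectrogram`, the columns here are the raw complex FFTs, so phase information is preserved:

>>> fg = data.fftgram(2, overlap=1, window='hann')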
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.spectral_variance
def spectral_variance(self, stride, fftlength=None, overlap=None, method=DEFAULT_FFT_METHOD, window='hann', nproc=1, filter=None, bins=None, low=None, high=None, nbins=500, log=False, norm=False, density=False): """Calculate the `SpectralVariance` of this `TimeSeries`. Parameters ---------- stride : `float` number of seconds in single PSD (column of spectrogram) fftlength : `float` number of seconds in single FFT method : `str`, optional FFT-averaging method, see *Notes* for more details overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats nproc : `int` maximum number of independent frame reading processes, default is set to single-process file reading. bins : `numpy.ndarray`, optional, default `None` array of histogram bin edges, including the rightmost edge low : `float`, optional left edge of lowest amplitude bin, only read if ``bins`` is not given high : `float`, optional right edge of highest amplitude bin, only read if ``bins`` is not given nbins : `int`, optional number of bins to generate, only read if ``bins`` is not given log : `bool`, optional calculate amplitude bins over a logarithmic scale, only read if ``bins`` is not given norm : `bool`, optional normalise bin counts to a unit sum density : `bool`, optional normalise bin counts to a unit integral Returns ------- specvar : `SpectralVariance` 2D-array of spectral frequency-amplitude counts See Also -------- :func:`numpy.histogram` for details on specifying bins and weights Notes ----- The accepted ``method`` arguments are: - ``'bartlett'`` : a mean average of non-overlapping periodograms - ``'median'`` : a median average of overlapping periodograms - ``'welch'`` : a mean average of overlapping periodograms """ specgram = self.spectrogram(stride, fftlength=fftlength, overlap=overlap, method=method, window=window, nproc=nproc) ** (1/2.) if filter: specgram = specgram.filter(*filter) return specgram.variance(bins=bins, low=low, high=high, nbins=nbins, log=log, norm=norm, density=density)
python
def spectral_variance(self, stride, fftlength=None, overlap=None, method=DEFAULT_FFT_METHOD, window='hann', nproc=1, filter=None, bins=None, low=None, high=None, nbins=500, log=False, norm=False, density=False): """Calculate the `SpectralVariance` of this `TimeSeries`. Parameters ---------- stride : `float` number of seconds in single PSD (column of spectrogram) fftlength : `float` number of seconds in single FFT method : `str`, optional FFT-averaging method, see *Notes* for more details overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats nproc : `int` maximum number of independent frame reading processes, default is set to single-process file reading. bins : `numpy.ndarray`, optional, default `None` array of histogram bin edges, including the rightmost edge low : `float`, optional left edge of lowest amplitude bin, only read if ``bins`` is not given high : `float`, optional right edge of highest amplitude bin, only read if ``bins`` is not given nbins : `int`, optional number of bins to generate, only read if ``bins`` is not given log : `bool`, optional calculate amplitude bins over a logarithmic scale, only read if ``bins`` is not given norm : `bool`, optional normalise bin counts to a unit sum density : `bool`, optional normalise bin counts to a unit integral Returns ------- specvar : `SpectralVariance` 2D-array of spectral frequency-amplitude counts See Also -------- :func:`numpy.histogram` for details on specifying bins and weights Notes ----- The accepted ``method`` arguments are: - ``'bartlett'`` : a mean average of non-overlapping periodograms - ``'median'`` : a median average of overlapping periodograms - ``'welch'`` : a mean average of overlapping periodograms """ specgram = self.spectrogram(stride, fftlength=fftlength, overlap=overlap, method=method, window=window, nproc=nproc) ** (1/2.) if filter: specgram = specgram.filter(*filter) return specgram.variance(bins=bins, low=low, high=high, nbins=nbins, log=log, norm=norm, density=density)
[ "def", "spectral_variance", "(", "self", ",", "stride", ",", "fftlength", "=", "None", ",", "overlap", "=", "None", ",", "method", "=", "DEFAULT_FFT_METHOD", ",", "window", "=", "'hann'", ",", "nproc", "=", "1", ",", "filter", "=", "None", ",", "bins", ...
Calculate the `SpectralVariance` of this `TimeSeries`. Parameters ---------- stride : `float` number of seconds in single PSD (column of spectrogram) fftlength : `float` number of seconds in single FFT method : `str`, optional FFT-averaging method, see *Notes* for more details overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats nproc : `int` maximum number of independent frame reading processes, default is set to single-process file reading. bins : `numpy.ndarray`, optional, default `None` array of histogram bin edges, including the rightmost edge low : `float`, optional left edge of lowest amplitude bin, only read if ``bins`` is not given high : `float`, optional right edge of highest amplitude bin, only read if ``bins`` is not given nbins : `int`, optional number of bins to generate, only read if ``bins`` is not given log : `bool`, optional calculate amplitude bins over a logarithmic scale, only read if ``bins`` is not given norm : `bool`, optional normalise bin counts to a unit sum density : `bool`, optional normalise bin counts to a unit integral Returns ------- specvar : `SpectralVariance` 2D-array of spectral frequency-amplitude counts See Also -------- :func:`numpy.histogram` for details on specifying bins and weights Notes ----- The accepted ``method`` arguments are: - ``'bartlett'`` : a mean average of non-overlapping periodograms - ``'median'`` : a median average of overlapping periodograms - ``'welch'`` : a mean average of overlapping periodograms
[ "Calculate", "the", "SpectralVariance", "of", "this", "TimeSeries", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L556-L635
train
211,547
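A sketch of the `spectral_variance` record above; the amplitude-bin limits are illustrative values only, chosen to bracket typical LIGO strain ASD amplitudes:

>>> sv = data.spectral_variance(8, fftlength=4, overlap=2, log=True,
...                             low=1e-24, high=1e-19, nbins=100)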
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.rayleigh_spectrum
def rayleigh_spectrum(self, fftlength=None, overlap=None):
    """Calculate the Rayleigh `FrequencySeries` for this `TimeSeries`.

    The Rayleigh statistic is calculated as the ratio of the standard
    deviation and the mean of a number of periodograms.

    Parameters
    ----------
    fftlength : `float`
        number of seconds in single FFT, defaults to a single FFT
        covering the full duration

    overlap : `float`, optional
        number of seconds of overlap between FFTs, defaults to that of
        the relevant method.

    Returns
    -------
    rayleigh : `~gwpy.frequencyseries.FrequencySeries`
        a data series containing the Rayleigh statistic.
    """
    return spectral.psd(
        self,
        spectral.rayleigh,
        fftlength=fftlength,
        overlap=overlap,
    )
python
def rayleigh_spectrum(self, fftlength=None, overlap=None):
    """Calculate the Rayleigh `FrequencySeries` for this `TimeSeries`.

    The Rayleigh statistic is calculated as the ratio of the standard
    deviation and the mean of a number of periodograms.

    Parameters
    ----------
    fftlength : `float`
        number of seconds in single FFT, defaults to a single FFT
        covering the full duration

    overlap : `float`, optional
        number of seconds of overlap between FFTs, defaults to that of
        the relevant method.

    Returns
    -------
    rayleigh : `~gwpy.frequencyseries.FrequencySeries`
        a data series containing the Rayleigh statistic.
    """
    return spectral.psd(
        self,
        spectral.rayleigh,
        fftlength=fftlength,
        overlap=overlap,
    )
[ "def", "rayleigh_spectrum", "(", "self", ",", "fftlength", "=", "None", ",", "overlap", "=", "None", ")", ":", "return", "spectral", ".", "psd", "(", "self", ",", "spectral", ".", "rayleigh", ",", "fftlength", "=", "fftlength", ",", "overlap", "=", "over...
Calculate the Rayleigh `FrequencySeries` for this `TimeSeries`. The Rayleigh statistic is calculated as the ratio of the standard deviation and the mean of a number of periodograms. Parameters ---------- fftlength : `float` number of seconds in single FFT, defaults to a single FFT covering the full duration overlap : `float`, optional number of seconds of overlap between FFTs, defaults to that of the relevant method. Returns ------- rayleigh : `~gwpy.frequencyseries.FrequencySeries` a data series containing the Rayleigh statistic.
[ "Calculate", "the", "Rayleigh", "FrequencySeries", "for", "this", "TimeSeries", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L637-L663
train
211,548
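A sketch of the `rayleigh_spectrum` record above; for stationary Gaussian noise the statistic hovers around 1, so larger values flag non-stationary frequency bins:

>>> ray = data.rayleigh_spectrum(fftlength=4, overlap=2)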
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.rayleigh_spectrogram
def rayleigh_spectrogram(self, stride, fftlength=None, overlap=0,
                         nproc=1, **kwargs):
    """Calculate the Rayleigh statistic spectrogram of this `TimeSeries`

    Parameters
    ----------
    stride : `float`
        number of seconds in single PSD (column of spectrogram).

    fftlength : `float`
        number of seconds in single FFT.

    overlap : `float`, optional
        number of seconds of overlap between FFTs, default: ``0``

    nproc : `int`, optional
        maximum number of independent frame reading processes,
        default: ``1``

    Returns
    -------
    spectrogram : `~gwpy.spectrogram.Spectrogram`
        time-frequency Rayleigh spectrogram as generated from the
        input time-series.

    See Also
    --------
    TimeSeries.rayleigh
        for details of the statistic calculation
    """
    specgram = spectral.average_spectrogram(
        self, spectral.rayleigh, stride, fftlength=fftlength,
        overlap=overlap, nproc=nproc, **kwargs
    )
    specgram.override_unit('')
    return specgram
python
def rayleigh_spectrogram(self, stride, fftlength=None, overlap=0,
                         nproc=1, **kwargs):
    """Calculate the Rayleigh statistic spectrogram of this `TimeSeries`

    Parameters
    ----------
    stride : `float`
        number of seconds in single PSD (column of spectrogram).

    fftlength : `float`
        number of seconds in single FFT.

    overlap : `float`, optional
        number of seconds of overlap between FFTs, default: ``0``

    nproc : `int`, optional
        maximum number of independent frame reading processes,
        default: ``1``

    Returns
    -------
    spectrogram : `~gwpy.spectrogram.Spectrogram`
        time-frequency Rayleigh spectrogram as generated from the
        input time-series.

    See Also
    --------
    TimeSeries.rayleigh
        for details of the statistic calculation
    """
    specgram = spectral.average_spectrogram(
        self, spectral.rayleigh, stride, fftlength=fftlength,
        overlap=overlap, nproc=nproc, **kwargs
    )
    specgram.override_unit('')
    return specgram
[ "def", "rayleigh_spectrogram", "(", "self", ",", "stride", ",", "fftlength", "=", "None", ",", "overlap", "=", "0", ",", "nproc", "=", "1", ",", "*", "*", "kwargs", ")", ":", "specgram", "=", "spectral", ".", "average_spectrogram", "(", "self", ",", "s...
Calculate the Rayleigh statistic spectrogram of this `TimeSeries` Parameters ---------- stride : `float` number of seconds in single PSD (column of spectrogram). fftlength : `float` number of seconds in single FFT. overlap : `float`, optional number of seconds of overlap between FFTs, default: ``0`` nproc : `int`, optional maximum number of independent frame reading processes, default: ``1`` Returns ------- spectrogram : `~gwpy.spectrogram.Spectrogram` time-frequency Rayleigh spectrogram as generated from the input time-series. See Also -------- TimeSeries.rayleigh for details of the statistic calculation
[ "Calculate", "the", "Rayleigh", "statistic", "spectrogram", "of", "this", "TimeSeries" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L665-L705
train
211,549
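A sketch of the `rayleigh_spectrogram` record above, using the same illustrative stride and FFT parameters as the power-spectrogram sketch:

>>> rsg = data.rayleigh_spectrogram(8, fftlength=4, overlap=2)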
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.csd_spectrogram
def csd_spectrogram(self, other, stride, fftlength=None, overlap=0, window='hann', nproc=1, **kwargs): """Calculate the cross spectral density spectrogram of this `TimeSeries` with 'other'. Parameters ---------- other : `~gwpy.timeseries.TimeSeries` second time-series for cross spectral density calculation stride : `float` number of seconds in single PSD (column of spectrogram). fftlength : `float` number of seconds in single FFT. overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats nproc : `int` maximum number of independent frame reading processes, default is set to single-process file reading. Returns ------- spectrogram : `~gwpy.spectrogram.Spectrogram` time-frequency cross spectrogram as generated from the two input time-series. """ return spectral.average_spectrogram( (self, other), spectral.csd, stride, fftlength=fftlength, overlap=overlap, window=window, nproc=nproc, **kwargs )
python
def csd_spectrogram(self, other, stride, fftlength=None, overlap=0, window='hann', nproc=1, **kwargs): """Calculate the cross spectral density spectrogram of this `TimeSeries` with 'other'. Parameters ---------- other : `~gwpy.timeseries.TimeSeries` second time-series for cross spectral density calculation stride : `float` number of seconds in single PSD (column of spectrogram). fftlength : `float` number of seconds in single FFT. overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats nproc : `int` maximum number of independent frame reading processes, default is set to single-process file reading. Returns ------- spectrogram : `~gwpy.spectrogram.Spectrogram` time-frequency cross spectrogram as generated from the two input time-series. """ return spectral.average_spectrogram( (self, other), spectral.csd, stride, fftlength=fftlength, overlap=overlap, window=window, nproc=nproc, **kwargs )
[ "def", "csd_spectrogram", "(", "self", ",", "other", ",", "stride", ",", "fftlength", "=", "None", ",", "overlap", "=", "0", ",", "window", "=", "'hann'", ",", "nproc", "=", "1", ",", "*", "*", "kwargs", ")", ":", "return", "spectral", ".", "average_...
Calculate the cross spectral density spectrogram of this `TimeSeries` with 'other'. Parameters ---------- other : `~gwpy.timeseries.TimeSeries` second time-series for cross spectral density calculation stride : `float` number of seconds in single PSD (column of spectrogram). fftlength : `float` number of seconds in single FFT. overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats nproc : `int` maximum number of independent frame reading processes, default is set to single-process file reading. Returns ------- spectrogram : `~gwpy.spectrogram.Spectrogram` time-frequency cross spectrogram as generated from the two input time-series.
[ "Calculate", "the", "cross", "spectral", "density", "spectrogram", "of", "this", "TimeSeries", "with", "other", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L707-L751
train
211,550
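A sketch of the `csd_spectrogram` record above, reusing the `other` (L1) series fetched for the `csd` sketch:

>>> csg = data.csd_spectrogram(other, 8, fftlength=4, overlap=2)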
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.highpass
def highpass(self, frequency, gpass=2, gstop=30, fstop=None, type='iir', filtfilt=True, **kwargs): """Filter this `TimeSeries` with a high-pass filter. Parameters ---------- frequency : `float` high-pass corner frequency gpass : `float` the maximum loss in the passband (dB). gstop : `float` the minimum attenuation in the stopband (dB). fstop : `float` stop-band edge frequency, defaults to `frequency * 1.5` type : `str` the filter type, either ``'iir'`` or ``'fir'`` **kwargs other keyword arguments are passed to :func:`gwpy.signal.filter_design.highpass` Returns ------- hpseries : `TimeSeries` a high-passed version of the input `TimeSeries` See Also -------- gwpy.signal.filter_design.highpass for details on the filter design TimeSeries.filter for details on how the filter is applied .. note:: When using `scipy < 0.16.0` some higher-order filters may be unstable. With `scipy >= 0.16.0` higher-order filters are decomposed into second-order-sections, and so are much more stable. """ # design filter filt = filter_design.highpass(frequency, self.sample_rate, fstop=fstop, gpass=gpass, gstop=gstop, analog=False, type=type, **kwargs) # apply filter return self.filter(*filt, filtfilt=filtfilt)
python
def highpass(self, frequency, gpass=2, gstop=30, fstop=None, type='iir', filtfilt=True, **kwargs): """Filter this `TimeSeries` with a high-pass filter. Parameters ---------- frequency : `float` high-pass corner frequency gpass : `float` the maximum loss in the passband (dB). gstop : `float` the minimum attenuation in the stopband (dB). fstop : `float` stop-band edge frequency, defaults to `frequency * 1.5` type : `str` the filter type, either ``'iir'`` or ``'fir'`` **kwargs other keyword arguments are passed to :func:`gwpy.signal.filter_design.highpass` Returns ------- hpseries : `TimeSeries` a high-passed version of the input `TimeSeries` See Also -------- gwpy.signal.filter_design.highpass for details on the filter design TimeSeries.filter for details on how the filter is applied .. note:: When using `scipy < 0.16.0` some higher-order filters may be unstable. With `scipy >= 0.16.0` higher-order filters are decomposed into second-order-sections, and so are much more stable. """ # design filter filt = filter_design.highpass(frequency, self.sample_rate, fstop=fstop, gpass=gpass, gstop=gstop, analog=False, type=type, **kwargs) # apply filter return self.filter(*filt, filtfilt=filtfilt)
[ "def", "highpass", "(", "self", ",", "frequency", ",", "gpass", "=", "2", ",", "gstop", "=", "30", ",", "fstop", "=", "None", ",", "type", "=", "'iir'", ",", "filtfilt", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# design filter", "filt", "="...
Filter this `TimeSeries` with a high-pass filter. Parameters ---------- frequency : `float` high-pass corner frequency gpass : `float` the maximum loss in the passband (dB). gstop : `float` the minimum attenuation in the stopband (dB). fstop : `float` stop-band edge frequency, defaults to `frequency * 1.5` type : `str` the filter type, either ``'iir'`` or ``'fir'`` **kwargs other keyword arguments are passed to :func:`gwpy.signal.filter_design.highpass` Returns ------- hpseries : `TimeSeries` a high-passed version of the input `TimeSeries` See Also -------- gwpy.signal.filter_design.highpass for details on the filter design TimeSeries.filter for details on how the filter is applied .. note:: When using `scipy < 0.16.0` some higher-order filters may be unstable. With `scipy >= 0.16.0` higher-order filters are decomposed into second-order-sections, and so are much more stable.
[ "Filter", "this", "TimeSeries", "with", "a", "high", "-", "pass", "filter", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L755-L803
train
211,551
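A sketch of the `highpass` record above; a 20 Hz corner is a common (illustrative) choice for stripping the low-frequency seismic wall from strain data:

>>> hp = data.highpass(20)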
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.bandpass
def bandpass(self, flow, fhigh, gpass=2, gstop=30, fstop=None, type='iir', filtfilt=True, **kwargs): """Filter this `TimeSeries` with a band-pass filter. Parameters ---------- flow : `float` lower corner frequency of pass band fhigh : `float` upper corner frequency of pass band gpass : `float` the maximum loss in the passband (dB). gstop : `float` the minimum attenuation in the stopband (dB). fstop : `tuple` of `float`, optional `(low, high)` edge-frequencies of stop band type : `str` the filter type, either ``'iir'`` or ``'fir'`` **kwargs other keyword arguments are passed to :func:`gwpy.signal.filter_design.bandpass` Returns ------- bpseries : `TimeSeries` a band-passed version of the input `TimeSeries` See Also -------- gwpy.signal.filter_design.bandpass for details on the filter design TimeSeries.filter for details on how the filter is applied .. note:: When using `scipy < 0.16.0` some higher-order filters may be unstable. With `scipy >= 0.16.0` higher-order filters are decomposed into second-order-sections, and so are much more stable. """ # design filter filt = filter_design.bandpass(flow, fhigh, self.sample_rate, fstop=fstop, gpass=gpass, gstop=gstop, analog=False, type=type, **kwargs) # apply filter return self.filter(*filt, filtfilt=filtfilt)
python
def bandpass(self, flow, fhigh, gpass=2, gstop=30, fstop=None, type='iir', filtfilt=True, **kwargs): """Filter this `TimeSeries` with a band-pass filter. Parameters ---------- flow : `float` lower corner frequency of pass band fhigh : `float` upper corner frequency of pass band gpass : `float` the maximum loss in the passband (dB). gstop : `float` the minimum attenuation in the stopband (dB). fstop : `tuple` of `float`, optional `(low, high)` edge-frequencies of stop band type : `str` the filter type, either ``'iir'`` or ``'fir'`` **kwargs other keyword arguments are passed to :func:`gwpy.signal.filter_design.bandpass` Returns ------- bpseries : `TimeSeries` a band-passed version of the input `TimeSeries` See Also -------- gwpy.signal.filter_design.bandpass for details on the filter design TimeSeries.filter for details on how the filter is applied .. note:: When using `scipy < 0.16.0` some higher-order filters may be unstable. With `scipy >= 0.16.0` higher-order filters are decomposed into second-order-sections, and so are much more stable. """ # design filter filt = filter_design.bandpass(flow, fhigh, self.sample_rate, fstop=fstop, gpass=gpass, gstop=gstop, analog=False, type=type, **kwargs) # apply filter return self.filter(*filt, filtfilt=filtfilt)
[ "def", "bandpass", "(", "self", ",", "flow", ",", "fhigh", ",", "gpass", "=", "2", ",", "gstop", "=", "30", ",", "fstop", "=", "None", ",", "type", "=", "'iir'", ",", "filtfilt", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# design filter", ...
Filter this `TimeSeries` with a band-pass filter. Parameters ---------- flow : `float` lower corner frequency of pass band fhigh : `float` upper corner frequency of pass band gpass : `float` the maximum loss in the passband (dB). gstop : `float` the minimum attenuation in the stopband (dB). fstop : `tuple` of `float`, optional `(low, high)` edge-frequencies of stop band type : `str` the filter type, either ``'iir'`` or ``'fir'`` **kwargs other keyword arguments are passed to :func:`gwpy.signal.filter_design.bandpass` Returns ------- bpseries : `TimeSeries` a band-passed version of the input `TimeSeries` See Also -------- gwpy.signal.filter_design.bandpass for details on the filter design TimeSeries.filter for details on how the filter is applied .. note:: When using `scipy < 0.16.0` some higher-order filters may be unstable. With `scipy >= 0.16.0` higher-order filters are decomposed into second-order-sections, and so are much more stable.
[ "Filter", "this", "TimeSeries", "with", "a", "band", "-", "pass", "filter", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L855-L906
train
211,552
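A sketch of the `bandpass` record above, using the same 50-250 Hz band as the filter-design example later in this collection:

>>> bp = data.bandpass(50, 250)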
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.resample
def resample(self, rate, window='hamming', ftype='fir', n=None): """Resample this Series to a new rate Parameters ---------- rate : `float` rate to which to resample this `Series` window : `str`, `numpy.ndarray`, optional window function to apply to signal in the Fourier domain, see :func:`scipy.signal.get_window` for details on acceptable formats, only used for `ftype='fir'` or irregular downsampling ftype : `str`, optional type of filter, either 'fir' or 'iir', defaults to 'fir' n : `int`, optional if `ftype='fir'` the number of taps in the filter, otherwise the order of the Chebyshev type I IIR filter Returns ------- Series a new Series with the resampling applied, and the same metadata """ if n is None and ftype == 'iir': n = 8 elif n is None: n = 60 if isinstance(rate, units.Quantity): rate = rate.value factor = (self.sample_rate.value / rate) # NOTE: use math.isclose when python >= 3.5 if numpy.isclose(factor, 1., rtol=1e-09, atol=0.): warnings.warn( "resample() rate matches current sample_rate ({}), returning " "input data unmodified; please double-check your " "parameters".format(self.sample_rate), UserWarning, ) return self # if integer down-sampling, use decimate if factor.is_integer(): if ftype == 'iir': filt = signal.cheby1(n, 0.05, 0.8/factor, output='zpk') else: filt = signal.firwin(n+1, 1./factor, window=window) return self.filter(filt, filtfilt=True)[::int(factor)] # otherwise use Fourier filtering else: nsamp = int(self.shape[0] * self.dx.value * rate) new = signal.resample(self.value, nsamp, window=window).view(self.__class__) new.__metadata_finalize__(self) new._unit = self.unit new.sample_rate = rate return new
python
def resample(self, rate, window='hamming', ftype='fir', n=None): """Resample this Series to a new rate Parameters ---------- rate : `float` rate to which to resample this `Series` window : `str`, `numpy.ndarray`, optional window function to apply to signal in the Fourier domain, see :func:`scipy.signal.get_window` for details on acceptable formats, only used for `ftype='fir'` or irregular downsampling ftype : `str`, optional type of filter, either 'fir' or 'iir', defaults to 'fir' n : `int`, optional if `ftype='fir'` the number of taps in the filter, otherwise the order of the Chebyshev type I IIR filter Returns ------- Series a new Series with the resampling applied, and the same metadata """ if n is None and ftype == 'iir': n = 8 elif n is None: n = 60 if isinstance(rate, units.Quantity): rate = rate.value factor = (self.sample_rate.value / rate) # NOTE: use math.isclose when python >= 3.5 if numpy.isclose(factor, 1., rtol=1e-09, atol=0.): warnings.warn( "resample() rate matches current sample_rate ({}), returning " "input data unmodified; please double-check your " "parameters".format(self.sample_rate), UserWarning, ) return self # if integer down-sampling, use decimate if factor.is_integer(): if ftype == 'iir': filt = signal.cheby1(n, 0.05, 0.8/factor, output='zpk') else: filt = signal.firwin(n+1, 1./factor, window=window) return self.filter(filt, filtfilt=True)[::int(factor)] # otherwise use Fourier filtering else: nsamp = int(self.shape[0] * self.dx.value * rate) new = signal.resample(self.value, nsamp, window=window).view(self.__class__) new.__metadata_finalize__(self) new._unit = self.unit new.sample_rate = rate return new
[ "def", "resample", "(", "self", ",", "rate", ",", "window", "=", "'hamming'", ",", "ftype", "=", "'fir'", ",", "n", "=", "None", ")", ":", "if", "n", "is", "None", "and", "ftype", "==", "'iir'", ":", "n", "=", "8", "elif", "n", "is", "None", ":...
Resample this Series to a new rate Parameters ---------- rate : `float` rate to which to resample this `Series` window : `str`, `numpy.ndarray`, optional window function to apply to signal in the Fourier domain, see :func:`scipy.signal.get_window` for details on acceptable formats, only used for `ftype='fir'` or irregular downsampling ftype : `str`, optional type of filter, either 'fir' or 'iir', defaults to 'fir' n : `int`, optional if `ftype='fir'` the number of taps in the filter, otherwise the order of the Chebyshev type I IIR filter Returns ------- Series a new Series with the resampling applied, and the same metadata
[ "Resample", "this", "Series", "to", "a", "new", "rate" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L908-L966
train
211,553
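A sketch of the `resample` record above; an integer factor takes the decimation path in the code, while a non-integer factor falls through to Fourier resampling (LOSC open data is served at 4096 Hz by default):

>>> down = data.resample(2048)   # 4096 -> 2048 Hz, integer factor, decimate
>>> odd = data.resample(3000)    # non-integer factor, Fourier resampling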
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.zpk
def zpk(self, zeros, poles, gain, analog=True, **kwargs):
    """Filter this `TimeSeries` by applying a zero-pole-gain filter

    Parameters
    ----------
    zeros : `array-like`
        list of zero frequencies (in Hertz)

    poles : `array-like`
        list of pole frequencies (in Hertz)

    gain : `float`
        DC gain of filter

    analog : `bool`, optional
        type of ZPK being applied, if `analog=True` all parameters
        will be converted in the Z-domain for digital filtering

    Returns
    -------
    timeseries : `TimeSeries`
        the filtered version of the input data

    See Also
    --------
    TimeSeries.filter
        for details on how a digital ZPK-format filter is applied

    Examples
    --------
    To apply a zpk filter with five poles at 100 Hz, and five zeros at
    1 Hz (giving an overall DC gain of 1e-10)::

        >>> data2 = data.zpk([100]*5, [1]*5, 1e-10)
    """
    return self.filter(zeros, poles, gain, analog=analog, **kwargs)
python
def zpk(self, zeros, poles, gain, analog=True, **kwargs):
    """Filter this `TimeSeries` by applying a zero-pole-gain filter

    Parameters
    ----------
    zeros : `array-like`
        list of zero frequencies (in Hertz)

    poles : `array-like`
        list of pole frequencies (in Hertz)

    gain : `float`
        DC gain of filter

    analog : `bool`, optional
        type of ZPK being applied, if `analog=True` all parameters
        will be converted in the Z-domain for digital filtering

    Returns
    -------
    timeseries : `TimeSeries`
        the filtered version of the input data

    See Also
    --------
    TimeSeries.filter
        for details on how a digital ZPK-format filter is applied

    Examples
    --------
    To apply a zpk filter with five poles at 100 Hz, and five zeros at
    1 Hz (giving an overall DC gain of 1e-10)::

        >>> data2 = data.zpk([100]*5, [1]*5, 1e-10)
    """
    return self.filter(zeros, poles, gain, analog=analog, **kwargs)
[ "def", "zpk", "(", "self", ",", "zeros", ",", "poles", ",", "gain", ",", "analog", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "filter", "(", "zeros", ",", "poles", ",", "gain", ",", "analog", "=", "analog", ",", "*", ...
Filter this `TimeSeries` by applying a zero-pole-gain filter Parameters ---------- zeros : `array-like` list of zero frequencies (in Hertz) poles : `array-like` list of pole frequencies (in Hertz) gain : `float` DC gain of filter analog : `bool`, optional type of ZPK being applied, if `analog=True` all parameters will be converted in the Z-domain for digital filtering Returns ------- timeseries : `TimeSeries` the filtered version of the input data See Also -------- TimeSeries.filter for details on how a digital ZPK-format filter is applied Examples -------- To apply a zpk filter with five poles at 100 Hz, and five zeros at 1 Hz (giving an overall DC gain of 1e-10):: >>> data2 = data.zpk([100]*5, [1]*5, 1e-10)
[ "Filter", "this", "TimeSeries", "by", "applying", "a", "zero", "-", "pole", "-", "gain", "filter" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L968-L1003
train
211,554
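A sketch of the `zpk` record above; since the method is a thin wrapper around `TimeSeries.filter`, the two calls below produce identical output:

>>> a = data.zpk([100]*5, [1]*5, 1e-10)
>>> b = data.filter([100]*5, [1]*5, 1e-10, analog=True)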
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.filter
def filter(self, *filt, **kwargs):
    """Filter this `TimeSeries` with an IIR or FIR filter

    Parameters
    ----------
    *filt : filter arguments
        1, 2, 3, or 4 arguments defining the filter to be applied,

            - an ``Nx1`` `~numpy.ndarray` of FIR coefficients
            - an ``Nx6`` `~numpy.ndarray` of SOS coefficients
            - ``(numerator, denominator)`` polynomials
            - ``(zeros, poles, gain)``
            - ``(A, B, C, D)`` 'state-space' representation

    filtfilt : `bool`, optional
        filter forward and backwards to preserve phase,
        default: `False`

    analog : `bool`, optional
        if `True`, filter coefficients will be converted from Hz
        to Z-domain digital representation, default: `False`

    inplace : `bool`, optional
        if `True`, this array will be overwritten with the filtered
        version, default: `False`

    **kwargs
        other keyword arguments are passed to the filter method

    Returns
    -------
    result : `TimeSeries`
        the filtered version of the input `TimeSeries`

    Notes
    -----
    IIR filters are converted either into cascading second-order
    sections (if `scipy >= 0.16` is installed), or into the
    ``(numerator, denominator)`` representation before being applied
    to this `TimeSeries`.

    .. note::

       When using `scipy < 0.16` some higher-order filters may be
       unstable. With `scipy >= 0.16` higher-order filters are
       decomposed into second-order-sections, and so are much more
       stable.

    FIR filters are passed directly to :func:`scipy.signal.lfilter` or
    :func:`scipy.signal.filtfilt` without any conversions.

    See also
    --------
    scipy.signal.sosfilt
        for details on filtering with second-order sections
        (`scipy >= 0.16` only)
    scipy.signal.sosfiltfilt
        for details on forward-backward filtering with second-order
        sections (`scipy >= 0.18` only)
    scipy.signal.lfilter
        for details on filtering (without SOS)
    scipy.signal.filtfilt
        for details on forward-backward filtering (without SOS)

    Raises
    ------
    ValueError
        if ``filt`` arguments cannot be interpreted properly

    Examples
    --------
    We can design an arbitrarily complicated filter using
    :mod:`gwpy.signal.filter_design`

    >>> from gwpy.signal import filter_design
    >>> bp = filter_design.bandpass(50, 250, 4096.)
    >>> notches = [filter_design.notch(f, 4096.) for f in (60, 120, 180)]
    >>> zpk = filter_design.concatenate_zpks(bp, *notches)

    We can then download some data from LOSC and apply the filter using
    `TimeSeries.filter`:

    >>> from gwpy.timeseries import TimeSeries
    >>> data = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
    >>> filtered = data.filter(zpk, filtfilt=True)

    We can plot the original signal, and the filtered version, cutting
    off either end of the filtered data to remove filter-edge artefacts

    >>> from gwpy.plot import Plot
    >>> plot = Plot(data, filtered[128:-128], separate=True)
    >>> plot.show()
    """
    # parse keyword arguments
    filtfilt = kwargs.pop('filtfilt', False)

    # parse filter
    form, filt = filter_design.parse_filter(
        filt,
        analog=kwargs.pop('analog', False),
        sample_rate=self.sample_rate.to('Hz').value,
    )
    if form == 'zpk':
        try:
            sos = signal.zpk2sos(*filt)
        except AttributeError:  # scipy < 0.16, no SOS filtering
            sos = None
            b, a = signal.zpk2tf(*filt)
    else:
        sos = None
        b, a = filt

    # perform filter
    kwargs.setdefault('axis', 0)
    if sos is not None and filtfilt:
        out = signal.sosfiltfilt(sos, self, **kwargs)
    elif sos is not None:
        out = signal.sosfilt(sos, self, **kwargs)
    elif filtfilt:
        out = signal.filtfilt(b, a, self, **kwargs)
    else:
        out = signal.lfilter(b, a, self, **kwargs)

    # format as type(self)
    new = out.view(type(self))
    new.__metadata_finalize__(self)
    new._unit = self.unit
    return new
python
def filter(self, *filt, **kwargs): """Filter this `TimeSeries` with an IIR or FIR filter Parameters ---------- *filt : filter arguments 1, 2, 3, or 4 arguments defining the filter to be applied, - an ``Nx1`` `~numpy.ndarray` of FIR coefficients - an ``Nx6`` `~numpy.ndarray` of SOS coefficients - ``(numerator, denominator)`` polynomials - ``(zeros, poles, gain)`` - ``(A, B, C, D)`` 'state-space' representation filtfilt : `bool`, optional filter forward and backwards to preserve phase, default: `False` analog : `bool`, optional if `True`, filter coefficients will be converted from Hz to Z-domain digital representation, default: `False` inplace : `bool`, optional if `True`, this array will be overwritten with the filtered version, default: `False` **kwargs other keyword arguments are passed to the filter method Returns ------- result : `TimeSeries` the filtered version of the input `TimeSeries` Notes ----- IIR filters are converted either into cascading second-order sections (if `scipy >= 0.16` is installed), or into the ``(numerator, denominator)`` representation before being applied to this `TimeSeries`. .. note:: When using `scipy < 0.16` some higher-order filters may be unstable. With `scipy >= 0.16` higher-order filters are decomposed into second-order-sections, and so are much more stable. FIR filters are passed directly to :func:`scipy.signal.lfilter` or :func:`scipy.signal.filtfilt` without any conversions. See also -------- scipy.signal.sosfilt for details on filtering with second-order sections (`scipy >= 0.16` only) scipy.signal.sosfiltfilt for details on forward-backward filtering with second-order sections (`scipy >= 0.18` only) scipy.signal.lfilter for details on filtering (without SOS) scipy.signal.filtfilt for details on forward-backward filtering (without SOS) Raises ------ ValueError if ``filt`` arguments cannot be interpreted properly Examples -------- We can design an arbitrarily complicated filter using :mod:`gwpy.signal.filter_design` >>> from gwpy.signal import filter_design >>> bp = filter_design.bandpass(50, 250, 4096.) >>> notches = [filter_design.notch(f, 4096.) for f in (60, 120, 180)] >>> zpk = filter_design.concatenate_zpks(bp, *notches) And then can download some data from LOSC to apply it using `TimeSeries.filter`: >>> from gwpy.timeseries import TimeSeries >>> data = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478) >>> filtered = data.filter(zpk, filtfilt=True) We can plot the original signal, and the filtered version, cutting off either end of the filtered data to remove filter-edge artefacts >>> from gwpy.plot import Plot >>> plot = Plot(data, filtered[128:-128], separate=True) >>> plot.show() """ # parse keyword arguments filtfilt = kwargs.pop('filtfilt', False) # parse filter form, filt = filter_design.parse_filter( filt, analog=kwargs.pop('analog', False), sample_rate=self.sample_rate.to('Hz').value, ) if form == 'zpk': try: sos = signal.zpk2sos(*filt) except AttributeError: # scipy < 0.16, no SOS filtering sos = None b, a = signal.zpk2tf(*filt) else: sos = None b, a = filt # perform filter kwargs.setdefault('axis', 0) if sos is not None and filtfilt: out = signal.sosfiltfilt(sos, self, **kwargs) elif sos is not None: out = signal.sosfilt(sos, self, **kwargs) elif filtfilt: out = signal.filtfilt(b, a, self, **kwargs) else: out = signal.lfilter(b, a, self, **kwargs) # format as type(self) new = out.view(type(self)) new.__metadata_finalize__(self) new._unit = self.unit return new
[ "def", "filter", "(", "self", ",", "*", "filt", ",", "*", "*", "kwargs", ")", ":", "# parse keyword arguments", "filtfilt", "=", "kwargs", ".", "pop", "(", "'filtfilt'", ",", "False", ")", "# parse filter", "form", ",", "filt", "=", "filter_design", ".", ...
Filter this `TimeSeries` with an IIR or FIR filter Parameters ---------- *filt : filter arguments 1, 2, 3, or 4 arguments defining the filter to be applied, - an ``Nx1`` `~numpy.ndarray` of FIR coefficients - an ``Nx6`` `~numpy.ndarray` of SOS coefficients - ``(numerator, denominator)`` polynomials - ``(zeros, poles, gain)`` - ``(A, B, C, D)`` 'state-space' representation filtfilt : `bool`, optional filter forward and backwards to preserve phase, default: `False` analog : `bool`, optional if `True`, filter coefficients will be converted from Hz to Z-domain digital representation, default: `False` inplace : `bool`, optional if `True`, this array will be overwritten with the filtered version, default: `False` **kwargs other keyword arguments are passed to the filter method Returns ------- result : `TimeSeries` the filtered version of the input `TimeSeries` Notes ----- IIR filters are converted either into cascading second-order sections (if `scipy >= 0.16` is installed), or into the ``(numerator, denominator)`` representation before being applied to this `TimeSeries`. .. note:: When using `scipy < 0.16` some higher-order filters may be unstable. With `scipy >= 0.16` higher-order filters are decomposed into second-order-sections, and so are much more stable. FIR filters are passed directly to :func:`scipy.signal.lfilter` or :func:`scipy.signal.filtfilt` without any conversions. See also -------- scipy.signal.sosfilt for details on filtering with second-order sections (`scipy >= 0.16` only) scipy.signal.sosfiltfilt for details on forward-backward filtering with second-order sections (`scipy >= 0.18` only) scipy.signal.lfilter for details on filtering (without SOS) scipy.signal.filtfilt for details on forward-backward filtering (without SOS) Raises ------ ValueError if ``filt`` arguments cannot be interpreted properly Examples -------- We can design an arbitrarily complicated filter using :mod:`gwpy.signal.filter_design` >>> from gwpy.signal import filter_design >>> bp = filter_design.bandpass(50, 250, 4096.) >>> notches = [filter_design.notch(f, 4096.) for f in (60, 120, 180)] >>> zpk = filter_design.concatenate_zpks(bp, *notches) And then we can download some data from LOSC to apply it using `TimeSeries.filter`: >>> from gwpy.timeseries import TimeSeries >>> data = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478) >>> filtered = data.filter(zpk, filtfilt=True) We can plot the original signal, and the filtered version, cutting off either end of the filtered data to remove filter-edge artefacts >>> from gwpy.plot import Plot >>> plot = Plot(data, filtered[128:-128], separate=True) >>> plot.show()
[ "Filter", "this", "TimeSeries", "with", "an", "IIR", "or", "FIR", "filter" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L1005-L1133
train
211,555
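A minimal usage sketch for the `TimeSeries.filter` record above, using synthetic white noise in place of LOSC data so it runs offline; the sample rate and band edges are illustrative assumptions, not values from the record:

import numpy
from gwpy.timeseries import TimeSeries
from gwpy.signal import filter_design

# white-noise stand-in for detector data, sampled at 4096 Hz
data = TimeSeries(numpy.random.normal(size=4096 * 8), sample_rate=4096)
# design a 50-250 Hz band-pass as a (zeros, poles, gain) tuple
zpk = filter_design.bandpass(50, 250, 4096.)
# forward-backward (zero-phase) application, as in the docstring example
filtered = data.filter(zpk, filtfilt=True)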
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.coherence
def coherence(self, other, fftlength=None, overlap=None, window='hann', **kwargs): """Calculate the frequency-coherence between this `TimeSeries` and another. Parameters ---------- other : `TimeSeries` `TimeSeries` signal to calculate coherence with fftlength : `float`, optional number of seconds in single FFT, defaults to a single FFT covering the full duration overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats **kwargs any other keyword arguments accepted by :func:`matplotlib.mlab.cohere` except ``NFFT``, ``window``, and ``noverlap`` which are superceded by the above keyword arguments Returns ------- coherence : `~gwpy.frequencyseries.FrequencySeries` the coherence `FrequencySeries` of this `TimeSeries` with the other Notes ----- If `self` and `other` have difference :attr:`TimeSeries.sample_rate` values, the higher sampled `TimeSeries` will be down-sampled to match the lower. See Also -------- :func:`matplotlib.mlab.cohere` for details of the coherence calculator """ from matplotlib import mlab from ..frequencyseries import FrequencySeries # check sampling rates if self.sample_rate.to('Hertz') != other.sample_rate.to('Hertz'): sampling = min(self.sample_rate.value, other.sample_rate.value) # resample higher rate series if self.sample_rate.value == sampling: other = other.resample(sampling) self_ = self else: self_ = self.resample(sampling) else: sampling = self.sample_rate.value self_ = self # check fft lengths if overlap is None: overlap = 0 else: overlap = int((overlap * self_.sample_rate).decompose().value) if fftlength is None: fftlength = int(self_.size/2. + overlap/2.) else: fftlength = int((fftlength * self_.sample_rate).decompose().value) if window is not None: kwargs['window'] = signal.get_window(window, fftlength) coh, freqs = mlab.cohere(self_.value, other.value, NFFT=fftlength, Fs=sampling, noverlap=overlap, **kwargs) out = coh.view(FrequencySeries) out.xindex = freqs out.epoch = self.epoch out.name = 'Coherence between %s and %s' % (self.name, other.name) out.unit = 'coherence' return out
python
def coherence(self, other, fftlength=None, overlap=None, window='hann', **kwargs): """Calculate the frequency-coherence between this `TimeSeries` and another. Parameters ---------- other : `TimeSeries` `TimeSeries` signal to calculate coherence with fftlength : `float`, optional number of seconds in single FFT, defaults to a single FFT covering the full duration overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats **kwargs any other keyword arguments accepted by :func:`matplotlib.mlab.cohere` except ``NFFT``, ``window``, and ``noverlap`` which are superceded by the above keyword arguments Returns ------- coherence : `~gwpy.frequencyseries.FrequencySeries` the coherence `FrequencySeries` of this `TimeSeries` with the other Notes ----- If `self` and `other` have difference :attr:`TimeSeries.sample_rate` values, the higher sampled `TimeSeries` will be down-sampled to match the lower. See Also -------- :func:`matplotlib.mlab.cohere` for details of the coherence calculator """ from matplotlib import mlab from ..frequencyseries import FrequencySeries # check sampling rates if self.sample_rate.to('Hertz') != other.sample_rate.to('Hertz'): sampling = min(self.sample_rate.value, other.sample_rate.value) # resample higher rate series if self.sample_rate.value == sampling: other = other.resample(sampling) self_ = self else: self_ = self.resample(sampling) else: sampling = self.sample_rate.value self_ = self # check fft lengths if overlap is None: overlap = 0 else: overlap = int((overlap * self_.sample_rate).decompose().value) if fftlength is None: fftlength = int(self_.size/2. + overlap/2.) else: fftlength = int((fftlength * self_.sample_rate).decompose().value) if window is not None: kwargs['window'] = signal.get_window(window, fftlength) coh, freqs = mlab.cohere(self_.value, other.value, NFFT=fftlength, Fs=sampling, noverlap=overlap, **kwargs) out = coh.view(FrequencySeries) out.xindex = freqs out.epoch = self.epoch out.name = 'Coherence between %s and %s' % (self.name, other.name) out.unit = 'coherence' return out
[ "def", "coherence", "(", "self", ",", "other", ",", "fftlength", "=", "None", ",", "overlap", "=", "None", ",", "window", "=", "'hann'", ",", "*", "*", "kwargs", ")", ":", "from", "matplotlib", "import", "mlab", "from", ".", ".", "frequencyseries", "im...
Calculate the frequency-coherence between this `TimeSeries` and another. Parameters ---------- other : `TimeSeries` `TimeSeries` signal to calculate coherence with fftlength : `float`, optional number of seconds in single FFT, defaults to a single FFT covering the full duration overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats **kwargs any other keyword arguments accepted by :func:`matplotlib.mlab.cohere` except ``NFFT``, ``window``, and ``noverlap`` which are superseded by the above keyword arguments Returns ------- coherence : `~gwpy.frequencyseries.FrequencySeries` the coherence `FrequencySeries` of this `TimeSeries` with the other Notes ----- If `self` and `other` have different :attr:`TimeSeries.sample_rate` values, the higher sampled `TimeSeries` will be down-sampled to match the lower. See Also -------- :func:`matplotlib.mlab.cohere` for details of the coherence calculator
[ "Calculate", "the", "frequency", "-", "coherence", "between", "this", "TimeSeries", "and", "another", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L1135-L1213
train
211,556
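A small offline sketch of `TimeSeries.coherence`, correlating two noisy copies of a shared 40 Hz line; the sample rate, line frequency, and FFT settings are all illustrative:

import numpy
from gwpy.timeseries import TimeSeries

fs = 1024
t = numpy.arange(16 * fs) / fs
line = numpy.sin(2 * numpy.pi * 40 * t)  # common signal buried in independent noise
a = TimeSeries(line + numpy.random.normal(size=t.size), sample_rate=fs)
b = TimeSeries(line + numpy.random.normal(size=t.size), sample_rate=fs)
coh = a.coherence(b, fftlength=2, overlap=1)  # ~1 near 40 Hz, ~0 elsewhere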
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.auto_coherence
def auto_coherence(self, dt, fftlength=None, overlap=None, window='hann', **kwargs): """Calculate the frequency-coherence between this `TimeSeries` and a time-shifted copy of itself. The standard :meth:`TimeSeries.coherence` is calculated between the input `TimeSeries` and a :meth:`cropped <TimeSeries.crop>` copy of itself. Since the cropped version will be shorter, the input series will be shortened to match. Parameters ---------- dt : `float` duration (in seconds) of time-shift fftlength : `float`, optional number of seconds in single FFT, defaults to a single FFT covering the full duration overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats **kwargs any other keyword arguments accepted by :func:`matplotlib.mlab.cohere` except ``NFFT``, ``window``, and ``noverlap`` which are superceded by the above keyword arguments Returns ------- coherence : `~gwpy.frequencyseries.FrequencySeries` the coherence `FrequencySeries` of this `TimeSeries` with the other Notes ----- The :meth:`TimeSeries.auto_coherence` will perform best when ``dt`` is approximately ``fftlength / 2``. See Also -------- :func:`matplotlib.mlab.cohere` for details of the coherence calculator """ # shifting self backwards is the same as forwards dt = abs(dt) # crop inputs self_ = self.crop(self.span[0], self.span[1] - dt) other = self.crop(self.span[0] + dt, self.span[1]) return self_.coherence(other, fftlength=fftlength, overlap=overlap, window=window, **kwargs)
python
def auto_coherence(self, dt, fftlength=None, overlap=None, window='hann', **kwargs): """Calculate the frequency-coherence between this `TimeSeries` and a time-shifted copy of itself. The standard :meth:`TimeSeries.coherence` is calculated between the input `TimeSeries` and a :meth:`cropped <TimeSeries.crop>` copy of itself. Since the cropped version will be shorter, the input series will be shortened to match. Parameters ---------- dt : `float` duration (in seconds) of time-shift fftlength : `float`, optional number of seconds in single FFT, defaults to a single FFT covering the full duration overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats **kwargs any other keyword arguments accepted by :func:`matplotlib.mlab.cohere` except ``NFFT``, ``window``, and ``noverlap`` which are superceded by the above keyword arguments Returns ------- coherence : `~gwpy.frequencyseries.FrequencySeries` the coherence `FrequencySeries` of this `TimeSeries` with the other Notes ----- The :meth:`TimeSeries.auto_coherence` will perform best when ``dt`` is approximately ``fftlength / 2``. See Also -------- :func:`matplotlib.mlab.cohere` for details of the coherence calculator """ # shifting self backwards is the same as forwards dt = abs(dt) # crop inputs self_ = self.crop(self.span[0], self.span[1] - dt) other = self.crop(self.span[0] + dt, self.span[1]) return self_.coherence(other, fftlength=fftlength, overlap=overlap, window=window, **kwargs)
[ "def", "auto_coherence", "(", "self", ",", "dt", ",", "fftlength", "=", "None", ",", "overlap", "=", "None", ",", "window", "=", "'hann'", ",", "*", "*", "kwargs", ")", ":", "# shifting self backwards is the same as forwards", "dt", "=", "abs", "(", "dt", ...
Calculate the frequency-coherence between this `TimeSeries` and a time-shifted copy of itself. The standard :meth:`TimeSeries.coherence` is calculated between the input `TimeSeries` and a :meth:`cropped <TimeSeries.crop>` copy of itself. Since the cropped version will be shorter, the input series will be shortened to match. Parameters ---------- dt : `float` duration (in seconds) of time-shift fftlength : `float`, optional number of seconds in single FFT, defaults to a single FFT covering the full duration overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats **kwargs any other keyword arguments accepted by :func:`matplotlib.mlab.cohere` except ``NFFT``, ``window``, and ``noverlap`` which are superseded by the above keyword arguments Returns ------- coherence : `~gwpy.frequencyseries.FrequencySeries` the coherence `FrequencySeries` of this `TimeSeries` with the other Notes ----- The :meth:`TimeSeries.auto_coherence` method will perform best when ``dt`` is approximately ``fftlength / 2``. See Also -------- :func:`matplotlib.mlab.cohere` for details of the coherence calculator
[ "Calculate", "the", "frequency", "-", "coherence", "between", "this", "TimeSeries", "and", "a", "time", "-", "shifted", "copy", "of", "itself", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L1215-L1271
train
211,557
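Following the record's note that ``dt`` works best near ``fftlength / 2``, a sketch of `TimeSeries.auto_coherence` on synthetic noise (all durations are illustrative):

import numpy
from gwpy.timeseries import TimeSeries

data = TimeSeries(numpy.random.normal(size=512 * 64), sample_rate=512)
# shift by 2 s against 4 s FFTs, per the docstring's recommendation
coh = data.auto_coherence(2, fftlength=4, overlap=2)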
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.coherence_spectrogram
def coherence_spectrogram(self, other, stride, fftlength=None, overlap=None, window='hann', nproc=1): """Calculate the coherence spectrogram between this `TimeSeries` and other. Parameters ---------- other : `TimeSeries` the second `TimeSeries` in this CSD calculation stride : `float` number of seconds in single PSD (column of spectrogram) fftlength : `float` number of seconds in single FFT overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats nproc : `int` number of parallel processes to use when calculating individual coherence spectra. Returns ------- spectrogram : `~gwpy.spectrogram.Spectrogram` time-frequency coherence spectrogram as generated from the input time-series. """ from ..spectrogram.coherence import from_timeseries return from_timeseries(self, other, stride, fftlength=fftlength, overlap=overlap, window=window, nproc=nproc)
python
def coherence_spectrogram(self, other, stride, fftlength=None, overlap=None, window='hann', nproc=1): """Calculate the coherence spectrogram between this `TimeSeries` and other. Parameters ---------- other : `TimeSeries` the second `TimeSeries` in this CSD calculation stride : `float` number of seconds in single PSD (column of spectrogram) fftlength : `float` number of seconds in single FFT overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats nproc : `int` number of parallel processes to use when calculating individual coherence spectra. Returns ------- spectrogram : `~gwpy.spectrogram.Spectrogram` time-frequency coherence spectrogram as generated from the input time-series. """ from ..spectrogram.coherence import from_timeseries return from_timeseries(self, other, stride, fftlength=fftlength, overlap=overlap, window=window, nproc=nproc)
[ "def", "coherence_spectrogram", "(", "self", ",", "other", ",", "stride", ",", "fftlength", "=", "None", ",", "overlap", "=", "None", ",", "window", "=", "'hann'", ",", "nproc", "=", "1", ")", ":", "from", ".", ".", "spectrogram", ".", "coherence", "im...
Calculate the coherence spectrogram between this `TimeSeries` and other. Parameters ---------- other : `TimeSeries` the second `TimeSeries` in this CSD calculation stride : `float` number of seconds in single PSD (column of spectrogram) fftlength : `float` number of seconds in single FFT overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats nproc : `int` number of parallel processes to use when calculating individual coherence spectra. Returns ------- spectrogram : `~gwpy.spectrogram.Spectrogram` time-frequency coherence spectrogram as generated from the input time-series.
[ "Calculate", "the", "coherence", "spectrogram", "between", "this", "TimeSeries", "and", "other", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L1273-L1311
train
211,558
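A sketch of `TimeSeries.coherence_spectrogram` between two noisy series sharing a 30 Hz line; the stride and FFT settings are arbitrary choices for illustration:

import numpy
from gwpy.timeseries import TimeSeries

fs = 256
t = numpy.arange(64 * fs) / fs
line = numpy.sin(2 * numpy.pi * 30 * t)
a = TimeSeries(line + numpy.random.normal(size=t.size), sample_rate=fs)
b = TimeSeries(line + numpy.random.normal(size=t.size), sample_rate=fs)
# one coherence spectrum (column) every 8 s, built from 2 s FFTs
specgram = a.coherence_spectrogram(b, 8, fftlength=2, overlap=1)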
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.rms
def rms(self, stride=1): """Calculate the root-mean-square value of this `TimeSeries` once per stride. Parameters ---------- stride : `float` stride (seconds) between RMS calculations Returns ------- rms : `TimeSeries` a new `TimeSeries` containing the RMS value with dt=stride """ stridesamp = int(stride * self.sample_rate.value) nsteps = int(self.size // stridesamp) # stride through TimeSeries, recording RMS data = numpy.zeros(nsteps) for step in range(nsteps): # find step TimeSeries idx = int(stridesamp * step) idx_end = idx + stridesamp stepseries = self[idx:idx_end] rms_ = numpy.sqrt(numpy.mean(numpy.abs(stepseries.value)**2)) data[step] = rms_ name = '%s %.2f-second RMS' % (self.name, stride) return self.__class__(data, channel=self.channel, t0=self.t0, name=name, sample_rate=(1/float(stride)))
python
def rms(self, stride=1): """Calculate the root-mean-square value of this `TimeSeries` once per stride. Parameters ---------- stride : `float` stride (seconds) between RMS calculations Returns ------- rms : `TimeSeries` a new `TimeSeries` containing the RMS value with dt=stride """ stridesamp = int(stride * self.sample_rate.value) nsteps = int(self.size // stridesamp) # stride through TimeSeries, recording RMS data = numpy.zeros(nsteps) for step in range(nsteps): # find step TimeSeries idx = int(stridesamp * step) idx_end = idx + stridesamp stepseries = self[idx:idx_end] rms_ = numpy.sqrt(numpy.mean(numpy.abs(stepseries.value)**2)) data[step] = rms_ name = '%s %.2f-second RMS' % (self.name, stride) return self.__class__(data, channel=self.channel, t0=self.t0, name=name, sample_rate=(1/float(stride)))
[ "def", "rms", "(", "self", ",", "stride", "=", "1", ")", ":", "stridesamp", "=", "int", "(", "stride", "*", "self", ".", "sample_rate", ".", "value", ")", "nsteps", "=", "int", "(", "self", ".", "size", "//", "stridesamp", ")", "# stride through TimeSe...
Calculate the root-mean-square value of this `TimeSeries` once per stride. Parameters ---------- stride : `float` stride (seconds) between RMS calculations Returns ------- rms : `TimeSeries` a new `TimeSeries` containing the RMS value with dt=stride
[ "Calculate", "the", "root", "-", "mean", "-", "square", "value", "of", "this", "TimeSeries", "once", "per", "stride", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L1313-L1340
train
211,559
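A sketch of `TimeSeries.rms` on unit-variance white noise, where the RMS trend should hover around 1 (sample rate and stride are illustrative):

import numpy
from gwpy.timeseries import TimeSeries

data = TimeSeries(numpy.random.normal(size=1024 * 32), sample_rate=1024)
trend = data.rms(stride=1)  # 32 samples, one per second, dt = 1 s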
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.demodulate
def demodulate(self, f, stride=1, exp=False, deg=True): """Compute the average magnitude and phase of this `TimeSeries` once per stride at a given frequency. Parameters ---------- f : `float` frequency (Hz) at which to demodulate the signal stride : `float`, optional stride (seconds) between calculations, defaults to 1 second exp : `bool`, optional return the magnitude and phase trends as one `TimeSeries` object representing a complex exponential, default: False deg : `bool`, optional if `exp=False`, calculates the phase in degrees Returns ------- mag, phase : `TimeSeries` if `exp=False`, returns a pair of `TimeSeries` objects representing magnitude and phase trends with `dt=stride` out : `TimeSeries` if `exp=True`, returns a single `TimeSeries` with magnitude and phase trends represented as `mag * exp(1j*phase)` with `dt=stride` Examples -------- Demodulation is useful when trying to examine steady sinusoidal signals we know to be contained within data. For instance, we can download some data from LOSC to look at trends of the amplitude and phase of LIGO Livingston's calibration line at 331.3 Hz: >>> from gwpy.timeseries import TimeSeries >>> data = TimeSeries.fetch_open_data('L1', 1131350417, 1131357617) We can demodulate the `TimeSeries` at 331.3 Hz with a stride of one minute: >>> amp, phase = data.demodulate(331.3, stride=60) We can then plot these trends to visualize fluctuations in the amplitude of the calibration line: >>> from gwpy.plot import Plot >>> plot = Plot(amp) >>> ax = plot.gca() >>> ax.set_ylabel('Strain Amplitude at 331.3 Hz') >>> plot.show() """ stridesamp = int(stride * self.sample_rate.value) nsteps = int(self.size // stridesamp) # stride through the TimeSeries and mix with a local oscillator, # taking the average over each stride out = type(self)(numpy.zeros(nsteps, dtype=complex)) out.__array_finalize__(self) out.sample_rate = 1 / float(stride) w = 2 * numpy.pi * f * self.dt.decompose().value for step in range(nsteps): istart = int(stridesamp * step) iend = istart + stridesamp idx = numpy.arange(istart, iend) mixed = 2 * numpy.exp(-1j * w * idx) * self.value[idx] out.value[step] = mixed.mean() if exp: return out mag = out.abs() phase = type(mag)(numpy.angle(out, deg=deg)) phase.__array_finalize__(out) phase.override_unit('deg' if deg else 'rad') return (mag, phase)
python
def demodulate(self, f, stride=1, exp=False, deg=True): """Compute the average magnitude and phase of this `TimeSeries` once per stride at a given frequency. Parameters ---------- f : `float` frequency (Hz) at which to demodulate the signal stride : `float`, optional stride (seconds) between calculations, defaults to 1 second exp : `bool`, optional return the magnitude and phase trends as one `TimeSeries` object representing a complex exponential, default: False deg : `bool`, optional if `exp=False`, calculates the phase in degrees Returns ------- mag, phase : `TimeSeries` if `exp=False`, returns a pair of `TimeSeries` objects representing magnitude and phase trends with `dt=stride` out : `TimeSeries` if `exp=True`, returns a single `TimeSeries` with magnitude and phase trends represented as `mag * exp(1j*phase)` with `dt=stride` Examples -------- Demodulation is useful when trying to examine steady sinusoidal signals we know to be contained within data. For instance, we can download some data from LOSC to look at trends of the amplitude and phase of LIGO Livingston's calibration line at 331.3 Hz: >>> from gwpy.timeseries import TimeSeries >>> data = TimeSeries.fetch_open_data('L1', 1131350417, 1131357617) We can demodulate the `TimeSeries` at 331.3 Hz with a stride of one minute: >>> amp, phase = data.demodulate(331.3, stride=60) We can then plot these trends to visualize fluctuations in the amplitude of the calibration line: >>> from gwpy.plot import Plot >>> plot = Plot(amp) >>> ax = plot.gca() >>> ax.set_ylabel('Strain Amplitude at 331.3 Hz') >>> plot.show() """ stridesamp = int(stride * self.sample_rate.value) nsteps = int(self.size // stridesamp) # stride through the TimeSeries and mix with a local oscillator, # taking the average over each stride out = type(self)(numpy.zeros(nsteps, dtype=complex)) out.__array_finalize__(self) out.sample_rate = 1 / float(stride) w = 2 * numpy.pi * f * self.dt.decompose().value for step in range(nsteps): istart = int(stridesamp * step) iend = istart + stridesamp idx = numpy.arange(istart, iend) mixed = 2 * numpy.exp(-1j * w * idx) * self.value[idx] out.value[step] = mixed.mean() if exp: return out mag = out.abs() phase = type(mag)(numpy.angle(out, deg=deg)) phase.__array_finalize__(out) phase.override_unit('deg' if deg else 'rad') return (mag, phase)
[ "def", "demodulate", "(", "self", ",", "f", ",", "stride", "=", "1", ",", "exp", "=", "False", ",", "deg", "=", "True", ")", ":", "stridesamp", "=", "int", "(", "stride", "*", "self", ".", "sample_rate", ".", "value", ")", "nsteps", "=", "int", "...
Compute the average magnitude and phase of this `TimeSeries` once per stride at a given frequency. Parameters ---------- f : `float` frequency (Hz) at which to demodulate the signal stride : `float`, optional stride (seconds) between calculations, defaults to 1 second exp : `bool`, optional return the magnitude and phase trends as one `TimeSeries` object representing a complex exponential, default: False deg : `bool`, optional if `exp=False`, calculates the phase in degrees Returns ------- mag, phase : `TimeSeries` if `exp=False`, returns a pair of `TimeSeries` objects representing magnitude and phase trends with `dt=stride` out : `TimeSeries` if `exp=True`, returns a single `TimeSeries` with magnitude and phase trends represented as `mag * exp(1j*phase)` with `dt=stride` Examples -------- Demodulation is useful when trying to examine steady sinusoidal signals we know to be contained within data. For instance, we can download some data from LOSC to look at trends of the amplitude and phase of LIGO Livingston's calibration line at 331.3 Hz: >>> from gwpy.timeseries import TimeSeries >>> data = TimeSeries.fetch_open_data('L1', 1131350417, 1131357617) We can demodulate the `TimeSeries` at 331.3 Hz with a stride of one minute: >>> amp, phase = data.demodulate(331.3, stride=60) We can then plot these trends to visualize fluctuations in the amplitude of the calibration line: >>> from gwpy.plot import Plot >>> plot = Plot(amp) >>> ax = plot.gca() >>> ax.set_ylabel('Strain Amplitude at 331.3 Hz') >>> plot.show()
[ "Compute", "the", "average", "magnitude", "and", "phase", "of", "this", "TimeSeries", "once", "per", "stride", "at", "a", "given", "frequency", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L1342-L1415
train
211,560
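An offline sketch of `TimeSeries.demodulate` on a pure sinusoid, so the recovered magnitude is known in advance; the amplitude and frequency are made up for illustration:

import numpy
from gwpy.timeseries import TimeSeries

fs = 4096
t = numpy.arange(8 * fs) / fs
data = TimeSeries(1e-2 * numpy.cos(2 * numpy.pi * 331.3 * t), sample_rate=fs)
mag, phase = data.demodulate(331.3, stride=1)  # mag ~ 1e-2 throughout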
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.taper
def taper(self, side='leftright'): """Taper the ends of this `TimeSeries` smoothly to zero. Parameters ---------- side : `str`, optional the side of the `TimeSeries` to taper, must be one of `'left'`, `'right'`, or `'leftright'` Returns ------- out : `TimeSeries` a copy of `self` tapered at one or both ends Raises ------ ValueError if `side` is not one of `('left', 'right', 'leftright')` Examples -------- To see the effect of the Planck-taper window, we can taper a sinusoidal `TimeSeries` at both ends: >>> import numpy >>> from gwpy.timeseries import TimeSeries >>> t = numpy.linspace(0, 1, 2048) >>> series = TimeSeries(numpy.cos(10.5*numpy.pi*t), times=t) >>> tapered = series.taper() We can plot it to see how the ends now vary smoothly from 0 to 1: >>> from gwpy.plot import Plot >>> plot = Plot(series, tapered, separate=True, sharex=True) >>> plot.show() Notes ----- The :meth:`TimeSeries.taper` automatically tapers from the second stationary point (local maximum or minimum) on the specified side of the input. However, the method will never taper more than half the full width of the `TimeSeries`, and will fail if there are no stationary points. See :func:`~gwpy.signal.window.planck` for the generic Planck taper window, and see :func:`scipy.signal.get_window` for other common window formats. """ # check window properties if side not in ('left', 'right', 'leftright'): raise ValueError("side must be one of 'left', 'right', " "or 'leftright'") out = self.copy() # identify the second stationary point away from each boundary, # else default to half the TimeSeries width nleft, nright = 0, 0 mini, = signal.argrelmin(out.value) maxi, = signal.argrelmax(out.value) if 'left' in side: nleft = max(mini[0], maxi[0]) nleft = min(nleft, self.size/2) if 'right' in side: nright = out.size - min(mini[-1], maxi[-1]) nright = min(nright, self.size/2) out *= planck(out.size, nleft=nleft, nright=nright) return out
python
def taper(self, side='leftright'): """Taper the ends of this `TimeSeries` smoothly to zero. Parameters ---------- side : `str`, optional the side of the `TimeSeries` to taper, must be one of `'left'`, `'right'`, or `'leftright'` Returns ------- out : `TimeSeries` a copy of `self` tapered at one or both ends Raises ------ ValueError if `side` is not one of `('left', 'right', 'leftright')` Examples -------- To see the effect of the Planck-taper window, we can taper a sinusoidal `TimeSeries` at both ends: >>> import numpy >>> from gwpy.timeseries import TimeSeries >>> t = numpy.linspace(0, 1, 2048) >>> series = TimeSeries(numpy.cos(10.5*numpy.pi*t), times=t) >>> tapered = series.taper() We can plot it to see how the ends now vary smoothly from 0 to 1: >>> from gwpy.plot import Plot >>> plot = Plot(series, tapered, separate=True, sharex=True) >>> plot.show() Notes ----- The :meth:`TimeSeries.taper` automatically tapers from the second stationary point (local maximum or minimum) on the specified side of the input. However, the method will never taper more than half the full width of the `TimeSeries`, and will fail if there are no stationary points. See :func:`~gwpy.signal.window.planck` for the generic Planck taper window, and see :func:`scipy.signal.get_window` for other common window formats. """ # check window properties if side not in ('left', 'right', 'leftright'): raise ValueError("side must be one of 'left', 'right', " "or 'leftright'") out = self.copy() # identify the second stationary point away from each boundary, # else default to half the TimeSeries width nleft, nright = 0, 0 mini, = signal.argrelmin(out.value) maxi, = signal.argrelmax(out.value) if 'left' in side: nleft = max(mini[0], maxi[0]) nleft = min(nleft, self.size/2) if 'right' in side: nright = out.size - min(mini[-1], maxi[-1]) nright = min(nright, self.size/2) out *= planck(out.size, nleft=nleft, nright=nright) return out
[ "def", "taper", "(", "self", ",", "side", "=", "'leftright'", ")", ":", "# check window properties", "if", "side", "not", "in", "(", "'left'", ",", "'right'", ",", "'leftright'", ")", ":", "raise", "ValueError", "(", "\"side must be one of 'left', 'right', \"", ...
Taper the ends of this `TimeSeries` smoothly to zero. Parameters ---------- side : `str`, optional the side of the `TimeSeries` to taper, must be one of `'left'`, `'right'`, or `'leftright'` Returns ------- out : `TimeSeries` a copy of `self` tapered at one or both ends Raises ------ ValueError if `side` is not one of `('left', 'right', 'leftright')` Examples -------- To see the effect of the Planck-taper window, we can taper a sinusoidal `TimeSeries` at both ends: >>> import numpy >>> from gwpy.timeseries import TimeSeries >>> t = numpy.linspace(0, 1, 2048) >>> series = TimeSeries(numpy.cos(10.5*numpy.pi*t), times=t) >>> tapered = series.taper() We can plot it to see how the ends now vary smoothly from 0 to 1: >>> from gwpy.plot import Plot >>> plot = Plot(series, tapered, separate=True, sharex=True) >>> plot.show() Notes ----- The :meth:`TimeSeries.taper` method automatically tapers from the second stationary point (local maximum or minimum) on the specified side of the input. However, the method will never taper more than half the full width of the `TimeSeries`, and will fail if there are no stationary points. See :func:`~gwpy.signal.window.planck` for the generic Planck taper window, and see :func:`scipy.signal.get_window` for other common window formats.
[ "Taper", "the", "ends", "of", "this", "TimeSeries", "smoothly", "to", "zero", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L1417-L1482
train
211,561
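Because `TimeSeries.taper` locates the taper width from stationary points, this sketch uses an oscillating series and, as a variation on the record's example, tapers only the left end:

import numpy
from gwpy.timeseries import TimeSeries

t = numpy.linspace(0, 4, 4096)
series = TimeSeries(numpy.sin(2 * numpy.pi * 5 * t), times=t)
tapered = series.taper(side='left')  # right end is left untouched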
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.whiten
def whiten(self, fftlength=None, overlap=0, method=DEFAULT_FFT_METHOD, window='hanning', detrend='constant', asd=None, fduration=2, highpass=None, **kwargs): """Whiten this `TimeSeries` using inverse spectrum truncation Parameters ---------- fftlength : `float`, optional FFT integration length (in seconds) for ASD estimation, default: choose based on sample rate overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 method : `str`, optional FFT-averaging method window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, default: ``'hanning'`` see :func:`scipy.signal.get_window` for details on acceptable formats detrend : `str`, optional type of detrending to do before FFT (see `~TimeSeries.detrend` for more details), default: ``'constant'`` asd : `~gwpy.frequencyseries.FrequencySeries`, optional the amplitude spectral density using which to whiten the data, overrides other ASD arguments, default: `None` fduration : `float`, optional duration (in seconds) of the time-domain FIR whitening filter, must be no longer than `fftlength`, default: 2 seconds highpass : `float`, optional highpass corner frequency (in Hz) of the FIR whitening filter, default: `None` **kwargs other keyword arguments are passed to the `TimeSeries.asd` method to estimate the amplitude spectral density `FrequencySeries` of this `TimeSeries` Returns ------- out : `TimeSeries` a whitened version of the input data with zero mean and unit variance See Also -------- TimeSeries.asd for details on the ASD calculation TimeSeries.convolve for details on convolution with the overlap-save method gwpy.signal.filter_design.fir_from_transfer for FIR filter design through spectrum truncation Notes ----- The accepted ``method`` arguments are: - ``'bartlett'`` : a mean average of non-overlapping periodograms - ``'median'`` : a median average of overlapping periodograms - ``'welch'`` : a mean average of overlapping periodograms The ``window`` argument is used in ASD estimation, FIR filter design, and in preventing spectral leakage in the output. Due to filter settle-in, a segment of length ``0.5*fduration`` will be corrupted at the beginning and end of the output. See `~TimeSeries.convolve` for more details. The input is detrended and the output normalised such that, if the input is stationary and Gaussian, then the output will have zero mean and unit variance. For more on inverse spectrum truncation, see arXiv:gr-qc/0509116. """ # compute the ASD fftlength = fftlength if fftlength else _fft_length_default(self.dt) if asd is None: asd = self.asd(fftlength, overlap=overlap, method=method, window=window, **kwargs) asd = asd.interpolate(1./self.duration.decompose().value) # design whitening filter, with highpass if requested ncorner = int(highpass / asd.df.decompose().value) if highpass else 0 ntaps = int((fduration * self.sample_rate).decompose().value) tdw = filter_design.fir_from_transfer(1/asd.value, ntaps=ntaps, window=window, ncorner=ncorner) # condition the input data and apply the whitening filter in_ = self.copy().detrend(detrend) out = in_.convolve(tdw, window=window) return out * numpy.sqrt(2 * in_.dt.decompose().value)
python
def whiten(self, fftlength=None, overlap=0, method=DEFAULT_FFT_METHOD, window='hanning', detrend='constant', asd=None, fduration=2, highpass=None, **kwargs): """Whiten this `TimeSeries` using inverse spectrum truncation Parameters ---------- fftlength : `float`, optional FFT integration length (in seconds) for ASD estimation, default: choose based on sample rate overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 method : `str`, optional FFT-averaging method window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, default: ``'hanning'`` see :func:`scipy.signal.get_window` for details on acceptable formats detrend : `str`, optional type of detrending to do before FFT (see `~TimeSeries.detrend` for more details), default: ``'constant'`` asd : `~gwpy.frequencyseries.FrequencySeries`, optional the amplitude spectral density using which to whiten the data, overrides other ASD arguments, default: `None` fduration : `float`, optional duration (in seconds) of the time-domain FIR whitening filter, must be no longer than `fftlength`, default: 2 seconds highpass : `float`, optional highpass corner frequency (in Hz) of the FIR whitening filter, default: `None` **kwargs other keyword arguments are passed to the `TimeSeries.asd` method to estimate the amplitude spectral density `FrequencySeries` of this `TimeSeries` Returns ------- out : `TimeSeries` a whitened version of the input data with zero mean and unit variance See Also -------- TimeSeries.asd for details on the ASD calculation TimeSeries.convolve for details on convolution with the overlap-save method gwpy.signal.filter_design.fir_from_transfer for FIR filter design through spectrum truncation Notes ----- The accepted ``method`` arguments are: - ``'bartlett'`` : a mean average of non-overlapping periodograms - ``'median'`` : a median average of overlapping periodograms - ``'welch'`` : a mean average of overlapping periodograms The ``window`` argument is used in ASD estimation, FIR filter design, and in preventing spectral leakage in the output. Due to filter settle-in, a segment of length ``0.5*fduration`` will be corrupted at the beginning and end of the output. See `~TimeSeries.convolve` for more details. The input is detrended and the output normalised such that, if the input is stationary and Gaussian, then the output will have zero mean and unit variance. For more on inverse spectrum truncation, see arXiv:gr-qc/0509116. """ # compute the ASD fftlength = fftlength if fftlength else _fft_length_default(self.dt) if asd is None: asd = self.asd(fftlength, overlap=overlap, method=method, window=window, **kwargs) asd = asd.interpolate(1./self.duration.decompose().value) # design whitening filter, with highpass if requested ncorner = int(highpass / asd.df.decompose().value) if highpass else 0 ntaps = int((fduration * self.sample_rate).decompose().value) tdw = filter_design.fir_from_transfer(1/asd.value, ntaps=ntaps, window=window, ncorner=ncorner) # condition the input data and apply the whitening filter in_ = self.copy().detrend(detrend) out = in_.convolve(tdw, window=window) return out * numpy.sqrt(2 * in_.dt.decompose().value)
[ "def", "whiten", "(", "self", ",", "fftlength", "=", "None", ",", "overlap", "=", "0", ",", "method", "=", "DEFAULT_FFT_METHOD", ",", "window", "=", "'hanning'", ",", "detrend", "=", "'constant'", ",", "asd", "=", "None", ",", "fduration", "=", "2", ",...
Whiten this `TimeSeries` using inverse spectrum truncation Parameters ---------- fftlength : `float`, optional FFT integration length (in seconds) for ASD estimation, default: choose based on sample rate overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 method : `str`, optional FFT-averaging method window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, default: ``'hanning'`` see :func:`scipy.signal.get_window` for details on acceptable formats detrend : `str`, optional type of detrending to do before FFT (see `~TimeSeries.detrend` for more details), default: ``'constant'`` asd : `~gwpy.frequencyseries.FrequencySeries`, optional the amplitude spectral density with which to whiten the data, overrides other ASD arguments, default: `None` fduration : `float`, optional duration (in seconds) of the time-domain FIR whitening filter, must be no longer than `fftlength`, default: 2 seconds highpass : `float`, optional highpass corner frequency (in Hz) of the FIR whitening filter, default: `None` **kwargs other keyword arguments are passed to the `TimeSeries.asd` method to estimate the amplitude spectral density `FrequencySeries` of this `TimeSeries` Returns ------- out : `TimeSeries` a whitened version of the input data with zero mean and unit variance See Also -------- TimeSeries.asd for details on the ASD calculation TimeSeries.convolve for details on convolution with the overlap-save method gwpy.signal.filter_design.fir_from_transfer for FIR filter design through spectrum truncation Notes ----- The accepted ``method`` arguments are: - ``'bartlett'`` : a mean average of non-overlapping periodograms - ``'median'`` : a median average of overlapping periodograms - ``'welch'`` : a mean average of overlapping periodograms The ``window`` argument is used in ASD estimation, FIR filter design, and in preventing spectral leakage in the output. Due to filter settle-in, a segment of length ``0.5*fduration`` will be corrupted at the beginning and end of the output. See `~TimeSeries.convolve` for more details. The input is detrended and the output normalised such that, if the input is stationary and Gaussian, then the output will have zero mean and unit variance. For more on inverse spectrum truncation, see arXiv:gr-qc/0509116.
[ "Whiten", "this", "TimeSeries", "using", "inverse", "spectrum", "truncation" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L1484-L1579
train
211,562
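A sketch of `TimeSeries.whiten` on coloured (random-walk) noise; away from the corrupted edges the output should come back with roughly zero mean and unit variance. The FFT settings are illustrative, and `'hann'` is passed explicitly since newer scipy releases drop the `'hanning'` window alias:

import numpy
from gwpy.timeseries import TimeSeries

fs = 2048
steps = numpy.random.normal(size=32 * fs)
data = TimeSeries(numpy.cumsum(steps), sample_rate=fs)  # red noise
white = data.whiten(fftlength=4, overlap=2, method='median', window='hann')
# the first and last fduration/2 = 1 s of `white` are filter settle-in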
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.gate
def gate(self, tzero=1.0, tpad=0.5, whiten=True, threshold=50., cluster_window=0.5, **whiten_kwargs): """Removes high amplitude peaks from data using inverse Planck window. Points will be discovered automatically using a provided threshold and clustered within a provided time window. Parameters ---------- tzero : `int`, optional half-width time duration in which the time series is set to zero tpad : `int`, optional half-width time duration in which the Planck window is tapered whiten : `bool`, optional if True, data will be whitened before gating points are discovered, use of this option is highly recommended threshold : `float`, optional amplitude threshold, if the data exceeds this value a gating window will be placed cluster_window : `float`, optional time duration over which gating points will be clustered **whiten_kwargs other keyword arguments that will be passed to the `TimeSeries.whiten` method if it is being used when discovering gating points Returns ------- out : `~gwpy.timeseries.TimeSeries` a copy of the original `TimeSeries` that has had gating windows applied Examples -------- Read data into a `TimeSeries` >>> from gwpy.timeseries import TimeSeries >>> data = TimeSeries.fetch_open_data('H1', 1135148571, 1135148771) Apply gating using custom arguments >>> gated = data.gate(tzero=1.0, tpad=1.0, threshold=10.0, fftlength=4, overlap=2, method='median') Plot the original data and the gated data, whiten both for visualization purposes >>> overlay = data.whiten(4,2,method='median').plot(dpi=150, label='Ungated', color='dodgerblue', zorder=2) >>> ax = overlay.gca() >>> ax.plot(gated.whiten(4,2,method='median'), label='Gated', color='orange', zorder=3) >>> ax.set_xlim(1135148661, 1135148681) >>> ax.legend() >>> overlay.show() """ try: from scipy.signal import find_peaks except ImportError as exc: exc.args = ("Must have scipy>=1.1.0 to utilize this method.",) raise # Find points to gate based on a threshold data = self.whiten(**whiten_kwargs) if whiten else self window_samples = cluster_window * data.sample_rate.value gates = find_peaks(abs(data.value), height=threshold, distance=window_samples)[0] out = self.copy() # Iterate over list of indices to gate and apply each one nzero = int(abs(tzero) * self.sample_rate.value) npad = int(abs(tpad) * self.sample_rate.value) half = nzero + npad ntotal = 2 * half for gate in gates: # Set the boundaries for windowed data in the original time series left_idx = max(0, gate - half) right_idx = min(gate + half, len(self.value) - 1) # Choose which part of the window will replace the data # This must be done explicitly for edge cases where a window # overlaps index 0 or the end of the time series left_idx_window = half - (gate - left_idx) right_idx_window = half + (right_idx - gate) window = 1 - planck(ntotal, nleft=npad, nright=npad) window = window[left_idx_window:right_idx_window] out[left_idx:right_idx] *= window return out
python
def gate(self, tzero=1.0, tpad=0.5, whiten=True, threshold=50., cluster_window=0.5, **whiten_kwargs): """Removes high amplitude peaks from data using inverse Planck window. Points will be discovered automatically using a provided threshold and clustered within a provided time window. Parameters ---------- tzero : `int`, optional half-width time duration in which the time series is set to zero tpad : `int`, optional half-width time duration in which the Planck window is tapered whiten : `bool`, optional if True, data will be whitened before gating points are discovered, use of this option is highly recommended threshold : `float`, optional amplitude threshold, if the data exceeds this value a gating window will be placed cluster_window : `float`, optional time duration over which gating points will be clustered **whiten_kwargs other keyword arguments that will be passed to the `TimeSeries.whiten` method if it is being used when discovering gating points Returns ------- out : `~gwpy.timeseries.TimeSeries` a copy of the original `TimeSeries` that has had gating windows applied Examples -------- Read data into a `TimeSeries` >>> from gwpy.timeseries import TimeSeries >>> data = TimeSeries.fetch_open_data('H1', 1135148571, 1135148771) Apply gating using custom arguments >>> gated = data.gate(tzero=1.0, tpad=1.0, threshold=10.0, fftlength=4, overlap=2, method='median') Plot the original data and the gated data, whiten both for visualization purposes >>> overlay = data.whiten(4,2,method='median').plot(dpi=150, label='Ungated', color='dodgerblue', zorder=2) >>> ax = overlay.gca() >>> ax.plot(gated.whiten(4,2,method='median'), label='Gated', color='orange', zorder=3) >>> ax.set_xlim(1135148661, 1135148681) >>> ax.legend() >>> overlay.show() """ try: from scipy.signal import find_peaks except ImportError as exc: exc.args = ("Must have scipy>=1.1.0 to utilize this method.",) raise # Find points to gate based on a threshold data = self.whiten(**whiten_kwargs) if whiten else self window_samples = cluster_window * data.sample_rate.value gates = find_peaks(abs(data.value), height=threshold, distance=window_samples)[0] out = self.copy() # Iterate over list of indices to gate and apply each one nzero = int(abs(tzero) * self.sample_rate.value) npad = int(abs(tpad) * self.sample_rate.value) half = nzero + npad ntotal = 2 * half for gate in gates: # Set the boundaries for windowed data in the original time series left_idx = max(0, gate - half) right_idx = min(gate + half, len(self.value) - 1) # Choose which part of the window will replace the data # This must be done explicitly for edge cases where a window # overlaps index 0 or the end of the time series left_idx_window = half - (gate - left_idx) right_idx_window = half + (right_idx - gate) window = 1 - planck(ntotal, nleft=npad, nright=npad) window = window[left_idx_window:right_idx_window] out[left_idx:right_idx] *= window return out
[ "def", "gate", "(", "self", ",", "tzero", "=", "1.0", ",", "tpad", "=", "0.5", ",", "whiten", "=", "True", ",", "threshold", "=", "50.", ",", "cluster_window", "=", "0.5", ",", "*", "*", "whiten_kwargs", ")", ":", "try", ":", "from", "scipy", ".", ...
Removes high amplitude peaks from data using inverse Planck window. Points will be discovered automatically using a provided threshold and clustered within a provided time window. Parameters ---------- tzero : `float`, optional half-width time duration in which the time series is set to zero tpad : `float`, optional half-width time duration in which the Planck window is tapered whiten : `bool`, optional if True, data will be whitened before gating points are discovered, use of this option is highly recommended threshold : `float`, optional amplitude threshold, if the data exceeds this value a gating window will be placed cluster_window : `float`, optional time duration over which gating points will be clustered **whiten_kwargs other keyword arguments that will be passed to the `TimeSeries.whiten` method if it is being used when discovering gating points Returns ------- out : `~gwpy.timeseries.TimeSeries` a copy of the original `TimeSeries` that has had gating windows applied Examples -------- Read data into a `TimeSeries` >>> from gwpy.timeseries import TimeSeries >>> data = TimeSeries.fetch_open_data('H1', 1135148571, 1135148771) Apply gating using custom arguments >>> gated = data.gate(tzero=1.0, tpad=1.0, threshold=10.0, fftlength=4, overlap=2, method='median') Plot the original data and the gated data, whitening both for visualization purposes >>> overlay = data.whiten(4,2,method='median').plot(dpi=150, label='Ungated', color='dodgerblue', zorder=2) >>> ax = overlay.gca() >>> ax.plot(gated.whiten(4,2,method='median'), label='Gated', color='orange', zorder=3) >>> ax.set_xlim(1135148661, 1135148681) >>> ax.legend() >>> overlay.show()
[ "Removes", "high", "amplitude", "peaks", "from", "data", "using", "inverse", "Planck", "window", ".", "Points", "will", "be", "discovered", "automatically", "using", "a", "provided", "threshold", "and", "clustered", "within", "a", "provided", "time", "window", "...
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L1581-L1672
train
211,563
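An offline sketch of `TimeSeries.gate`, injecting a loud spike into white noise so the threshold has something to find (requires scipy >= 1.1 for `scipy.signal.find_peaks`; the spike amplitude, threshold, and window durations are all made up for illustration):

import numpy
from gwpy.timeseries import TimeSeries

fs = 1024
data = TimeSeries(numpy.random.normal(size=32 * fs), sample_rate=fs)
data.value[16 * fs] += 100.  # a single loud glitch mid-stream
gated = data.gate(tzero=0.5, tpad=0.5, threshold=20.,
                  fftlength=4, overlap=2, method='median', window='hann')
# the 1 s around the glitch is zeroed, with 0.5 s Planck tapers either side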
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.convolve
def convolve(self, fir, window='hanning'): """Convolve this `TimeSeries` with an FIR filter using the overlap-save method Parameters ---------- fir : `numpy.ndarray` the time domain filter to convolve with window : `str`, optional window function to apply to boundaries, default: ``'hanning'`` see :func:`scipy.signal.get_window` for details on acceptable formats Returns ------- out : `TimeSeries` the result of the convolution See Also -------- scipy.signal.fftconvolve for details on the convolution scheme used here TimeSeries.filter for an alternative method designed for short filters Notes ----- The output `TimeSeries` is the same length and has the same timestamps as the input. Due to filter settle-in, a segment half the length of `fir` will be corrupted at the left and right boundaries. To prevent spectral leakage these segments will be windowed before convolving. """ pad = int(numpy.ceil(fir.size/2)) nfft = min(8*fir.size, self.size) # condition the input data in_ = self.copy() window = signal.get_window(window, fir.size) in_.value[:pad] *= window[:pad] in_.value[-pad:] *= window[-pad:] # if FFT length is long enough, perform only one convolution if nfft >= self.size/2: conv = signal.fftconvolve(in_.value, fir, mode='same') # else use the overlap-save algorithm else: nstep = nfft - 2*pad conv = numpy.zeros(self.size) # handle first chunk separately conv[:nfft-pad] = signal.fftconvolve(in_.value[:nfft], fir, mode='same')[:nfft-pad] # process chunks of length nstep k = nfft - pad while k < self.size - nfft + pad: yk = signal.fftconvolve(in_.value[k-pad:k+nstep+pad], fir, mode='same') conv[k:k+yk.size-2*pad] = yk[pad:-pad] k += nstep # handle last chunk separately conv[-nfft+pad:] = signal.fftconvolve(in_.value[-nfft:], fir, mode='same')[-nfft+pad:] out = type(self)(conv) out.__array_finalize__(self) return out
python
def convolve(self, fir, window='hanning'): """Convolve this `TimeSeries` with an FIR filter using the overlap-save method Parameters ---------- fir : `numpy.ndarray` the time domain filter to convolve with window : `str`, optional window function to apply to boundaries, default: ``'hanning'`` see :func:`scipy.signal.get_window` for details on acceptable formats Returns ------- out : `TimeSeries` the result of the convolution See Also -------- scipy.signal.fftconvolve for details on the convolution scheme used here TimeSeries.filter for an alternative method designed for short filters Notes ----- The output `TimeSeries` is the same length and has the same timestamps as the input. Due to filter settle-in, a segment half the length of `fir` will be corrupted at the left and right boundaries. To prevent spectral leakage these segments will be windowed before convolving. """ pad = int(numpy.ceil(fir.size/2)) nfft = min(8*fir.size, self.size) # condition the input data in_ = self.copy() window = signal.get_window(window, fir.size) in_.value[:pad] *= window[:pad] in_.value[-pad:] *= window[-pad:] # if FFT length is long enough, perform only one convolution if nfft >= self.size/2: conv = signal.fftconvolve(in_.value, fir, mode='same') # else use the overlap-save algorithm else: nstep = nfft - 2*pad conv = numpy.zeros(self.size) # handle first chunk separately conv[:nfft-pad] = signal.fftconvolve(in_.value[:nfft], fir, mode='same')[:nfft-pad] # process chunks of length nstep k = nfft - pad while k < self.size - nfft + pad: yk = signal.fftconvolve(in_.value[k-pad:k+nstep+pad], fir, mode='same') conv[k:k+yk.size-2*pad] = yk[pad:-pad] k += nstep # handle last chunk separately conv[-nfft+pad:] = signal.fftconvolve(in_.value[-nfft:], fir, mode='same')[-nfft+pad:] out = type(self)(conv) out.__array_finalize__(self) return out
[ "def", "convolve", "(", "self", ",", "fir", ",", "window", "=", "'hanning'", ")", ":", "pad", "=", "int", "(", "numpy", ".", "ceil", "(", "fir", ".", "size", "/", "2", ")", ")", "nfft", "=", "min", "(", "8", "*", "fir", ".", "size", ",", "sel...
Convolve this `TimeSeries` with an FIR filter using the overlap-save method Parameters ---------- fir : `numpy.ndarray` the time domain filter to convolve with window : `str`, optional window function to apply to boundaries, default: ``'hanning'`` see :func:`scipy.signal.get_window` for details on acceptable formats Returns ------- out : `TimeSeries` the result of the convolution See Also -------- scipy.signal.fftconvolve for details on the convolution scheme used here TimeSeries.filter for an alternative method designed for short filters Notes ----- The output `TimeSeries` is the same length and has the same timestamps as the input. Due to filter settle-in, a segment half the length of `fir` will be corrupted at the left and right boundaries. To prevent spectral leakage these segments will be windowed before convolving.
[ "Convolve", "this", "TimeSeries", "with", "an", "FIR", "filter", "using", "the", "overlap", "-", "save", "method" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L1674-L1738
train
211,564
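A minimal usage sketch of the `convolve` record above, assuming a stretch of white noise and a low-pass FIR designed with `scipy.signal.firwin` (the filter length and cutoff are illustrative choices, not taken from the record):

    from numpy.random import normal
    from scipy.signal import firwin
    from gwpy.timeseries import TimeSeries

    # four seconds of white noise sampled at 4096 Hz
    data = TimeSeries(normal(size=4096 * 4), sample_rate=4096)

    # a 128-tap FIR low-pass at 100 Hz (illustrative design)
    fir = firwin(128, 100, fs=4096)

    # output matches the input length and timestamps; roughly fir.size/2
    # samples at each boundary are windowed to suppress spectral leakage
    smoothed = data.convolve(fir, window='hanning')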
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.correlate
def correlate(self, mfilter, window='hanning', detrend='linear', whiten=False, wduration=2, highpass=None, **asd_kw): """Cross-correlate this `TimeSeries` with another signal Parameters ---------- mfilter : `TimeSeries` the time domain signal to correlate with window : `str`, optional window function to apply to timeseries prior to FFT, default: ``'hanning'`` see :func:`scipy.signal.get_window` for details on acceptable formats detrend : `str`, optional type of detrending to do before FFT (see `~TimeSeries.detrend` for more details), default: ``'linear'`` whiten : `bool`, optional boolean switch to enable (`True`) or disable (`False`) data whitening, default: `False` wduration : `float`, optional duration (in seconds) of the time-domain FIR whitening filter, only used if `whiten=True`, defaults to 2 seconds highpass : `float`, optional highpass corner frequency (in Hz) of the FIR whitening filter, only used if `whiten=True`, default: `None` **asd_kw keyword arguments to pass to `TimeSeries.asd` to generate an ASD, only used if `whiten=True` Returns ------- snr : `TimeSeries` the correlated signal-to-noise ratio (SNR) timeseries See Also -------- TimeSeries.asd for details on the ASD calculation TimeSeries.convolve for details on convolution with the overlap-save method Notes ----- The `window` argument is used in ASD estimation, whitening, and preventing spectral leakage in the output. It is not used to condition the matched-filter, which should be windowed before passing to this method. Due to filter settle-in, a segment half the length of `mfilter` will be corrupted at the beginning and end of the output. See `~TimeSeries.convolve` for more details. The input and matched-filter will be detrended, and the output will be normalised so that the SNR measures the number of standard deviations from the expected mean. """ self.is_compatible(mfilter) # condition data if whiten is True: fftlength = asd_kw.pop('fftlength', _fft_length_default(self.dt)) overlap = asd_kw.pop('overlap', None) if overlap is None: overlap = recommended_overlap(window) * fftlength asd = self.asd(fftlength, overlap, window=window, **asd_kw) # pad the matched-filter to prevent corruption npad = int(wduration * mfilter.sample_rate.decompose().value / 2) mfilter = mfilter.pad(npad) # whiten (with errors on division by zero) with numpy.errstate(all='raise'): in_ = self.whiten(window=window, fduration=wduration, asd=asd, highpass=highpass, detrend=detrend) mfilter = mfilter.whiten(window=window, fduration=wduration, asd=asd, highpass=highpass, detrend=detrend)[npad:-npad] else: in_ = self.detrend(detrend) mfilter = mfilter.detrend(detrend) # compute matched-filter SNR and normalise stdev = numpy.sqrt((mfilter.value**2).sum()) snr = in_.convolve(mfilter[::-1], window=window) / stdev snr.__array_finalize__(self) return snr
python
def correlate(self, mfilter, window='hanning', detrend='linear', whiten=False, wduration=2, highpass=None, **asd_kw): """Cross-correlate this `TimeSeries` with another signal Parameters ---------- mfilter : `TimeSeries` the time domain signal to correlate with window : `str`, optional window function to apply to timeseries prior to FFT, default: ``'hanning'`` see :func:`scipy.signal.get_window` for details on acceptable formats detrend : `str`, optional type of detrending to do before FFT (see `~TimeSeries.detrend` for more details), default: ``'linear'`` whiten : `bool`, optional boolean switch to enable (`True`) or disable (`False`) data whitening, default: `False` wduration : `float`, optional duration (in seconds) of the time-domain FIR whitening filter, only used if `whiten=True`, defaults to 2 seconds highpass : `float`, optional highpass corner frequency (in Hz) of the FIR whitening filter, only used if `whiten=True`, default: `None` **asd_kw keyword arguments to pass to `TimeSeries.asd` to generate an ASD, only used if `whiten=True` Returns ------- snr : `TimeSeries` the correlated signal-to-noise ratio (SNR) timeseries See Also -------- TimeSeries.asd for details on the ASD calculation TimeSeries.convolve for details on convolution with the overlap-save method Notes ----- The `window` argument is used in ASD estimation, whitening, and preventing spectral leakage in the output. It is not used to condition the matched-filter, which should be windowed before passing to this method. Due to filter settle-in, a segment half the length of `mfilter` will be corrupted at the beginning and end of the output. See `~TimeSeries.convolve` for more details. The input and matched-filter will be detrended, and the output will be normalised so that the SNR measures the number of standard deviations from the expected mean. """ self.is_compatible(mfilter) # condition data if whiten is True: fftlength = asd_kw.pop('fftlength', _fft_length_default(self.dt)) overlap = asd_kw.pop('overlap', None) if overlap is None: overlap = recommended_overlap(window) * fftlength asd = self.asd(fftlength, overlap, window=window, **asd_kw) # pad the matched-filter to prevent corruption npad = int(wduration * mfilter.sample_rate.decompose().value / 2) mfilter = mfilter.pad(npad) # whiten (with errors on division by zero) with numpy.errstate(all='raise'): in_ = self.whiten(window=window, fduration=wduration, asd=asd, highpass=highpass, detrend=detrend) mfilter = mfilter.whiten(window=window, fduration=wduration, asd=asd, highpass=highpass, detrend=detrend)[npad:-npad] else: in_ = self.detrend(detrend) mfilter = mfilter.detrend(detrend) # compute matched-filter SNR and normalise stdev = numpy.sqrt((mfilter.value**2).sum()) snr = in_.convolve(mfilter[::-1], window=window) / stdev snr.__array_finalize__(self) return snr
[ "def", "correlate", "(", "self", ",", "mfilter", ",", "window", "=", "'hanning'", ",", "detrend", "=", "'linear'", ",", "whiten", "=", "False", ",", "wduration", "=", "2", ",", "highpass", "=", "None", ",", "*", "*", "asd_kw", ")", ":", "self", ".", ...
Cross-correlate this `TimeSeries` with another signal Parameters ---------- mfilter : `TimeSeries` the time domain signal to correlate with window : `str`, optional window function to apply to timeseries prior to FFT, default: ``'hanning'`` see :func:`scipy.signal.get_window` for details on acceptable formats detrend : `str`, optional type of detrending to do before FFT (see `~TimeSeries.detrend` for more details), default: ``'linear'`` whiten : `bool`, optional boolean switch to enable (`True`) or disable (`False`) data whitening, default: `False` wduration : `float`, optional duration (in seconds) of the time-domain FIR whitening filter, only used if `whiten=True`, defaults to 2 seconds highpass : `float`, optional highpass corner frequency (in Hz) of the FIR whitening filter, only used if `whiten=True`, default: `None` **asd_kw keyword arguments to pass to `TimeSeries.asd` to generate an ASD, only used if `whiten=True` Returns ------- snr : `TimeSeries` the correlated signal-to-noise ratio (SNR) timeseries See Also -------- TimeSeries.asd for details on the ASD calculation TimeSeries.convolve for details on convolution with the overlap-save method Notes ----- The `window` argument is used in ASD estimation, whitening, and preventing spectral leakage in the output. It is not used to condition the matched-filter, which should be windowed before passing to this method. Due to filter settle-in, a segment half the length of `mfilter` will be corrupted at the beginning and end of the output. See `~TimeSeries.convolve` for more details. The input and matched-filter will be detrended, and the output will be normalised so that the SNR measures the number of standard deviations from the expected mean.
[ "Cross", "-", "correlate", "this", "TimeSeries", "with", "another", "signal" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L1740-L1828
train
211,565
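As a hedged sketch of matched filtering with `correlate`, reusing the noise-plus-glitch construction from the `q_transform` example further down; the injection amplitude and the choice to skip whitening are assumptions made for brevity:

    from numpy.random import normal
    from scipy.signal import gausspulse
    from gwpy.timeseries import TimeSeries

    # Gaussian noise with a sine-Gaussian pulse at 500 Hz
    noise = TimeSeries(normal(size=4096 * 4), sample_rate=4096, epoch=-2)
    glitch = TimeSeries(gausspulse(noise.times.value, fc=500) * 4,
                        sample_rate=4096, epoch=-2)
    data = noise + glitch

    # correlate against the noise-free template; pass whiten=True
    # (plus asd keywords) to whiten both inputs first
    snr = data.correlate(glitch)
    print(snr.max())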
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.detrend
def detrend(self, detrend='constant'): """Remove the trend from this `TimeSeries` This method just wraps :func:`scipy.signal.detrend` to return an object of the same type as the input. Parameters ---------- detrend : `str`, optional the type of detrending. Returns ------- detrended : `TimeSeries` the detrended input series See Also -------- scipy.signal.detrend for details on the options for the `detrend` argument, and how the operation is done """ data = signal.detrend(self.value, type=detrend).view(type(self)) data.__metadata_finalize__(self) data._unit = self.unit return data
python
def detrend(self, detrend='constant'): """Remove the trend from this `TimeSeries` This method just wraps :func:`scipy.signal.detrend` to return an object of the same type as the input. Parameters ---------- detrend : `str`, optional the type of detrending. Returns ------- detrended : `TimeSeries` the detrended input series See Also -------- scipy.signal.detrend for details on the options for the `detrend` argument, and how the operation is done """ data = signal.detrend(self.value, type=detrend).view(type(self)) data.__metadata_finalize__(self) data._unit = self.unit return data
[ "def", "detrend", "(", "self", ",", "detrend", "=", "'constant'", ")", ":", "data", "=", "signal", ".", "detrend", "(", "self", ".", "value", ",", "type", "=", "detrend", ")", ".", "view", "(", "type", "(", "self", ")", ")", "data", ".", "__metadat...
Remove the trend from this `TimeSeries` This method just wraps :func:`scipy.signal.detrend` to return an object of the same type as the input. Parameters ---------- detrend : `str`, optional the type of detrending. Returns ------- detrended : `TimeSeries` the detrended input series See Also -------- scipy.signal.detrend for details on the options for the `detrend` argument, and how the operation is done
[ "Remove", "the", "trend", "from", "this", "TimeSeries" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L1830-L1855
train
211,566
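A tiny illustration of `detrend`, where the linear drift added to the data is a made-up contaminant:

    import numpy
    from numpy.random import normal
    from gwpy.timeseries import TimeSeries

    # white noise plus an artificial linear drift
    drift = numpy.linspace(0, 10, 4096)
    data = TimeSeries(normal(size=4096) + drift, sample_rate=4096)

    # remove the drift; the default, 'constant', removes only the mean
    flat = data.detrend('linear')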
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.notch
def notch(self, frequency, type='iir', filtfilt=True, **kwargs): """Notch out a frequency in this `TimeSeries`. Parameters ---------- frequency : `float`, `~astropy.units.Quantity` frequency (default in Hertz) at which to apply the notch type : `str`, optional type of filter to apply, currently only 'iir' is supported **kwargs other keyword arguments to pass to `scipy.signal.iirdesign` Returns ------- notched : `TimeSeries` a notch-filtered copy of the input `TimeSeries` See Also -------- TimeSeries.filter for details on the filtering method scipy.signal.iirdesign for details on the IIR filter design method """ zpk = filter_design.notch(frequency, self.sample_rate.value, type=type, **kwargs) return self.filter(*zpk, filtfilt=filtfilt)
python
def notch(self, frequency, type='iir', filtfilt=True, **kwargs): """Notch out a frequency in this `TimeSeries`. Parameters ---------- frequency : `float`, `~astropy.units.Quantity` frequency (default in Hertz) at which to apply the notch type : `str`, optional type of filter to apply, currently only 'iir' is supported **kwargs other keyword arguments to pass to `scipy.signal.iirdesign` Returns ------- notched : `TimeSeries` a notch-filtered copy of the input `TimeSeries` See Also -------- TimeSeries.filter for details on the filtering method scipy.signal.iirdesign for details on the IIR filter design method """ zpk = filter_design.notch(frequency, self.sample_rate.value, type=type, **kwargs) return self.filter(*zpk, filtfilt=filtfilt)
[ "def", "notch", "(", "self", ",", "frequency", ",", "type", "=", "'iir'", ",", "filtfilt", "=", "True", ",", "*", "*", "kwargs", ")", ":", "zpk", "=", "filter_design", ".", "notch", "(", "frequency", ",", "self", ".", "sample_rate", ".", "value", ","...
Notch out a frequency in this `TimeSeries`. Parameters ---------- frequency : `float`, `~astropy.units.Quantity` frequency (default in Hertz) at which to apply the notch type : `str`, optional type of filter to apply, currently only 'iir' is supported **kwargs other keyword arguments to pass to `scipy.signal.iirdesign` Returns ------- notched : `TimeSeries` a notch-filtered copy of the input `TimeSeries` See Also -------- TimeSeries.filter for details on the filtering method scipy.signal.iirdesign for details on the IIR filter design method
[ "Notch", "out", "a", "frequency", "in", "this", "TimeSeries", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L1857-L1885
train
211,567
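A short sketch of `notch`, assuming a 60 Hz power-line contamination; the frequency is an illustrative choice:

    from numpy.random import normal
    from gwpy.timeseries import TimeSeries

    data = TimeSeries(normal(size=4096 * 4), sample_rate=4096)

    # zero-phase (filtfilt=True by default) IIR notch at 60 Hz
    notched = data.notch(60)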
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.q_gram
def q_gram(self, qrange=qtransform.DEFAULT_QRANGE, frange=qtransform.DEFAULT_FRANGE, mismatch=qtransform.DEFAULT_MISMATCH, snrthresh=5.5, **kwargs): """Scan a `TimeSeries` using the multi-Q transform and return an `EventTable` of the most significant tiles Parameters ---------- qrange : `tuple` of `float`, optional `(low, high)` range of Qs to scan frange : `tuple` of `float`, optional `(low, high)` range of frequencies to scan mismatch : `float`, optional maximum allowed fractional mismatch between neighbouring tiles snrthresh : `float`, optional lower inclusive threshold on individual tile SNR to keep in the table **kwargs other keyword arguments to be passed to :meth:`QTiling.transform`, including ``'epoch'`` and ``'search'`` Returns ------- qgram : `EventTable` a table of time-frequency tiles on the most significant `QPlane` See Also -------- TimeSeries.q_transform for a method to interpolate the raw Q-transform over a regularly gridded spectrogram gwpy.signal.qtransform for code and documentation on how the Q-transform is implemented gwpy.table.EventTable.tile to render this `EventTable` as a collection of polygons Notes ----- Only tiles with signal energy greater than or equal to `snrthresh ** 2 / 2` will be stored in the output `EventTable`. The table columns are ``'time'``, ``'duration'``, ``'frequency'``, ``'bandwidth'``, and ``'energy'``. """ qscan, _ = qtransform.q_scan(self, mismatch=mismatch, qrange=qrange, frange=frange, **kwargs) qgram = qscan.table(snrthresh=snrthresh) return qgram
python
def q_gram(self, qrange=qtransform.DEFAULT_QRANGE, frange=qtransform.DEFAULT_FRANGE, mismatch=qtransform.DEFAULT_MISMATCH, snrthresh=5.5, **kwargs): """Scan a `TimeSeries` using the multi-Q transform and return an `EventTable` of the most significant tiles Parameters ---------- qrange : `tuple` of `float`, optional `(low, high)` range of Qs to scan frange : `tuple` of `float`, optional `(low, high)` range of frequencies to scan mismatch : `float`, optional maximum allowed fractional mismatch between neighbouring tiles snrthresh : `float`, optional lower inclusive threshold on individual tile SNR to keep in the table **kwargs other keyword arguments to be passed to :meth:`QTiling.transform`, including ``'epoch'`` and ``'search'`` Returns ------- qgram : `EventTable` a table of time-frequency tiles on the most significant `QPlane` See Also -------- TimeSeries.q_transform for a method to interpolate the raw Q-transform over a regularly gridded spectrogram gwpy.signal.qtransform for code and documentation on how the Q-transform is implemented gwpy.table.EventTable.tile to render this `EventTable` as a collection of polygons Notes ----- Only tiles with signal energy greater than or equal to `snrthresh ** 2 / 2` will be stored in the output `EventTable`. The table columns are ``'time'``, ``'duration'``, ``'frequency'``, ``'bandwidth'``, and ``'energy'``. """ qscan, _ = qtransform.q_scan(self, mismatch=mismatch, qrange=qrange, frange=frange, **kwargs) qgram = qscan.table(snrthresh=snrthresh) return qgram
[ "def", "q_gram", "(", "self", ",", "qrange", "=", "qtransform", ".", "DEFAULT_QRANGE", ",", "frange", "=", "qtransform", ".", "DEFAULT_FRANGE", ",", "mismatch", "=", "qtransform", ".", "DEFAULT_MISMATCH", ",", "snrthresh", "=", "5.5", ",", "*", "*", "kwargs"...
Scan a `TimeSeries` using the multi-Q transform and return an `EventTable` of the most significant tiles Parameters ---------- qrange : `tuple` of `float`, optional `(low, high)` range of Qs to scan frange : `tuple` of `float`, optional `(low, high)` range of frequencies to scan mismatch : `float`, optional maximum allowed fractional mismatch between neighbouring tiles snrthresh : `float`, optional lower inclusive threshold on individual tile SNR to keep in the table **kwargs other keyword arguments to be passed to :meth:`QTiling.transform`, including ``'epoch'`` and ``'search'`` Returns ------- qgram : `EventTable` a table of time-frequency tiles on the most significant `QPlane` See Also -------- TimeSeries.q_transform for a method to interpolate the raw Q-transform over a regularly gridded spectrogram gwpy.signal.qtransform for code and documentation on how the Q-transform is implemented gwpy.table.EventTable.tile to render this `EventTable` as a collection of polygons Notes ----- Only tiles with signal energy greater than or equal to `snrthresh ** 2 / 2` will be stored in the output `EventTable`. The table columns are ``'time'``, ``'duration'``, ``'frequency'``, ``'bandwidth'``, and ``'energy'``.
[ "Scan", "a", "TimeSeries", "using", "the", "multi", "-", "Q", "transform", "and", "return", "an", "EventTable", "of", "the", "most", "significant", "tiles" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L1887-L1940
train
211,568
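A sketch of `q_gram` on the same noise-plus-glitch data used elsewhere in these records; the `qrange` and `snrthresh` values here are assumptions:

    from numpy.random import normal
    from scipy.signal import gausspulse
    from gwpy.timeseries import TimeSeries

    noise = TimeSeries(normal(loc=1, size=4096 * 4), sample_rate=4096, epoch=-2)
    glitch = TimeSeries(gausspulse(noise.times.value, fc=500) * 4,
                        sample_rate=4096, epoch=-2)
    data = noise + glitch

    # keep only tiles with energy >= snrthresh**2 / 2
    qgram = data.q_gram(qrange=(4, 64), snrthresh=6)
    print(qgram['energy'].max())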
gwpy/gwpy
gwpy/timeseries/timeseries.py
TimeSeries.q_transform
def q_transform(self, qrange=qtransform.DEFAULT_QRANGE, frange=qtransform.DEFAULT_FRANGE, gps=None, search=.5, tres="<default>", fres="<default>", logf=False, norm='median', mismatch=qtransform.DEFAULT_MISMATCH, outseg=None, whiten=True, fduration=2, highpass=None, **asd_kw): """Scan a `TimeSeries` using the multi-Q transform and return an interpolated high-resolution spectrogram By default, this method returns a high-resolution spectrogram in both time and frequency, which can result in a large memory footprint. If you know that you only need a subset of the output for, say, a figure, consider using ``outseg`` and the other keyword arguments to restrict the size of the returned data. Parameters ---------- qrange : `tuple` of `float`, optional `(low, high)` range of Qs to scan frange : `tuple` of `float`, optional `(low, high)` range of frequencies to scan gps : `float`, optional central time of interest for determining the loudest Q-plane search : `float`, optional window around `gps` in which to find peak energies, only used if `gps` is given tres : `float`, optional desired time resolution (seconds) of output `Spectrogram`, default is `abs(outseg) / 1000.` fres : `float`, `int`, `None`, optional desired frequency resolution (Hertz) of output `Spectrogram`, or, if ``logf=True``, the number of frequency samples; give `None` to skip this step and return the original resolution, default is 0.5 Hz or 500 frequency samples logf : `bool`, optional boolean switch to enable (`True`) or disable (`False`) use of log-sampled frequencies in the output `Spectrogram`, if `True` then `fres` is interpreted as a number of frequency samples, default: `False` norm : `bool`, `str`, optional whether to normalize the returned Q-transform output, or how, default: `True` (``'median'``), other options: `False`, ``'mean'`` mismatch : `float` maximum allowed fractional mismatch between neighbouring tiles outseg : `~gwpy.segments.Segment`, optional GPS `[start, stop)` segment for output `Spectrogram`, default is the full duration of the input whiten : `bool`, `~gwpy.frequencyseries.FrequencySeries`, optional boolean switch to enable (`True`) or disable (`False`) data whitening, or an ASD `~gwpy.frequencyseries.FrequencySeries` with which to whiten the data fduration : `float`, optional duration (in seconds) of the time-domain FIR whitening filter, only used if `whiten` is not `False`, defaults to 2 seconds highpass : `float`, optional highpass corner frequency (in Hz) of the FIR whitening filter, used only if `whiten` is not `False`, default: `None` **asd_kw keyword arguments to pass to `TimeSeries.asd` to generate an ASD to use when whitening the data Returns ------- out : `~gwpy.spectrogram.Spectrogram` output `Spectrogram` of normalised Q energy See Also -------- TimeSeries.asd for documentation on acceptable `**asd_kw` TimeSeries.whiten for documentation on how the whitening is done gwpy.signal.qtransform for code and documentation on how the Q-transform is implemented Notes ----- This method will return a `Spectrogram` of dtype ``float32`` if ``norm`` is given, and ``float64`` otherwise. To optimize plot rendering with `~matplotlib.axes.Axes.pcolormesh`, the output `~gwpy.spectrogram.Spectrogram` can be given a log-sampled frequency axis by passing `logf=True` at runtime. The `fres` argument is then the number of points on the frequency axis. Note, this is incompatible with `~matplotlib.axes.Axes.imshow`. It is also highly recommended to use the `outseg` keyword argument when only a small window around a given GPS time is of interest. This will speed up this method a little, but can greatly speed up rendering the resulting `Spectrogram` using `pcolormesh`. If you aren't going to use `pcolormesh` in the end, don't worry. Examples -------- >>> from numpy.random import normal >>> from scipy.signal import gausspulse >>> from gwpy.timeseries import TimeSeries Generate a `TimeSeries` containing Gaussian noise sampled at 4096 Hz, centred on GPS time 0, with a sine-Gaussian pulse ('glitch') at 500 Hz: >>> noise = TimeSeries(normal(loc=1, size=4096*4), sample_rate=4096, epoch=-2) >>> glitch = TimeSeries(gausspulse(noise.times.value, fc=500) * 4, sample_rate=4096) >>> data = noise + glitch Compute and plot the Q-transform of these data: >>> q = data.q_transform() >>> plot = q.plot() >>> ax = plot.gca() >>> ax.set_xlim(-.2, .2) >>> ax.set_epoch(0) >>> plot.show() """ # noqa: E501 from ..frequencyseries import FrequencySeries # condition data if whiten is True: # generate ASD dynamically window = asd_kw.pop('window', 'hann') fftlength = asd_kw.pop('fftlength', _fft_length_default(self.dt)) overlap = asd_kw.pop('overlap', None) if overlap is None and fftlength == self.duration.value: asd_kw['method'] = DEFAULT_FFT_METHOD overlap = 0 elif overlap is None: overlap = recommended_overlap(window) * fftlength whiten = self.asd(fftlength, overlap, window=window, **asd_kw) if isinstance(whiten, FrequencySeries): # apply whitening (with error on division by zero) with numpy.errstate(all='raise'): data = self.whiten(asd=whiten, fduration=fduration, highpass=highpass) else: data = self # determine search window if gps is None: search = None elif search is not None: search = Segment(gps-search/2, gps+search/2) & self.span qgram, _ = qtransform.q_scan( data, frange=frange, qrange=qrange, norm=norm, mismatch=mismatch, search=search) return qgram.interpolate( tres=tres, fres=fres, logf=logf, outseg=outseg)
python
def q_transform(self, qrange=qtransform.DEFAULT_QRANGE, frange=qtransform.DEFAULT_FRANGE, gps=None, search=.5, tres="<default>", fres="<default>", logf=False, norm='median', mismatch=qtransform.DEFAULT_MISMATCH, outseg=None, whiten=True, fduration=2, highpass=None, **asd_kw): """Scan a `TimeSeries` using the multi-Q transform and return an interpolated high-resolution spectrogram By default, this method returns a high-resolution spectrogram in both time and frequency, which can result in a large memory footprint. If you know that you only need a subset of the output for, say, a figure, consider using ``outseg`` and the other keyword arguments to restrict the size of the returned data. Parameters ---------- qrange : `tuple` of `float`, optional `(low, high)` range of Qs to scan frange : `tuple` of `float`, optional `(low, high)` range of frequencies to scan gps : `float`, optional central time of interest for determining the loudest Q-plane search : `float`, optional window around `gps` in which to find peak energies, only used if `gps` is given tres : `float`, optional desired time resolution (seconds) of output `Spectrogram`, default is `abs(outseg) / 1000.` fres : `float`, `int`, `None`, optional desired frequency resolution (Hertz) of output `Spectrogram`, or, if ``logf=True``, the number of frequency samples; give `None` to skip this step and return the original resolution, default is 0.5 Hz or 500 frequency samples logf : `bool`, optional boolean switch to enable (`True`) or disable (`False`) use of log-sampled frequencies in the output `Spectrogram`, if `True` then `fres` is interpreted as a number of frequency samples, default: `False` norm : `bool`, `str`, optional whether to normalize the returned Q-transform output, or how, default: `True` (``'median'``), other options: `False`, ``'mean'`` mismatch : `float` maximum allowed fractional mismatch between neighbouring tiles outseg : `~gwpy.segments.Segment`, optional GPS `[start, stop)` segment for output `Spectrogram`, default is the full duration of the input whiten : `bool`, `~gwpy.frequencyseries.FrequencySeries`, optional boolean switch to enable (`True`) or disable (`False`) data whitening, or an ASD `~gwpy.frequencyseries.FrequencySeries` with which to whiten the data fduration : `float`, optional duration (in seconds) of the time-domain FIR whitening filter, only used if `whiten` is not `False`, defaults to 2 seconds highpass : `float`, optional highpass corner frequency (in Hz) of the FIR whitening filter, used only if `whiten` is not `False`, default: `None` **asd_kw keyword arguments to pass to `TimeSeries.asd` to generate an ASD to use when whitening the data Returns ------- out : `~gwpy.spectrogram.Spectrogram` output `Spectrogram` of normalised Q energy See Also -------- TimeSeries.asd for documentation on acceptable `**asd_kw` TimeSeries.whiten for documentation on how the whitening is done gwpy.signal.qtransform for code and documentation on how the Q-transform is implemented Notes ----- This method will return a `Spectrogram` of dtype ``float32`` if ``norm`` is given, and ``float64`` otherwise. To optimize plot rendering with `~matplotlib.axes.Axes.pcolormesh`, the output `~gwpy.spectrogram.Spectrogram` can be given a log-sampled frequency axis by passing `logf=True` at runtime. The `fres` argument is then the number of points on the frequency axis. Note, this is incompatible with `~matplotlib.axes.Axes.imshow`. It is also highly recommended to use the `outseg` keyword argument when only a small window around a given GPS time is of interest. This will speed up this method a little, but can greatly speed up rendering the resulting `Spectrogram` using `pcolormesh`. If you aren't going to use `pcolormesh` in the end, don't worry. Examples -------- >>> from numpy.random import normal >>> from scipy.signal import gausspulse >>> from gwpy.timeseries import TimeSeries Generate a `TimeSeries` containing Gaussian noise sampled at 4096 Hz, centred on GPS time 0, with a sine-Gaussian pulse ('glitch') at 500 Hz: >>> noise = TimeSeries(normal(loc=1, size=4096*4), sample_rate=4096, epoch=-2) >>> glitch = TimeSeries(gausspulse(noise.times.value, fc=500) * 4, sample_rate=4096) >>> data = noise + glitch Compute and plot the Q-transform of these data: >>> q = data.q_transform() >>> plot = q.plot() >>> ax = plot.gca() >>> ax.set_xlim(-.2, .2) >>> ax.set_epoch(0) >>> plot.show() """ # noqa: E501 from ..frequencyseries import FrequencySeries # condition data if whiten is True: # generate ASD dynamically window = asd_kw.pop('window', 'hann') fftlength = asd_kw.pop('fftlength', _fft_length_default(self.dt)) overlap = asd_kw.pop('overlap', None) if overlap is None and fftlength == self.duration.value: asd_kw['method'] = DEFAULT_FFT_METHOD overlap = 0 elif overlap is None: overlap = recommended_overlap(window) * fftlength whiten = self.asd(fftlength, overlap, window=window, **asd_kw) if isinstance(whiten, FrequencySeries): # apply whitening (with error on division by zero) with numpy.errstate(all='raise'): data = self.whiten(asd=whiten, fduration=fduration, highpass=highpass) else: data = self # determine search window if gps is None: search = None elif search is not None: search = Segment(gps-search/2, gps+search/2) & self.span qgram, _ = qtransform.q_scan( data, frange=frange, qrange=qrange, norm=norm, mismatch=mismatch, search=search) return qgram.interpolate( tres=tres, fres=fres, logf=logf, outseg=outseg)
[ "def", "q_transform", "(", "self", ",", "qrange", "=", "qtransform", ".", "DEFAULT_QRANGE", ",", "frange", "=", "qtransform", ".", "DEFAULT_FRANGE", ",", "gps", "=", "None", ",", "search", "=", ".5", ",", "tres", "=", "\"<default>\"", ",", "fres", "=", "...
Scan a `TimeSeries` using the multi-Q transform and return an interpolated high-resolution spectrogram By default, this method returns a high-resolution spectrogram in both time and frequency, which can result in a large memory footprint. If you know that you only need a subset of the output for, say, a figure, consider using ``outseg`` and the other keyword arguments to restrict the size of the returned data. Parameters ---------- qrange : `tuple` of `float`, optional `(low, high)` range of Qs to scan frange : `tuple` of `float`, optional `(low, high)` range of frequencies to scan gps : `float`, optional central time of interest for determining the loudest Q-plane search : `float`, optional window around `gps` in which to find peak energies, only used if `gps` is given tres : `float`, optional desired time resolution (seconds) of output `Spectrogram`, default is `abs(outseg) / 1000.` fres : `float`, `int`, `None`, optional desired frequency resolution (Hertz) of output `Spectrogram`, or, if ``logf=True``, the number of frequency samples; give `None` to skip this step and return the original resolution, default is 0.5 Hz or 500 frequency samples logf : `bool`, optional boolean switch to enable (`True`) or disable (`False`) use of log-sampled frequencies in the output `Spectrogram`, if `True` then `fres` is interpreted as a number of frequency samples, default: `False` norm : `bool`, `str`, optional whether to normalize the returned Q-transform output, or how, default: `True` (``'median'``), other options: `False`, ``'mean'`` mismatch : `float` maximum allowed fractional mismatch between neighbouring tiles outseg : `~gwpy.segments.Segment`, optional GPS `[start, stop)` segment for output `Spectrogram`, default is the full duration of the input whiten : `bool`, `~gwpy.frequencyseries.FrequencySeries`, optional boolean switch to enable (`True`) or disable (`False`) data whitening, or an ASD `~gwpy.frequencyseries.FrequencySeries` with which to whiten the data fduration : `float`, optional duration (in seconds) of the time-domain FIR whitening filter, only used if `whiten` is not `False`, defaults to 2 seconds highpass : `float`, optional highpass corner frequency (in Hz) of the FIR whitening filter, used only if `whiten` is not `False`, default: `None` **asd_kw keyword arguments to pass to `TimeSeries.asd` to generate an ASD to use when whitening the data Returns ------- out : `~gwpy.spectrogram.Spectrogram` output `Spectrogram` of normalised Q energy See Also -------- TimeSeries.asd for documentation on acceptable `**asd_kw` TimeSeries.whiten for documentation on how the whitening is done gwpy.signal.qtransform for code and documentation on how the Q-transform is implemented Notes ----- This method will return a `Spectrogram` of dtype ``float32`` if ``norm`` is given, and ``float64`` otherwise. To optimize plot rendering with `~matplotlib.axes.Axes.pcolormesh`, the output `~gwpy.spectrogram.Spectrogram` can be given a log-sampled frequency axis by passing `logf=True` at runtime. The `fres` argument is then the number of points on the frequency axis. Note, this is incompatible with `~matplotlib.axes.Axes.imshow`. It is also highly recommended to use the `outseg` keyword argument when only a small window around a given GPS time is of interest. This will speed up this method a little, but can greatly speed up rendering the resulting `Spectrogram` using `pcolormesh`. If you aren't going to use `pcolormesh` in the end, don't worry. Examples -------- >>> from numpy.random import normal >>> from scipy.signal import gausspulse >>> from gwpy.timeseries import TimeSeries Generate a `TimeSeries` containing Gaussian noise sampled at 4096 Hz, centred on GPS time 0, with a sine-Gaussian pulse ('glitch') at 500 Hz: >>> noise = TimeSeries(normal(loc=1, size=4096*4), sample_rate=4096, epoch=-2) >>> glitch = TimeSeries(gausspulse(noise.times.value, fc=500) * 4, sample_rate=4096) >>> data = noise + glitch Compute and plot the Q-transform of these data: >>> q = data.q_transform() >>> plot = q.plot() >>> ax = plot.gca() >>> ax.set_xlim(-.2, .2) >>> ax.set_epoch(0) >>> plot.show()
[ "Scan", "a", "TimeSeries", "using", "the", "multi", "-", "Q", "transform", "and", "return", "an", "interpolated", "high", "-", "resolution", "spectrogram" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L1942-L2110
train
211,569
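Building on the example embedded in the docstring above, a hedged sketch using `outseg` to keep the interpolated output small (the window choice here is an assumption):

    from gwpy.segments import Segment

    # continuing from the noise + glitch example in the docstring
    q = data.q_transform(outseg=Segment(-0.2, 0.2), logf=True)
    plot = q.plot()
    plot.gca().set_epoch(0)
    plot.show()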
gwpy/gwpy
gwpy/types/series.py
Series._update_index
def _update_index(self, axis, key, value): """Update the current axis index based on a given key or value This is an internal method designed to set the origin or step for an index, whilst updating existing Index arrays as appropriate Examples -------- >>> self._update_index("x0", 0) >>> self._update_index("dx", 0) To actually set an index array, use `_set_index` """ # delete current value if given None if value is None: return delattr(self, key) _key = "_{}".format(key) index = "{[0]}index".format(axis) unit = "{[0]}unit".format(axis) # convert float to Quantity if not isinstance(value, Quantity): try: value = Quantity(value, getattr(self, unit)) except TypeError: value = Quantity(float(value), getattr(self, unit)) # if value is changing, delete current index try: curr = getattr(self, _key) except AttributeError: delattr(self, index) else: if ( value is None or getattr(self, key) is None or not value.unit.is_equivalent(curr.unit) or value != curr ): delattr(self, index) # set new value setattr(self, _key, value) return value
python
def _update_index(self, axis, key, value): """Update the current axis index based on a given key or value This is an internal method designed to set the origin or step for an index, whilst updating existing Index arrays as appropriate Examples -------- >>> self._update_index("x0", 0) >>> self._update_index("dx", 0) To actually set an index array, use `_set_index` """ # delete current value if given None if value is None: return delattr(self, key) _key = "_{}".format(key) index = "{[0]}index".format(axis) unit = "{[0]}unit".format(axis) # convert float to Quantity if not isinstance(value, Quantity): try: value = Quantity(value, getattr(self, unit)) except TypeError: value = Quantity(float(value), getattr(self, unit)) # if value is changing, delete current index try: curr = getattr(self, _key) except AttributeError: delattr(self, index) else: if ( value is None or getattr(self, key) is None or not value.unit.is_equivalent(curr.unit) or value != curr ): delattr(self, index) # set new value setattr(self, _key, value) return value
[ "def", "_update_index", "(", "self", ",", "axis", ",", "key", ",", "value", ")", ":", "# delete current value if given None", "if", "value", "is", "None", ":", "return", "delattr", "(", "self", ",", "key", ")", "_key", "=", "\"_{}\"", ".", "format", "(", ...
Update the current axis index based on a given key or value This is an internal method designed to set the origin or step for an index, whilst updating existing Index arrays as appropriate Examples -------- >>> self._update_index("x0", 0) >>> self._update_index("dx", 0) To actually set an index array, use `_set_index`
[ "Update", "the", "current", "axis", "index", "based", "on", "a", "given", "key", "or", "value" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L163-L207
train
211,570
gwpy/gwpy
gwpy/types/series.py
Series._set_index
def _set_index(self, key, index): """Set a new index array for this series """ axis = key[0] origin = "{}0".format(axis) delta = "d{}".format(axis) if index is None: return delattr(self, key) if not isinstance(index, Index): try: unit = index.unit except AttributeError: unit = getattr(self, "_default_{}unit".format(axis)) index = Index(index, unit=unit, copy=False) setattr(self, origin, index[0]) if index.regular: setattr(self, delta, index[1] - index[0]) else: delattr(self, delta) setattr(self, "_{}".format(key), index)
python
def _set_index(self, key, index): """Set a new index array for this series """ axis = key[0] origin = "{}0".format(axis) delta = "d{}".format(axis) if index is None: return delattr(self, key) if not isinstance(index, Index): try: unit = index.unit except AttributeError: unit = getattr(self, "_default_{}unit".format(axis)) index = Index(index, unit=unit, copy=False) setattr(self, origin, index[0]) if index.regular: setattr(self, delta, index[1] - index[0]) else: delattr(self, delta) setattr(self, "_{}".format(key), index)
[ "def", "_set_index", "(", "self", ",", "key", ",", "index", ")", ":", "axis", "=", "key", "[", "0", "]", "origin", "=", "\"{}0\"", ".", "format", "(", "axis", ")", "delta", "=", "\"d{}\"", ".", "format", "(", "axis", ")", "if", "index", "is", "No...
Set a new index array for this series
[ "Set", "a", "new", "index", "array", "for", "this", "series" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L209-L228
train
211,571
gwpy/gwpy
gwpy/types/series.py
Series.x0
def x0(self): """X-axis coordinate of the first data point :type: `~astropy.units.Quantity` scalar """ try: return self._x0 except AttributeError: self._x0 = Quantity(0, self.xunit) return self._x0
python
def x0(self): """X-axis coordinate of the first data point :type: `~astropy.units.Quantity` scalar """ try: return self._x0 except AttributeError: self._x0 = Quantity(0, self.xunit) return self._x0
[ "def", "x0", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_x0", "except", "AttributeError", ":", "self", ".", "_x0", "=", "Quantity", "(", "0", ",", "self", ".", "xunit", ")", "return", "self", ".", "_x0" ]
X-axis coordinate of the first data point :type: `~astropy.units.Quantity` scalar
[ "X", "-", "axis", "coordinate", "of", "the", "first", "data", "point" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L250-L259
train
211,572
gwpy/gwpy
gwpy/types/series.py
Series.dx
def dx(self): """X-axis sample separation :type: `~astropy.units.Quantity` scalar """ try: return self._dx except AttributeError: try: self._xindex except AttributeError: self._dx = Quantity(1, self.xunit) else: if not self.xindex.regular: raise AttributeError("This series has an irregular x-axis " "index, so 'dx' is not well defined") self._dx = self.xindex[1] - self.xindex[0] return self._dx
python
def dx(self): """X-axis sample separation :type: `~astropy.units.Quantity` scalar """ try: return self._dx except AttributeError: try: self._xindex except AttributeError: self._dx = Quantity(1, self.xunit) else: if not self.xindex.regular: raise AttributeError("This series has an irregular x-axis " "index, so 'dx' is not well defined") self._dx = self.xindex[1] - self.xindex[0] return self._dx
[ "def", "dx", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_dx", "except", "AttributeError", ":", "try", ":", "self", ".", "_xindex", "except", "AttributeError", ":", "self", ".", "_dx", "=", "Quantity", "(", "1", ",", "self", ".", "xuni...
X-axis sample separation :type: `~astropy.units.Quantity` scalar
[ "X", "-", "axis", "sample", "separation" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L274-L291
train
211,573
gwpy/gwpy
gwpy/types/series.py
Series.xindex
def xindex(self): """Positions of the data on the x-axis :type: `~astropy.units.Quantity` array """ try: return self._xindex except AttributeError: self._xindex = Index.define(self.x0, self.dx, self.shape[0]) return self._xindex
python
def xindex(self): """Positions of the data on the x-axis :type: `~astropy.units.Quantity` array """ try: return self._xindex except AttributeError: self._xindex = Index.define(self.x0, self.dx, self.shape[0]) return self._xindex
[ "def", "xindex", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_xindex", "except", "AttributeError", ":", "self", ".", "_xindex", "=", "Index", ".", "define", "(", "self", ".", "x0", ",", "self", ".", "dx", ",", "self", ".", "shape", "...
Positions of the data on the x-axis :type: `~astropy.units.Quantity` array
[ "Positions", "of", "the", "data", "on", "the", "x", "-", "axis" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L306-L315
train
211,574
gwpy/gwpy
gwpy/types/series.py
Series.xunit
def xunit(self): """Unit of x-axis index :type: `~astropy.units.Unit` """ try: return self._dx.unit except AttributeError: try: return self._x0.unit except AttributeError: return self._default_xunit
python
def xunit(self): """Unit of x-axis index :type: `~astropy.units.Unit` """ try: return self._dx.unit except AttributeError: try: return self._x0.unit except AttributeError: return self._default_xunit
[ "def", "xunit", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_dx", ".", "unit", "except", "AttributeError", ":", "try", ":", "return", "self", ".", "_x0", ".", "unit", "except", "AttributeError", ":", "return", "self", ".", "_default_xunit"...
Unit of x-axis index :type: `~astropy.units.Unit`
[ "Unit", "of", "x", "-", "axis", "index" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L330-L341
train
211,575
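Taken together, the four property records above (`x0`, `dx`, `xindex`, `xunit`) describe one axis model; a small demonstration reusing the `Series` construction from the `shift` record below:

    from gwpy.types import Series

    a = Series([1, 2, 3, 4, 5], x0=0, dx=1, xunit='m')
    print(a.x0)      # 0.0 m -- coordinate of the first point
    print(a.dx)      # 1.0 m -- regular sample separation
    print(a.xindex)  # [0., 1., 2., 3., 4.] m -- built lazily from x0 and dx
    print(a.xunit)   # m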
gwpy/gwpy
gwpy/types/series.py
Series.plot
def plot(self, method='plot', **kwargs): """Plot the data for this series Returns ------- figure : `~matplotlib.figure.Figure` the newly created figure, with populated Axes. See Also -------- matplotlib.pyplot.figure for documentation of keyword arguments used to create the figure matplotlib.figure.Figure.add_subplot for documentation of keyword arguments used to create the axes matplotlib.axes.Axes.plot for documentation of keyword arguments used in rendering the data """ from ..plot import Plot from ..plot.text import default_unit_label # correct for log scales and zeros if kwargs.get('xscale') == 'log' and self.x0.value == 0: kwargs.setdefault('xlim', (self.dx.value, self.xspan[1])) # make plot plot = Plot(self, method=method, **kwargs) # set default y-axis label (xlabel is set by Plot()) default_unit_label(plot.gca().yaxis, self.unit) return plot
python
def plot(self, method='plot', **kwargs): """Plot the data for this series Returns ------- figure : `~matplotlib.figure.Figure` the newly created figure, with populated Axes. See Also -------- matplotlib.pyplot.figure for documentation of keyword arguments used to create the figure matplotlib.figure.Figure.add_subplot for documentation of keyword arguments used to create the axes matplotlib.axes.Axes.plot for documentation of keyword arguments used in rendering the data """ from ..plot import Plot from ..plot.text import default_unit_label # correct for log scales and zeros if kwargs.get('xscale') == 'log' and self.x0.value == 0: kwargs.setdefault('xlim', (self.dx.value, self.xspan[1])) # make plot plot = Plot(self, method=method, **kwargs) # set default y-axis label (xlabel is set by Plot()) default_unit_label(plot.gca().yaxis, self.unit) return plot
[ "def", "plot", "(", "self", ",", "method", "=", "'plot'", ",", "*", "*", "kwargs", ")", ":", "from", ".", ".", "plot", "import", "Plot", "from", ".", ".", "plot", ".", "text", "import", "default_unit_label", "# correct for log scales and zeros", "if", "kwa...
Plot the data for this series Returns ------- figure : `~matplotlib.figure.Figure` the newly created figure, with populated Axes. See Also -------- matplotlib.pyplot.figure for documentation of keyword arguments used to create the figure matplotlib.figure.Figure.add_subplot for documentation of keyword arguments used to create the axes matplotlib.axes.Axes.plot for documentation of keyword arguments used in rendering the data
[ "Plot", "the", "data", "for", "this", "series" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L422-L454
train
211,576
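A minimal sketch of `plot`; the axis-label override is just to show where customisation hooks in:

    from numpy.random import normal
    from gwpy.timeseries import TimeSeries

    data = TimeSeries(normal(size=4096), sample_rate=4096)
    plot = data.plot()
    plot.gca().set_ylabel('Amplitude')  # replace the default unit label
    plot.show()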
gwpy/gwpy
gwpy/types/series.py
Series.step
def step(self, **kwargs): """Create a step plot of this series """ kwargs.setdefault('linestyle', kwargs.pop('where', 'steps-post')) data = self.append(self.value[-1:], inplace=False) return data.plot(**kwargs)
python
def step(self, **kwargs): """Create a step plot of this series """ kwargs.setdefault('linestyle', kwargs.pop('where', 'steps-post')) data = self.append(self.value[-1:], inplace=False) return data.plot(**kwargs)
[ "def", "step", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'linestyle'", ",", "kwargs", ".", "pop", "(", "'where'", ",", "'steps-post'", ")", ")", "data", "=", "self", ".", "append", "(", "self", ".", "value", ...
Create a step plot of this series
[ "Create", "a", "step", "plot", "of", "this", "series" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L456-L461
train
211,577
gwpy/gwpy
gwpy/types/series.py
Series.shift
def shift(self, delta): """Shift this `Series` forward on the X-axis by ``delta`` This modifies the series in-place. Parameters ---------- delta : `float`, `~astropy.units.Quantity`, `str` The amount by which to shift (in x-axis units if `float`), give a negative value to shift backwards in time Examples -------- >>> from gwpy.types import Series >>> a = Series([1, 2, 3, 4, 5], x0=0, dx=1, xunit='m') >>> print(a.x0) 0.0 m >>> a.shift(5) >>> print(a.x0) 5.0 m >>> a.shift('-1 km') -995.0 m """ self.x0 = self.x0 + Quantity(delta, self.xunit)
python
def shift(self, delta): """Shift this `Series` forward on the X-axis by ``delta`` This modifies the series in-place. Parameters ---------- delta : `float`, `~astropy.units.Quantity`, `str` The amount by which to shift (in x-axis units if `float`), give a negative value to shift backwards in time Examples -------- >>> from gwpy.types import Series >>> a = Series([1, 2, 3, 4, 5], x0=0, dx=1, xunit='m') >>> print(a.x0) 0.0 m >>> a.shift(5) >>> print(a.x0) 5.0 m >>> a.shift('-1 km') -995.0 m """ self.x0 = self.x0 + Quantity(delta, self.xunit)
[ "def", "shift", "(", "self", ",", "delta", ")", ":", "self", ".", "x0", "=", "self", ".", "x0", "+", "Quantity", "(", "delta", ",", "self", ".", "xunit", ")" ]
Shift this `Series` forward on the X-axis by ``delta`` This modifies the series in-place. Parameters ---------- delta : `float`, `~astropy.units.Quantity`, `str` The amount by which to shift (in x-axis units if `float`), give a negative value to shift backwards in time Examples -------- >>> from gwpy.types import Series >>> a = Series([1, 2, 3, 4, 5], x0=0, dx=1, xunit='m') >>> print(a.x0) 0.0 m >>> a.shift(5) >>> print(a.x0) 5.0 m >>> a.shift('-1 km') -995.0 m
[ "Shift", "this", "Series", "forward", "on", "the", "X", "-", "axis", "by", "delta" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L465-L488
train
211,578
gwpy/gwpy
gwpy/types/series.py
Series.value_at
def value_at(self, x): """Return the value of this `Series` at the given `xindex` value Parameters ---------- x : `float`, `~astropy.units.Quantity` the `xindex` value at which to search Returns ------- y : `~astropy.units.Quantity` the value of this Series at the given `xindex` value """ x = Quantity(x, self.xindex.unit).value try: idx = (self.xindex.value == x).nonzero()[0][0] except IndexError as e: e.args = ("Value %r not found in array index" % x,) raise return self[idx]
python
def value_at(self, x): """Return the value of this `Series` at the given `xindex` value Parameters ---------- x : `float`, `~astropy.units.Quantity` the `xindex` value at which to search Returns ------- y : `~astropy.units.Quantity` the value of this Series at the given `xindex` value """ x = Quantity(x, self.xindex.unit).value try: idx = (self.xindex.value == x).nonzero()[0][0] except IndexError as e: e.args = ("Value %r not found in array index" % x,) raise return self[idx]
[ "def", "value_at", "(", "self", ",", "x", ")", ":", "x", "=", "Quantity", "(", "x", ",", "self", ".", "xindex", ".", "unit", ")", ".", "value", "try", ":", "idx", "=", "(", "self", ".", "xindex", ".", "value", "==", "x", ")", ".", "nonzero", ...
Return the value of this `Series` at the given `xindex` value Parameters ---------- x : `float`, `~astropy.units.Quantity` the `xindex` value at which to search Returns ------- y : `~astropy.units.Quantity` the value of this Series at the given `xindex` value
[ "Return", "the", "value", "of", "this", "Series", "at", "the", "given", "xindex", "value" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L490-L509
train
211,579
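A small demonstration of `value_at`, again borrowing the `Series` from the `shift` record; note the lookup requires an exact match in the index:

    from astropy.units import Quantity
    from gwpy.types import Series

    a = Series([1, 2, 3, 4, 5], x0=0, dx=1, xunit='m')
    print(a.value_at(3))                    # 4 -- exact index match
    print(a.value_at(Quantity(300, 'cm')))  # same point, converted to m first
    # a.value_at(2.5) would raise IndexError: no such index value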
gwpy/gwpy
gwpy/types/series.py
Series.is_contiguous
def is_contiguous(self, other, tol=1/2.**18): """Check whether other is contiguous with self. Parameters ---------- other : `Series`, `numpy.ndarray` another series of the same type to test for contiguity tol : `float`, optional the numerical tolerance of the test Returns ------- 1 if `other` is contiguous with this series, i.e. would attach seamlessly onto the end -1 if `other` is anti-contiguous with this series, i.e. would attach seamlessly onto the start 0 if `other` is completely dis-contiguous with this series Notes ----- if a raw `numpy.ndarray` is passed as other, with no metadata, then the contiguity check will always pass """ self.is_compatible(other) if isinstance(other, type(self)): if abs(float(self.xspan[1] - other.xspan[0])) < tol: return 1 elif abs(float(other.xspan[1] - self.xspan[0])) < tol: return -1 return 0 elif type(other) in [list, tuple, numpy.ndarray]: return 1
python
def is_contiguous(self, other, tol=1/2.**18): """Check whether other is contiguous with self. Parameters ---------- other : `Series`, `numpy.ndarray` another series of the same type to test for contiguity tol : `float`, optional the numerical tolerance of the test Returns ------- 1 if `other` is contiguous with this series, i.e. would attach seamlessly onto the end -1 if `other` is anti-contiguous with this series, i.e. would attach seamlessly onto the start 0 if `other` is completely dis-contiguous with this series Notes ----- if a raw `numpy.ndarray` is passed as other, with no metadata, then the contiguity check will always pass """ self.is_compatible(other) if isinstance(other, type(self)): if abs(float(self.xspan[1] - other.xspan[0])) < tol: return 1 elif abs(float(other.xspan[1] - self.xspan[0])) < tol: return -1 return 0 elif type(other) in [list, tuple, numpy.ndarray]: return 1
[ "def", "is_contiguous", "(", "self", ",", "other", ",", "tol", "=", "1", "/", "2.", "**", "18", ")", ":", "self", ".", "is_compatible", "(", "other", ")", "if", "isinstance", "(", "other", ",", "type", "(", "self", ")", ")", ":", "if", "abs", "("...
Check whether other is contiguous with self. Parameters ---------- other : `Series`, `numpy.ndarray` another series of the same type to test for contiguity tol : `float`, optional the numerical tolerance of the test Returns ------- 1 if `other` is contiguous with this series, i.e. would attach seamlessly onto the end -1 if `other` is anti-contiguous with this series, i.e. would attach seamlessly onto the start 0 if `other` is completely dis-contiguous with this series Notes ----- if a raw `numpy.ndarray` is passed as other, with no metadata, then the contiguity check will always pass
[ "Check", "whether", "other", "is", "contiguous", "with", "self", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L595-L630
train
211,580
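A quick check of the three return values, under the assumption of metre-indexed toy series:

    from gwpy.types import Series

    a = Series([1, 2, 3], x0=0, dx=1, xunit='m')
    b = Series([4, 5, 6], x0=3, dx=1, xunit='m')

    print(a.is_contiguous(b))          # 1  -- b attaches to the end of a
    print(b.is_contiguous(a))          # -1 -- a attaches to the start of b
    print(a.is_contiguous([7, 8, 9]))  # 1  -- bare sequences always pass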
gwpy/gwpy
gwpy/types/series.py
Series.is_compatible
def is_compatible(self, other): """Check whether this series and other have compatible metadata This method tests that the `sample size <Series.dx>`, and the `~Series.unit` match. """ if isinstance(other, type(self)): # check step size, if possible try: if not self.dx == other.dx: raise ValueError("%s sample sizes do not match: " "%s vs %s." % (type(self).__name__, self.dx, other.dx)) except AttributeError: raise ValueError("Series with irregular xindexes cannot " "be compatible") # check units if not self.unit == other.unit and not ( self.unit in [dimensionless_unscaled, None] and other.unit in [dimensionless_unscaled, None]): raise ValueError("%s units do not match: %s vs %s." % (type(self).__name__, str(self.unit), str(other.unit))) else: # assume an array-like object, and just check that the shape # and dtype match arr = numpy.asarray(other) if arr.ndim != self.ndim: raise ValueError("Dimensionality does not match") if arr.dtype != self.dtype: warn("Array data types do not match: %s vs %s" % (self.dtype, other.dtype)) return True
python
def is_compatible(self, other): """Check whether this series and other have compatible metadata This method tests that the `sample size <Series.dx>`, and the `~Series.unit` match. """ if isinstance(other, type(self)): # check step size, if possible try: if not self.dx == other.dx: raise ValueError("%s sample sizes do not match: " "%s vs %s." % (type(self).__name__, self.dx, other.dx)) except AttributeError: raise ValueError("Series with irregular xindexes cannot " "be compatible") # check units if not self.unit == other.unit and not ( self.unit in [dimensionless_unscaled, None] and other.unit in [dimensionless_unscaled, None]): raise ValueError("%s units do not match: %s vs %s." % (type(self).__name__, str(self.unit), str(other.unit))) else: # assume an array-like object, and just check that the shape # and dtype match arr = numpy.asarray(other) if arr.ndim != self.ndim: raise ValueError("Dimensionality does not match") if arr.dtype != self.dtype: warn("Array data types do not match: %s vs %s" % (self.dtype, other.dtype)) return True
[ "def", "is_compatible", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "type", "(", "self", ")", ")", ":", "# check step size, if possible", "try", ":", "if", "not", "self", ".", "dx", "==", "other", ".", "dx", ":", "raise"...
Check whether this series and other have compatible metadata This method tests that the `sample size <Series.dx>`, and the `~Series.unit` match.
[ "Check", "whether", "this", "series", "and", "other", "have", "compatible", "metadata" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L632-L664
train
211,581
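A sketch of what `is_compatible` accepts and rejects; the units here are arbitrary assumptions:

    from gwpy.types import Series

    a = Series([1, 2, 3], dx=1, xunit='s', unit='m')
    b = Series([4, 5, 6], dx=1, xunit='s', unit='m')
    print(a.is_compatible(b))  # True

    c = Series([4, 5, 6], dx=2, xunit='s', unit='m')
    # a.is_compatible(c) raises ValueError: sample sizes do not match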
gwpy/gwpy
gwpy/types/series.py
Series.prepend
def prepend(self, other, inplace=True, pad=None, gap=None, resize=True): """Connect another series onto the start of the current one. Parameters ---------- other : `Series` another series of the same type as this one inplace : `bool`, optional perform operation in-place, modifying current series, otherwise copy data and return new series, default: `True` .. warning:: `inplace` prepend bypasses the reference check in `numpy.ndarray.resize`, so be careful to only use this for arrays that haven't been sharing their memory! pad : `float`, optional value with which to pad discontiguous series, by default gaps will result in a `ValueError`. gap : `str`, optional action to perform if there's a gap between the other series and this one. One of - ``'raise'`` - raise a `ValueError` - ``'ignore'`` - remove gap and join data - ``'pad'`` - pad gap with zeros If `pad` is given and is not `None`, the default is ``'pad'``, otherwise ``'raise'``. resize : `bool`, optional resize this array to accommodate new data, otherwise shift the old data to the left (potentially falling off the start) and put the new data in at the end, default: `True`. Returns ------- series : `TimeSeries` time-series containing joined data sets """ out = other.append(self, inplace=False, gap=gap, pad=pad, resize=resize) if inplace: self.resize(out.shape, refcheck=False) self[:] = out[:] self.x0 = out.x0.copy() del out return self return out
python
def prepend(self, other, inplace=True, pad=None, gap=None, resize=True): """Connect another series onto the start of the current one. Parameters ---------- other : `Series` another series of the same type as this one inplace : `bool`, optional perform operation in-place, modifying current series, otherwise copy data and return new series, default: `True` .. warning:: `inplace` prepend bypasses the reference check in `numpy.ndarray.resize`, so be careful to only use this for arrays that haven't been sharing their memory! pad : `float`, optional value with which to pad discontiguous series, by default gaps will result in a `ValueError`. gap : `str`, optional action to perform if there's a gap between the other series and this one. One of - ``'raise'`` - raise a `ValueError` - ``'ignore'`` - remove gap and join data - ``'pad'`` - pad gap with zeros If `pad` is given and is not `None`, the default is ``'pad'``, otherwise ``'raise'``. resize : `bool`, optional resize this array to accommodate new data, otherwise shift the old data to the left (potentially falling off the start) and put the new data in at the end, default: `True`. Returns ------- series : `TimeSeries` time-series containing joined data sets """ out = other.append(self, inplace=False, gap=gap, pad=pad, resize=resize) if inplace: self.resize(out.shape, refcheck=False) self[:] = out[:] self.x0 = out.x0.copy() del out return self return out
[ "def", "prepend", "(", "self", ",", "other", ",", "inplace", "=", "True", ",", "pad", "=", "None", ",", "gap", "=", "None", ",", "resize", "=", "True", ")", ":", "out", "=", "other", ".", "append", "(", "self", ",", "inplace", "=", "False", ",", ...
Connect another series onto the start of the current one. Parameters ---------- other : `Series` another series of the same type as this one inplace : `bool`, optional perform operation in-place, modifying current series, otherwise copy data and return new series, default: `True` .. warning:: `inplace` prepend bypasses the reference check in `numpy.ndarray.resize`, so be careful to only use this for arrays that haven't been sharing their memory! pad : `float`, optional value with which to pad discontiguous series, by default gaps will result in a `ValueError`. gap : `str`, optional action to perform if there's a gap between the other series and this one. One of - ``'raise'`` - raise a `ValueError` - ``'ignore'`` - remove gap and join data - ``'pad'`` - pad gap with zeros If `pad` is given and is not `None`, the default is ``'pad'``, otherwise ``'raise'``. resize : `bool`, optional resize this array to accommodate new data, otherwise shift the old data to the left (potentially falling off the start) and put the new data in at the end, default: `True`. Returns ------- series : `TimeSeries` time-series containing joined data sets
[ "Connect", "another", "series", "onto", "the", "start", "of", "the", "current", "one", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L811-L862
train
211,582
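A minimal sketch of `Series.prepend`, assuming two contiguous series (the values here are illustrative):

import numpy
from gwpy.types import Series

new = Series(numpy.ones(4), x0=0, dx=1)   # spans [0, 4)
old = Series(numpy.zeros(4), x0=4, dx=1)  # spans [4, 8)
# connect `new` onto the start of `old`, returning a copy
joined = old.prepend(new, inplace=False)
print(joined.xspan)  # expected to cover [0, 8)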
gwpy/gwpy
gwpy/types/series.py
Series.update
def update(self, other, inplace=True): """Update this series by appending new data from another and dropping the same amount of data off the start. This is a convenience method that just calls `~Series.append` with `resize=False`. """ return self.append(other, inplace=inplace, resize=False)
python
def update(self, other, inplace=True): """Update this series by appending new data from another and dropping the same amount of data off the start. This is a convenience method that just calls `~Series.append` with `resize=False`. """ return self.append(other, inplace=inplace, resize=False)
[ "def", "update", "(", "self", ",", "other", ",", "inplace", "=", "True", ")", ":", "return", "self", ".", "append", "(", "other", ",", "inplace", "=", "inplace", ",", "resize", "=", "False", ")" ]
Update this series by appending new data from another and dropping the same amount of data off the start. This is a convenience method that just calls `~Series.append` with `resize=False`.
[ "Update", "this", "series", "by", "appending", "new", "data", "from", "an", "other", "and", "dropping", "the", "same", "amount", "of", "data", "off", "the", "start", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L864-L871
train
211,583
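Sketch of `Series.update` used as a fixed-length rolling buffer (illustrative values; the buffer keeps its size while its x-origin advances):

import numpy
from gwpy.types import Series

buf = Series(numpy.zeros(8), x0=0, dx=1)   # spans [0, 8)
block = Series(numpy.ones(4), x0=8, dx=1)  # next contiguous block
buf.update(block)  # append 4 new samples, drop 4 off the start
print(buf.xspan)   # expected (4, 12), size unchanged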
gwpy/gwpy
gwpy/types/series.py
Series.crop
def crop(self, start=None, end=None, copy=False): """Crop this series to the given x-axis extent. Parameters ---------- start : `float`, optional lower limit of x-axis to crop to, defaults to current `~Series.x0` end : `float`, optional upper limit of x-axis to crop to, defaults to current series end copy : `bool`, optional, default: `False` copy the input data to fresh memory, otherwise return a view Returns ------- series : `Series` A new series with a sub-set of the input data Notes ----- If either ``start`` or ``end`` are outside of the original `Series` span, warnings will be printed and the limits will be restricted to the :attr:`~Series.xspan` """ x0, x1 = self.xspan xtype = type(x0) if isinstance(start, Quantity): start = start.to(self.xunit).value if isinstance(end, Quantity): end = end.to(self.xunit).value # pin early starts to time-series start if start == x0: start = None elif start is not None and xtype(start) < x0: warn('%s.crop given start smaller than current start, ' 'crop will begin when the Series actually starts.' % type(self).__name__) start = None # pin late ends to time-series end if end == x1: end = None if end is not None and xtype(end) > x1: warn('%s.crop given end larger than current end, ' 'crop will end when the Series actually ends.' % type(self).__name__) end = None # find start index if start is None: idx0 = None else: idx0 = int((xtype(start) - x0) // self.dx.value) # find end index if end is None: idx1 = None else: idx1 = int((xtype(end) - x0) // self.dx.value) if idx1 >= self.size: idx1 = None # crop if copy: return self[idx0:idx1].copy() return self[idx0:idx1]
python
def crop(self, start=None, end=None, copy=False): """Crop this series to the given x-axis extent. Parameters ---------- start : `float`, optional lower limit of x-axis to crop to, defaults to current `~Series.x0` end : `float`, optional upper limit of x-axis to crop to, defaults to current series end copy : `bool`, optional, default: `False` copy the input data to fresh memory, otherwise return a view Returns ------- series : `Series` A new series with a sub-set of the input data Notes ----- If either ``start`` or ``end`` are outside of the original `Series` span, warnings will be printed and the limits will be restricted to the :attr:`~Series.xspan` """ x0, x1 = self.xspan xtype = type(x0) if isinstance(start, Quantity): start = start.to(self.xunit).value if isinstance(end, Quantity): end = end.to(self.xunit).value # pin early starts to time-series start if start == x0: start = None elif start is not None and xtype(start) < x0: warn('%s.crop given start smaller than current start, ' 'crop will begin when the Series actually starts.' % type(self).__name__) start = None # pin late ends to time-series end if end == x1: end = None if end is not None and xtype(end) > x1: warn('%s.crop given end larger than current end, ' 'crop will end when the Series actually ends.' % type(self).__name__) end = None # find start index if start is None: idx0 = None else: idx0 = int((xtype(start) - x0) // self.dx.value) # find end index if end is None: idx1 = None else: idx1 = int((xtype(end) - x0) // self.dx.value) if idx1 >= self.size: idx1 = None # crop if copy: return self[idx0:idx1].copy() return self[idx0:idx1]
[ "def", "crop", "(", "self", ",", "start", "=", "None", ",", "end", "=", "None", ",", "copy", "=", "False", ")", ":", "x0", ",", "x1", "=", "self", ".", "xspan", "xtype", "=", "type", "(", "x0", ")", "if", "isinstance", "(", "start", ",", "Quant...
Crop this series to the given x-axis extent. Parameters ---------- start : `float`, optional lower limit of x-axis to crop to, defaults to current `~Series.x0` end : `float`, optional upper limit of x-axis to crop to, defaults to current series end copy : `bool`, optional, default: `False` copy the input data to fresh memory, otherwise return a view Returns ------- series : `Series` A new series with a sub-set of the input data Notes ----- If either ``start`` or ``end`` are outside of the original `Series` span, warnings will be printed and the limits will be restricted to the :attr:`~Series.xspan`
[ "Crop", "this", "series", "to", "the", "given", "x", "-", "axis", "extent", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L873-L941
train
211,584
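Sketch of `Series.crop` (illustrative values):

import numpy
from gwpy.types import Series

data = Series(numpy.arange(10), x0=0, dx=1)
view = data.crop(2, 6)             # view onto samples with 2 <= x < 6
copy = data.crop(2, 6, copy=True)  # same extent, fresh memory
print(view.xspan)  # expected (2, 6)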
gwpy/gwpy
gwpy/types/series.py
Series.pad
def pad(self, pad_width, **kwargs): """Pad this series to a new size Parameters ---------- pad_width : `int`, pair of `ints` number of samples by which to pad each end of the array. Single int to pad both ends by the same amount, or (before, after) `tuple` to give uneven padding **kwargs see :meth:`numpy.pad` for kwarg documentation Returns ------- series : `Series` the padded version of the input See also -------- numpy.pad for details on the underlying functionality """ # format arguments kwargs.setdefault('mode', 'constant') if isinstance(pad_width, int): pad_width = (pad_width,) # form pad and view to this type new = numpy.pad(self, pad_width, **kwargs).view(type(self)) # numpy.pad has stripped all metadata, so copy it over new.__metadata_finalize__(self) new._unit = self.unit # finally move the starting index based on the amount of left-padding new.x0 -= self.dx * pad_width[0] return new
python
def pad(self, pad_width, **kwargs): """Pad this series to a new size Parameters ---------- pad_width : `int`, pair of `ints` number of samples by which to pad each end of the array. Single int to pad both ends by the same amount, or (before, after) `tuple` to give uneven padding **kwargs see :meth:`numpy.pad` for kwarg documentation Returns ------- series : `Series` the padded version of the input See also -------- numpy.pad for details on the underlying functionality """ # format arguments kwargs.setdefault('mode', 'constant') if isinstance(pad_width, int): pad_width = (pad_width,) # form pad and view to this type new = numpy.pad(self, pad_width, **kwargs).view(type(self)) # numpy.pad has stripped all metadata, so copy it over new.__metadata_finalize__(self) new._unit = self.unit # finally move the starting index based on the amount of left-padding new.x0 -= self.dx * pad_width[0] return new
[ "def", "pad", "(", "self", ",", "pad_width", ",", "*", "*", "kwargs", ")", ":", "# format arguments", "kwargs", ".", "setdefault", "(", "'mode'", ",", "'constant'", ")", "if", "isinstance", "(", "pad_width", ",", "int", ")", ":", "pad_width", "=", "(", ...
Pad this series to a new size Parameters ---------- pad_width : `int`, pair of `ints` number of samples by which to pad each end of the array. Single int to pad both ends by the same amount, or (before, after) `tuple` to give uneven padding **kwargs see :meth:`numpy.pad` for kwarg documentation Returns ------- series : `Series` the padded version of the input See also -------- numpy.pad for details on the underlying functionality
[ "Pad", "this", "series", "to", "a", "new", "size" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L943-L976
train
211,585
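Sketch of `Series.pad` with uneven padding (illustrative values); note the x-origin moves back by the amount of left padding:

import numpy
from gwpy.types import Series

data = Series(numpy.ones(4), x0=10, dx=1)
padded = data.pad((2, 3))  # 2 zeros before, 3 after (mode='constant')
print(padded.x0, padded.size)  # expected x0 = 8, size = 9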
gwpy/gwpy
gwpy/types/series.py
Series.inject
def inject(self, other): """Add two compatible `Series` along their shared x-axis values. Parameters ---------- other : `Series` a `Series` whose xindex intersects with `self.xindex` Returns ------- out : `Series` the sum of `self` and `other` along their shared x-axis values Raises ------ ValueError if `self` and `other` have incompatible units or xindex intervals Notes ----- If `other.xindex` and `self.xindex` do not intersect, this method will return a copy of `self`. If the series have uniformly offset indices, this method will emit a warning. If `self.xindex` is an array of timestamps, and if `other.xspan` is not a subset of `self.xspan`, then `other` will be cropped before being added to `self`. Users who wish to taper or window their `Series` should do so before passing it to this method. See :meth:`TimeSeries.taper` and :func:`~gwpy.signal.window.planck` for more information. """ # check Series compatibility self.is_compatible(other) if (self.xunit == second) and (other.xspan[0] < self.xspan[0]): other = other.crop(start=self.xspan[0]) if (self.xunit == second) and (other.xspan[1] > self.xspan[1]): other = other.crop(end=self.xspan[1]) ox0 = other.x0.to(self.x0.unit) idx = ((ox0 - self.x0) / self.dx).value if not idx.is_integer(): warn('Series have overlapping xspan but their x-axis values are ' 'uniformly offset. Returning a copy of the original Series.') return self.copy() # add the Series along their shared samples slice_ = slice(int(idx), int(idx) + other.size) out = self.copy() out.value[slice_] += other.value return out
python
def inject(self, other): """Add two compatible `Series` along their shared x-axis values. Parameters ---------- other : `Series` a `Series` whose xindex intersects with `self.xindex` Returns ------- out : `Series` the sum of `self` and `other` along their shared x-axis values Raises ------ ValueError if `self` and `other` have incompatible units or xindex intervals Notes ----- If `other.xindex` and `self.xindex` do not intersect, this method will return a copy of `self`. If the series have uniformly offset indices, this method will emit a warning. If `self.xindex` is an array of timestamps, and if `other.xspan` is not a subset of `self.xspan`, then `other` will be cropped before being added to `self`. Users who wish to taper or window their `Series` should do so before passing it to this method. See :meth:`TimeSeries.taper` and :func:`~gwpy.signal.window.planck` for more information. """ # check Series compatibility self.is_compatible(other) if (self.xunit == second) and (other.xspan[0] < self.xspan[0]): other = other.crop(start=self.xspan[0]) if (self.xunit == second) and (other.xspan[1] > self.xspan[1]): other = other.crop(end=self.xspan[1]) ox0 = other.x0.to(self.x0.unit) idx = ((ox0 - self.x0) / self.dx).value if not idx.is_integer(): warn('Series have overlapping xspan but their x-axis values are ' 'uniformly offset. Returning a copy of the original Series.') return self.copy() # add the Series along their shared samples slice_ = slice(int(idx), int(idx) + other.size) out = self.copy() out.value[slice_] += other.value return out
[ "def", "inject", "(", "self", ",", "other", ")", ":", "# check Series compatibility", "self", ".", "is_compatible", "(", "other", ")", "if", "(", "self", ".", "xunit", "==", "second", ")", "and", "(", "other", ".", "xspan", "[", "0", "]", "<", "self", ...
Add two compatible `Series` along their shared x-axis values. Parameters ---------- other : `Series` a `Series` whose xindex intersects with `self.xindex` Returns ------- out : `Series` the sum of `self` and `other` along their shared x-axis values Raises ------ ValueError if `self` and `other` have incompatible units or xindex intervals Notes ----- If `other.xindex` and `self.xindex` do not intersect, this method will return a copy of `self`. If the series have uniformly offset indices, this method will emit a warning. If `self.xindex` is an array of timestamps, and if `other.xspan` is not a subset of `self.xspan`, then `other` will be cropped before being added to `self`. Users who wish to taper or window their `Series` should do so before passing it to this method. See :meth:`TimeSeries.taper` and :func:`~gwpy.signal.window.planck` for more information.
[ "Add", "two", "compatible", "Series", "along", "their", "shared", "x", "-", "axis", "values", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/series.py#L978-L1026
train
211,586
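Sketch of `Series.inject`, adding a short feature into a longer baseline along their shared x-axis values (illustrative values):

import numpy
from gwpy.types import Series

base = Series(numpy.zeros(10), x0=0, dx=1)
burst = Series(numpy.hanning(4), x0=3, dx=1)
combined = base.inject(burst)  # `base` is unchanged; the sum is returned
print(combined.value[3:7])     # expected to equal the hanning window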
gwpy/gwpy
gwpy/segments/flag.py
_select_query_method
def _select_query_method(cls, url): """Select the correct query method based on the URL Works for `DataQualityFlag` and `DataQualityDict` """ if urlparse(url).netloc.startswith('geosegdb.'): # only DB2 server return cls.query_segdb return cls.query_dqsegdb
python
def _select_query_method(cls, url): """Select the correct query method based on the URL Works for `DataQualityFlag` and `DataQualityDict` """ if urlparse(url).netloc.startswith('geosegdb.'): # only DB2 server return cls.query_segdb return cls.query_dqsegdb
[ "def", "_select_query_method", "(", "cls", ",", "url", ")", ":", "if", "urlparse", "(", "url", ")", ".", "netloc", ".", "startswith", "(", "'geosegdb.'", ")", ":", "# only DB2 server", "return", "cls", ".", "query_segdb", "return", "cls", ".", "query_dqsegdb...
Select the correct query method based on the URL Works for `DataQualityFlag` and `DataQualityDict`
[ "Select", "the", "correct", "query", "method", "based", "on", "the", "URL" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/flag.py#L76-L83
train
211,587
gwpy/gwpy
gwpy/segments/flag.py
DataQualityFlag.query
def query(cls, flag, *args, **kwargs): """Query for segments of a given flag This method intelligently selects the `~DataQualityFlag.query_segdb` or the `~DataQualityFlag.query_dqsegdb` methods based on the ``url`` kwarg given. Parameters ---------- flag : `str` The name of the flag for which to query *args Either, two `float`-like numbers indicating the GPS [start, stop) interval, or a `SegmentList` defining a number of summary segments url : `str`, optional URL of the segment database, defaults to ``$DEFAULT_SEGMENT_SERVER`` environment variable, or ``'https://segments.ligo.org'`` See Also -------- DataQualityFlag.query_segdb DataQualityFlag.query_dqsegdb for details on the actual query engine, and documentation of other keyword arguments appropriate for each query Returns ------- flag : `DataQualityFlag` A new `DataQualityFlag`, with the `known` and `active` lists filled appropriately. """ query_ = _select_query_method( cls, kwargs.get('url', DEFAULT_SEGMENT_SERVER)) return query_(flag, *args, **kwargs)
python
def query(cls, flag, *args, **kwargs): """Query for segments of a given flag This method intelligently selects the `~DataQualityFlag.query_segdb` or the `~DataQualityFlag.query_dqsegdb` methods based on the ``url`` kwarg given. Parameters ---------- flag : `str` The name of the flag for which to query *args Either, two `float`-like numbers indicating the GPS [start, stop) interval, or a `SegmentList` defining a number of summary segments url : `str`, optional URL of the segment database, defaults to ``$DEFAULT_SEGMENT_SERVER`` environment variable, or ``'https://segments.ligo.org'`` See Also -------- DataQualityFlag.query_segdb DataQualityFlag.query_dqsegdb for details on the actual query engine, and documentation of other keyword arguments appropriate for each query Returns ------- flag : `DataQualityFlag` A new `DataQualityFlag`, with the `known` and `active` lists filled appropriately. """ query_ = _select_query_method( cls, kwargs.get('url', DEFAULT_SEGMENT_SERVER)) return query_(flag, *args, **kwargs)
[ "def", "query", "(", "cls", ",", "flag", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "query_", "=", "_select_query_method", "(", "cls", ",", "kwargs", ".", "get", "(", "'url'", ",", "DEFAULT_SEGMENT_SERVER", ")", ")", "return", "query_", "(", ...
Query for segments of a given flag This method intelligently selects the `~DataQualityFlag.query_segdb` or the `~DataQualityFlag.query_dqsegdb` methods based on the ``url`` kwarg given. Parameters ---------- flag : `str` The name of the flag for which to query *args Either, two `float`-like numbers indicating the GPS [start, stop) interval, or a `SegmentList` defining a number of summary segments url : `str`, optional URL of the segment database, defaults to ``$DEFAULT_SEGMENT_SERVER`` environment variable, or ``'https://segments.ligo.org'`` See Also -------- DataQualityFlag.query_segdb DataQualityFlag.query_dqsegdb for details on the actual query engine, and documentation of other keyword arguments appropriate for each query Returns ------- flag : `DataQualityFlag` A new `DataQualityFlag`, with the `known` and `active` lists filled appropriately.
[ "Query", "for", "segments", "of", "a", "given", "flag" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/flag.py#L368-L405
train
211,588
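A hedged sketch of `DataQualityFlag.query`; this requires network access to a segment database (and typically LIGO.ORG credentials), and the flag name below is only an example:

from gwpy.segments import DataQualityFlag

flag = DataQualityFlag.query(
    'H1:DMT-ANALYSIS_READY:1',  # example flag name
    1164556817, 1164643217,     # GPS [start, stop)
)
print(flag.known)
print(flag.active)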
gwpy/gwpy
gwpy/segments/flag.py
DataQualityFlag.query_segdb
def query_segdb(cls, flag, *args, **kwargs): """Query the initial LIGO segment database for the given flag Parameters ---------- flag : `str` The name of the flag for which to query *args Either, two `float`-like numbers indicating the GPS [start, stop) interval, or a `SegmentList` defining a number of summary segments url : `str`, optional URL of the segment database, defaults to ``$DEFAULT_SEGMENT_SERVER`` environment variable, or ``'https://segments.ligo.org'`` Returns ------- flag : `DataQualityFlag` A new `DataQualityFlag`, with the `known` and `active` lists filled appropriately. """ warnings.warn("query_segdb is deprecated and will be removed in a " "future release", DeprecationWarning) # parse arguments qsegs = _parse_query_segments(args, cls.query_segdb) # process query try: flags = DataQualityDict.query_segdb([flag], qsegs, **kwargs) except TypeError as exc: if 'DataQualityDict' in str(exc): raise TypeError(str(exc).replace('DataQualityDict', cls.__name__)) else: raise if len(flags) > 1: raise RuntimeError("Multiple flags returned for single query, " "something went wrong:\n %s" % '\n '.join(flags.keys())) elif len(flags) == 0: raise RuntimeError("No flags returned for single query, " "something went wrong.") return flags[flag]
python
def query_segdb(cls, flag, *args, **kwargs): """Query the initial LIGO segment database for the given flag Parameters ---------- flag : `str` The name of the flag for which to query *args Either, two `float`-like numbers indicating the GPS [start, stop) interval, or a `SegmentList` defining a number of summary segments url : `str`, optional URL of the segment database, defaults to ``$DEFAULT_SEGMENT_SERVER`` environment variable, or ``'https://segments.ligo.org'`` Returns ------- flag : `DataQualityFlag` A new `DataQualityFlag`, with the `known` and `active` lists filled appropriately. """ warnings.warn("query_segdb is deprecated and will be removed in a " "future release", DeprecationWarning) # parse arguments qsegs = _parse_query_segments(args, cls.query_segdb) # process query try: flags = DataQualityDict.query_segdb([flag], qsegs, **kwargs) except TypeError as exc: if 'DataQualityDict' in str(exc): raise TypeError(str(exc).replace('DataQualityDict', cls.__name__)) else: raise if len(flags) > 1: raise RuntimeError("Multiple flags returned for single query, " "something went wrong:\n %s" % '\n '.join(flags.keys())) elif len(flags) == 0: raise RuntimeError("No flags returned for single query, " "something went wrong.") return flags[flag]
[ "def", "query_segdb", "(", "cls", ",", "flag", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"query_segdb is deprecated and will be removed in a \"", "\"future release\"", ",", "DeprecationWarning", ")", "# parse arguments", "q...
Query the initial LIGO segment database for the given flag Parameters ---------- flag : `str` The name of the flag for which to query *args Either, two `float`-like numbers indicating the GPS [start, stop) interval, or a `SegmentList` defining a number of summary segments url : `str`, optional URL of the segment database, defaults to ``$DEFAULT_SEGMENT_SERVER`` environment variable, or ``'https://segments.ligo.org'`` Returns ------- flag : `DataQualityFlag` A new `DataQualityFlag`, with the `known` and `active` lists filled appropriately.
[ "Query", "the", "initial", "LIGO", "segment", "database", "for", "the", "given", "flag" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/flag.py#L408-L454
train
211,589
gwpy/gwpy
gwpy/segments/flag.py
DataQualityFlag.query_dqsegdb
def query_dqsegdb(cls, flag, *args, **kwargs): """Query the advanced LIGO DQSegDB for the given flag Parameters ---------- flag : `str` The name of the flag for which to query *args Either, two `float`-like numbers indicating the GPS [start, stop) interval, or a `SegmentList` defining a number of summary segments url : `str`, optional URL of the segment database, defaults to ``$DEFAULT_SEGMENT_SERVER`` environment variable, or ``'https://segments.ligo.org'`` Returns ------- flag : `DataQualityFlag` A new `DataQualityFlag`, with the `known` and `active` lists filled appropriately. """ # parse arguments qsegs = _parse_query_segments(args, cls.query_dqsegdb) # get server url = kwargs.pop('url', DEFAULT_SEGMENT_SERVER) # parse flag out = cls(name=flag) if out.ifo is None or out.tag is None: raise ValueError("Cannot parse ifo or tag (name) for flag %r" % flag) # process query for start, end in qsegs: # handle infinities if float(end) == +inf: end = to_gps('now').seconds # query try: data = query_segments(flag, int(start), int(end), host=url) except HTTPError as exc: if exc.code == 404: # if not found, annotate flag name exc.msg += ' [{0}]'.format(flag) raise # read from json buffer new = cls.read( BytesIO(json.dumps(data).encode('utf-8')), format='json', ) # restrict to query segments segl = SegmentList([Segment(start, end)]) new.known &= segl new.active &= segl out += new # replace metadata out.description = new.description out.isgood = new.isgood return out
python
def query_dqsegdb(cls, flag, *args, **kwargs): """Query the advanced LIGO DQSegDB for the given flag Parameters ---------- flag : `str` The name of the flag for which to query *args Either, two `float`-like numbers indicating the GPS [start, stop) interval, or a `SegmentList` defining a number of summary segments url : `str`, optional URL of the segment database, defaults to ``$DEFAULT_SEGMENT_SERVER`` environment variable, or ``'https://segments.ligo.org'`` Returns ------- flag : `DataQualityFlag` A new `DataQualityFlag`, with the `known` and `active` lists filled appropriately. """ # parse arguments qsegs = _parse_query_segments(args, cls.query_dqsegdb) # get server url = kwargs.pop('url', DEFAULT_SEGMENT_SERVER) # parse flag out = cls(name=flag) if out.ifo is None or out.tag is None: raise ValueError("Cannot parse ifo or tag (name) for flag %r" % flag) # process query for start, end in qsegs: # handle infinities if float(end) == +inf: end = to_gps('now').seconds # query try: data = query_segments(flag, int(start), int(end), host=url) except HTTPError as exc: if exc.code == 404: # if not found, annotate flag name exc.msg += ' [{0}]'.format(flag) raise # read from json buffer new = cls.read( BytesIO(json.dumps(data).encode('utf-8')), format='json', ) # restrict to query segments segl = SegmentList([Segment(start, end)]) new.known &= segl new.active &= segl out += new # replace metadata out.description = new.description out.isgood = new.isgood return out
[ "def", "query_dqsegdb", "(", "cls", ",", "flag", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# parse arguments", "qsegs", "=", "_parse_query_segments", "(", "args", ",", "cls", ".", "query_dqsegdb", ")", "# get server", "url", "=", "kwargs", ".",...
Query the advanced LIGO DQSegDB for the given flag Parameters ---------- flag : `str` The name of the flag for which to query *args Either, two `float`-like numbers indicating the GPS [start, stop) interval, or a `SegmentList` defining a number of summary segments url : `str`, optional URL of the segment database, defaults to ``$DEFAULT_SEGMENT_SERVER`` environment variable, or ``'https://segments.ligo.org'`` Returns ------- flag : `DataQualityFlag` A new `DataQualityFlag`, with the `known` and `active` lists filled appropriately.
[ "Query", "the", "advanced", "LIGO", "DQSegDB", "for", "the", "given", "flag" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/flag.py#L457-L522
train
211,590
gwpy/gwpy
gwpy/segments/flag.py
DataQualityFlag.fetch_open_data
def fetch_open_data(cls, flag, start, end, **kwargs): """Fetch Open Data timeline segments into a flag. Parameters ---------- flag : `str` the name of the flag to query start : `int`, `str` the GPS start time (or parseable date string) to query end : `int`, `str` the GPS end time (or parseable date string) to query verbose : `bool`, optional show verbose download progress, default: `False` timeout : `int`, optional timeout for download (seconds) host : `str`, optional URL of LOSC host, default: ``'losc.ligo.org'`` Returns ------- flag : `DataQualityFlag` a new flag with `active` segments filled from Open Data Examples -------- >>> from gwpy.segments import DataQualityFlag >>> print(DataQualityFlag.fetch_open_data('H1_DATA', 'Jan 1 2010', ... 'Jan 2 2010')) <DataQualityFlag('H1:DATA', known=[[946339215 ... 946425615)], active=[[946340946 ... 946351800) [946356479 ... 946360620) [946362652 ... 946369150) [946372854 ... 946382630) [946395595 ... 946396751) [946400173 ... 946404977) [946412312 ... 946413577) [946415770 ... 946422986)], description=None)> """ start = to_gps(start).gpsSeconds end = to_gps(end).gpsSeconds known = [(start, end)] active = timeline.get_segments(flag, start, end, **kwargs) return cls(flag.replace('_', ':', 1), known=known, active=active, label=flag)
python
def fetch_open_data(cls, flag, start, end, **kwargs): """Fetch Open Data timeline segments into a flag. Parameters ---------- flag : `str` the name of the flag to query start : `int`, `str` the GPS start time (or parseable date string) to query end : `int`, `str` the GPS end time (or parseable date string) to query verbose : `bool`, optional show verbose download progress, default: `False` timeout : `int`, optional timeout for download (seconds) host : `str`, optional URL of LOSC host, default: ``'losc.ligo.org'`` Returns ------- flag : `DataQualityFlag` a new flag with `active` segments filled from Open Data Examples -------- >>> from gwpy.segments import DataQualityFlag >>> print(DataQualityFlag.fetch_open_data('H1_DATA', 'Jan 1 2010', ... 'Jan 2 2010')) <DataQualityFlag('H1:DATA', known=[[946339215 ... 946425615)], active=[[946340946 ... 946351800) [946356479 ... 946360620) [946362652 ... 946369150) [946372854 ... 946382630) [946395595 ... 946396751) [946400173 ... 946404977) [946412312 ... 946413577) [946415770 ... 946422986)], description=None)> """ start = to_gps(start).gpsSeconds end = to_gps(end).gpsSeconds known = [(start, end)] active = timeline.get_segments(flag, start, end, **kwargs) return cls(flag.replace('_', ':', 1), known=known, active=active, label=flag)
[ "def", "fetch_open_data", "(", "cls", ",", "flag", ",", "start", ",", "end", ",", "*", "*", "kwargs", ")", ":", "start", "=", "to_gps", "(", "start", ")", ".", "gpsSeconds", "end", "=", "to_gps", "(", "end", ")", ".", "gpsSeconds", "known", "=", "[...
Fetch Open Data timeline segments into a flag. Parameters ---------- flag : `str` the name of the flag to query start : `int`, `str` the GPS start time (or parseable date string) to query end : `int`, `str` the GPS end time (or parseable date string) to query verbose : `bool`, optional show verbose download progress, default: `False` timeout : `int`, optional timeout for download (seconds) host : `str`, optional URL of LOSC host, default: ``'losc.ligo.org'`` Returns ------- flag : `DataQualityFlag` a new flag with `active` segments filled from Open Data Examples -------- >>> from gwpy.segments import DataQualityFlag >>> print(DataQualityFlag.fetch_open_data('H1_DATA', 'Jan 1 2010', ... 'Jan 2 2010')) <DataQualityFlag('H1:DATA', known=[[946339215 ... 946425615)], active=[[946340946 ... 946351800) [946356479 ... 946360620) [946362652 ... 946369150) [946372854 ... 946382630) [946395595 ... 946396751) [946400173 ... 946404977) [946412312 ... 946413577) [946415770 ... 946422986)], description=None)>
[ "Fetch", "Open", "Data", "timeline", "segments", "into", "a", "flag", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/flag.py#L525-L573
train
211,591
gwpy/gwpy
gwpy/segments/flag.py
DataQualityFlag.read
def read(cls, source, *args, **kwargs): """Read segments from file into a `DataQualityFlag`. Parameters ---------- filename : `str` path of file to read name : `str`, optional name of flag to read from file, otherwise read all segments. format : `str`, optional source format identifier. If not given, the format will be detected if possible. See below for list of acceptable formats. coltype : `type`, optional, default: `float` datatype to force for segment times, only valid for ``format='segwizard'``. strict : `bool`, optional, default: `True` require segment start and stop times match printed duration, only valid for ``format='segwizard'``. coalesce : `bool`, optional if `True`, coalesce all segment lists before returning, otherwise return exactly as contained in file(s). nproc : `int`, optional, default: 1 number of CPUs to use for parallel reading of multiple files verbose : `bool`, optional, default: `False` print a progress bar showing read status Returns ------- dqflag : `DataQualityFlag` formatted `DataQualityFlag` containing the active and known segments read from file. Notes -----""" if 'flag' in kwargs: # pragma: no cover warnings.warn('\'flag\' keyword was renamed \'name\', this ' 'warning will result in an error in the future') kwargs.setdefault('name', kwargs.pop('flag')) coalesce = kwargs.pop('coalesce', False) def combiner(flags): """Combine `DataQualityFlag` from each file into a single object """ out = flags[0] for flag in flags[1:]: out.known += flag.known out.active += flag.active if coalesce: return out.coalesce() return out return io_read_multi(combiner, cls, source, *args, **kwargs)
python
def read(cls, source, *args, **kwargs): """Read segments from file into a `DataQualityFlag`. Parameters ---------- filename : `str` path of file to read name : `str`, optional name of flag to read from file, otherwise read all segments. format : `str`, optional source format identifier. If not given, the format will be detected if possible. See below for list of acceptable formats. coltype : `type`, optional, default: `float` datatype to force for segment times, only valid for ``format='segwizard'``. strict : `bool`, optional, default: `True` require segment start and stop times match printed duration, only valid for ``format='segwizard'``. coalesce : `bool`, optional if `True`, coalesce all segment lists before returning, otherwise return exactly as contained in file(s). nproc : `int`, optional, default: 1 number of CPUs to use for parallel reading of multiple files verbose : `bool`, optional, default: `False` print a progress bar showing read status Returns ------- dqflag : `DataQualityFlag` formatted `DataQualityFlag` containing the active and known segments read from file. Notes -----""" if 'flag' in kwargs: # pragma: no cover warnings.warn('\'flag\' keyword was renamed \'name\', this ' 'warning will result in an error in the future') kwargs.setdefault('name', kwargs.pop('flag')) coalesce = kwargs.pop('coalesce', False) def combiner(flags): """Combine `DataQualityFlag` from each file into a single object """ out = flags[0] for flag in flags[1:]: out.known += flag.known out.active += flag.active if coalesce: return out.coalesce() return out return io_read_multi(combiner, cls, source, *args, **kwargs)
[ "def", "read", "(", "cls", ",", "source", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "'flag'", "in", "kwargs", ":", "# pragma: no cover", "warnings", ".", "warn", "(", "'\\'flag\\' keyword was renamed \\'name\\', this '", "'warning will result in a...
Read segments from file into a `DataQualityFlag`. Parameters ---------- filename : `str` path of file to read name : `str`, optional name of flag to read from file, otherwise read all segments. format : `str`, optional source format identifier. If not given, the format will be detected if possible. See below for list of acceptable formats. coltype : `type`, optional, default: `float` datatype to force for segment times, only valid for ``format='segwizard'``. strict : `bool`, optional, default: `True` require segment start and stop times match printed duration, only valid for ``format='segwizard'``. coalesce : `bool`, optional if `True`, coalesce all segment lists before returning, otherwise return exactly as contained in file(s). nproc : `int`, optional, default: 1 number of CPUs to use for parallel reading of multiple files verbose : `bool`, optional, default: `False` print a progress bar showing read status Returns ------- dqflag : `DataQualityFlag` formatted `DataQualityFlag` containing the active and known segments read from file. Notes -----
[ "Read", "segments", "from", "file", "into", "a", "DataQualityFlag", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/flag.py#L576-L635
train
211,592
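Sketch of `DataQualityFlag.read` with a hypothetical file path; the format is inferred from the file where possible:

from gwpy.segments import DataQualityFlag

flag = DataQualityFlag.read(
    'flags.xml',               # hypothetical path on disk
    name='X1:EXAMPLE-FLAG:1',  # which flag to extract
    coalesce=True,             # merge segment lists on read
)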
gwpy/gwpy
gwpy/segments/flag.py
DataQualityFlag.from_veto_def
def from_veto_def(cls, veto): """Define a `DataQualityFlag` from a `VetoDef` Parameters ---------- veto : :class:`~ligo.lw.lsctables.VetoDef` veto definition to convert from """ name = '%s:%s' % (veto.ifo, veto.name) try: name += ':%d' % int(veto.version) except TypeError: pass if veto.end_time == 0: veto.end_time = +inf known = Segment(veto.start_time, veto.end_time) pad = (veto.start_pad, veto.end_pad) return cls(name=name, known=[known], category=veto.category, description=veto.comment, padding=pad)
python
def from_veto_def(cls, veto): """Define a `DataQualityFlag` from a `VetoDef` Parameters ---------- veto : :class:`~ligo.lw.lsctables.VetoDef` veto definition to convert from """ name = '%s:%s' % (veto.ifo, veto.name) try: name += ':%d' % int(veto.version) except TypeError: pass if veto.end_time == 0: veto.end_time = +inf known = Segment(veto.start_time, veto.end_time) pad = (veto.start_pad, veto.end_pad) return cls(name=name, known=[known], category=veto.category, description=veto.comment, padding=pad)
[ "def", "from_veto_def", "(", "cls", ",", "veto", ")", ":", "name", "=", "'%s:%s'", "%", "(", "veto", ".", "ifo", ",", "veto", ".", "name", ")", "try", ":", "name", "+=", "':%d'", "%", "int", "(", "veto", ".", "version", ")", "except", "TypeError", ...
Define a `DataQualityFlag` from a `VetoDef` Parameters ---------- veto : :class:`~ligo.lw.lsctables.VetoDef` veto definition to convert from
[ "Define", "a", "DataQualityFlag", "from", "a", "VetoDef" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/flag.py#L638-L656
train
211,593
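Sketch of `DataQualityFlag.from_veto_def`, building a `VetoDef` row by hand with illustrative values (bare row construction with attribute assignment is assumed to be supported by `ligo.lw.lsctables`):

from ligo.lw.lsctables import VetoDef
from gwpy.segments import DataQualityFlag

veto = VetoDef()
veto.ifo = 'H1'
veto.name = 'EXAMPLE_VETO'
veto.version = 1
veto.start_time = 1126051217
veto.end_time = 1126137617
veto.start_pad = -1
veto.end_pad = 1
veto.category = 1
veto.comment = 'example veto definition'

flag = DataQualityFlag.from_veto_def(veto)
print(flag.name, flag.padding)  # expected 'H1:EXAMPLE_VETO:1', (-1, 1)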
gwpy/gwpy
gwpy/segments/flag.py
DataQualityFlag.populate
def populate(self, source=DEFAULT_SEGMENT_SERVER, segments=None, pad=True, **kwargs): """Query the segment database for this flag's active segments. This method assumes all of the metadata for each flag have been filled. Minimally, the following attributes must be filled .. autosummary:: ~DataQualityFlag.name ~DataQualityFlag.known Segments will be fetched from the database, with any :attr:`~DataQualityFlag.padding` added on-the-fly. This `DataQualityFlag` will be modified in-place. Parameters ---------- source : `str` source of segments for this flag. This must be either a URL for a segment database or a path to a file on disk. segments : `SegmentList`, optional a list of segments during which to query, if not given, existing known segments for this flag will be used. pad : `bool`, optional, default: `True` apply the `~DataQualityFlag.padding` associated with this flag, default: `True`. **kwargs any other keyword arguments to be passed to :meth:`DataQualityFlag.query` or :meth:`DataQualityFlag.read`. Returns ------- self : `DataQualityFlag` a reference to this flag """ tmp = DataQualityDict() tmp[self.name] = self tmp.populate(source=source, segments=segments, pad=pad, **kwargs) return tmp[self.name]
python
def populate(self, source=DEFAULT_SEGMENT_SERVER, segments=None, pad=True, **kwargs): """Query the segment database for this flag's active segments. This method assumes all of the metadata for each flag have been filled. Minimally, the following attributes must be filled .. autosummary:: ~DataQualityFlag.name ~DataQualityFlag.known Segments will be fetched from the database, with any :attr:`~DataQualityFlag.padding` added on-the-fly. This `DataQualityFlag` will be modified in-place. Parameters ---------- source : `str` source of segments for this flag. This must be either a URL for a segment database or a path to a file on disk. segments : `SegmentList`, optional a list of segments during which to query, if not given, existing known segments for this flag will be used. pad : `bool`, optional, default: `True` apply the `~DataQualityFlag.padding` associated with this flag, default: `True`. **kwargs any other keyword arguments to be passed to :meth:`DataQualityFlag.query` or :meth:`DataQualityFlag.read`. Returns ------- self : `DataQualityFlag` a reference to this flag """ tmp = DataQualityDict() tmp[self.name] = self tmp.populate(source=source, segments=segments, pad=pad, **kwargs) return tmp[self.name]
[ "def", "populate", "(", "self", ",", "source", "=", "DEFAULT_SEGMENT_SERVER", ",", "segments", "=", "None", ",", "pad", "=", "True", ",", "*", "*", "kwargs", ")", ":", "tmp", "=", "DataQualityDict", "(", ")", "tmp", "[", "self", ".", "name", "]", "="...
Query the segment database for this flag's active segments. This method assumes all of the metadata for each flag have been filled. Minimally, the following attributes must be filled .. autosummary:: ~DataQualityFlag.name ~DataQualityFlag.known Segments will be fetched from the database, with any :attr:`~DataQualityFlag.padding` added on-the-fly. This `DataQualityFlag` will be modified in-place. Parameters ---------- source : `str` source of segments for this flag. This must be either a URL for a segment database or a path to a file on disk. segments : `SegmentList`, optional a list of segments during which to query, if not given, existing known segments for this flag will be used. pad : `bool`, optional, default: `True` apply the `~DataQualityFlag.padding` associated with this flag, default: `True`. **kwargs any other keyword arguments to be passed to :meth:`DataQualityFlag.query` or :meth:`DataQualityFlag.read`. Returns ------- self : `DataQualityFlag` a reference to this flag
[ "Query", "the", "segment", "database", "for", "this", "flag", "s", "active", "segments", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/flag.py#L667-L710
train
211,594
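Sketch of `DataQualityFlag.populate`: only `name` and `known` are filled by hand, then the active segments are fetched (requires network access to the default segment server; the flag name is an example):

from gwpy.segments import DataQualityFlag

flag = DataQualityFlag(
    name='H1:DMT-ANALYSIS_READY:1',
    known=[(1164556817, 1164643217)],
)
flag.populate()     # fills flag.active in-place
print(flag.active)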
gwpy/gwpy
gwpy/segments/flag.py
DataQualityFlag.contract
def contract(self, x): """Contract each of the `active` `Segments` by ``x`` seconds. This method adds ``x`` to each segment's lower bound, and subtracts ``x`` from the upper bound. The :attr:`~DataQualityFlag.active` `SegmentList` is modified in place. Parameters ---------- x : `float` number of seconds by which to contract each `Segment`. """ self.active = self.active.contract(x) return self.active
python
def contract(self, x): """Contract each of the `active` `Segments` by ``x`` seconds. This method adds ``x`` to each segment's lower bound, and subtracts ``x`` from the upper bound. The :attr:`~DataQualityFlag.active` `SegmentList` is modified in place. Parameters ---------- x : `float` number of seconds by which to contract each `Segment`. """ self.active = self.active.contract(x) return self.active
[ "def", "contract", "(", "self", ",", "x", ")", ":", "self", ".", "active", "=", "self", ".", "active", ".", "contract", "(", "x", ")", "return", "self", ".", "active" ]
Contract each of the `active` `Segments` by ``x`` seconds. This method adds ``x`` to each segment's lower bound, and subtracts ``x`` from the upper bound. The :attr:`~DataQualityFlag.active` `SegmentList` is modified in place. Parameters ---------- x : `float` number of seconds by which to contract each `Segment`.
[ "Contract", "each", "of", "the", "active", "Segments", "by", "x", "seconds", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/flag.py#L712-L727
train
211,595
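Sketch of `DataQualityFlag.contract` (illustrative segments):

from gwpy.segments import DataQualityFlag

flag = DataQualityFlag(name='X1:EXAMPLE-FLAG:1',
                       known=[(0, 100)],
                       active=[(10, 30), (50, 90)])
flag.contract(5)    # each active segment shrinks by 5 s at both ends
print(flag.active)  # expected [[15 ... 25), [55 ... 85)]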
gwpy/gwpy
gwpy/segments/flag.py
DataQualityFlag.protract
def protract(self, x): """Protract each of the `active` `Segments` by ``x`` seconds. This method subtracts ``x`` from each segment's lower bound, and adds ``x`` to the upper bound, while maintaining that each `Segment` stays within the `known` bounds. The :attr:`~DataQualityFlag.active` `SegmentList` is modified in place. Parameters ---------- x : `float` number of seconds by which to protract each `Segment`. """ self.active = self.active.protract(x) return self.active
python
def protract(self, x): """Protract each of the `active` `Segments` by ``x`` seconds. This method subtracts ``x`` from each segment's lower bound, and adds ``x`` to the upper bound, while maintaining that each `Segment` stays within the `known` bounds. The :attr:`~DataQualityFlag.active` `SegmentList` is modified in place. Parameters ---------- x : `float` number of seconds by which to protract each `Segment`. """ self.active = self.active.protract(x) return self.active
[ "def", "protract", "(", "self", ",", "x", ")", ":", "self", ".", "active", "=", "self", ".", "active", ".", "protract", "(", "x", ")", "return", "self", ".", "active" ]
Protract each of the `active` `Segments` by ``x`` seconds. This method subtracts ``x`` from each segment's lower bound, and adds ``x`` to the upper bound, while maintaining that each `Segment` stays within the `known` bounds. The :attr:`~DataQualityFlag.active` `SegmentList` is modified in place. Parameters ---------- x : `float` number of seconds by which to protract each `Segment`.
[ "Protract", "each", "of", "the", "active", "Segments", "by", "x", "seconds", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/flag.py#L729-L745
train
211,596
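Sketch of `DataQualityFlag.protract` (illustrative segments, chosen so the widened segment stays inside the known bounds):

from gwpy.segments import DataQualityFlag

flag = DataQualityFlag(name='X1:EXAMPLE-FLAG:1',
                       known=[(0, 100)],
                       active=[(20, 40)])
flag.protract(5)    # each active segment widens by 5 s at both ends
print(flag.active)  # expected [[15 ... 45)]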
gwpy/gwpy
gwpy/segments/flag.py
DataQualityFlag.pad
def pad(self, *args, **kwargs): """Apply a padding to each segment in this `DataQualityFlag` This method either takes no arguments, in which case the value of the :attr:`~DataQualityFlag.padding` attribute will be used, or two values representing the padding for the start and end of each segment. For both the `start` and `end` paddings, a positive value means pad forward in time, so that a positive `start` pad or negative `end` padding will contract a segment at one or both ends, and vice-versa. This method will apply the same padding to both the `~DataQualityFlag.known` and `~DataQualityFlag.active` lists, but will not :meth:`~DataQualityFlag.coalesce` the result. Parameters ---------- start : `float` padding to apply to the start of each segment end : `float` padding to apply to the end of each segment inplace : `bool`, optional, default: `False` modify this object in-place, default is `False`, i.e. return a copy of the original object with padded segments Returns ------- paddedflag : `DataQualityFlag` a view of the modified flag """ if not args: start, end = self.padding else: start, end = args if kwargs.pop('inplace', False): new = self else: new = self.copy() if kwargs: raise TypeError("unexpected keyword argument %r" % list(kwargs.keys())[0]) new.known = [(s[0]+start, s[1]+end) for s in self.known] new.active = [(s[0]+start, s[1]+end) for s in self.active] return new
python
def pad(self, *args, **kwargs): """Apply a padding to each segment in this `DataQualityFlag` This method either takes no arguments, in which case the value of the :attr:`~DataQualityFlag.padding` attribute will be used, or two values representing the padding for the start and end of each segment. For both the `start` and `end` paddings, a positive value means pad forward in time, so that a positive `start` pad or negative `end` padding will contract a segment at one or both ends, and vice-versa. This method will apply the same padding to both the `~DataQualityFlag.known` and `~DataQualityFlag.active` lists, but will not :meth:`~DataQualityFlag.coalesce` the result. Parameters ---------- start : `float` padding to apply to the start of each segment end : `float` padding to apply to the end of each segment inplace : `bool`, optional, default: `False` modify this object in-place, default is `False`, i.e. return a copy of the original object with padded segments Returns ------- paddedflag : `DataQualityFlag` a view of the modified flag """ if not args: start, end = self.padding else: start, end = args if kwargs.pop('inplace', False): new = self else: new = self.copy() if kwargs: raise TypeError("unexpected keyword argument %r" % list(kwargs.keys())[0]) new.known = [(s[0]+start, s[1]+end) for s in self.known] new.active = [(s[0]+start, s[1]+end) for s in self.active] return new
[ "def", "pad", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "args", ":", "start", ",", "end", "=", "self", ".", "padding", "else", ":", "start", ",", "end", "=", "args", "if", "kwargs", ".", "pop", "(", "'inplac...
Apply a padding to each segment in this `DataQualityFlag` This method either takes no arguments, in which case the value of the :attr:`~DataQualityFlag.padding` attribute will be used, or two values representing the padding for the start and end of each segment. For both the `start` and `end` paddings, a positive value means pad forward in time, so that a positive `start` pad or negative `end` padding will contract a segment at one or both ends, and vice-versa. This method will apply the same padding to both the `~DataQualityFlag.known` and `~DataQualityFlag.active` lists, but will not :meth:`~DataQualityFlag.coalesce` the result. Parameters ---------- start : `float` padding to apply to the start of each segment end : `float` padding to apply to the end of each segment inplace : `bool`, optional, default: `False` modify this object in-place, default is `False`, i.e. return a copy of the original object with padded segments Returns ------- paddedflag : `DataQualityFlag` a view of the modified flag
[ "Apply", "a", "padding", "to", "each", "segment", "in", "this", "DataQualityFlag" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/flag.py#L747-L793
train
211,597
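Sketch of `DataQualityFlag.pad` (illustrative segments); a positive start pad with a negative end pad contracts each segment:

from gwpy.segments import DataQualityFlag

flag = DataQualityFlag(name='X1:EXAMPLE-FLAG:1',
                       known=[(0, 100)],
                       active=[(20, 40)])
padded = flag.pad(1, -1)       # copy with active -> [[21 ... 39)]
flag.pad(1, -1, inplace=True)  # same padding applied to `flag` itself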
gwpy/gwpy
gwpy/segments/flag.py
DataQualityFlag.round
def round(self, contract=False): """Round this flag to integer segments. Parameters ---------- contract : `bool`, optional if `False` (default) expand each segment to the containing integer boundaries, otherwise contract each segment to the contained boundaries Returns ------- roundedflag : `DataQualityFlag` A copy of the original flag with the `active` and `known` segments padded out to integer boundaries. """ def _round(seg): if contract: # round inwards a = type(seg[0])(ceil(seg[0])) b = type(seg[1])(floor(seg[1])) else: # round outwards a = type(seg[0])(floor(seg[0])) b = type(seg[1])(ceil(seg[1])) if a >= b: # if segment is too short, return 'null' segment return type(seg)(0, 0) # will get coalesced away return type(seg)(a, b) new = self.copy() new.active = type(new.active)(map(_round, new.active)) new.known = type(new.known)(map(_round, new.known)) return new.coalesce()
python
def round(self, contract=False): """Round this flag to integer segments. Parameters ---------- contract : `bool`, optional if `False` (default) expand each segment to the containing integer boundaries, otherwise contract each segment to the contained boundaries Returns ------- roundedflag : `DataQualityFlag` A copy of the original flag with the `active` and `known` segments padded out to integer boundaries. """ def _round(seg): if contract: # round inwards a = type(seg[0])(ceil(seg[0])) b = type(seg[1])(floor(seg[1])) else: # round outwards a = type(seg[0])(floor(seg[0])) b = type(seg[1])(ceil(seg[1])) if a >= b: # if segment is too short, return 'null' segment return type(seg)(0, 0) # will get coalesced away return type(seg)(a, b) new = self.copy() new.active = type(new.active)(map(_round, new.active)) new.known = type(new.known)(map(_round, new.known)) return new.coalesce()
[ "def", "round", "(", "self", ",", "contract", "=", "False", ")", ":", "def", "_round", "(", "seg", ")", ":", "if", "contract", ":", "# round inwards", "a", "=", "type", "(", "seg", "[", "0", "]", ")", "(", "ceil", "(", "seg", "[", "0", "]", ")"...
Round this flag to integer segments. Parameters ---------- contract : `bool`, optional if `False` (default) expand each segment to the containing integer boundaries, otherwise contract each segment to the contained boundaries Returns ------- roundedflag : `DataQualityFlag` A copy of the original flag with the `active` and `known` segments padded out to integer boundaries.
[ "Round", "this", "flag", "to", "integer", "segments", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/flag.py#L795-L825
train
211,598
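Sketch of `DataQualityFlag.round` (illustrative non-integer segments):

from gwpy.segments import DataQualityFlag

flag = DataQualityFlag(name='X1:EXAMPLE-FLAG:1',
                       known=[(0.2, 10.7)],
                       active=[(1.3, 4.2)])
outer = flag.round()               # expand: active -> [[1 ... 5)]
inner = flag.round(contract=True)  # contract: active -> [[2 ... 4)]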
gwpy/gwpy
gwpy/segments/flag.py
DataQualityFlag.coalesce
def coalesce(self): """Coalesce the segments for this flag. This method does two things: - `coalesces <SegmentList.coalesce>` the `~DataQualityFlag.known` and `~DataQualityFlag.active` segment lists - forces the `active` segments to be a proper subset of the `known` segments .. note:: this operation is performed in-place. Returns ------- self a view of this flag, not a copy. """ self.known = self.known.coalesce() self.active = self.active.coalesce() self.active = (self.known & self.active).coalesce() return self
python
def coalesce(self): """Coalesce the segments for this flag. This method does two things: - `coalesces <SegmentList.coalesce>` the `~DataQualityFlag.known` and `~DataQualityFlag.active` segment lists - forces the `active` segments to be a proper subset of the `known` segments .. note:: this operation is performed in-place. Returns ------- self a view of this flag, not a copy. """ self.known = self.known.coalesce() self.active = self.active.coalesce() self.active = (self.known & self.active).coalesce() return self
[ "def", "coalesce", "(", "self", ")", ":", "self", ".", "known", "=", "self", ".", "known", ".", "coalesce", "(", ")", "self", ".", "active", "=", "self", ".", "active", ".", "coalesce", "(", ")", "self", ".", "active", "=", "(", "self", ".", "kno...
Coalesce the segments for this flag. This method does two things: - `coalesces <SegmentList.coalesce>` the `~DataQualityFlag.known` and `~DataQualityFlag.active` segment lists - forces the `active` segments to be a proper subset of the `known` segments .. note:: this operation is performed in-place. Returns ------- self a view of this flag, not a copy.
[ "Coalesce", "the", "segments", "for", "this", "flag", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/flag.py#L827-L849
train
211,599
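Sketch of `DataQualityFlag.coalesce` (illustrative segments); adjacent segments merge, and the active list is clipped to the known list:

from gwpy.segments import DataQualityFlag

flag = DataQualityFlag(name='X1:EXAMPLE-FLAG:1',
                       known=[(0, 50), (50, 100)],
                       active=[(10, 30), (30, 60), (90, 120)])
flag.coalesce()
print(flag.known)   # expected [[0 ... 100)]
print(flag.active)  # expected [[10 ... 60), [90 ... 100)]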