repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
gwpy/gwpy
gwpy/table/io/ligolw.py
read_table
def read_table(source, tablename=None, **kwargs):
    """Read a `Table` from one or more LIGO_LW XML documents

    Parameters
    ----------
    source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`, `list`
        one or more open files, file paths, or LIGO_LW `Document` objects

    tablename : `str`, optional
        the `Name` of the relevant `Table` to read, if not given a table
        will be returned if only one exists in the document(s)

    **kwargs
        keyword arguments for the read, or conversion functions

    See Also
    --------
    gwpy.io.ligolw.read_table
        for details of keyword arguments for the read operation
    gwpy.table.io.ligolw.to_astropy_table
        for details of keyword arguments for the conversion operation
    """
    from ligo.lw import table as ligolw_table
    from ligo.lw.lsctables import TableByName

    # -- keyword handling -----------------------

    # separate keywords for reading and converting from LIGO_LW to Astropy
    read_kw = kwargs  # rename for readability
    convert_kw = {
        'rename': None,
        'use_numpy_dtypes': False,
    }
    # pop conversion-only keywords out of the read kwargs
    for key in filter(kwargs.__contains__, convert_kw):
        convert_kw[key] = kwargs.pop(key)
    if convert_kw['rename'] is None:
        convert_kw['rename'] = {}

    # allow user to specify LIGO_LW columns to read to provide the
    # desired output columns
    try:
        columns = list(kwargs.pop('columns'))
    except KeyError:
        columns = None
    try:
        read_kw['columns'] = list(kwargs.pop('ligolw_columns'))
    except KeyError:
        read_kw['columns'] = columns
    # output columns default to the requested astropy columns, falling
    # back to whatever LIGO_LW columns will be read
    convert_kw['columns'] = columns or read_kw['columns']

    if tablename:
        tableclass = TableByName[ligolw_table.Table.TableName(tablename)]
        # work out if fancy property columns are required
        # means 'peak_time' and 'peak_time_ns' will get read if 'peak'
        # is requested
        if convert_kw['columns'] is not None:
            readcols = set(read_kw['columns'])
            propcols = _get_property_columns(tableclass, convert_kw['columns'])
            for col in propcols:
                try:
                    readcols.remove(col)
                except KeyError:
                    # property column was not requested for reading;
                    # nothing to replace with its component columns
                    continue
                # swap the property column for the raw columns it needs
                readcols.update(propcols[col])
            read_kw['columns'] = list(readcols)

    # -- read -----------------------------------

    return Table(read_ligolw_table(source, tablename=tablename, **read_kw),
                 **convert_kw)
python
def read_table(source, tablename=None, **kwargs): """Read a `Table` from one or more LIGO_LW XML documents source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`, `list` one or more open files, file paths, or LIGO_LW `Document` objects tablename : `str`, optional the `Name` of the relevant `Table` to read, if not given a table will be returned if only one exists in the document(s) **kwargs keyword arguments for the read, or conversion functions See Also -------- gwpy.io.ligolw.read_table for details of keyword arguments for the read operation gwpy.table.io.ligolw.to_astropy_table for details of keyword arguments for the conversion operation """ from ligo.lw import table as ligolw_table from ligo.lw.lsctables import TableByName # -- keyword handling ----------------------- # separate keywords for reading and converting from LIGO_LW to Astropy read_kw = kwargs # rename for readability convert_kw = { 'rename': None, 'use_numpy_dtypes': False, } for key in filter(kwargs.__contains__, convert_kw): convert_kw[key] = kwargs.pop(key) if convert_kw['rename'] is None: convert_kw['rename'] = {} # allow user to specify LIGO_LW columns to read to provide the # desired output columns try: columns = list(kwargs.pop('columns')) except KeyError: columns = None try: read_kw['columns'] = list(kwargs.pop('ligolw_columns')) except KeyError: read_kw['columns'] = columns convert_kw['columns'] = columns or read_kw['columns'] if tablename: tableclass = TableByName[ligolw_table.Table.TableName(tablename)] # work out if fancy property columns are required # means 'peak_time' and 'peak_time_ns' will get read if 'peak' # is requested if convert_kw['columns'] is not None: readcols = set(read_kw['columns']) propcols = _get_property_columns(tableclass, convert_kw['columns']) for col in propcols: try: readcols.remove(col) except KeyError: continue readcols.update(propcols[col]) read_kw['columns'] = list(readcols) # -- read ----------------------------------- return 
Table(read_ligolw_table(source, tablename=tablename, **read_kw), **convert_kw)
[ "def", "read_table", "(", "source", ",", "tablename", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "ligo", ".", "lw", "import", "table", "as", "ligolw_table", "from", "ligo", ".", "lw", ".", "lsctables", "import", "TableByName", "# -- keyword ha...
Read a `Table` from one or more LIGO_LW XML documents source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`, `list` one or more open files, file paths, or LIGO_LW `Document` objects tablename : `str`, optional the `Name` of the relevant `Table` to read, if not given a table will be returned if only one exists in the document(s) **kwargs keyword arguments for the read, or conversion functions See Also -------- gwpy.io.ligolw.read_table for details of keyword arguments for the read operation gwpy.table.io.ligolw.to_astropy_table for details of keyword arguments for the conversion operation
[ "Read", "a", "Table", "from", "one", "or", "more", "LIGO_LW", "XML", "documents" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/ligolw.py#L298-L365
train
211,400
gwpy/gwpy
gwpy/table/io/ligolw.py
write_table
def write_table(table, target, tablename=None, ilwdchar_compat=None,
                **kwargs):
    """Write a `~astropy.table.Table` to file in LIGO_LW XML format

    This method will attempt to write in the new `ligo.lw` format
    (if ``ilwdchar_compat`` is ``None`` or ``False``), but will fall back
    to the older `glue.ligolw` (in that order) if that fails
    (if ``ilwdchar_compat`` is ``None`` or ``True``).
    """
    if tablename is None:  # try and get tablename from metadata
        tablename = table.meta.get('tablename', None)
    if tablename is None:  # panic
        raise ValueError("please pass ``tablename=`` to specify the target "
                         "LIGO_LW Table Name")
    try:
        # first attempt: new-style ligo.lw format
        llwtable = table_to_ligolw(
            table,
            tablename,
            ilwdchar_compat=ilwdchar_compat or False,
        )
    except LigolwElementError as exc:
        if ilwdchar_compat is not None:
            # caller explicitly chose a format; do not fall back
            raise
        try:
            # fall back to the old glue.ligolw format
            llwtable = table_to_ligolw(table, tablename, ilwdchar_compat=True)
        except Exception:
            # surface the original (new-format) error, not the fallback's
            raise exc
    return write_ligolw_tables(target, [llwtable], **kwargs)
python
def write_table(table, target, tablename=None, ilwdchar_compat=None, **kwargs): """Write a `~astropy.table.Table` to file in LIGO_LW XML format This method will attempt to write in the new `ligo.lw` format (if ``ilwdchar_compat`` is ``None`` or ``False``), but will fall back to the older `glue.ligolw` (in that order) if that fails (if ``ilwdchar_compat`` is ``None`` or ``True``). """ if tablename is None: # try and get tablename from metadata tablename = table.meta.get('tablename', None) if tablename is None: # panic raise ValueError("please pass ``tablename=`` to specify the target " "LIGO_LW Table Name") try: llwtable = table_to_ligolw( table, tablename, ilwdchar_compat=ilwdchar_compat or False, ) except LigolwElementError as exc: if ilwdchar_compat is not None: raise try: llwtable = table_to_ligolw(table, tablename, ilwdchar_compat=True) except Exception: raise exc return write_ligolw_tables(target, [llwtable], **kwargs)
[ "def", "write_table", "(", "table", ",", "target", ",", "tablename", "=", "None", ",", "ilwdchar_compat", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "tablename", "is", "None", ":", "# try and get tablename from metadata", "tablename", "=", "table", ...
Write a `~astropy.table.Table` to file in LIGO_LW XML format This method will attempt to write in the new `ligo.lw` format (if ``ilwdchar_compat`` is ``None`` or ``False``), but will fall back to the older `glue.ligolw` (in that order) if that fails (if ``ilwdchar_compat`` is ``None`` or ``True``).
[ "Write", "a", "~astropy", ".", "table", ".", "Table", "to", "file", "in", "LIGO_LW", "XML", "format" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/ligolw.py#L370-L397
train
211,401
gwpy/gwpy
gwpy/types/io/ascii.py
read_ascii_series
def read_ascii_series(input_, array_type=Series, unpack=True, **kwargs):
    """Read a `Series` from an ASCII file

    Parameters
    ----------
    input : `str`, `file`
        file to read

    array_type : `type`
        desired return type
    """
    # parse the two-column text data into index and value arrays,
    # then wrap them up in the requested series type
    xindex, values = loadtxt(input_, unpack=unpack, **kwargs)
    return array_type(values, xindex=xindex)
python
def read_ascii_series(input_, array_type=Series, unpack=True, **kwargs): """Read a `Series` from an ASCII file Parameters ---------- input : `str`, `file` file to read array_type : `type` desired return type """ xarr, yarr = loadtxt(input_, unpack=unpack, **kwargs) return array_type(yarr, xindex=xarr)
[ "def", "read_ascii_series", "(", "input_", ",", "array_type", "=", "Series", ",", "unpack", "=", "True", ",", "*", "*", "kwargs", ")", ":", "xarr", ",", "yarr", "=", "loadtxt", "(", "input_", ",", "unpack", "=", "unpack", ",", "*", "*", "kwargs", ")"...
Read a `Series` from an ASCII file Parameters ---------- input : `str`, `file` file to read array_type : `type` desired return type
[ "Read", "a", "Series", "from", "an", "ASCII", "file" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/io/ascii.py#L35-L47
train
211,402
gwpy/gwpy
gwpy/types/io/ascii.py
write_ascii_series
def write_ascii_series(series, output, **kwargs):
    """Write a `Series` to a file in ASCII format

    Parameters
    ----------
    series : :class:`~gwpy.data.Series`
        data series to write

    output : `str`, `file`
        file to write to

    See also
    --------
    numpy.savetxt
        for documentation of keyword arguments
    """
    # stack the index and the data side-by-side as two columns
    data = column_stack((series.xindex.value, series.value))
    return savetxt(output, data, **kwargs)
python
def write_ascii_series(series, output, **kwargs): """Write a `Series` to a file in ASCII format Parameters ---------- series : :class:`~gwpy.data.Series` data series to write output : `str`, `file` file to write to See also -------- numpy.savetxt for documentation of keyword arguments """ xarr = series.xindex.value yarr = series.value return savetxt(output, column_stack((xarr, yarr)), **kwargs)
[ "def", "write_ascii_series", "(", "series", ",", "output", ",", "*", "*", "kwargs", ")", ":", "xarr", "=", "series", ".", "xindex", ".", "value", "yarr", "=", "series", ".", "value", "return", "savetxt", "(", "output", ",", "column_stack", "(", "(", "x...
Write a `Series` to a file in ASCII format Parameters ---------- series : :class:`~gwpy.data.Series` data series to write output : `str`, `file` file to write to See also -------- numpy.savetxt for documentation of keyword arguments
[ "Write", "a", "Series", "to", "a", "file", "in", "ASCII", "format" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/io/ascii.py#L52-L70
train
211,403
gwpy/gwpy
gwpy/timeseries/io/gwf/__init__.py
channel_dict_kwarg
def channel_dict_kwarg(value, channels, types=None, astype=None):
    """Format the given kwarg value in a dict with one value per channel

    Parameters
    ----------
    value : any type
        keyword argument value as given by user

    channels : `list`
        list of channels being read

    types : `list` of `type`
        list of valid object types for value

    astype : `type`
        output type for `dict` values

    Returns
    -------
    dict : `dict`
        `dict` of values, one value per channel key, if parsing is successful

    None : `None`
        `None`, if parsing was unsuccessful
    """
    # a scalar of an accepted type is broadcast to every channel
    if types is not None and isinstance(value, tuple(types)):
        mapping = {chan: value for chan in channels}
    # a sequence is paired with channels element-wise
    elif isinstance(value, (tuple, list)):
        mapping = dict(zip(channels, value))
    elif value is None:
        mapping = {}
    elif isinstance(value, dict):
        mapping = value.copy()
    else:  # unparseable
        return None
    if astype is None:
        return mapping
    # cast each value to the requested output type
    return {key: astype(val) for key, val in mapping.items()}
python
def channel_dict_kwarg(value, channels, types=None, astype=None): """Format the given kwarg value in a dict with one value per channel Parameters ---------- value : any type keyword argument value as given by user channels : `list` list of channels being read types : `list` of `type` list of valid object types for value astype : `type` output type for `dict` values Returns ------- dict : `dict` `dict` of values, one value per channel key, if parsing is successful None : `None` `None`, if parsing was unsuccessful """ if types is not None and isinstance(value, tuple(types)): out = dict((c, value) for c in channels) elif isinstance(value, (tuple, list)): out = dict(zip(channels, value)) elif value is None: out = dict() elif isinstance(value, dict): out = value.copy() else: return None if astype is not None: return dict((key, astype(out[key])) for key in out) return out
[ "def", "channel_dict_kwarg", "(", "value", ",", "channels", ",", "types", "=", "None", ",", "astype", "=", "None", ")", ":", "if", "types", "is", "not", "None", "and", "isinstance", "(", "value", ",", "tuple", "(", "types", ")", ")", ":", "out", "=",...
Format the given kwarg value in a dict with one value per channel Parameters ---------- value : any type keyword argument value as given by user channels : `list` list of channels being read types : `list` of `type` list of valid object types for value astype : `type` output type for `dict` values Returns ------- dict : `dict` `dict` of values, one value per channel key, if parsing is successful None : `None` `None`, if parsing was unsuccessful
[ "Format", "the", "given", "kwarg", "value", "in", "a", "dict", "with", "one", "value", "per", "channel" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/gwf/__init__.py#L65-L101
train
211,404
gwpy/gwpy
gwpy/timeseries/io/gwf/__init__.py
import_gwf_library
def import_gwf_library(library, package=__package__): """Utility method to import the relevant timeseries.io.gwf frame API This is just a wrapper around :meth:`importlib.import_module` with a slightly nicer error message """ # import the frame library here to have any ImportErrors occur early try: return importlib.import_module('.%s' % library, package=package) except ImportError as exc: exc.args = ('Cannot import %s frame API: %s' % (library, str(exc)),) raise
python
def import_gwf_library(library, package=__package__): """Utility method to import the relevant timeseries.io.gwf frame API This is just a wrapper around :meth:`importlib.import_module` with a slightly nicer error message """ # import the frame library here to have any ImportErrors occur early try: return importlib.import_module('.%s' % library, package=package) except ImportError as exc: exc.args = ('Cannot import %s frame API: %s' % (library, str(exc)),) raise
[ "def", "import_gwf_library", "(", "library", ",", "package", "=", "__package__", ")", ":", "# import the frame library here to have any ImportErrors occur early", "try", ":", "return", "importlib", ".", "import_module", "(", "'.%s'", "%", "library", ",", "package", "=",...
Utility method to import the relevant timeseries.io.gwf frame API This is just a wrapper around :meth:`importlib.import_module` with a slightly nicer error message
[ "Utility", "method", "to", "import", "the", "relevant", "timeseries", ".", "io", ".", "gwf", "frame", "API" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/gwf/__init__.py#L104-L115
train
211,405
gwpy/gwpy
gwpy/timeseries/io/gwf/__init__.py
get_default_gwf_api
def get_default_gwf_api():
    """Return the preferred GWF library

    Examples
    --------
    If you have |LDAStools.frameCPP|_ installed:

    >>> from gwpy.timeseries.io.gwf import get_default_gwf_api
    >>> get_default_gwf_api()
    'framecpp'

    Or, if you don't have |lalframe|_:

    >>> get_default_gwf_api()
    'lalframe'

    Otherwise:

    >>> get_default_gwf_api()
    ImportError: no GWF API available, please install a third-party GWF
    library (framecpp, lalframe) and try again
    """
    # return the first API (in order of preference) that imports cleanly
    for api in APIS:
        try:
            import_gwf_library(api)
        except ImportError:
            continue
        return api
    raise ImportError("no GWF API available, please install a third-party GWF "
                      "library ({}) and try again".format(', '.join(APIS)))
python
def get_default_gwf_api(): """Return the preferred GWF library Examples -------- If you have |LDAStools.frameCPP|_ installed: >>> from gwpy.timeseries.io.gwf import get_default_gwf_api >>> get_default_gwf_api() 'framecpp' Or, if you don't have |lalframe|_: >>> get_default_gwf_api() 'lalframe' Otherwise: >>> get_default_gwf_api() ImportError: no GWF API available, please install a third-party GWF library (framecpp, lalframe) and try again """ for lib in APIS: try: import_gwf_library(lib) except ImportError: continue else: return lib raise ImportError("no GWF API available, please install a third-party GWF " "library ({}) and try again".format(', '.join(APIS)))
[ "def", "get_default_gwf_api", "(", ")", ":", "for", "lib", "in", "APIS", ":", "try", ":", "import_gwf_library", "(", "lib", ")", "except", "ImportError", ":", "continue", "else", ":", "return", "lib", "raise", "ImportError", "(", "\"no GWF API available, please ...
Return the preferred GWF library Examples -------- If you have |LDAStools.frameCPP|_ installed: >>> from gwpy.timeseries.io.gwf import get_default_gwf_api >>> get_default_gwf_api() 'framecpp' Or, if you don't have |lalframe|_: >>> get_default_gwf_api() 'lalframe' Otherwise: >>> get_default_gwf_api() ImportError: no GWF API available, please install a third-party GWF library (framecpp, lalframe) and try again
[ "Return", "the", "preferred", "GWF", "library" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/gwf/__init__.py#L118-L148
train
211,406
gwpy/gwpy
gwpy/timeseries/io/nds2.py
print_verbose
def print_verbose(*args, **kwargs):
    """Utility to print something only if verbose=True is given
    """
    verbose = kwargs.pop('verbose', False)
    # only an explicit ``verbose=True`` (not merely truthy) triggers output
    if verbose is True:
        gprint(*args, **kwargs)
python
def print_verbose(*args, **kwargs): """Utility to print something only if verbose=True is given """ if kwargs.pop('verbose', False) is True: gprint(*args, **kwargs)
[ "def", "print_verbose", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ".", "pop", "(", "'verbose'", ",", "False", ")", "is", "True", ":", "gprint", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Utility to print something only if verbose=True is given
[ "Utility", "to", "print", "something", "only", "if", "verbose", "=", "True", "is", "given" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/nds2.py#L41-L45
train
211,407
gwpy/gwpy
gwpy/timeseries/io/nds2.py
set_parameter
def set_parameter(connection, parameter, value, verbose=False):
    """Set a parameter for the connection, handling errors as warnings
    """
    value = str(value)
    try:
        accepted = connection.set_parameter(parameter, value)
        if not accepted:
            raise ValueError("invalid parameter or value")
    except (AttributeError, ValueError) as exc:
        # downgrade failures to warnings so the read can continue
        warnings.warn(
            'failed to set {}={!r}: {}'.format(parameter, value, str(exc)),
            io_nds2.NDSWarning)
    else:
        print_verbose(
            ' [{}] set {}={!r}'.format(
                connection.get_host(), parameter, value),
            verbose=verbose,
        )
python
def set_parameter(connection, parameter, value, verbose=False): """Set a parameter for the connection, handling errors as warnings """ value = str(value) try: if not connection.set_parameter(parameter, value): raise ValueError("invalid parameter or value") except (AttributeError, ValueError) as exc: warnings.warn( 'failed to set {}={!r}: {}'.format(parameter, value, str(exc)), io_nds2.NDSWarning) else: print_verbose( ' [{}] set {}={!r}'.format( connection.get_host(), parameter, value), verbose=verbose, )
[ "def", "set_parameter", "(", "connection", ",", "parameter", ",", "value", ",", "verbose", "=", "False", ")", ":", "value", "=", "str", "(", "value", ")", "try", ":", "if", "not", "connection", ".", "set_parameter", "(", "parameter", ",", "value", ")", ...
Set a parameter for the connection, handling errors as warnings
[ "Set", "a", "parameter", "for", "the", "connection", "handling", "errors", "as", "warnings" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/nds2.py#L71-L87
train
211,408
gwpy/gwpy
gwpy/timeseries/io/nds2.py
_pad_series
def _pad_series(ts, pad, start, end): """Pad a timeseries to match the specified [start, end) limits To cover a gap in data returned from NDS """ span = ts.span pada = max(int((span[0] - start) * ts.sample_rate.value), 0) padb = max(int((end - span[1]) * ts.sample_rate.value), 0) if pada or padb: return ts.pad((pada, padb), mode='constant', constant_values=(pad,)) return ts
python
def _pad_series(ts, pad, start, end): """Pad a timeseries to match the specified [start, end) limits To cover a gap in data returned from NDS """ span = ts.span pada = max(int((span[0] - start) * ts.sample_rate.value), 0) padb = max(int((end - span[1]) * ts.sample_rate.value), 0) if pada or padb: return ts.pad((pada, padb), mode='constant', constant_values=(pad,)) return ts
[ "def", "_pad_series", "(", "ts", ",", "pad", ",", "start", ",", "end", ")", ":", "span", "=", "ts", ".", "span", "pada", "=", "max", "(", "int", "(", "(", "span", "[", "0", "]", "-", "start", ")", "*", "ts", ".", "sample_rate", ".", "value", ...
Pad a timeseries to match the specified [start, end) limits To cover a gap in data returned from NDS
[ "Pad", "a", "timeseries", "to", "match", "the", "specified", "[", "start", "end", ")", "limits" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/nds2.py#L191-L201
train
211,409
gwpy/gwpy
gwpy/timeseries/io/nds2.py
_create_series
def _create_series(ndschan, value, start, end, series_class=TimeSeries):
    """Create a timeseries to cover the specified [start, end) limits

    To cover a gap in data returned from NDS
    """
    chan = Channel.from_nds2(ndschan)
    # number of samples needed to span the gap at this channel's rate
    nsamp = int((end - start) * chan.sample_rate.value)
    # constant-valued data filling the gap
    data = numpy_ones(nsamp) * value
    return series_class(data, t0=start, sample_rate=chan.sample_rate,
                        unit=chan.unit, channel=chan)
python
def _create_series(ndschan, value, start, end, series_class=TimeSeries): """Create a timeseries to cover the specified [start, end) limits To cover a gap in data returned from NDS """ channel = Channel.from_nds2(ndschan) nsamp = int((end - start) * channel.sample_rate.value) return series_class(numpy_ones(nsamp) * value, t0=start, sample_rate=channel.sample_rate, unit=channel.unit, channel=channel)
[ "def", "_create_series", "(", "ndschan", ",", "value", ",", "start", ",", "end", ",", "series_class", "=", "TimeSeries", ")", ":", "channel", "=", "Channel", ".", "from_nds2", "(", "ndschan", ")", "nsamp", "=", "int", "(", "(", "end", "-", "start", ")"...
Create a timeseries to cover the specified [start, end) limits To cover a gap in data returned from NDS
[ "Create", "a", "timeseries", "to", "cover", "the", "specified", "[", "start", "end", ")", "limits" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/nds2.py#L204-L213
train
211,410
gwpy/gwpy
gwpy/timeseries/io/nds2.py
_get_data_segments
def _get_data_segments(channels, start, end, connection):
    """Get available data segments for the given channels
    """
    avail = io_nds2.get_availability(
        channels, start, end, connection=connection)
    # presumably reduces to the segments common to all requested
    # channels (see the availability dict's ``intersection`` method)
    return avail.intersection(avail.keys())
python
def _get_data_segments(channels, start, end, connection): """Get available data segments for the given channels """ allsegs = io_nds2.get_availability(channels, start, end, connection=connection) return allsegs.intersection(allsegs.keys())
[ "def", "_get_data_segments", "(", "channels", ",", "start", ",", "end", ",", "connection", ")", ":", "allsegs", "=", "io_nds2", ".", "get_availability", "(", "channels", ",", "start", ",", "end", ",", "connection", "=", "connection", ")", "return", "allsegs"...
Get available data segments for the given channels
[ "Get", "available", "data", "segments", "for", "the", "given", "channels" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/nds2.py#L216-L221
train
211,411
gwpy/gwpy
setup_utils.py
in_git_clone
def in_git_clone():
    """Returns `True` if the current directory is a git repository

    Logic is 'borrowed' from :func:`git.repo.fun.is_git_dir`
    """
    gitdir = '.git'
    # a real git dir contains objects/ and refs/ directories and a HEAD file
    markers = (
        os.path.isdir(os.path.join(gitdir, 'objects')),
        os.path.isdir(os.path.join(gitdir, 'refs')),
        os.path.exists(os.path.join(gitdir, 'HEAD')),
    )
    return os.path.isdir(gitdir) and all(markers)
python
def in_git_clone(): """Returns `True` if the current directory is a git repository Logic is 'borrowed' from :func:`git.repo.fun.is_git_dir` """ gitdir = '.git' return os.path.isdir(gitdir) and ( os.path.isdir(os.path.join(gitdir, 'objects')) and os.path.isdir(os.path.join(gitdir, 'refs')) and os.path.exists(os.path.join(gitdir, 'HEAD')) )
[ "def", "in_git_clone", "(", ")", ":", "gitdir", "=", "'.git'", "return", "os", ".", "path", ".", "isdir", "(", "gitdir", ")", "and", "(", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "gitdir", ",", "'objects'", ")", ...
Returns `True` if the current directory is a git repository Logic is 'borrowed' from :func:`git.repo.fun.is_git_dir`
[ "Returns", "True", "if", "the", "current", "directory", "is", "a", "git", "repository" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/setup_utils.py#L74-L84
train
211,412
gwpy/gwpy
setup_utils.py
reuse_dist_file
def reuse_dist_file(filename):
    """Returns `True` if a distribution file can be reused

    Otherwise it should be regenerated
    """
    # no file on disk means there is nothing to reuse
    if not os.path.isfile(filename):
        return False
    # if we can interact with git, we can regenerate it, so we may as well
    try:
        import git
    except ImportError:
        return True
    try:
        git.Repo().tags
    except (TypeError, git.GitError):
        return True
    return False
python
def reuse_dist_file(filename): """Returns `True` if a distribution file can be reused Otherwise it should be regenerated """ # if target file doesn't exist, we must generate it if not os.path.isfile(filename): return False # if we can interact with git, we can regenerate it, so we may as well try: import git except ImportError: return True else: try: git.Repo().tags except (TypeError, git.GitError): return True else: return False
[ "def", "reuse_dist_file", "(", "filename", ")", ":", "# if target file doesn't exist, we must generate it", "if", "not", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "return", "False", "# if we can interact with git, we can regenerate it, so we may as well", ...
Returns `True` if a distribution file can be reused Otherwise it should be regenerated
[ "Returns", "True", "if", "a", "distribution", "file", "can", "be", "reused" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/setup_utils.py#L87-L107
train
211,413
gwpy/gwpy
setup_utils.py
get_gitpython_version
def get_gitpython_version():
    """Determine the required version of GitPython

    Because of target systems running very, very old versions of setuptools,
    we only specify the actual version we need when we need it.
    """
    # outside of a git clone the installed git version is irrelevant
    if not in_git_clone():
        return 'GitPython'
    # otherwise ask the git binary for its version
    try:
        out = subprocess.check_output('git --version', shell=True)
    except (OSError, IOError, subprocess.CalledProcessError):
        git_version = '0.0.0'  # no git installation, most likely
    else:
        if isinstance(out, bytes):
            out = out.decode('utf-8')
        git_version = out.strip().split()[2]
    # git >= 2.15 needs GitPython >= 2.1.8
    if LooseVersion(git_version) >= '2.15':
        return 'GitPython>=2.1.8'
    return 'GitPython'
python
def get_gitpython_version(): """Determine the required version of GitPython Because of target systems running very, very old versions of setuptools, we only specify the actual version we need when we need it. """ # if not in git clone, it doesn't matter if not in_git_clone(): return 'GitPython' # otherwise, call out to get the git version try: gitv = subprocess.check_output('git --version', shell=True) except (OSError, IOError, subprocess.CalledProcessError): # no git installation, most likely git_version = '0.0.0' else: if isinstance(gitv, bytes): gitv = gitv.decode('utf-8') git_version = gitv.strip().split()[2] # if git>=2.15, we need GitPython>=2.1.8 if LooseVersion(git_version) >= '2.15': return 'GitPython>=2.1.8' return 'GitPython'
[ "def", "get_gitpython_version", "(", ")", ":", "# if not in git clone, it doesn't matter", "if", "not", "in_git_clone", "(", ")", ":", "return", "'GitPython'", "# otherwise, call out to get the git version", "try", ":", "gitv", "=", "subprocess", ".", "check_output", "(",...
Determine the required version of GitPython Because of target systems running very, very old versions of setuptools, we only specify the actual version we need when we need it.
[ "Determine", "the", "required", "version", "of", "GitPython" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/setup_utils.py#L110-L134
train
211,414
gwpy/gwpy
setup_utils.py
get_setup_requires
def get_setup_requires():
    """Return the list of packages required for this setup.py run
    """
    # don't force requirements if just asking for help
    if {'--help', '--help-commands'}.intersection(sys.argv):
        return []
    # collect the requirements of every requested command
    requires = []
    for command, deps in SETUP_REQUIRES.items():
        if command in sys.argv:
            requires.extend(deps)
    return requires
python
def get_setup_requires(): """Return the list of packages required for this setup.py run """ # don't force requirements if just asking for help if {'--help', '--help-commands'}.intersection(sys.argv): return list() # otherwise collect all requirements for all known commands reqlist = [] for cmd, dependencies in SETUP_REQUIRES.items(): if cmd in sys.argv: reqlist.extend(dependencies) return reqlist
[ "def", "get_setup_requires", "(", ")", ":", "# don't force requirements if just asking for help", "if", "{", "'--help'", ",", "'--help-commands'", "}", ".", "intersection", "(", "sys", ".", "argv", ")", ":", "return", "list", "(", ")", "# otherwise collect all require...
Return the list of packages required for this setup.py run
[ "Return", "the", "list", "of", "packages", "required", "for", "this", "setup", ".", "py", "run" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/setup_utils.py#L398-L411
train
211,415
gwpy/gwpy
setup_utils.py
get_scripts
def get_scripts(scripts_dir='bin'):
    """Get relative file paths for all files under the ``scripts_dir``
    """
    found = []
    # walk the tree and record every file, keeping its directory prefix
    for dirpath, _, files in os.walk(scripts_dir):
        found.extend(os.path.join(dirpath, name) for name in files)
    return found
python
def get_scripts(scripts_dir='bin'): """Get relative file paths for all files under the ``scripts_dir`` """ scripts = [] for (dirname, _, filenames) in os.walk(scripts_dir): scripts.extend([os.path.join(dirname, fn) for fn in filenames]) return scripts
[ "def", "get_scripts", "(", "scripts_dir", "=", "'bin'", ")", ":", "scripts", "=", "[", "]", "for", "(", "dirname", ",", "_", ",", "filenames", ")", "in", "os", ".", "walk", "(", "scripts_dir", ")", ":", "scripts", ".", "extend", "(", "[", "os", "."...
Get relative file paths for all files under the ``scripts_dir``
[ "Get", "relative", "file", "paths", "for", "all", "files", "under", "the", "scripts_dir" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/setup_utils.py#L414-L420
train
211,416
gwpy/gwpy
setup_utils.py
_parse_years
def _parse_years(years): """Parse string of ints include ranges into a `list` of `int` Source: https://stackoverflow.com/a/6405228/1307974 """ result = [] for part in years.split(','): if '-' in part: a, b = part.split('-') a, b = int(a), int(b) result.extend(range(a, b + 1)) else: a = int(part) result.append(a) return result
python
def _parse_years(years): """Parse string of ints include ranges into a `list` of `int` Source: https://stackoverflow.com/a/6405228/1307974 """ result = [] for part in years.split(','): if '-' in part: a, b = part.split('-') a, b = int(a), int(b) result.extend(range(a, b + 1)) else: a = int(part) result.append(a) return result
[ "def", "_parse_years", "(", "years", ")", ":", "result", "=", "[", "]", "for", "part", "in", "years", ".", "split", "(", "','", ")", ":", "if", "'-'", "in", "part", ":", "a", ",", "b", "=", "part", ".", "split", "(", "'-'", ")", "a", ",", "b"...
Parse string of ints include ranges into a `list` of `int` Source: https://stackoverflow.com/a/6405228/1307974
[ "Parse", "string", "of", "ints", "include", "ranges", "into", "a", "list", "of", "int" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/setup_utils.py#L423-L437
train
211,417
gwpy/gwpy
setup_utils.py
_format_years
def _format_years(years): """Format a list of ints into a string including ranges Source: https://stackoverflow.com/a/9471386/1307974 """ def sub(x): return x[1] - x[0] ranges = [] for k, iterable in groupby(enumerate(sorted(years)), sub): rng = list(iterable) if len(rng) == 1: s = str(rng[0][1]) else: s = "{}-{}".format(rng[0][1], rng[-1][1]) ranges.append(s) return ", ".join(ranges)
python
def _format_years(years): """Format a list of ints into a string including ranges Source: https://stackoverflow.com/a/9471386/1307974 """ def sub(x): return x[1] - x[0] ranges = [] for k, iterable in groupby(enumerate(sorted(years)), sub): rng = list(iterable) if len(rng) == 1: s = str(rng[0][1]) else: s = "{}-{}".format(rng[0][1], rng[-1][1]) ranges.append(s) return ", ".join(ranges)
[ "def", "_format_years", "(", "years", ")", ":", "def", "sub", "(", "x", ")", ":", "return", "x", "[", "1", "]", "-", "x", "[", "0", "]", "ranges", "=", "[", "]", "for", "k", ",", "iterable", "in", "groupby", "(", "enumerate", "(", "sorted", "("...
Format a list of ints into a string including ranges Source: https://stackoverflow.com/a/9471386/1307974
[ "Format", "a", "list", "of", "ints", "into", "a", "string", "including", "ranges" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/setup_utils.py#L440-L456
train
211,418
gwpy/gwpy
setup_utils.py
update_copyright
def update_copyright(path, year): """Update a file's copyright statement to include the given year """ with open(path, "r") as fobj: text = fobj.read().rstrip() match = COPYRIGHT_REGEX.search(text) x = match.start("years") y = match.end("years") if text[y-1] == " ": # don't strip trailing whitespace y -= 1 yearstr = match.group("years") years = set(_parse_years(yearstr)) | {year} with open(path, "w") as fobj: print(text[:x] + _format_years(years) + text[y:], file=fobj)
python
def update_copyright(path, year): """Update a file's copyright statement to include the given year """ with open(path, "r") as fobj: text = fobj.read().rstrip() match = COPYRIGHT_REGEX.search(text) x = match.start("years") y = match.end("years") if text[y-1] == " ": # don't strip trailing whitespace y -= 1 yearstr = match.group("years") years = set(_parse_years(yearstr)) | {year} with open(path, "w") as fobj: print(text[:x] + _format_years(years) + text[y:], file=fobj)
[ "def", "update_copyright", "(", "path", ",", "year", ")", ":", "with", "open", "(", "path", ",", "\"r\"", ")", "as", "fobj", ":", "text", "=", "fobj", ".", "read", "(", ")", ".", "rstrip", "(", ")", "match", "=", "COPYRIGHT_REGEX", ".", "search", "...
Update a file's copyright statement to include the given year
[ "Update", "a", "file", "s", "copyright", "statement", "to", "include", "the", "given", "year" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/setup_utils.py#L459-L472
train
211,419
gwpy/gwpy
gwpy/frequencyseries/hist.py
SpectralVariance.percentile
def percentile(self, percentile): """Calculate a given spectral percentile for this `SpectralVariance` Parameters ---------- percentile : `float` percentile (0 - 100) of the bins to compute Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` the given percentile `FrequencySeries` calculated from this `SpectralVaraicence` """ rows, columns = self.shape out = numpy.zeros(rows) # Loop over frequencies for i in range(rows): # Calculate cumulative sum for array cumsumvals = numpy.cumsum(self.value[i, :]) # Find value nearest requested percentile abs_cumsumvals_minus_percentile = numpy.abs(cumsumvals - percentile) minindex = abs_cumsumvals_minus_percentile.argmin() val = self.bins[minindex] out[i] = val name = '%s %s%% percentile' % (self.name, percentile) return FrequencySeries(out, epoch=self.epoch, channel=self.channel, frequencies=self.bins[:-1], name=name)
python
def percentile(self, percentile): """Calculate a given spectral percentile for this `SpectralVariance` Parameters ---------- percentile : `float` percentile (0 - 100) of the bins to compute Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` the given percentile `FrequencySeries` calculated from this `SpectralVaraicence` """ rows, columns = self.shape out = numpy.zeros(rows) # Loop over frequencies for i in range(rows): # Calculate cumulative sum for array cumsumvals = numpy.cumsum(self.value[i, :]) # Find value nearest requested percentile abs_cumsumvals_minus_percentile = numpy.abs(cumsumvals - percentile) minindex = abs_cumsumvals_minus_percentile.argmin() val = self.bins[minindex] out[i] = val name = '%s %s%% percentile' % (self.name, percentile) return FrequencySeries(out, epoch=self.epoch, channel=self.channel, frequencies=self.bins[:-1], name=name)
[ "def", "percentile", "(", "self", ",", "percentile", ")", ":", "rows", ",", "columns", "=", "self", ".", "shape", "out", "=", "numpy", ".", "zeros", "(", "rows", ")", "# Loop over frequencies", "for", "i", "in", "range", "(", "rows", ")", ":", "# Calcu...
Calculate a given spectral percentile for this `SpectralVariance` Parameters ---------- percentile : `float` percentile (0 - 100) of the bins to compute Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` the given percentile `FrequencySeries` calculated from this `SpectralVaraicence`
[ "Calculate", "a", "given", "spectral", "percentile", "for", "this", "SpectralVariance" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/frequencyseries/hist.py#L314-L344
train
211,420
gwpy/gwpy
gwpy/detector/channel.py
Channel.ndstype
def ndstype(self): """NDS type integer for this channel. This property is mapped to the `Channel.type` string. """ if self.type is not None: return io_nds2.Nds2ChannelType.find(self.type).value
python
def ndstype(self): """NDS type integer for this channel. This property is mapped to the `Channel.type` string. """ if self.type is not None: return io_nds2.Nds2ChannelType.find(self.type).value
[ "def", "ndstype", "(", "self", ")", ":", "if", "self", ".", "type", "is", "not", "None", ":", "return", "io_nds2", ".", "Nds2ChannelType", ".", "find", "(", "self", ".", "type", ")", ".", "value" ]
NDS type integer for this channel. This property is mapped to the `Channel.type` string.
[ "NDS", "type", "integer", "for", "this", "channel", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/channel.py#L278-L284
train
211,421
gwpy/gwpy
gwpy/detector/channel.py
Channel.ndsname
def ndsname(self): """Name of this channel as stored in the NDS database """ if self.type not in [None, 'raw', 'reduced', 'online']: return '%s,%s' % (self.name, self.type) return self.name
python
def ndsname(self): """Name of this channel as stored in the NDS database """ if self.type not in [None, 'raw', 'reduced', 'online']: return '%s,%s' % (self.name, self.type) return self.name
[ "def", "ndsname", "(", "self", ")", ":", "if", "self", ".", "type", "not", "in", "[", "None", ",", "'raw'", ",", "'reduced'", ",", "'online'", "]", ":", "return", "'%s,%s'", "%", "(", "self", ".", "name", ",", "self", ".", "type", ")", "return", ...
Name of this channel as stored in the NDS database
[ "Name", "of", "this", "channel", "as", "stored", "in", "the", "NDS", "database" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/channel.py#L399-L404
train
211,422
gwpy/gwpy
gwpy/detector/channel.py
Channel.query
def query(cls, name, use_kerberos=None, debug=False): """Query the LIGO Channel Information System for the `Channel` matching the given name Parameters ---------- name : `str` name of channel use_kerberos : `bool`, optional use an existing Kerberos ticket as the authentication credential, default behaviour will check for credentials and request username and password if none are found (`None`) debug : `bool`, optional print verbose HTTP connection status for debugging, default: `False` Returns ------- c : `Channel` a new `Channel` containing all of the attributes set from its entry in the CIS """ channellist = ChannelList.query(name, use_kerberos=use_kerberos, debug=debug) if not channellist: raise ValueError("No channels found matching '%s'" % name) if len(channellist) > 1: raise ValueError("%d channels found matching '%s', please refine " "search, or use `ChannelList.query` to return " "all results" % (len(channellist), name)) return channellist[0]
python
def query(cls, name, use_kerberos=None, debug=False): """Query the LIGO Channel Information System for the `Channel` matching the given name Parameters ---------- name : `str` name of channel use_kerberos : `bool`, optional use an existing Kerberos ticket as the authentication credential, default behaviour will check for credentials and request username and password if none are found (`None`) debug : `bool`, optional print verbose HTTP connection status for debugging, default: `False` Returns ------- c : `Channel` a new `Channel` containing all of the attributes set from its entry in the CIS """ channellist = ChannelList.query(name, use_kerberos=use_kerberos, debug=debug) if not channellist: raise ValueError("No channels found matching '%s'" % name) if len(channellist) > 1: raise ValueError("%d channels found matching '%s', please refine " "search, or use `ChannelList.query` to return " "all results" % (len(channellist), name)) return channellist[0]
[ "def", "query", "(", "cls", ",", "name", ",", "use_kerberos", "=", "None", ",", "debug", "=", "False", ")", ":", "channellist", "=", "ChannelList", ".", "query", "(", "name", ",", "use_kerberos", "=", "use_kerberos", ",", "debug", "=", "debug", ")", "i...
Query the LIGO Channel Information System for the `Channel` matching the given name Parameters ---------- name : `str` name of channel use_kerberos : `bool`, optional use an existing Kerberos ticket as the authentication credential, default behaviour will check for credentials and request username and password if none are found (`None`) debug : `bool`, optional print verbose HTTP connection status for debugging, default: `False` Returns ------- c : `Channel` a new `Channel` containing all of the attributes set from its entry in the CIS
[ "Query", "the", "LIGO", "Channel", "Information", "System", "for", "the", "Channel", "matching", "the", "given", "name" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/channel.py#L409-L441
train
211,423
gwpy/gwpy
gwpy/detector/channel.py
Channel.from_nds2
def from_nds2(cls, nds2channel): """Generate a new channel using an existing nds2.channel object """ # extract metadata name = nds2channel.name sample_rate = nds2channel.sample_rate unit = nds2channel.signal_units if not unit: unit = None ctype = nds2channel.channel_type_to_string(nds2channel.channel_type) # get dtype dtype = { # pylint: disable: no-member nds2channel.DATA_TYPE_INT16: numpy.int16, nds2channel.DATA_TYPE_INT32: numpy.int32, nds2channel.DATA_TYPE_INT64: numpy.int64, nds2channel.DATA_TYPE_FLOAT32: numpy.float32, nds2channel.DATA_TYPE_FLOAT64: numpy.float64, nds2channel.DATA_TYPE_COMPLEX32: numpy.complex64, }.get(nds2channel.data_type) return cls(name, sample_rate=sample_rate, unit=unit, dtype=dtype, type=ctype)
python
def from_nds2(cls, nds2channel): """Generate a new channel using an existing nds2.channel object """ # extract metadata name = nds2channel.name sample_rate = nds2channel.sample_rate unit = nds2channel.signal_units if not unit: unit = None ctype = nds2channel.channel_type_to_string(nds2channel.channel_type) # get dtype dtype = { # pylint: disable: no-member nds2channel.DATA_TYPE_INT16: numpy.int16, nds2channel.DATA_TYPE_INT32: numpy.int32, nds2channel.DATA_TYPE_INT64: numpy.int64, nds2channel.DATA_TYPE_FLOAT32: numpy.float32, nds2channel.DATA_TYPE_FLOAT64: numpy.float64, nds2channel.DATA_TYPE_COMPLEX32: numpy.complex64, }.get(nds2channel.data_type) return cls(name, sample_rate=sample_rate, unit=unit, dtype=dtype, type=ctype)
[ "def", "from_nds2", "(", "cls", ",", "nds2channel", ")", ":", "# extract metadata", "name", "=", "nds2channel", ".", "name", "sample_rate", "=", "nds2channel", ".", "sample_rate", "unit", "=", "nds2channel", ".", "signal_units", "if", "not", "unit", ":", "unit...
Generate a new channel using an existing nds2.channel object
[ "Generate", "a", "new", "channel", "using", "an", "existing", "nds2", ".", "channel", "object" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/channel.py#L482-L502
train
211,424
gwpy/gwpy
gwpy/detector/channel.py
Channel.parse_channel_name
def parse_channel_name(cls, name, strict=True): """Decompose a channel name string into its components Parameters ---------- name : `str` name to parse strict : `bool`, optional require exact matching of format, with no surrounding text, default `True` Returns ------- match : `dict` `dict` of channel name components with the following keys: - `'ifo'`: the letter-number interferometer prefix - `'system'`: the top-level system name - `'subsystem'`: the second-level sub-system name - `'signal'`: the remaining underscore-delimited signal name - `'trend'`: the trend type - `'ndstype'`: the NDS2 channel suffix Any optional keys that aren't found will return a value of `None` Raises ------ ValueError if the name cannot be parsed with at least an IFO and SYSTEM Examples -------- >>> Channel.parse_channel_name('L1:LSC-DARM_IN1_DQ') {'ifo': 'L1', 'ndstype': None, 'signal': 'IN1_DQ', 'subsystem': 'DARM', 'system': 'LSC', 'trend': None} >>> Channel.parse_channel_name( 'H1:ISI-BS_ST1_SENSCOR_GND_STS_X_BLRMS_100M_300M.rms,m-trend') {'ifo': 'H1', 'ndstype': 'm-trend', 'signal': 'ST1_SENSCOR_GND_STS_X_BLRMS_100M_300M', 'subsystem': 'BS', 'system': 'ISI', 'trend': 'rms'} """ match = cls.MATCH.search(name) if match is None or (strict and ( match.start() != 0 or match.end() != len(name))): raise ValueError("Cannot parse channel name according to LIGO " "channel-naming convention T990033") return match.groupdict()
python
def parse_channel_name(cls, name, strict=True): """Decompose a channel name string into its components Parameters ---------- name : `str` name to parse strict : `bool`, optional require exact matching of format, with no surrounding text, default `True` Returns ------- match : `dict` `dict` of channel name components with the following keys: - `'ifo'`: the letter-number interferometer prefix - `'system'`: the top-level system name - `'subsystem'`: the second-level sub-system name - `'signal'`: the remaining underscore-delimited signal name - `'trend'`: the trend type - `'ndstype'`: the NDS2 channel suffix Any optional keys that aren't found will return a value of `None` Raises ------ ValueError if the name cannot be parsed with at least an IFO and SYSTEM Examples -------- >>> Channel.parse_channel_name('L1:LSC-DARM_IN1_DQ') {'ifo': 'L1', 'ndstype': None, 'signal': 'IN1_DQ', 'subsystem': 'DARM', 'system': 'LSC', 'trend': None} >>> Channel.parse_channel_name( 'H1:ISI-BS_ST1_SENSCOR_GND_STS_X_BLRMS_100M_300M.rms,m-trend') {'ifo': 'H1', 'ndstype': 'm-trend', 'signal': 'ST1_SENSCOR_GND_STS_X_BLRMS_100M_300M', 'subsystem': 'BS', 'system': 'ISI', 'trend': 'rms'} """ match = cls.MATCH.search(name) if match is None or (strict and ( match.start() != 0 or match.end() != len(name))): raise ValueError("Cannot parse channel name according to LIGO " "channel-naming convention T990033") return match.groupdict()
[ "def", "parse_channel_name", "(", "cls", ",", "name", ",", "strict", "=", "True", ")", ":", "match", "=", "cls", ".", "MATCH", ".", "search", "(", "name", ")", "if", "match", "is", "None", "or", "(", "strict", "and", "(", "match", ".", "start", "("...
Decompose a channel name string into its components Parameters ---------- name : `str` name to parse strict : `bool`, optional require exact matching of format, with no surrounding text, default `True` Returns ------- match : `dict` `dict` of channel name components with the following keys: - `'ifo'`: the letter-number interferometer prefix - `'system'`: the top-level system name - `'subsystem'`: the second-level sub-system name - `'signal'`: the remaining underscore-delimited signal name - `'trend'`: the trend type - `'ndstype'`: the NDS2 channel suffix Any optional keys that aren't found will return a value of `None` Raises ------ ValueError if the name cannot be parsed with at least an IFO and SYSTEM Examples -------- >>> Channel.parse_channel_name('L1:LSC-DARM_IN1_DQ') {'ifo': 'L1', 'ndstype': None, 'signal': 'IN1_DQ', 'subsystem': 'DARM', 'system': 'LSC', 'trend': None} >>> Channel.parse_channel_name( 'H1:ISI-BS_ST1_SENSCOR_GND_STS_X_BLRMS_100M_300M.rms,m-trend') {'ifo': 'H1', 'ndstype': 'm-trend', 'signal': 'ST1_SENSCOR_GND_STS_X_BLRMS_100M_300M', 'subsystem': 'BS', 'system': 'ISI', 'trend': 'rms'}
[ "Decompose", "a", "channel", "name", "string", "into", "its", "components" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/channel.py#L507-L561
train
211,425
gwpy/gwpy
gwpy/detector/channel.py
Channel.copy
def copy(self): """Returns a copy of this channel """ new = type(self)(str(self)) new._init_from_channel(self) return new
python
def copy(self): """Returns a copy of this channel """ new = type(self)(str(self)) new._init_from_channel(self) return new
[ "def", "copy", "(", "self", ")", ":", "new", "=", "type", "(", "self", ")", "(", "str", "(", "self", ")", ")", "new", ".", "_init_from_channel", "(", "self", ")", "return", "new" ]
Returns a copy of this channel
[ "Returns", "a", "copy", "of", "this", "channel" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/channel.py#L596-L601
train
211,426
gwpy/gwpy
gwpy/detector/channel.py
ChannelList.from_names
def from_names(cls, *names): """Create a new `ChannelList` from a list of names The list of names can include comma-separated sets of names, in which case the return will be a flattened list of all parsed channel names. """ new = cls() for namestr in names: for name in cls._split_names(namestr): new.append(Channel(name)) return new
python
def from_names(cls, *names): """Create a new `ChannelList` from a list of names The list of names can include comma-separated sets of names, in which case the return will be a flattened list of all parsed channel names. """ new = cls() for namestr in names: for name in cls._split_names(namestr): new.append(Channel(name)) return new
[ "def", "from_names", "(", "cls", ",", "*", "names", ")", ":", "new", "=", "cls", "(", ")", "for", "namestr", "in", "names", ":", "for", "name", "in", "cls", ".", "_split_names", "(", "namestr", ")", ":", "new", ".", "append", "(", "Channel", "(", ...
Create a new `ChannelList` from a list of names The list of names can include comma-separated sets of names, in which case the return will be a flattened list of all parsed channel names.
[ "Create", "a", "new", "ChannelList", "from", "a", "list", "of", "names" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/channel.py#L661-L672
train
211,427
gwpy/gwpy
gwpy/detector/channel.py
ChannelList._split_names
def _split_names(namestr): """Split a comma-separated list of channel names. """ out = [] namestr = QUOTE_REGEX.sub('', namestr) while True: namestr = namestr.strip('\' \n') if ',' not in namestr: break for nds2type in io_nds2.Nds2ChannelType.names() + ['']: if nds2type and ',%s' % nds2type in namestr: try: channel, ctype, namestr = namestr.split(',', 2) except ValueError: channel, ctype = namestr.split(',') namestr = '' out.append('%s,%s' % (channel, ctype)) break elif nds2type == '' and ',' in namestr: channel, namestr = namestr.split(',', 1) out.append(channel) break if namestr: out.append(namestr) return out
python
def _split_names(namestr): """Split a comma-separated list of channel names. """ out = [] namestr = QUOTE_REGEX.sub('', namestr) while True: namestr = namestr.strip('\' \n') if ',' not in namestr: break for nds2type in io_nds2.Nds2ChannelType.names() + ['']: if nds2type and ',%s' % nds2type in namestr: try: channel, ctype, namestr = namestr.split(',', 2) except ValueError: channel, ctype = namestr.split(',') namestr = '' out.append('%s,%s' % (channel, ctype)) break elif nds2type == '' and ',' in namestr: channel, namestr = namestr.split(',', 1) out.append(channel) break if namestr: out.append(namestr) return out
[ "def", "_split_names", "(", "namestr", ")", ":", "out", "=", "[", "]", "namestr", "=", "QUOTE_REGEX", ".", "sub", "(", "''", ",", "namestr", ")", "while", "True", ":", "namestr", "=", "namestr", ".", "strip", "(", "'\\' \\n'", ")", "if", "','", "not"...
Split a comma-separated list of channel names.
[ "Split", "a", "comma", "-", "separated", "list", "of", "channel", "names", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/channel.py#L675-L699
train
211,428
gwpy/gwpy
gwpy/detector/channel.py
ChannelList.find
def find(self, name): """Find the `Channel` with a specific name in this `ChannelList`. Parameters ---------- name : `str` name of the `Channel` to find Returns ------- index : `int` the position of the first `Channel` in this `ChannelList` whose `~Channel.name` matches the search key. Raises ------ ValueError if no matching `Channel` is found. """ for i, chan in enumerate(self): if name == chan.name: return i raise ValueError(name)
python
def find(self, name): """Find the `Channel` with a specific name in this `ChannelList`. Parameters ---------- name : `str` name of the `Channel` to find Returns ------- index : `int` the position of the first `Channel` in this `ChannelList` whose `~Channel.name` matches the search key. Raises ------ ValueError if no matching `Channel` is found. """ for i, chan in enumerate(self): if name == chan.name: return i raise ValueError(name)
[ "def", "find", "(", "self", ",", "name", ")", ":", "for", "i", ",", "chan", "in", "enumerate", "(", "self", ")", ":", "if", "name", "==", "chan", ".", "name", ":", "return", "i", "raise", "ValueError", "(", "name", ")" ]
Find the `Channel` with a specific name in this `ChannelList`. Parameters ---------- name : `str` name of the `Channel` to find Returns ------- index : `int` the position of the first `Channel` in this `ChannelList` whose `~Channel.name` matches the search key. Raises ------ ValueError if no matching `Channel` is found.
[ "Find", "the", "Channel", "with", "a", "specific", "name", "in", "this", "ChannelList", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/channel.py#L701-L723
train
211,429
gwpy/gwpy
gwpy/detector/channel.py
ChannelList.query
def query(cls, name, use_kerberos=None, debug=False): """Query the LIGO Channel Information System a `ChannelList`. Parameters ---------- name : `str` name of channel, or part of it. use_kerberos : `bool`, optional use an existing Kerberos ticket as the authentication credential, default behaviour will check for credentials and request username and password if none are found (`None`) debug : `bool`, optional print verbose HTTP connection status for debugging, default: `False` Returns ------- channels : `ChannelList` a new list containing all `Channels <Channel>` found. """ from .io import cis return cis.query(name, use_kerberos=use_kerberos, debug=debug)
python
def query(cls, name, use_kerberos=None, debug=False): """Query the LIGO Channel Information System a `ChannelList`. Parameters ---------- name : `str` name of channel, or part of it. use_kerberos : `bool`, optional use an existing Kerberos ticket as the authentication credential, default behaviour will check for credentials and request username and password if none are found (`None`) debug : `bool`, optional print verbose HTTP connection status for debugging, default: `False` Returns ------- channels : `ChannelList` a new list containing all `Channels <Channel>` found. """ from .io import cis return cis.query(name, use_kerberos=use_kerberos, debug=debug)
[ "def", "query", "(", "cls", ",", "name", ",", "use_kerberos", "=", "None", ",", "debug", "=", "False", ")", ":", "from", ".", "io", "import", "cis", "return", "cis", ".", "query", "(", "name", ",", "use_kerberos", "=", "use_kerberos", ",", "debug", "...
Query the LIGO Channel Information System a `ChannelList`. Parameters ---------- name : `str` name of channel, or part of it. use_kerberos : `bool`, optional use an existing Kerberos ticket as the authentication credential, default behaviour will check for credentials and request username and password if none are found (`None`) debug : `bool`, optional print verbose HTTP connection status for debugging, default: `False` Returns ------- channels : `ChannelList` a new list containing all `Channels <Channel>` found.
[ "Query", "the", "LIGO", "Channel", "Information", "System", "a", "ChannelList", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/channel.py#L785-L808
train
211,430
gwpy/gwpy
gwpy/detector/channel.py
ChannelList.query_nds2_availability
def query_nds2_availability(cls, channels, start, end, ctype=126, connection=None, host=None, port=None): """Query for when data are available for these channels in NDS2 Parameters ---------- channels : `list` list of `Channel` or `str` for which to search start : `int` GPS start time of search, or any acceptable input to :meth:`~gwpy.time.to_gps` end : `int` GPS end time of search, or any acceptable input to :meth:`~gwpy.time.to_gps` connection : `nds2.connection`, optional open connection to an NDS(2) server, if not given, one will be created based on ``host`` and ``port`` keywords host : `str`, optional name of NDS server host port : `int`, optional port number for NDS connection Returns ------- segdict : `~gwpy.segments.SegmentListDict` dict of ``(name, SegmentList)`` pairs """ start = int(to_gps(start)) end = int(ceil(to_gps(end))) chans = io_nds2.find_channels(channels, connection=connection, unique=True, epoch=(start, end), type=ctype) availability = io_nds2.get_availability(chans, start, end, connection=connection) return type(availability)(zip(channels, availability.values()))
python
def query_nds2_availability(cls, channels, start, end, ctype=126, connection=None, host=None, port=None): """Query for when data are available for these channels in NDS2 Parameters ---------- channels : `list` list of `Channel` or `str` for which to search start : `int` GPS start time of search, or any acceptable input to :meth:`~gwpy.time.to_gps` end : `int` GPS end time of search, or any acceptable input to :meth:`~gwpy.time.to_gps` connection : `nds2.connection`, optional open connection to an NDS(2) server, if not given, one will be created based on ``host`` and ``port`` keywords host : `str`, optional name of NDS server host port : `int`, optional port number for NDS connection Returns ------- segdict : `~gwpy.segments.SegmentListDict` dict of ``(name, SegmentList)`` pairs """ start = int(to_gps(start)) end = int(ceil(to_gps(end))) chans = io_nds2.find_channels(channels, connection=connection, unique=True, epoch=(start, end), type=ctype) availability = io_nds2.get_availability(chans, start, end, connection=connection) return type(availability)(zip(channels, availability.values()))
[ "def", "query_nds2_availability", "(", "cls", ",", "channels", ",", "start", ",", "end", ",", "ctype", "=", "126", ",", "connection", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ")", ":", "start", "=", "int", "(", "to_gps", "(", ...
Query for when data are available for these channels in NDS2 Parameters ---------- channels : `list` list of `Channel` or `str` for which to search start : `int` GPS start time of search, or any acceptable input to :meth:`~gwpy.time.to_gps` end : `int` GPS end time of search, or any acceptable input to :meth:`~gwpy.time.to_gps` connection : `nds2.connection`, optional open connection to an NDS(2) server, if not given, one will be created based on ``host`` and ``port`` keywords host : `str`, optional name of NDS server host port : `int`, optional port number for NDS connection Returns ------- segdict : `~gwpy.segments.SegmentListDict` dict of ``(name, SegmentList)`` pairs
[ "Query", "for", "when", "data", "are", "available", "for", "these", "channels", "in", "NDS2" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/channel.py#L855-L894
train
211,431
gwpy/gwpy
gwpy/table/io/gravityspy.py
get_gravityspy_triggers
def get_gravityspy_triggers(tablename, engine=None, **kwargs): """Fetch data into an `GravitySpyTable` Parameters ---------- table : `str`, The name of table you are attempting to receive triggers from. selection other filters you would like to supply underlying reader method for the given format .. note:: For now it will attempt to automatically connect you to a specific DB. In the future, this may be an input argument. Returns ------- table : `GravitySpyTable` """ from sqlalchemy.engine import create_engine from sqlalchemy.exc import ProgrammingError # connect if needed if engine is None: conn_kw = {} for key in ('db', 'host', 'user', 'passwd'): try: conn_kw[key] = kwargs.pop(key) except KeyError: pass engine = create_engine(get_connection_str(**conn_kw)) try: return GravitySpyTable(fetch(engine, tablename, **kwargs)) except ProgrammingError as exc: if 'relation "%s" does not exist' % tablename in str(exc): msg = exc.args[0] msg = msg.replace( 'does not exist', 'does not exist, the following tablenames are ' 'acceptable:\n %s\n' % '\n '.join(engine.table_names())) exc.args = (msg,) raise
python
def get_gravityspy_triggers(tablename, engine=None, **kwargs): """Fetch data into an `GravitySpyTable` Parameters ---------- table : `str`, The name of table you are attempting to receive triggers from. selection other filters you would like to supply underlying reader method for the given format .. note:: For now it will attempt to automatically connect you to a specific DB. In the future, this may be an input argument. Returns ------- table : `GravitySpyTable` """ from sqlalchemy.engine import create_engine from sqlalchemy.exc import ProgrammingError # connect if needed if engine is None: conn_kw = {} for key in ('db', 'host', 'user', 'passwd'): try: conn_kw[key] = kwargs.pop(key) except KeyError: pass engine = create_engine(get_connection_str(**conn_kw)) try: return GravitySpyTable(fetch(engine, tablename, **kwargs)) except ProgrammingError as exc: if 'relation "%s" does not exist' % tablename in str(exc): msg = exc.args[0] msg = msg.replace( 'does not exist', 'does not exist, the following tablenames are ' 'acceptable:\n %s\n' % '\n '.join(engine.table_names())) exc.args = (msg,) raise
[ "def", "get_gravityspy_triggers", "(", "tablename", ",", "engine", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "sqlalchemy", ".", "engine", "import", "create_engine", "from", "sqlalchemy", ".", "exc", "import", "ProgrammingError", "# connect if needed"...
Fetch data into an `GravitySpyTable` Parameters ---------- table : `str`, The name of table you are attempting to receive triggers from. selection other filters you would like to supply underlying reader method for the given format .. note:: For now it will attempt to automatically connect you to a specific DB. In the future, this may be an input argument. Returns ------- table : `GravitySpyTable`
[ "Fetch", "data", "into", "an", "GravitySpyTable" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/gravityspy.py#L42-L88
train
211,432
gwpy/gwpy
gwpy/table/io/gravityspy.py
get_connection_str
def get_connection_str(db='gravityspy', host='gravityspy.ciera.northwestern.edu', user=None, passwd=None): """Create string to pass to create_engine Parameters ---------- db : `str`, default: ``gravityspy`` The name of the SQL database your connecting to. host : `str`, default: ``gravityspy.ciera.northwestern.edu`` The name of the server the database you are connecting to lives on. user : `str`, default: `None` Your username for authentication to this database. passwd : `str`, default: `None` Your password for authentication to this database. .. note:: `user` and `passwd` should be given together, otherwise they will be ignored and values will be resolved from the ``GRAVITYSPY_DATABASE_USER`` and ``GRAVITYSPY_DATABASE_PASSWD`` environment variables. Returns ------- conn_string : `str` A SQLAlchemy engine compliant connection string """ if (not user) or (not passwd): user = os.getenv('GRAVITYSPY_DATABASE_USER', None) passwd = os.getenv('GRAVITYSPY_DATABASE_PASSWD', None) if (not user) or (not passwd): raise ValueError('Remember to either pass ' 'or export GRAVITYSPY_DATABASE_USER ' 'and export GRAVITYSPY_DATABASE_PASSWD in order ' 'to access the Gravity Spy Data: ' 'https://secrets.ligo.org/secrets/144/' ' description is username and secret is password.') return 'postgresql://{0}:{1}@{2}:5432/{3}'.format(user, passwd, host, db)
python
def get_connection_str(db='gravityspy', host='gravityspy.ciera.northwestern.edu', user=None, passwd=None): """Create string to pass to create_engine Parameters ---------- db : `str`, default: ``gravityspy`` The name of the SQL database your connecting to. host : `str`, default: ``gravityspy.ciera.northwestern.edu`` The name of the server the database you are connecting to lives on. user : `str`, default: `None` Your username for authentication to this database. passwd : `str`, default: `None` Your password for authentication to this database. .. note:: `user` and `passwd` should be given together, otherwise they will be ignored and values will be resolved from the ``GRAVITYSPY_DATABASE_USER`` and ``GRAVITYSPY_DATABASE_PASSWD`` environment variables. Returns ------- conn_string : `str` A SQLAlchemy engine compliant connection string """ if (not user) or (not passwd): user = os.getenv('GRAVITYSPY_DATABASE_USER', None) passwd = os.getenv('GRAVITYSPY_DATABASE_PASSWD', None) if (not user) or (not passwd): raise ValueError('Remember to either pass ' 'or export GRAVITYSPY_DATABASE_USER ' 'and export GRAVITYSPY_DATABASE_PASSWD in order ' 'to access the Gravity Spy Data: ' 'https://secrets.ligo.org/secrets/144/' ' description is username and secret is password.') return 'postgresql://{0}:{1}@{2}:5432/{3}'.format(user, passwd, host, db)
[ "def", "get_connection_str", "(", "db", "=", "'gravityspy'", ",", "host", "=", "'gravityspy.ciera.northwestern.edu'", ",", "user", "=", "None", ",", "passwd", "=", "None", ")", ":", "if", "(", "not", "user", ")", "or", "(", "not", "passwd", ")", ":", "us...
Create string to pass to create_engine Parameters ---------- db : `str`, default: ``gravityspy`` The name of the SQL database your connecting to. host : `str`, default: ``gravityspy.ciera.northwestern.edu`` The name of the server the database you are connecting to lives on. user : `str`, default: `None` Your username for authentication to this database. passwd : `str`, default: `None` Your password for authentication to this database. .. note:: `user` and `passwd` should be given together, otherwise they will be ignored and values will be resolved from the ``GRAVITYSPY_DATABASE_USER`` and ``GRAVITYSPY_DATABASE_PASSWD`` environment variables. Returns ------- conn_string : `str` A SQLAlchemy engine compliant connection string
[ "Create", "string", "to", "pass", "to", "create_engine" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/gravityspy.py#L93-L138
train
211,433
gwpy/gwpy
gwpy/detector/__init__.py
get_timezone_offset
def get_timezone_offset(ifo, dt=None): """Return the offset in seconds between UTC and the given interferometer Parameters ---------- ifo : `str` prefix of interferometer, e.g. ``'X1'`` dt : `datetime.datetime`, optional the time at which to calculate the offset, defaults to now Returns ------- offset : `int` the offset in seconds between the timezone of the interferometer and UTC """ import pytz dt = dt or datetime.datetime.now() offset = pytz.timezone(get_timezone(ifo)).utcoffset(dt) return offset.days * 86400 + offset.seconds + offset.microseconds * 1e-6
python
def get_timezone_offset(ifo, dt=None): """Return the offset in seconds between UTC and the given interferometer Parameters ---------- ifo : `str` prefix of interferometer, e.g. ``'X1'`` dt : `datetime.datetime`, optional the time at which to calculate the offset, defaults to now Returns ------- offset : `int` the offset in seconds between the timezone of the interferometer and UTC """ import pytz dt = dt or datetime.datetime.now() offset = pytz.timezone(get_timezone(ifo)).utcoffset(dt) return offset.days * 86400 + offset.seconds + offset.microseconds * 1e-6
[ "def", "get_timezone_offset", "(", "ifo", ",", "dt", "=", "None", ")", ":", "import", "pytz", "dt", "=", "dt", "or", "datetime", ".", "datetime", ".", "now", "(", ")", "offset", "=", "pytz", ".", "timezone", "(", "get_timezone", "(", "ifo", ")", ")",...
Return the offset in seconds between UTC and the given interferometer Parameters ---------- ifo : `str` prefix of interferometer, e.g. ``'X1'`` dt : `datetime.datetime`, optional the time at which to calculate the offset, defaults to now Returns ------- offset : `int` the offset in seconds between the timezone of the interferometer and UTC
[ "Return", "the", "offset", "in", "seconds", "between", "UTC", "and", "the", "given", "interferometer" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/__init__.py#L58-L78
train
211,434
gwpy/gwpy
gwpy/signal/spectral/_ui.py
normalize_fft_params
def normalize_fft_params(series, kwargs=None, func=None): """Normalize a set of FFT parameters for processing This method reads the ``fftlength`` and ``overlap`` keyword arguments (presumed to be values in seconds), works out sensible defaults, then updates ``kwargs`` in place to include ``nfft`` and ``noverlap`` as values in sample counts. If a ``window`` is given, the ``noverlap`` parameter will be set to the recommended overlap for that window type, if ``overlap`` is not given. If a ``window`` is given as a `str`, it will be converted to a `numpy.ndarray` containing the correct window (of the correct length). Parameters ---------- series : `gwpy.timeseries.TimeSeries` the data that will be processed using an FFT-based method kwargs : `dict` the dict of keyword arguments passed by the user func : `callable`, optional the FFT method that will be called Examples -------- >>> from numpy.random import normal >>> from gwpy.timeseries import TimeSeries >>> normalize_fft_params(TimeSeries(normal(size=1024), sample_rate=256)) {'nfft': 1024, 'noverlap': 0} >>> normalize_fft_params(TimeSeries(normal(size=1024), sample_rate=256), ... 
{'window': 'hann'}) {'window': array([ 0.00000000e+00, 9.41235870e-06, ..., 3.76490804e-05, 9.41235870e-06]), 'noverlap': 0, 'nfft': 1024} """ # parse keywords if kwargs is None: kwargs = dict() samp = series.sample_rate fftlength = kwargs.pop('fftlength', None) or series.duration overlap = kwargs.pop('overlap', None) window = kwargs.pop('window', None) # parse function library and name if func is None: method = library = None else: method = func.__name__ library = _fft_library(func) # fftlength -> nfft nfft = seconds_to_samples(fftlength, samp) # overlap -> noverlap noverlap = _normalize_overlap(overlap, window, nfft, samp, method=method) # create window window = _normalize_window(window, nfft, library, series.dtype) if window is not None: # allow FFT methods to use their own defaults kwargs['window'] = window # create FFT plan for LAL if library == 'lal' and kwargs.get('plan', None) is None: from ._lal import generate_fft_plan kwargs['plan'] = generate_fft_plan(nfft, dtype=series.dtype) kwargs.update({ 'nfft': nfft, 'noverlap': noverlap, }) return kwargs
python
def normalize_fft_params(series, kwargs=None, func=None): """Normalize a set of FFT parameters for processing This method reads the ``fftlength`` and ``overlap`` keyword arguments (presumed to be values in seconds), works out sensible defaults, then updates ``kwargs`` in place to include ``nfft`` and ``noverlap`` as values in sample counts. If a ``window`` is given, the ``noverlap`` parameter will be set to the recommended overlap for that window type, if ``overlap`` is not given. If a ``window`` is given as a `str`, it will be converted to a `numpy.ndarray` containing the correct window (of the correct length). Parameters ---------- series : `gwpy.timeseries.TimeSeries` the data that will be processed using an FFT-based method kwargs : `dict` the dict of keyword arguments passed by the user func : `callable`, optional the FFT method that will be called Examples -------- >>> from numpy.random import normal >>> from gwpy.timeseries import TimeSeries >>> normalize_fft_params(TimeSeries(normal(size=1024), sample_rate=256)) {'nfft': 1024, 'noverlap': 0} >>> normalize_fft_params(TimeSeries(normal(size=1024), sample_rate=256), ... 
{'window': 'hann'}) {'window': array([ 0.00000000e+00, 9.41235870e-06, ..., 3.76490804e-05, 9.41235870e-06]), 'noverlap': 0, 'nfft': 1024} """ # parse keywords if kwargs is None: kwargs = dict() samp = series.sample_rate fftlength = kwargs.pop('fftlength', None) or series.duration overlap = kwargs.pop('overlap', None) window = kwargs.pop('window', None) # parse function library and name if func is None: method = library = None else: method = func.__name__ library = _fft_library(func) # fftlength -> nfft nfft = seconds_to_samples(fftlength, samp) # overlap -> noverlap noverlap = _normalize_overlap(overlap, window, nfft, samp, method=method) # create window window = _normalize_window(window, nfft, library, series.dtype) if window is not None: # allow FFT methods to use their own defaults kwargs['window'] = window # create FFT plan for LAL if library == 'lal' and kwargs.get('plan', None) is None: from ._lal import generate_fft_plan kwargs['plan'] = generate_fft_plan(nfft, dtype=series.dtype) kwargs.update({ 'nfft': nfft, 'noverlap': noverlap, }) return kwargs
[ "def", "normalize_fft_params", "(", "series", ",", "kwargs", "=", "None", ",", "func", "=", "None", ")", ":", "# parse keywords", "if", "kwargs", "is", "None", ":", "kwargs", "=", "dict", "(", ")", "samp", "=", "series", ".", "sample_rate", "fftlength", ...
Normalize a set of FFT parameters for processing This method reads the ``fftlength`` and ``overlap`` keyword arguments (presumed to be values in seconds), works out sensible defaults, then updates ``kwargs`` in place to include ``nfft`` and ``noverlap`` as values in sample counts. If a ``window`` is given, the ``noverlap`` parameter will be set to the recommended overlap for that window type, if ``overlap`` is not given. If a ``window`` is given as a `str`, it will be converted to a `numpy.ndarray` containing the correct window (of the correct length). Parameters ---------- series : `gwpy.timeseries.TimeSeries` the data that will be processed using an FFT-based method kwargs : `dict` the dict of keyword arguments passed by the user func : `callable`, optional the FFT method that will be called Examples -------- >>> from numpy.random import normal >>> from gwpy.timeseries import TimeSeries >>> normalize_fft_params(TimeSeries(normal(size=1024), sample_rate=256)) {'nfft': 1024, 'noverlap': 0} >>> normalize_fft_params(TimeSeries(normal(size=1024), sample_rate=256), ... {'window': 'hann'}) {'window': array([ 0.00000000e+00, 9.41235870e-06, ..., 3.76490804e-05, 9.41235870e-06]), 'noverlap': 0, 'nfft': 1024}
[ "Normalize", "a", "set", "of", "FFT", "parameters", "for", "processing" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_ui.py#L76-L147
train
211,435
gwpy/gwpy
gwpy/signal/spectral/_ui.py
_normalize_overlap
def _normalize_overlap(overlap, window, nfft, samp, method='welch'): """Normalise an overlap in physical units to a number of samples Parameters ---------- overlap : `float`, `Quantity`, `None` the overlap in some physical unit (seconds) window : `str` the name of the window function that will be used, only used if `overlap=None` is given nfft : `int` the number of samples that will be used in the fast Fourier transform samp : `Quantity` the sampling rate (Hz) of the data that will be transformed method : `str` the name of the averaging method, default: `'welch'`, only used to return `0` for `'bartlett'` averaging Returns ------- noverlap : `int` the number of samples to be be used for the overlap """ if method == 'bartlett': return 0 if overlap is None and isinstance(window, string_types): return recommended_overlap(window, nfft) if overlap is None: return 0 return seconds_to_samples(overlap, samp)
python
def _normalize_overlap(overlap, window, nfft, samp, method='welch'): """Normalise an overlap in physical units to a number of samples Parameters ---------- overlap : `float`, `Quantity`, `None` the overlap in some physical unit (seconds) window : `str` the name of the window function that will be used, only used if `overlap=None` is given nfft : `int` the number of samples that will be used in the fast Fourier transform samp : `Quantity` the sampling rate (Hz) of the data that will be transformed method : `str` the name of the averaging method, default: `'welch'`, only used to return `0` for `'bartlett'` averaging Returns ------- noverlap : `int` the number of samples to be be used for the overlap """ if method == 'bartlett': return 0 if overlap is None and isinstance(window, string_types): return recommended_overlap(window, nfft) if overlap is None: return 0 return seconds_to_samples(overlap, samp)
[ "def", "_normalize_overlap", "(", "overlap", ",", "window", ",", "nfft", ",", "samp", ",", "method", "=", "'welch'", ")", ":", "if", "method", "==", "'bartlett'", ":", "return", "0", "if", "overlap", "is", "None", "and", "isinstance", "(", "window", ",",...
Normalise an overlap in physical units to a number of samples Parameters ---------- overlap : `float`, `Quantity`, `None` the overlap in some physical unit (seconds) window : `str` the name of the window function that will be used, only used if `overlap=None` is given nfft : `int` the number of samples that will be used in the fast Fourier transform samp : `Quantity` the sampling rate (Hz) of the data that will be transformed method : `str` the name of the averaging method, default: `'welch'`, only used to return `0` for `'bartlett'` averaging Returns ------- noverlap : `int` the number of samples to be be used for the overlap
[ "Normalise", "an", "overlap", "in", "physical", "units", "to", "a", "number", "of", "samples" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_ui.py#L150-L184
train
211,436
gwpy/gwpy
gwpy/signal/spectral/_ui.py
_normalize_window
def _normalize_window(window, nfft, library, dtype): """Normalise a window specification for a PSD calculation Parameters ---------- window : `str`, `numpy.ndarray`, `None` the input window specification nfft : `int` the length of the Fourier transform, in samples library : `str` the name of the library that provides the PSD routine dtype : `type` the required type of the window array, only used if `library='lal'` is given Returns ------- window : `numpy.ndarray`, `lal.REAL8Window` a numpy-, or `LAL`-format window array """ if library == '_lal' and isinstance(window, numpy.ndarray): from ._lal import window_from_array return window_from_array(window) if library == '_lal': from ._lal import generate_window return generate_window(nfft, window=window, dtype=dtype) if isinstance(window, string_types): window = canonical_name(window) if isinstance(window, string_types + (tuple,)): return get_window(window, nfft) return None
python
def _normalize_window(window, nfft, library, dtype): """Normalise a window specification for a PSD calculation Parameters ---------- window : `str`, `numpy.ndarray`, `None` the input window specification nfft : `int` the length of the Fourier transform, in samples library : `str` the name of the library that provides the PSD routine dtype : `type` the required type of the window array, only used if `library='lal'` is given Returns ------- window : `numpy.ndarray`, `lal.REAL8Window` a numpy-, or `LAL`-format window array """ if library == '_lal' and isinstance(window, numpy.ndarray): from ._lal import window_from_array return window_from_array(window) if library == '_lal': from ._lal import generate_window return generate_window(nfft, window=window, dtype=dtype) if isinstance(window, string_types): window = canonical_name(window) if isinstance(window, string_types + (tuple,)): return get_window(window, nfft) return None
[ "def", "_normalize_window", "(", "window", ",", "nfft", ",", "library", ",", "dtype", ")", ":", "if", "library", "==", "'_lal'", "and", "isinstance", "(", "window", ",", "numpy", ".", "ndarray", ")", ":", "from", ".", "_lal", "import", "window_from_array",...
Normalise a window specification for a PSD calculation Parameters ---------- window : `str`, `numpy.ndarray`, `None` the input window specification nfft : `int` the length of the Fourier transform, in samples library : `str` the name of the library that provides the PSD routine dtype : `type` the required type of the window array, only used if `library='lal'` is given Returns ------- window : `numpy.ndarray`, `lal.REAL8Window` a numpy-, or `LAL`-format window array
[ "Normalise", "a", "window", "specification", "for", "a", "PSD", "calculation" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_ui.py#L187-L220
train
211,437
gwpy/gwpy
gwpy/signal/spectral/_ui.py
set_fft_params
def set_fft_params(func): """Decorate a method to automatically convert quantities to samples """ @wraps(func) def wrapped_func(series, method_func, *args, **kwargs): """Wrap function to normalize FFT params before execution """ if isinstance(series, tuple): data = series[0] else: data = series # normalise FFT parmeters for all libraries normalize_fft_params(data, kwargs=kwargs, func=method_func) return func(series, method_func, *args, **kwargs) return wrapped_func
python
def set_fft_params(func): """Decorate a method to automatically convert quantities to samples """ @wraps(func) def wrapped_func(series, method_func, *args, **kwargs): """Wrap function to normalize FFT params before execution """ if isinstance(series, tuple): data = series[0] else: data = series # normalise FFT parmeters for all libraries normalize_fft_params(data, kwargs=kwargs, func=method_func) return func(series, method_func, *args, **kwargs) return wrapped_func
[ "def", "set_fft_params", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapped_func", "(", "series", ",", "method_func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Wrap function to normalize FFT params before execution\n \"...
Decorate a method to automatically convert quantities to samples
[ "Decorate", "a", "method", "to", "automatically", "convert", "quantities", "to", "samples" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_ui.py#L223-L240
train
211,438
gwpy/gwpy
gwpy/signal/spectral/_ui.py
psd
def psd(timeseries, method_func, *args, **kwargs): """Generate a PSD using a method function All arguments are presumed to be given in physical units Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries`, `tuple` the data to process, or a 2-tuple of series to correlate method_func : `callable` the function that will be called to perform the signal processing *args, **kwargs other arguments to pass to ``method_func`` when calling """ # decorator has translated the arguments for us, so just call psdn() return _psdn(timeseries, method_func, *args, **kwargs)
python
def psd(timeseries, method_func, *args, **kwargs): """Generate a PSD using a method function All arguments are presumed to be given in physical units Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries`, `tuple` the data to process, or a 2-tuple of series to correlate method_func : `callable` the function that will be called to perform the signal processing *args, **kwargs other arguments to pass to ``method_func`` when calling """ # decorator has translated the arguments for us, so just call psdn() return _psdn(timeseries, method_func, *args, **kwargs)
[ "def", "psd", "(", "timeseries", ",", "method_func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# decorator has translated the arguments for us, so just call psdn()", "return", "_psdn", "(", "timeseries", ",", "method_func", ",", "*", "args", ",", "*", ...
Generate a PSD using a method function All arguments are presumed to be given in physical units Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries`, `tuple` the data to process, or a 2-tuple of series to correlate method_func : `callable` the function that will be called to perform the signal processing *args, **kwargs other arguments to pass to ``method_func`` when calling
[ "Generate", "a", "PSD", "using", "a", "method", "function" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_ui.py#L246-L263
train
211,439
gwpy/gwpy
gwpy/signal/spectral/_ui.py
_psdn
def _psdn(timeseries, method_func, *args, **kwargs): """Generate a PSD using a method function with FFT arguments in samples All arguments are presumed to be in sample counts, not physical units Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries`, `tuple` the data to process, or a 2-tuple of series to correlate method_func : `callable` the function that will be called to perform the signal processing *args, **kwargs other arguments to pass to ``method_func`` when calling """ # unpack tuple of timeseries for cross spectrum try: timeseries, other = timeseries # or just calculate PSD except ValueError: return method_func(timeseries, kwargs.pop('nfft'), *args, **kwargs) else: return method_func(timeseries, other, kwargs.pop('nfft'), *args, **kwargs)
python
def _psdn(timeseries, method_func, *args, **kwargs): """Generate a PSD using a method function with FFT arguments in samples All arguments are presumed to be in sample counts, not physical units Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries`, `tuple` the data to process, or a 2-tuple of series to correlate method_func : `callable` the function that will be called to perform the signal processing *args, **kwargs other arguments to pass to ``method_func`` when calling """ # unpack tuple of timeseries for cross spectrum try: timeseries, other = timeseries # or just calculate PSD except ValueError: return method_func(timeseries, kwargs.pop('nfft'), *args, **kwargs) else: return method_func(timeseries, other, kwargs.pop('nfft'), *args, **kwargs)
[ "def", "_psdn", "(", "timeseries", ",", "method_func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# unpack tuple of timeseries for cross spectrum", "try", ":", "timeseries", ",", "other", "=", "timeseries", "# or just calculate PSD", "except", "ValueError...
Generate a PSD using a method function with FFT arguments in samples All arguments are presumed to be in sample counts, not physical units Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries`, `tuple` the data to process, or a 2-tuple of series to correlate method_func : `callable` the function that will be called to perform the signal processing *args, **kwargs other arguments to pass to ``method_func`` when calling
[ "Generate", "a", "PSD", "using", "a", "method", "function", "with", "FFT", "arguments", "in", "samples" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_ui.py#L266-L290
train
211,440
gwpy/gwpy
gwpy/signal/spectral/_ui.py
average_spectrogram
def average_spectrogram(timeseries, method_func, stride, *args, **kwargs): """Generate an average spectrogram using a method function Each time bin of the resulting spectrogram is a PSD generated using the method_func """ # unpack CSD TimeSeries pair, or single timeseries try: timeseries, other = timeseries except ValueError: timeseries = timeseries other = None from ...spectrogram import Spectrogram nproc = kwargs.pop('nproc', 1) # get params epoch = timeseries.t0.value nstride = seconds_to_samples(stride, timeseries.sample_rate) kwargs['fftlength'] = kwargs.pop('fftlength', stride) or stride normalize_fft_params(timeseries, kwargs=kwargs, func=method_func) nfft = kwargs['nfft'] noverlap = kwargs['noverlap'] # sanity check parameters if nstride > timeseries.size: raise ValueError("stride cannot be greater than the duration of " "this TimeSeries") if nfft > nstride: raise ValueError("fftlength cannot be greater than stride") if noverlap >= nfft: raise ValueError("overlap must be less than fftlength") # set up single process Spectrogram method def _psd(series): """Calculate a single PSD for a spectrogram """ psd_ = _psdn(series, method_func, *args, **kwargs) del psd_.epoch # fixes Segmentation fault (no idea why it faults) return psd_ # define chunks tschunks = _chunk_timeseries(timeseries, nstride, noverlap) if other is not None: otherchunks = _chunk_timeseries(other, nstride, noverlap) tschunks = zip(tschunks, otherchunks) # calculate PSDs psds = mp_utils.multiprocess_with_queues(nproc, _psd, tschunks) # recombobulate PSDs into a spectrogram return Spectrogram.from_spectra(*psds, epoch=epoch, dt=stride)
python
def average_spectrogram(timeseries, method_func, stride, *args, **kwargs): """Generate an average spectrogram using a method function Each time bin of the resulting spectrogram is a PSD generated using the method_func """ # unpack CSD TimeSeries pair, or single timeseries try: timeseries, other = timeseries except ValueError: timeseries = timeseries other = None from ...spectrogram import Spectrogram nproc = kwargs.pop('nproc', 1) # get params epoch = timeseries.t0.value nstride = seconds_to_samples(stride, timeseries.sample_rate) kwargs['fftlength'] = kwargs.pop('fftlength', stride) or stride normalize_fft_params(timeseries, kwargs=kwargs, func=method_func) nfft = kwargs['nfft'] noverlap = kwargs['noverlap'] # sanity check parameters if nstride > timeseries.size: raise ValueError("stride cannot be greater than the duration of " "this TimeSeries") if nfft > nstride: raise ValueError("fftlength cannot be greater than stride") if noverlap >= nfft: raise ValueError("overlap must be less than fftlength") # set up single process Spectrogram method def _psd(series): """Calculate a single PSD for a spectrogram """ psd_ = _psdn(series, method_func, *args, **kwargs) del psd_.epoch # fixes Segmentation fault (no idea why it faults) return psd_ # define chunks tschunks = _chunk_timeseries(timeseries, nstride, noverlap) if other is not None: otherchunks = _chunk_timeseries(other, nstride, noverlap) tschunks = zip(tschunks, otherchunks) # calculate PSDs psds = mp_utils.multiprocess_with_queues(nproc, _psd, tschunks) # recombobulate PSDs into a spectrogram return Spectrogram.from_spectra(*psds, epoch=epoch, dt=stride)
[ "def", "average_spectrogram", "(", "timeseries", ",", "method_func", ",", "stride", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# unpack CSD TimeSeries pair, or single timeseries", "try", ":", "timeseries", ",", "other", "=", "timeseries", "except", "Val...
Generate an average spectrogram using a method function Each time bin of the resulting spectrogram is a PSD generated using the method_func
[ "Generate", "an", "average", "spectrogram", "using", "a", "method", "function" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_ui.py#L293-L345
train
211,441
gwpy/gwpy
gwpy/signal/spectral/_ui.py
spectrogram
def spectrogram(timeseries, method_func, **kwargs): """Generate a spectrogram using a method function Each time bin of the resulting spectrogram is a PSD estimate using a single FFT """ from ...spectrogram import Spectrogram # get params sampling = timeseries.sample_rate.to('Hz').value nproc = kwargs.pop('nproc', 1) nfft = kwargs.pop('nfft') noverlap = kwargs.pop('noverlap') nstride = nfft - noverlap # sanity check parameters if noverlap >= nfft: raise ValueError("overlap must be less than fftlength") # set up single process Spectrogram method def _psd(series): """Calculate a single PSD for a spectrogram """ return method_func(series, nfft=nfft, **kwargs)[1] # define chunks chunks = [] x = 0 while x + nfft <= timeseries.size: y = min(timeseries.size, x + nfft) chunks.append((x, y)) x += nstride tschunks = (timeseries.value[i:j] for i, j in chunks) # calculate PSDs with multiprocessing psds = mp_utils.multiprocess_with_queues(nproc, _psd, tschunks) # convert PSDs to array with spacing for averages numtimes = 1 + int((timeseries.size - nstride) / nstride) numfreqs = int(nfft / 2 + 1) data = numpy.zeros((numtimes, numfreqs), dtype=timeseries.dtype) data[:len(psds)] = psds # create output spectrogram unit = fft_utils.scale_timeseries_unit( timeseries.unit, scaling=kwargs.get('scaling', 'density')) out = Spectrogram(numpy.empty((numtimes, numfreqs), dtype=timeseries.dtype), copy=False, dt=nstride * timeseries.dt, t0=timeseries.t0, f0=0, df=sampling/nfft, unit=unit, name=timeseries.name, channel=timeseries.channel) # normalize over-dense grid density = nfft // nstride weights = get_window('triangle', density) for i in range(numtimes): # get indices of overlapping columns x = max(0, i+1-density) y = min(i+1, numtimes-density+1) if x == 0: wgt = weights[-y:] elif y == numtimes - density + 1: wgt = weights[:y-x] else: wgt = weights # calculate weighted average out.value[i, :] = numpy.average(data[x:y], axis=0, weights=wgt) return out
python
def spectrogram(timeseries, method_func, **kwargs): """Generate a spectrogram using a method function Each time bin of the resulting spectrogram is a PSD estimate using a single FFT """ from ...spectrogram import Spectrogram # get params sampling = timeseries.sample_rate.to('Hz').value nproc = kwargs.pop('nproc', 1) nfft = kwargs.pop('nfft') noverlap = kwargs.pop('noverlap') nstride = nfft - noverlap # sanity check parameters if noverlap >= nfft: raise ValueError("overlap must be less than fftlength") # set up single process Spectrogram method def _psd(series): """Calculate a single PSD for a spectrogram """ return method_func(series, nfft=nfft, **kwargs)[1] # define chunks chunks = [] x = 0 while x + nfft <= timeseries.size: y = min(timeseries.size, x + nfft) chunks.append((x, y)) x += nstride tschunks = (timeseries.value[i:j] for i, j in chunks) # calculate PSDs with multiprocessing psds = mp_utils.multiprocess_with_queues(nproc, _psd, tschunks) # convert PSDs to array with spacing for averages numtimes = 1 + int((timeseries.size - nstride) / nstride) numfreqs = int(nfft / 2 + 1) data = numpy.zeros((numtimes, numfreqs), dtype=timeseries.dtype) data[:len(psds)] = psds # create output spectrogram unit = fft_utils.scale_timeseries_unit( timeseries.unit, scaling=kwargs.get('scaling', 'density')) out = Spectrogram(numpy.empty((numtimes, numfreqs), dtype=timeseries.dtype), copy=False, dt=nstride * timeseries.dt, t0=timeseries.t0, f0=0, df=sampling/nfft, unit=unit, name=timeseries.name, channel=timeseries.channel) # normalize over-dense grid density = nfft // nstride weights = get_window('triangle', density) for i in range(numtimes): # get indices of overlapping columns x = max(0, i+1-density) y = min(i+1, numtimes-density+1) if x == 0: wgt = weights[-y:] elif y == numtimes - density + 1: wgt = weights[:y-x] else: wgt = weights # calculate weighted average out.value[i, :] = numpy.average(data[x:y], axis=0, weights=wgt) return out
[ "def", "spectrogram", "(", "timeseries", ",", "method_func", ",", "*", "*", "kwargs", ")", ":", "from", ".", ".", ".", "spectrogram", "import", "Spectrogram", "# get params", "sampling", "=", "timeseries", ".", "sample_rate", ".", "to", "(", "'Hz'", ")", "...
Generate a spectrogram using a method function Each time bin of the resulting spectrogram is a PSD estimate using a single FFT
[ "Generate", "a", "spectrogram", "using", "a", "method", "function" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_ui.py#L349-L418
train
211,442
gwpy/gwpy
gwpy/cli/spectrogram.py
Spectrogram.get_color_label
def get_color_label(self): """Text for colorbar label """ if self.args.norm: return 'Normalized to {}'.format(self.args.norm) if len(self.units) == 1 and self.usetex: return r'ASD $\left({0}\right)$'.format( self.units[0].to_string('latex').strip('$')) elif len(self.units) == 1: return 'ASD ({0})'.format(self.units[0].to_string('generic')) return super(Spectrogram, self).get_color_label()
python
def get_color_label(self): """Text for colorbar label """ if self.args.norm: return 'Normalized to {}'.format(self.args.norm) if len(self.units) == 1 and self.usetex: return r'ASD $\left({0}\right)$'.format( self.units[0].to_string('latex').strip('$')) elif len(self.units) == 1: return 'ASD ({0})'.format(self.units[0].to_string('generic')) return super(Spectrogram, self).get_color_label()
[ "def", "get_color_label", "(", "self", ")", ":", "if", "self", ".", "args", ".", "norm", ":", "return", "'Normalized to {}'", ".", "format", "(", "self", ".", "args", ".", "norm", ")", "if", "len", "(", "self", ".", "units", ")", "==", "1", "and", ...
Text for colorbar label
[ "Text", "for", "colorbar", "label" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/cli/spectrogram.py#L64-L74
train
211,443
gwpy/gwpy
gwpy/cli/spectrogram.py
Spectrogram.get_stride
def get_stride(self): """Calculate the stride for the spectrogram This method returns the stride as a `float`, or `None` to indicate selected usage of `TimeSeries.spectrogram2`. """ fftlength = float(self.args.secpfft) overlap = fftlength * self.args.overlap stride = fftlength - overlap nfft = self.duration / stride # number of FFTs ffps = int(nfft / (self.width * 0.8)) # FFTs per second if ffps > 3: return max(2 * fftlength, ffps * stride + fftlength - 1) return None
python
def get_stride(self): """Calculate the stride for the spectrogram This method returns the stride as a `float`, or `None` to indicate selected usage of `TimeSeries.spectrogram2`. """ fftlength = float(self.args.secpfft) overlap = fftlength * self.args.overlap stride = fftlength - overlap nfft = self.duration / stride # number of FFTs ffps = int(nfft / (self.width * 0.8)) # FFTs per second if ffps > 3: return max(2 * fftlength, ffps * stride + fftlength - 1) return None
[ "def", "get_stride", "(", "self", ")", ":", "fftlength", "=", "float", "(", "self", ".", "args", ".", "secpfft", ")", "overlap", "=", "fftlength", "*", "self", ".", "args", ".", "overlap", "stride", "=", "fftlength", "-", "overlap", "nfft", "=", "self"...
Calculate the stride for the spectrogram This method returns the stride as a `float`, or `None` to indicate selected usage of `TimeSeries.spectrogram2`.
[ "Calculate", "the", "stride", "for", "the", "spectrogram" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/cli/spectrogram.py#L76-L89
train
211,444
gwpy/gwpy
gwpy/cli/spectrogram.py
Spectrogram.get_spectrogram
def get_spectrogram(self): """Calculate the spectrogram to be plotted This exists as a separate method to allow subclasses to override this and not the entire `get_plot` method, e.g. `Coherencegram`. This method should not apply the normalisation from `args.norm`. """ args = self.args fftlength = float(args.secpfft) overlap = fftlength * args.overlap self.log(2, "Calculating spectrogram secpfft: %s, overlap: %s" % (fftlength, overlap)) stride = self.get_stride() if stride: specgram = self.timeseries[0].spectrogram( stride, fftlength=fftlength, overlap=overlap, window=args.window) nfft = stride * (stride // (fftlength - overlap)) self.log(3, 'Spectrogram calc, stride: %s, fftlength: %s, ' 'overlap: %sf, #fft: %d' % (stride, fftlength, overlap, nfft)) else: specgram = self.timeseries[0].spectrogram2( fftlength=fftlength, overlap=overlap, window=args.window) nfft = specgram.shape[0] self.log(3, 'HR-Spectrogram calc, fftlength: %s, overlap: %s, ' '#fft: %d' % (fftlength, overlap, nfft)) return specgram ** (1/2.)
python
def get_spectrogram(self): """Calculate the spectrogram to be plotted This exists as a separate method to allow subclasses to override this and not the entire `get_plot` method, e.g. `Coherencegram`. This method should not apply the normalisation from `args.norm`. """ args = self.args fftlength = float(args.secpfft) overlap = fftlength * args.overlap self.log(2, "Calculating spectrogram secpfft: %s, overlap: %s" % (fftlength, overlap)) stride = self.get_stride() if stride: specgram = self.timeseries[0].spectrogram( stride, fftlength=fftlength, overlap=overlap, window=args.window) nfft = stride * (stride // (fftlength - overlap)) self.log(3, 'Spectrogram calc, stride: %s, fftlength: %s, ' 'overlap: %sf, #fft: %d' % (stride, fftlength, overlap, nfft)) else: specgram = self.timeseries[0].spectrogram2( fftlength=fftlength, overlap=overlap, window=args.window) nfft = specgram.shape[0] self.log(3, 'HR-Spectrogram calc, fftlength: %s, overlap: %s, ' '#fft: %d' % (fftlength, overlap, nfft)) return specgram ** (1/2.)
[ "def", "get_spectrogram", "(", "self", ")", ":", "args", "=", "self", ".", "args", "fftlength", "=", "float", "(", "args", ".", "secpfft", ")", "overlap", "=", "fftlength", "*", "args", ".", "overlap", "self", ".", "log", "(", "2", ",", "\"Calculating ...
Calculate the spectrogram to be plotted This exists as a separate method to allow subclasses to override this and not the entire `get_plot` method, e.g. `Coherencegram`. This method should not apply the normalisation from `args.norm`.
[ "Calculate", "the", "spectrogram", "to", "be", "plotted" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/cli/spectrogram.py#L91-L123
train
211,445
gwpy/gwpy
gwpy/cli/spectrum.py
Spectrum.get_ylabel
def get_ylabel(self): """Text for y-axis label """ if len(self.units) == 1: return r'ASD $\left({0}\right)$'.format( self.units[0].to_string('latex').strip('$')) return 'ASD'
python
def get_ylabel(self): """Text for y-axis label """ if len(self.units) == 1: return r'ASD $\left({0}\right)$'.format( self.units[0].to_string('latex').strip('$')) return 'ASD'
[ "def", "get_ylabel", "(", "self", ")", ":", "if", "len", "(", "self", ".", "units", ")", "==", "1", ":", "return", "r'ASD $\\left({0}\\right)$'", ".", "format", "(", "self", ".", "units", "[", "0", "]", ".", "to_string", "(", "'latex'", ")", ".", "st...
Text for y-axis label
[ "Text", "for", "y", "-", "axis", "label" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/cli/spectrum.py#L60-L66
train
211,446
gwpy/gwpy
gwpy/timeseries/io/core.py
read
def read(cls, source, *args, **kwargs): """Read data from a source into a `gwpy.timeseries` object. This method is just the internal worker for `TimeSeries.read`, and `TimeSeriesDict.read`, and isn't meant to be called directly. """ # if reading a cache, read it now and sieve if io_cache.is_cache(source): from .cache import preformat_cache source = preformat_cache(source, *args[1:], start=kwargs.get('start'), end=kwargs.get('end')) # get join arguments pad = kwargs.pop('pad', None) gap = kwargs.pop('gap', 'raise' if pad is None else 'pad') joiner = _join_factory(cls, gap, pad) # read return io_read_multi(joiner, cls, source, *args, **kwargs)
python
def read(cls, source, *args, **kwargs): """Read data from a source into a `gwpy.timeseries` object. This method is just the internal worker for `TimeSeries.read`, and `TimeSeriesDict.read`, and isn't meant to be called directly. """ # if reading a cache, read it now and sieve if io_cache.is_cache(source): from .cache import preformat_cache source = preformat_cache(source, *args[1:], start=kwargs.get('start'), end=kwargs.get('end')) # get join arguments pad = kwargs.pop('pad', None) gap = kwargs.pop('gap', 'raise' if pad is None else 'pad') joiner = _join_factory(cls, gap, pad) # read return io_read_multi(joiner, cls, source, *args, **kwargs)
[ "def", "read", "(", "cls", ",", "source", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# if reading a cache, read it now and sieve", "if", "io_cache", ".", "is_cache", "(", "source", ")", ":", "from", ".", "cache", "import", "preformat_cache", "sour...
Read data from a source into a `gwpy.timeseries` object. This method is just the internal worker for `TimeSeries.read`, and `TimeSeriesDict.read`, and isn't meant to be called directly.
[ "Read", "data", "from", "a", "source", "into", "a", "gwpy", ".", "timeseries", "object", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/core.py#L26-L45
train
211,447
gwpy/gwpy
gwpy/timeseries/io/core.py
_join_factory
def _join_factory(cls, gap, pad): """Build a joiner for the given cls, and the given padding options """ if issubclass(cls, dict): def _join(data): out = cls() data = list(data) while data: tsd = data.pop(0) out.append(tsd, gap=gap, pad=pad) del tsd return out else: from .. import TimeSeriesBaseList def _join(arrays): list_ = TimeSeriesBaseList(*arrays) return list_.join(pad=pad, gap=gap) return _join
python
def _join_factory(cls, gap, pad): """Build a joiner for the given cls, and the given padding options """ if issubclass(cls, dict): def _join(data): out = cls() data = list(data) while data: tsd = data.pop(0) out.append(tsd, gap=gap, pad=pad) del tsd return out else: from .. import TimeSeriesBaseList def _join(arrays): list_ = TimeSeriesBaseList(*arrays) return list_.join(pad=pad, gap=gap) return _join
[ "def", "_join_factory", "(", "cls", ",", "gap", ",", "pad", ")", ":", "if", "issubclass", "(", "cls", ",", "dict", ")", ":", "def", "_join", "(", "data", ")", ":", "out", "=", "cls", "(", ")", "data", "=", "list", "(", "data", ")", "while", "da...
Build a joiner for the given cls, and the given padding options
[ "Build", "a", "joiner", "for", "the", "given", "cls", "and", "the", "given", "padding", "options" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/core.py#L48-L66
train
211,448
gwpy/gwpy
gwpy/table/io/pycbc.py
table_from_file
def table_from_file(source, ifo=None, columns=None, selection=None, loudest=False, extended_metadata=True): """Read a `Table` from a PyCBC live HDF5 file Parameters ---------- source : `str`, `h5py.File`, `h5py.Group` the file path of open `h5py` object from which to read the data ifo : `str`, optional the interferometer prefix (e.g. ``'G1'``) to read; this is required if reading from a file path or `h5py.File` and the containing file stores data for multiple interferometers columns : `list` or `str`, optional the list of column names to read, defaults to all in group loudest : `bool`, optional read only those events marked as 'loudest', default: `False` (read all) extended_metadata : `bool`, optional record non-column datasets found in the H5 group (e.g. ``'psd'``) in the ``meta`` dict, default: `True` Returns ------- table : `~gwpy.table.EventTable` """ # find group if isinstance(source, h5py.File): source, ifo = _find_table_group(source, ifo=ifo) # -- by this point 'source' is guaranteed to be an h5py.Group # parse default columns if columns is None: columns = list(_get_columns(source)) readcols = set(columns) # parse selections selection = parse_column_filters(selection or []) if selection: readcols.update(list(zip(*selection))[0]) # set up meta dict meta = {'ifo': ifo} meta.update(source.attrs) if extended_metadata: meta.update(_get_extended_metadata(source)) if loudest: loudidx = source['loudest'][:] # map data to columns data = [] for name in readcols: # convert hdf5 dataset into Column try: arr = source[name][:] except KeyError: if name in GET_COLUMN: arr = GET_COLUMN[name](source) else: raise if loudest: arr = arr[loudidx] data.append(Table.Column(arr, name=name)) # read, applying selection filters, and column filters return filter_table(Table(data, meta=meta), selection)[columns]
python
def table_from_file(source, ifo=None, columns=None, selection=None, loudest=False, extended_metadata=True): """Read a `Table` from a PyCBC live HDF5 file Parameters ---------- source : `str`, `h5py.File`, `h5py.Group` the file path of open `h5py` object from which to read the data ifo : `str`, optional the interferometer prefix (e.g. ``'G1'``) to read; this is required if reading from a file path or `h5py.File` and the containing file stores data for multiple interferometers columns : `list` or `str`, optional the list of column names to read, defaults to all in group loudest : `bool`, optional read only those events marked as 'loudest', default: `False` (read all) extended_metadata : `bool`, optional record non-column datasets found in the H5 group (e.g. ``'psd'``) in the ``meta`` dict, default: `True` Returns ------- table : `~gwpy.table.EventTable` """ # find group if isinstance(source, h5py.File): source, ifo = _find_table_group(source, ifo=ifo) # -- by this point 'source' is guaranteed to be an h5py.Group # parse default columns if columns is None: columns = list(_get_columns(source)) readcols = set(columns) # parse selections selection = parse_column_filters(selection or []) if selection: readcols.update(list(zip(*selection))[0]) # set up meta dict meta = {'ifo': ifo} meta.update(source.attrs) if extended_metadata: meta.update(_get_extended_metadata(source)) if loudest: loudidx = source['loudest'][:] # map data to columns data = [] for name in readcols: # convert hdf5 dataset into Column try: arr = source[name][:] except KeyError: if name in GET_COLUMN: arr = GET_COLUMN[name](source) else: raise if loudest: arr = arr[loudidx] data.append(Table.Column(arr, name=name)) # read, applying selection filters, and column filters return filter_table(Table(data, meta=meta), selection)[columns]
[ "def", "table_from_file", "(", "source", ",", "ifo", "=", "None", ",", "columns", "=", "None", ",", "selection", "=", "None", ",", "loudest", "=", "False", ",", "extended_metadata", "=", "True", ")", ":", "# find group", "if", "isinstance", "(", "source", ...
Read a `Table` from a PyCBC live HDF5 file Parameters ---------- source : `str`, `h5py.File`, `h5py.Group` the file path of open `h5py` object from which to read the data ifo : `str`, optional the interferometer prefix (e.g. ``'G1'``) to read; this is required if reading from a file path or `h5py.File` and the containing file stores data for multiple interferometers columns : `list` or `str`, optional the list of column names to read, defaults to all in group loudest : `bool`, optional read only those events marked as 'loudest', default: `False` (read all) extended_metadata : `bool`, optional record non-column datasets found in the H5 group (e.g. ``'psd'``) in the ``meta`` dict, default: `True` Returns ------- table : `~gwpy.table.EventTable`
[ "Read", "a", "Table", "from", "a", "PyCBC", "live", "HDF5", "file" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/pycbc.py#L44-L114
train
211,449
gwpy/gwpy
gwpy/table/io/pycbc.py
_find_table_group
def _find_table_group(h5file, ifo=None): """Find the right `h5py.Group` within the given `h5py.File` """ exclude = ('background',) if ifo is None: try: ifo, = [key for key in h5file if key not in exclude] except ValueError as exc: exc.args = ("PyCBC live HDF5 file contains dataset groups " "for multiple interferometers, please specify " "the prefix of the relevant interferometer via " "the `ifo` keyword argument, e.g: `ifo=G1`",) raise try: return h5file[ifo], ifo except KeyError as exc: exc.args = ("No group for ifo %r in PyCBC live HDF5 file" % ifo,) raise
python
def _find_table_group(h5file, ifo=None): """Find the right `h5py.Group` within the given `h5py.File` """ exclude = ('background',) if ifo is None: try: ifo, = [key for key in h5file if key not in exclude] except ValueError as exc: exc.args = ("PyCBC live HDF5 file contains dataset groups " "for multiple interferometers, please specify " "the prefix of the relevant interferometer via " "the `ifo` keyword argument, e.g: `ifo=G1`",) raise try: return h5file[ifo], ifo except KeyError as exc: exc.args = ("No group for ifo %r in PyCBC live HDF5 file" % ifo,) raise
[ "def", "_find_table_group", "(", "h5file", ",", "ifo", "=", "None", ")", ":", "exclude", "=", "(", "'background'", ",", ")", "if", "ifo", "is", "None", ":", "try", ":", "ifo", ",", "=", "[", "key", "for", "key", "in", "h5file", "if", "key", "not", ...
Find the right `h5py.Group` within the given `h5py.File`
[ "Find", "the", "right", "h5py", ".", "Group", "within", "the", "given", "h5py", ".", "File" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/pycbc.py#L117-L134
train
211,450
gwpy/gwpy
gwpy/table/io/pycbc.py
_get_columns
def _get_columns(h5group): """Find valid column names from a PyCBC HDF5 Group Returns a `set` of names. """ columns = set() for name in sorted(h5group): if (not isinstance(h5group[name], h5py.Dataset) or name == 'template_boundaries'): continue if name.endswith('_template') and name[:-9] in columns: continue columns.add(name) return columns - META_COLUMNS
python
def _get_columns(h5group): """Find valid column names from a PyCBC HDF5 Group Returns a `set` of names. """ columns = set() for name in sorted(h5group): if (not isinstance(h5group[name], h5py.Dataset) or name == 'template_boundaries'): continue if name.endswith('_template') and name[:-9] in columns: continue columns.add(name) return columns - META_COLUMNS
[ "def", "_get_columns", "(", "h5group", ")", ":", "columns", "=", "set", "(", ")", "for", "name", "in", "sorted", "(", "h5group", ")", ":", "if", "(", "not", "isinstance", "(", "h5group", "[", "name", "]", ",", "h5py", ".", "Dataset", ")", "or", "na...
Find valid column names from a PyCBC HDF5 Group Returns a `set` of names.
[ "Find", "valid", "column", "names", "from", "a", "PyCBC", "HDF5", "Group" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/pycbc.py#L137-L150
train
211,451
gwpy/gwpy
gwpy/table/io/pycbc.py
_get_extended_metadata
def _get_extended_metadata(h5group): """Extract the extended metadata for a PyCBC table in HDF5 This method packs non-table-column datasets in the given h5group into a metadata `dict` Returns ------- meta : `dict` the metadata dict """ meta = dict() # get PSD try: psd = h5group['psd'] except KeyError: pass else: from gwpy.frequencyseries import FrequencySeries meta['psd'] = FrequencySeries( psd[:], f0=0, df=psd.attrs['delta_f'], name='pycbc_live') # get everything else for key in META_COLUMNS - {'psd'}: try: value = h5group[key][:] except KeyError: pass else: meta[key] = value return meta
python
def _get_extended_metadata(h5group): """Extract the extended metadata for a PyCBC table in HDF5 This method packs non-table-column datasets in the given h5group into a metadata `dict` Returns ------- meta : `dict` the metadata dict """ meta = dict() # get PSD try: psd = h5group['psd'] except KeyError: pass else: from gwpy.frequencyseries import FrequencySeries meta['psd'] = FrequencySeries( psd[:], f0=0, df=psd.attrs['delta_f'], name='pycbc_live') # get everything else for key in META_COLUMNS - {'psd'}: try: value = h5group[key][:] except KeyError: pass else: meta[key] = value return meta
[ "def", "_get_extended_metadata", "(", "h5group", ")", ":", "meta", "=", "dict", "(", ")", "# get PSD", "try", ":", "psd", "=", "h5group", "[", "'psd'", "]", "except", "KeyError", ":", "pass", "else", ":", "from", "gwpy", ".", "frequencyseries", "import", ...
Extract the extended metadata for a PyCBC table in HDF5 This method packs non-table-column datasets in the given h5group into a metadata `dict` Returns ------- meta : `dict` the metadata dict
[ "Extract", "the", "extended", "metadata", "for", "a", "PyCBC", "table", "in", "HDF5" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/pycbc.py#L153-L185
train
211,452
gwpy/gwpy
gwpy/table/io/pycbc.py
filter_empty_files
def filter_empty_files(files, ifo=None): """Remove empty PyCBC-HDF5 files from a list Parameters ---------- files : `list` A list of file paths to test. ifo : `str`, optional prefix for the interferometer of interest (e.g. ``'L1'``), include this for a more robust test of 'emptiness' Returns ------- nonempty : `list` the subset of the input ``files`` that are considered not empty See also -------- empty_hdf5_file for details of the 'emptiness' test """ return type(files)([f for f in files if not empty_hdf5_file(f, ifo=ifo)])
python
def filter_empty_files(files, ifo=None): """Remove empty PyCBC-HDF5 files from a list Parameters ---------- files : `list` A list of file paths to test. ifo : `str`, optional prefix for the interferometer of interest (e.g. ``'L1'``), include this for a more robust test of 'emptiness' Returns ------- nonempty : `list` the subset of the input ``files`` that are considered not empty See also -------- empty_hdf5_file for details of the 'emptiness' test """ return type(files)([f for f in files if not empty_hdf5_file(f, ifo=ifo)])
[ "def", "filter_empty_files", "(", "files", ",", "ifo", "=", "None", ")", ":", "return", "type", "(", "files", ")", "(", "[", "f", "for", "f", "in", "files", "if", "not", "empty_hdf5_file", "(", "f", ",", "ifo", "=", "ifo", ")", "]", ")" ]
Remove empty PyCBC-HDF5 files from a list Parameters ---------- files : `list` A list of file paths to test. ifo : `str`, optional prefix for the interferometer of interest (e.g. ``'L1'``), include this for a more robust test of 'emptiness' Returns ------- nonempty : `list` the subset of the input ``files`` that are considered not empty See also -------- empty_hdf5_file for details of the 'emptiness' test
[ "Remove", "empty", "PyCBC", "-", "HDF5", "files", "from", "a", "list" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/pycbc.py#L188-L210
train
211,453
gwpy/gwpy
gwpy/table/io/pycbc.py
empty_hdf5_file
def empty_hdf5_file(h5f, ifo=None): """Determine whether PyCBC-HDF5 file is empty A file is considered empty if it contains no groups at the base level, or if the ``ifo`` group contains only the ``psd`` dataset. Parameters ---------- h5f : `str` path of the pycbc_live file to test ifo : `str`, optional prefix for the interferometer of interest (e.g. ``'L1'``), include this for a more robust test of 'emptiness' Returns ------- empty : `bool` `True` if the file looks to have no content, otherwise `False` """ # the decorator opens the HDF5 file for us, so h5f is guaranteed to # be an h5py.Group object h5f = h5f.file if list(h5f) == []: return True if ifo is not None and (ifo not in h5f or list(h5f[ifo]) == ['psd']): return True return False
python
def empty_hdf5_file(h5f, ifo=None): """Determine whether PyCBC-HDF5 file is empty A file is considered empty if it contains no groups at the base level, or if the ``ifo`` group contains only the ``psd`` dataset. Parameters ---------- h5f : `str` path of the pycbc_live file to test ifo : `str`, optional prefix for the interferometer of interest (e.g. ``'L1'``), include this for a more robust test of 'emptiness' Returns ------- empty : `bool` `True` if the file looks to have no content, otherwise `False` """ # the decorator opens the HDF5 file for us, so h5f is guaranteed to # be an h5py.Group object h5f = h5f.file if list(h5f) == []: return True if ifo is not None and (ifo not in h5f or list(h5f[ifo]) == ['psd']): return True return False
[ "def", "empty_hdf5_file", "(", "h5f", ",", "ifo", "=", "None", ")", ":", "# the decorator opens the HDF5 file for us, so h5f is guaranteed to", "# be an h5py.Group object", "h5f", "=", "h5f", ".", "file", "if", "list", "(", "h5f", ")", "==", "[", "]", ":", "return...
Determine whether PyCBC-HDF5 file is empty A file is considered empty if it contains no groups at the base level, or if the ``ifo`` group contains only the ``psd`` dataset. Parameters ---------- h5f : `str` path of the pycbc_live file to test ifo : `str`, optional prefix for the interferometer of interest (e.g. ``'L1'``), include this for a more robust test of 'emptiness' Returns ------- empty : `bool` `True` if the file looks to have no content, otherwise `False`
[ "Determine", "whether", "PyCBC", "-", "HDF5", "file", "is", "empty" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/pycbc.py#L214-L241
train
211,454
gwpy/gwpy
gwpy/table/io/pycbc.py
identify_pycbc_live
def identify_pycbc_live(origin, filepath, fileobj, *args, **kwargs): """Identify a PyCBC Live file as an HDF5 with the correct name """ if identify_hdf5(origin, filepath, fileobj, *args, **kwargs) and ( filepath is not None and PYCBC_FILENAME.match(basename(filepath))): return True return False
python
def identify_pycbc_live(origin, filepath, fileobj, *args, **kwargs): """Identify a PyCBC Live file as an HDF5 with the correct name """ if identify_hdf5(origin, filepath, fileobj, *args, **kwargs) and ( filepath is not None and PYCBC_FILENAME.match(basename(filepath))): return True return False
[ "def", "identify_pycbc_live", "(", "origin", ",", "filepath", ",", "fileobj", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "identify_hdf5", "(", "origin", ",", "filepath", ",", "fileobj", ",", "*", "args", ",", "*", "*", "kwargs", ")", ...
Identify a PyCBC Live file as an HDF5 with the correct name
[ "Identify", "a", "PyCBC", "Live", "file", "as", "an", "HDF5", "with", "the", "correct", "name" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/pycbc.py#L244-L250
train
211,455
gwpy/gwpy
gwpy/table/io/pycbc.py
get_new_snr
def get_new_snr(h5group, q=6., n=2.): # pylint: disable=invalid-name """Calculate the 'new SNR' column for this PyCBC HDF5 table group """ newsnr = h5group['snr'][:].copy() rchisq = h5group['chisq'][:] idx = numpy.where(rchisq > 1.)[0] newsnr[idx] *= _new_snr_scale(rchisq[idx], q=q, n=n) return newsnr
python
def get_new_snr(h5group, q=6., n=2.): # pylint: disable=invalid-name """Calculate the 'new SNR' column for this PyCBC HDF5 table group """ newsnr = h5group['snr'][:].copy() rchisq = h5group['chisq'][:] idx = numpy.where(rchisq > 1.)[0] newsnr[idx] *= _new_snr_scale(rchisq[idx], q=q, n=n) return newsnr
[ "def", "get_new_snr", "(", "h5group", ",", "q", "=", "6.", ",", "n", "=", "2.", ")", ":", "# pylint: disable=invalid-name", "newsnr", "=", "h5group", "[", "'snr'", "]", "[", ":", "]", ".", "copy", "(", ")", "rchisq", "=", "h5group", "[", "'chisq'", ...
Calculate the 'new SNR' column for this PyCBC HDF5 table group
[ "Calculate", "the", "new", "SNR", "column", "for", "this", "PyCBC", "HDF5", "table", "group" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/pycbc.py#L271-L278
train
211,456
gwpy/gwpy
gwpy/table/io/pycbc.py
get_mchirp
def get_mchirp(h5group): """Calculate the chipr mass column for this PyCBC HDF5 table group """ mass1 = h5group['mass1'][:] mass2 = h5group['mass2'][:] return (mass1 * mass2) ** (3/5.) / (mass1 + mass2) ** (1/5.)
python
def get_mchirp(h5group): """Calculate the chipr mass column for this PyCBC HDF5 table group """ mass1 = h5group['mass1'][:] mass2 = h5group['mass2'][:] return (mass1 * mass2) ** (3/5.) / (mass1 + mass2) ** (1/5.)
[ "def", "get_mchirp", "(", "h5group", ")", ":", "mass1", "=", "h5group", "[", "'mass1'", "]", "[", ":", "]", "mass2", "=", "h5group", "[", "'mass2'", "]", "[", ":", "]", "return", "(", "mass1", "*", "mass2", ")", "**", "(", "3", "/", "5.", ")", ...
Calculate the chipr mass column for this PyCBC HDF5 table group
[ "Calculate", "the", "chipr", "mass", "column", "for", "this", "PyCBC", "HDF5", "table", "group" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/pycbc.py#L284-L289
train
211,457
gwpy/gwpy
gwpy/types/sliceutils.py
format_nd_slice
def format_nd_slice(item, ndim): """Preformat a getitem argument as an N-tuple """ if not isinstance(item, tuple): item = (item,) return item[:ndim] + (None,) * (ndim - len(item))
python
def format_nd_slice(item, ndim): """Preformat a getitem argument as an N-tuple """ if not isinstance(item, tuple): item = (item,) return item[:ndim] + (None,) * (ndim - len(item))
[ "def", "format_nd_slice", "(", "item", ",", "ndim", ")", ":", "if", "not", "isinstance", "(", "item", ",", "tuple", ")", ":", "item", "=", "(", "item", ",", ")", "return", "item", "[", ":", "ndim", "]", "+", "(", "None", ",", ")", "*", "(", "nd...
Preformat a getitem argument as an N-tuple
[ "Preformat", "a", "getitem", "argument", "as", "an", "N", "-", "tuple" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/sliceutils.py#L31-L36
train
211,458
gwpy/gwpy
gwpy/types/sliceutils.py
slice_axis_attributes
def slice_axis_attributes(old, oldaxis, new, newaxis, slice_): """Set axis metadata for ``new`` by slicing an axis of ``old`` This is primarily for internal use in slice functions (__getitem__) Parameters ---------- old : `Array` array being sliced oldaxis : ``'x'`` or ``'y'`` the axis to slice new : `Array` product of slice newaxis : ``'x'`` or ``'y'`` the target axis slice_ : `slice`, `numpy.ndarray` the slice to apply to old (or an index array) See Also -------- Series.__getitem__ Array2D.__getitem__ """ slice_ = as_slice(slice_) # attribute names index = '{}index'.format origin = '{}0'.format delta = 'd{}'.format # if array has an index set already, use it if hasattr(old, '_{}index'.format(oldaxis)): setattr(new, index(newaxis), getattr(old, index(oldaxis))[slice_]) # otherwise if using a slice, use origin and delta properties elif isinstance(slice_, slice) or not numpy.sum(slice_): if isinstance(slice_, slice): offset = slice_.start or 0 step = slice_.step or 1 else: # empty ndarray slice (so just set attributes) offset = 0 step = 1 dx = getattr(old, delta(oldaxis)) x0 = getattr(old, origin(oldaxis)) # set new.x0 / new.y0 setattr(new, origin(newaxis), x0 + offset * dx) # set new.dx / new.dy setattr(new, delta(newaxis), dx * step) # otherwise slice with an index array else: setattr(new, index(newaxis), getattr(old, index(oldaxis))[slice_]) return new
python
def slice_axis_attributes(old, oldaxis, new, newaxis, slice_): """Set axis metadata for ``new`` by slicing an axis of ``old`` This is primarily for internal use in slice functions (__getitem__) Parameters ---------- old : `Array` array being sliced oldaxis : ``'x'`` or ``'y'`` the axis to slice new : `Array` product of slice newaxis : ``'x'`` or ``'y'`` the target axis slice_ : `slice`, `numpy.ndarray` the slice to apply to old (or an index array) See Also -------- Series.__getitem__ Array2D.__getitem__ """ slice_ = as_slice(slice_) # attribute names index = '{}index'.format origin = '{}0'.format delta = 'd{}'.format # if array has an index set already, use it if hasattr(old, '_{}index'.format(oldaxis)): setattr(new, index(newaxis), getattr(old, index(oldaxis))[slice_]) # otherwise if using a slice, use origin and delta properties elif isinstance(slice_, slice) or not numpy.sum(slice_): if isinstance(slice_, slice): offset = slice_.start or 0 step = slice_.step or 1 else: # empty ndarray slice (so just set attributes) offset = 0 step = 1 dx = getattr(old, delta(oldaxis)) x0 = getattr(old, origin(oldaxis)) # set new.x0 / new.y0 setattr(new, origin(newaxis), x0 + offset * dx) # set new.dx / new.dy setattr(new, delta(newaxis), dx * step) # otherwise slice with an index array else: setattr(new, index(newaxis), getattr(old, index(oldaxis))[slice_]) return new
[ "def", "slice_axis_attributes", "(", "old", ",", "oldaxis", ",", "new", ",", "newaxis", ",", "slice_", ")", ":", "slice_", "=", "as_slice", "(", "slice_", ")", "# attribute names", "index", "=", "'{}index'", ".", "format", "origin", "=", "'{}0'", ".", "for...
Set axis metadata for ``new`` by slicing an axis of ``old`` This is primarily for internal use in slice functions (__getitem__) Parameters ---------- old : `Array` array being sliced oldaxis : ``'x'`` or ``'y'`` the axis to slice new : `Array` product of slice newaxis : ``'x'`` or ``'y'`` the target axis slice_ : `slice`, `numpy.ndarray` the slice to apply to old (or an index array) See Also -------- Series.__getitem__ Array2D.__getitem__
[ "Set", "axis", "metadata", "for", "new", "by", "slicing", "an", "axis", "of", "old" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/sliceutils.py#L39-L99
train
211,459
gwpy/gwpy
gwpy/types/sliceutils.py
null_slice
def null_slice(slice_): """Returns True if a slice will have no affect """ try: slice_ = as_slice(slice_) except TypeError: return False if isinstance(slice_, numpy.ndarray) and numpy.all(slice_): return True if isinstance(slice_, slice) and slice_ in ( slice(None, None, None), slice(0, None, 1) ): return True
python
def null_slice(slice_): """Returns True if a slice will have no affect """ try: slice_ = as_slice(slice_) except TypeError: return False if isinstance(slice_, numpy.ndarray) and numpy.all(slice_): return True if isinstance(slice_, slice) and slice_ in ( slice(None, None, None), slice(0, None, 1) ): return True
[ "def", "null_slice", "(", "slice_", ")", ":", "try", ":", "slice_", "=", "as_slice", "(", "slice_", ")", "except", "TypeError", ":", "return", "False", "if", "isinstance", "(", "slice_", ",", "numpy", ".", "ndarray", ")", "and", "numpy", ".", "all", "(...
Returns True if a slice will have no affect
[ "Returns", "True", "if", "a", "slice", "will", "have", "no", "affect" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/sliceutils.py#L102-L115
train
211,460
gwpy/gwpy
gwpy/types/sliceutils.py
as_slice
def as_slice(slice_): """Convert an object to a slice, if possible """ if isinstance(slice_, (Integral, numpy.integer, type(None))): return slice(0, None, 1) if isinstance(slice_, (slice, numpy.ndarray)): return slice_ if isinstance(slice_, (list, tuple)): return tuple(map(as_slice, slice_)) raise TypeError("Cannot format {!r} as slice".format(slice_))
python
def as_slice(slice_): """Convert an object to a slice, if possible """ if isinstance(slice_, (Integral, numpy.integer, type(None))): return slice(0, None, 1) if isinstance(slice_, (slice, numpy.ndarray)): return slice_ if isinstance(slice_, (list, tuple)): return tuple(map(as_slice, slice_)) raise TypeError("Cannot format {!r} as slice".format(slice_))
[ "def", "as_slice", "(", "slice_", ")", ":", "if", "isinstance", "(", "slice_", ",", "(", "Integral", ",", "numpy", ".", "integer", ",", "type", "(", "None", ")", ")", ")", ":", "return", "slice", "(", "0", ",", "None", ",", "1", ")", "if", "isins...
Convert an object to a slice, if possible
[ "Convert", "an", "object", "to", "a", "slice", "if", "possible" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/sliceutils.py#L118-L130
train
211,461
gwpy/gwpy
gwpy/detector/io/cis.py
query
def query(name, use_kerberos=None, debug=False): """Query the Channel Information System for details on the given channel name Parameters ---------- name : `~gwpy.detector.Channel`, or `str` Name of the channel of interest Returns ------- channel : `~gwpy.detector.Channel` Channel with all details as acquired from the CIS """ url = '%s/?q=%s' % (CIS_API_URL, name) more = True out = ChannelList() while more: reply = _get(url, use_kerberos=use_kerberos, debug=debug) try: out.extend(map(parse_json, reply[u'results'])) except KeyError: pass except TypeError: # reply is a list out.extend(map(parse_json, reply)) break more = 'next' in reply and reply['next'] is not None if more: url = reply['next'] else: break out.sort(key=lambda c: c.name) return out
python
def query(name, use_kerberos=None, debug=False): """Query the Channel Information System for details on the given channel name Parameters ---------- name : `~gwpy.detector.Channel`, or `str` Name of the channel of interest Returns ------- channel : `~gwpy.detector.Channel` Channel with all details as acquired from the CIS """ url = '%s/?q=%s' % (CIS_API_URL, name) more = True out = ChannelList() while more: reply = _get(url, use_kerberos=use_kerberos, debug=debug) try: out.extend(map(parse_json, reply[u'results'])) except KeyError: pass except TypeError: # reply is a list out.extend(map(parse_json, reply)) break more = 'next' in reply and reply['next'] is not None if more: url = reply['next'] else: break out.sort(key=lambda c: c.name) return out
[ "def", "query", "(", "name", ",", "use_kerberos", "=", "None", ",", "debug", "=", "False", ")", ":", "url", "=", "'%s/?q=%s'", "%", "(", "CIS_API_URL", ",", "name", ")", "more", "=", "True", "out", "=", "ChannelList", "(", ")", "while", "more", ":", ...
Query the Channel Information System for details on the given channel name Parameters ---------- name : `~gwpy.detector.Channel`, or `str` Name of the channel of interest Returns ------- channel : `~gwpy.detector.Channel` Channel with all details as acquired from the CIS
[ "Query", "the", "Channel", "Information", "System", "for", "details", "on", "the", "given", "channel", "name" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/io/cis.py#L44-L76
train
211,462
gwpy/gwpy
gwpy/detector/io/cis.py
_get
def _get(url, use_kerberos=None, debug=False): """Perform a GET query against the CIS """ from ligo.org import request # perform query try: response = request(url, debug=debug, use_kerberos=use_kerberos) except HTTPError: raise ValueError("Channel not found at URL %s " "Information System. Please double check the " "name and try again." % url) if isinstance(response, bytes): response = response.decode('utf-8') return json.loads(response)
python
def _get(url, use_kerberos=None, debug=False): """Perform a GET query against the CIS """ from ligo.org import request # perform query try: response = request(url, debug=debug, use_kerberos=use_kerberos) except HTTPError: raise ValueError("Channel not found at URL %s " "Information System. Please double check the " "name and try again." % url) if isinstance(response, bytes): response = response.decode('utf-8') return json.loads(response)
[ "def", "_get", "(", "url", ",", "use_kerberos", "=", "None", ",", "debug", "=", "False", ")", ":", "from", "ligo", ".", "org", "import", "request", "# perform query", "try", ":", "response", "=", "request", "(", "url", ",", "debug", "=", "debug", ",", ...
Perform a GET query against the CIS
[ "Perform", "a", "GET", "query", "against", "the", "CIS" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/io/cis.py#L79-L94
train
211,463
gwpy/gwpy
gwpy/detector/io/cis.py
parse_json
def parse_json(data): """Parse the input data dict into a `Channel`. Parameters ---------- data : `dict` input data from CIS json query Returns ------- c : `Channel` a `Channel` built from the data """ sample_rate = data['datarate'] unit = data['units'] dtype = CIS_DATA_TYPE[data['datatype']] model = data['source'] url = data['displayurl'] return Channel(data['name'], sample_rate=sample_rate, unit=unit, dtype=dtype, model=model, url=url)
python
def parse_json(data): """Parse the input data dict into a `Channel`. Parameters ---------- data : `dict` input data from CIS json query Returns ------- c : `Channel` a `Channel` built from the data """ sample_rate = data['datarate'] unit = data['units'] dtype = CIS_DATA_TYPE[data['datatype']] model = data['source'] url = data['displayurl'] return Channel(data['name'], sample_rate=sample_rate, unit=unit, dtype=dtype, model=model, url=url)
[ "def", "parse_json", "(", "data", ")", ":", "sample_rate", "=", "data", "[", "'datarate'", "]", "unit", "=", "data", "[", "'units'", "]", "dtype", "=", "CIS_DATA_TYPE", "[", "data", "[", "'datatype'", "]", "]", "model", "=", "data", "[", "'source'", "]...
Parse the input data dict into a `Channel`. Parameters ---------- data : `dict` input data from CIS json query Returns ------- c : `Channel` a `Channel` built from the data
[ "Parse", "the", "input", "data", "dict", "into", "a", "Channel", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/detector/io/cis.py#L97-L116
train
211,464
gwpy/gwpy
gwpy/cli/timeseries.py
TimeSeries.get_ylabel
def get_ylabel(self): """Text for y-axis label, check if channel defines it """ units = self.units if len(units) == 1 and str(units[0]) == '': # dimensionless return '' if len(units) == 1 and self.usetex: return units[0].to_string('latex') elif len(units) == 1: return units[0].to_string() elif len(units) > 1: return 'Multiple units' return super(TimeSeries, self).get_ylabel()
python
def get_ylabel(self): """Text for y-axis label, check if channel defines it """ units = self.units if len(units) == 1 and str(units[0]) == '': # dimensionless return '' if len(units) == 1 and self.usetex: return units[0].to_string('latex') elif len(units) == 1: return units[0].to_string() elif len(units) > 1: return 'Multiple units' return super(TimeSeries, self).get_ylabel()
[ "def", "get_ylabel", "(", "self", ")", ":", "units", "=", "self", ".", "units", "if", "len", "(", "units", ")", "==", "1", "and", "str", "(", "units", "[", "0", "]", ")", "==", "''", ":", "# dimensionless", "return", "''", "if", "len", "(", "unit...
Text for y-axis label, check if channel defines it
[ "Text", "for", "y", "-", "axis", "label", "check", "if", "channel", "defines", "it" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/cli/timeseries.py#L35-L47
train
211,465
gwpy/gwpy
gwpy/signal/spectral/_lal.py
generate_fft_plan
def generate_fft_plan(length, level=None, dtype='float64', forward=True): """Build a `REAL8FFTPlan` for a fast Fourier transform. Parameters ---------- length : `int` number of samples to plan for in each FFT. level : `int`, optional amount of work to do when planning the FFT, default set by `LAL_FFTPLAN_LEVEL` module variable. dtype : :class:`numpy.dtype`, `type`, `str`, optional numeric type of data to plan for forward : bool, optional, default: `True` whether to create a forward or reverse FFT plan Returns ------- plan : `REAL8FFTPlan` or similar FFT plan of the relevant data type """ from ...utils.lal import (find_typed_function, to_lal_type_str) # generate key for caching plan laltype = to_lal_type_str(dtype) key = (length, bool(forward), laltype) # find existing plan try: return LAL_FFTPLANS[key] # or create one except KeyError: create = find_typed_function(dtype, 'Create', 'FFTPlan') if level is None: level = LAL_FFTPLAN_LEVEL LAL_FFTPLANS[key] = create(length, int(bool(forward)), level) return LAL_FFTPLANS[key]
python
def generate_fft_plan(length, level=None, dtype='float64', forward=True): """Build a `REAL8FFTPlan` for a fast Fourier transform. Parameters ---------- length : `int` number of samples to plan for in each FFT. level : `int`, optional amount of work to do when planning the FFT, default set by `LAL_FFTPLAN_LEVEL` module variable. dtype : :class:`numpy.dtype`, `type`, `str`, optional numeric type of data to plan for forward : bool, optional, default: `True` whether to create a forward or reverse FFT plan Returns ------- plan : `REAL8FFTPlan` or similar FFT plan of the relevant data type """ from ...utils.lal import (find_typed_function, to_lal_type_str) # generate key for caching plan laltype = to_lal_type_str(dtype) key = (length, bool(forward), laltype) # find existing plan try: return LAL_FFTPLANS[key] # or create one except KeyError: create = find_typed_function(dtype, 'Create', 'FFTPlan') if level is None: level = LAL_FFTPLAN_LEVEL LAL_FFTPLANS[key] = create(length, int(bool(forward)), level) return LAL_FFTPLANS[key]
[ "def", "generate_fft_plan", "(", "length", ",", "level", "=", "None", ",", "dtype", "=", "'float64'", ",", "forward", "=", "True", ")", ":", "from", ".", ".", ".", "utils", ".", "lal", "import", "(", "find_typed_function", ",", "to_lal_type_str", ")", "#...
Build a `REAL8FFTPlan` for a fast Fourier transform. Parameters ---------- length : `int` number of samples to plan for in each FFT. level : `int`, optional amount of work to do when planning the FFT, default set by `LAL_FFTPLAN_LEVEL` module variable. dtype : :class:`numpy.dtype`, `type`, `str`, optional numeric type of data to plan for forward : bool, optional, default: `True` whether to create a forward or reverse FFT plan Returns ------- plan : `REAL8FFTPlan` or similar FFT plan of the relevant data type
[ "Build", "a", "REAL8FFTPlan", "for", "a", "fast", "Fourier", "transform", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_lal.py#L48-L86
train
211,466
gwpy/gwpy
gwpy/signal/spectral/_lal.py
generate_window
def generate_window(length, window=None, dtype='float64'): """Generate a time-domain window for use in a LAL FFT Parameters ---------- length : `int` length of window in samples. window : `str`, `tuple` name of window to generate, default: ``('kaiser', 24)``. Give `str` for simple windows, or tuple of ``(name, *args)`` for complicated windows dtype : :class:`numpy.dtype` numeric type of window, default `numpy.dtype(numpy.float64)` Returns ------- `window` : `REAL8Window` or similar time-domain window to use for FFT """ from ...utils.lal import (find_typed_function, to_lal_type_str) if window is None: window = ('kaiser', 24) # generate key for caching window laltype = to_lal_type_str(dtype) key = (length, str(window), laltype) # find existing window try: return LAL_WINDOWS[key] # or create one except KeyError: # parse window as name and arguments, e.g. ('kaiser', 24) if isinstance(window, (list, tuple)): window, beta = window else: beta = 0 window = canonical_name(window) # create window create = find_typed_function(dtype, 'CreateNamed', 'Window') LAL_WINDOWS[key] = create(window, beta, length) return LAL_WINDOWS[key]
python
def generate_window(length, window=None, dtype='float64'): """Generate a time-domain window for use in a LAL FFT Parameters ---------- length : `int` length of window in samples. window : `str`, `tuple` name of window to generate, default: ``('kaiser', 24)``. Give `str` for simple windows, or tuple of ``(name, *args)`` for complicated windows dtype : :class:`numpy.dtype` numeric type of window, default `numpy.dtype(numpy.float64)` Returns ------- `window` : `REAL8Window` or similar time-domain window to use for FFT """ from ...utils.lal import (find_typed_function, to_lal_type_str) if window is None: window = ('kaiser', 24) # generate key for caching window laltype = to_lal_type_str(dtype) key = (length, str(window), laltype) # find existing window try: return LAL_WINDOWS[key] # or create one except KeyError: # parse window as name and arguments, e.g. ('kaiser', 24) if isinstance(window, (list, tuple)): window, beta = window else: beta = 0 window = canonical_name(window) # create window create = find_typed_function(dtype, 'CreateNamed', 'Window') LAL_WINDOWS[key] = create(window, beta, length) return LAL_WINDOWS[key]
[ "def", "generate_window", "(", "length", ",", "window", "=", "None", ",", "dtype", "=", "'float64'", ")", ":", "from", ".", ".", ".", "utils", ".", "lal", "import", "(", "find_typed_function", ",", "to_lal_type_str", ")", "if", "window", "is", "None", ":...
Generate a time-domain window for use in a LAL FFT Parameters ---------- length : `int` length of window in samples. window : `str`, `tuple` name of window to generate, default: ``('kaiser', 24)``. Give `str` for simple windows, or tuple of ``(name, *args)`` for complicated windows dtype : :class:`numpy.dtype` numeric type of window, default `numpy.dtype(numpy.float64)` Returns ------- `window` : `REAL8Window` or similar time-domain window to use for FFT
[ "Generate", "a", "time", "-", "domain", "window", "for", "use", "in", "a", "LAL", "FFT" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_lal.py#L89-L133
train
211,467
gwpy/gwpy
gwpy/signal/spectral/_lal.py
window_from_array
def window_from_array(array): """Convert a `numpy.ndarray` into a LAL `Window` object """ from ...utils.lal import (find_typed_function) dtype = array.dtype # create sequence seq = find_typed_function(dtype, 'Create', 'Sequence')(array.size) seq.data = array # create window from sequence return find_typed_function(dtype, 'Create', 'WindowFromSequence')(seq)
python
def window_from_array(array): """Convert a `numpy.ndarray` into a LAL `Window` object """ from ...utils.lal import (find_typed_function) dtype = array.dtype # create sequence seq = find_typed_function(dtype, 'Create', 'Sequence')(array.size) seq.data = array # create window from sequence return find_typed_function(dtype, 'Create', 'WindowFromSequence')(seq)
[ "def", "window_from_array", "(", "array", ")", ":", "from", ".", ".", ".", "utils", ".", "lal", "import", "(", "find_typed_function", ")", "dtype", "=", "array", ".", "dtype", "# create sequence", "seq", "=", "find_typed_function", "(", "dtype", ",", "'Creat...
Convert a `numpy.ndarray` into a LAL `Window` object
[ "Convert", "a", "numpy", ".", "ndarray", "into", "a", "LAL", "Window", "object" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_lal.py#L136-L148
train
211,468
gwpy/gwpy
gwpy/signal/spectral/_lal.py
_lal_spectrum
def _lal_spectrum(timeseries, segmentlength, noverlap=None, method='welch', window=None, plan=None): """Generate a PSD `FrequencySeries` using |lal|_ Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. method : `str` average PSD method noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `lal.REAL8Window`, optional window to apply to timeseries prior to FFT plan : `lal.REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` """ import lal from ...utils.lal import find_typed_function # default to 50% overlap if noverlap is None: noverlap = int(segmentlength // 2) stride = segmentlength - noverlap # get window if window is None: window = generate_window(segmentlength, dtype=timeseries.dtype) # get FFT plan if plan is None: plan = generate_fft_plan(segmentlength, dtype=timeseries.dtype) method = method.lower() # check data length size = timeseries.size numsegs = 1 + int((size - segmentlength) / stride) if method == 'median-mean' and numsegs % 2: numsegs -= 1 if not numsegs: raise ValueError("Cannot calculate median-mean spectrum with " "this small a TimeSeries.") required = int((numsegs - 1) * stride + segmentlength) if size != required: warnings.warn("Data array is the wrong size for the correct number " "of averages given the input parameters. The trailing " "%d samples will not be used in this calculation." % (size - required)) timeseries = timeseries[:required] # generate output spectrum create = find_typed_function(timeseries.dtype, 'Create', 'FrequencySeries') lalfs = create(timeseries.name, lal.LIGOTimeGPS(timeseries.epoch.gps), 0, 1 / segmentlength, lal.StrainUnit, int(segmentlength // 2 + 1)) # find LAL method (e.g. 
median-mean -> lal.REAL8AverageSpectrumMedianMean) methodname = ''.join(map(str.title, re.split('[-_]', method))) spec_func = find_typed_function(timeseries.dtype, '', 'AverageSpectrum{}'.format(methodname)) # calculate spectrum spec_func(lalfs, timeseries.to_lal(), segmentlength, stride, window, plan) # format and return spec = FrequencySeries.from_lal(lalfs) spec.name = timeseries.name spec.channel = timeseries.channel spec.override_unit(scale_timeseries_unit( timeseries.unit, scaling='density')) return spec
python
def _lal_spectrum(timeseries, segmentlength, noverlap=None, method='welch', window=None, plan=None): """Generate a PSD `FrequencySeries` using |lal|_ Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. method : `str` average PSD method noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `lal.REAL8Window`, optional window to apply to timeseries prior to FFT plan : `lal.REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` """ import lal from ...utils.lal import find_typed_function # default to 50% overlap if noverlap is None: noverlap = int(segmentlength // 2) stride = segmentlength - noverlap # get window if window is None: window = generate_window(segmentlength, dtype=timeseries.dtype) # get FFT plan if plan is None: plan = generate_fft_plan(segmentlength, dtype=timeseries.dtype) method = method.lower() # check data length size = timeseries.size numsegs = 1 + int((size - segmentlength) / stride) if method == 'median-mean' and numsegs % 2: numsegs -= 1 if not numsegs: raise ValueError("Cannot calculate median-mean spectrum with " "this small a TimeSeries.") required = int((numsegs - 1) * stride + segmentlength) if size != required: warnings.warn("Data array is the wrong size for the correct number " "of averages given the input parameters. The trailing " "%d samples will not be used in this calculation." % (size - required)) timeseries = timeseries[:required] # generate output spectrum create = find_typed_function(timeseries.dtype, 'Create', 'FrequencySeries') lalfs = create(timeseries.name, lal.LIGOTimeGPS(timeseries.epoch.gps), 0, 1 / segmentlength, lal.StrainUnit, int(segmentlength // 2 + 1)) # find LAL method (e.g. 
median-mean -> lal.REAL8AverageSpectrumMedianMean) methodname = ''.join(map(str.title, re.split('[-_]', method))) spec_func = find_typed_function(timeseries.dtype, '', 'AverageSpectrum{}'.format(methodname)) # calculate spectrum spec_func(lalfs, timeseries.to_lal(), segmentlength, stride, window, plan) # format and return spec = FrequencySeries.from_lal(lalfs) spec.name = timeseries.name spec.channel = timeseries.channel spec.override_unit(scale_timeseries_unit( timeseries.unit, scaling='density')) return spec
[ "def", "_lal_spectrum", "(", "timeseries", ",", "segmentlength", ",", "noverlap", "=", "None", ",", "method", "=", "'welch'", ",", "window", "=", "None", ",", "plan", "=", "None", ")", ":", "import", "lal", "from", ".", ".", ".", "utils", ".", "lal", ...
Generate a PSD `FrequencySeries` using |lal|_ Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. method : `str` average PSD method noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `lal.REAL8Window`, optional window to apply to timeseries prior to FFT plan : `lal.REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries`
[ "Generate", "a", "PSD", "FrequencySeries", "using", "|lal|_" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_lal.py#L153-L237
train
211,469
gwpy/gwpy
gwpy/signal/spectral/_lal.py
welch
def welch(timeseries, segmentlength, noverlap=None, window=None, plan=None): """Calculate an PSD of this `TimeSeries` using Welch's method Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `tuple`, `str`, optional window parameters to apply to timeseries prior to FFT plan : `REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` See also -------- lal.REAL8AverageSpectrumWelch """ return _lal_spectrum(timeseries, segmentlength, noverlap=noverlap, method='welch', window=window, plan=plan)
python
def welch(timeseries, segmentlength, noverlap=None, window=None, plan=None): """Calculate an PSD of this `TimeSeries` using Welch's method Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `tuple`, `str`, optional window parameters to apply to timeseries prior to FFT plan : `REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` See also -------- lal.REAL8AverageSpectrumWelch """ return _lal_spectrum(timeseries, segmentlength, noverlap=noverlap, method='welch', window=window, plan=plan)
[ "def", "welch", "(", "timeseries", ",", "segmentlength", ",", "noverlap", "=", "None", ",", "window", "=", "None", ",", "plan", "=", "None", ")", ":", "return", "_lal_spectrum", "(", "timeseries", ",", "segmentlength", ",", "noverlap", "=", "noverlap", ","...
Calculate an PSD of this `TimeSeries` using Welch's method Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `tuple`, `str`, optional window parameters to apply to timeseries prior to FFT plan : `REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` See also -------- lal.REAL8AverageSpectrumWelch
[ "Calculate", "an", "PSD", "of", "this", "TimeSeries", "using", "Welch", "s", "method" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_lal.py#L240-L270
train
211,470
gwpy/gwpy
gwpy/signal/spectral/_lal.py
bartlett
def bartlett(timeseries, segmentlength, noverlap=None, window=None, plan=None): # pylint: disable=unused-argument """Calculate an PSD of this `TimeSeries` using Bartlett's method Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `tuple`, `str`, optional window parameters to apply to timeseries prior to FFT plan : `REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` See also -------- lal.REAL8AverageSpectrumWelch """ return _lal_spectrum(timeseries, segmentlength, noverlap=0, method='welch', window=window, plan=plan)
python
def bartlett(timeseries, segmentlength, noverlap=None, window=None, plan=None): # pylint: disable=unused-argument """Calculate an PSD of this `TimeSeries` using Bartlett's method Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `tuple`, `str`, optional window parameters to apply to timeseries prior to FFT plan : `REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` See also -------- lal.REAL8AverageSpectrumWelch """ return _lal_spectrum(timeseries, segmentlength, noverlap=0, method='welch', window=window, plan=plan)
[ "def", "bartlett", "(", "timeseries", ",", "segmentlength", ",", "noverlap", "=", "None", ",", "window", "=", "None", ",", "plan", "=", "None", ")", ":", "# pylint: disable=unused-argument", "return", "_lal_spectrum", "(", "timeseries", ",", "segmentlength", ","...
Calculate an PSD of this `TimeSeries` using Bartlett's method Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `tuple`, `str`, optional window parameters to apply to timeseries prior to FFT plan : `REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` See also -------- lal.REAL8AverageSpectrumWelch
[ "Calculate", "an", "PSD", "of", "this", "TimeSeries", "using", "Bartlett", "s", "method" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_lal.py#L273-L304
train
211,471
gwpy/gwpy
gwpy/signal/spectral/_lal.py
median
def median(timeseries, segmentlength, noverlap=None, window=None, plan=None): """Calculate a PSD of this `TimeSeries` using a median average method The median average is similar to Welch's method, using a median average rather than mean. Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `tuple`, `str`, optional window parameters to apply to timeseries prior to FFT plan : `REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` See also -------- lal.REAL8AverageSpectrumMedian """ return _lal_spectrum(timeseries, segmentlength, noverlap=noverlap, method='median', window=window, plan=plan)
python
def median(timeseries, segmentlength, noverlap=None, window=None, plan=None): """Calculate a PSD of this `TimeSeries` using a median average method The median average is similar to Welch's method, using a median average rather than mean. Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `tuple`, `str`, optional window parameters to apply to timeseries prior to FFT plan : `REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` See also -------- lal.REAL8AverageSpectrumMedian """ return _lal_spectrum(timeseries, segmentlength, noverlap=noverlap, method='median', window=window, plan=plan)
[ "def", "median", "(", "timeseries", ",", "segmentlength", ",", "noverlap", "=", "None", ",", "window", "=", "None", ",", "plan", "=", "None", ")", ":", "return", "_lal_spectrum", "(", "timeseries", ",", "segmentlength", ",", "noverlap", "=", "noverlap", ",...
Calculate a PSD of this `TimeSeries` using a median average method The median average is similar to Welch's method, using a median average rather than mean. Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `tuple`, `str`, optional window parameters to apply to timeseries prior to FFT plan : `REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` See also -------- lal.REAL8AverageSpectrumMedian
[ "Calculate", "a", "PSD", "of", "this", "TimeSeries", "using", "a", "median", "average", "method" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_lal.py#L307-L340
train
211,472
gwpy/gwpy
gwpy/signal/spectral/_lal.py
median_mean
def median_mean(timeseries, segmentlength, noverlap=None, window=None, plan=None): """Calculate a PSD of this `TimeSeries` using a median-mean average method The median-mean average method divides overlapping segments into "even" and "odd" segments, and computes the bin-by-bin median of the "even" segments and the "odd" segments, and then takes the bin-by-bin average of these two median averages. Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `tuple`, `str`, optional window parameters to apply to timeseries prior to FFT plan : `REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` See also -------- lal.REAL8AverageSpectrumMedianMean """ return _lal_spectrum(timeseries, segmentlength, noverlap=noverlap, method='median-mean', window=window, plan=plan)
python
def median_mean(timeseries, segmentlength, noverlap=None, window=None, plan=None): """Calculate a PSD of this `TimeSeries` using a median-mean average method The median-mean average method divides overlapping segments into "even" and "odd" segments, and computes the bin-by-bin median of the "even" segments and the "odd" segments, and then takes the bin-by-bin average of these two median averages. Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `tuple`, `str`, optional window parameters to apply to timeseries prior to FFT plan : `REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` See also -------- lal.REAL8AverageSpectrumMedianMean """ return _lal_spectrum(timeseries, segmentlength, noverlap=noverlap, method='median-mean', window=window, plan=plan)
[ "def", "median_mean", "(", "timeseries", ",", "segmentlength", ",", "noverlap", "=", "None", ",", "window", "=", "None", ",", "plan", "=", "None", ")", ":", "return", "_lal_spectrum", "(", "timeseries", ",", "segmentlength", ",", "noverlap", "=", "noverlap",...
Calculate a PSD of this `TimeSeries` using a median-mean average method The median-mean average method divides overlapping segments into "even" and "odd" segments, and computes the bin-by-bin median of the "even" segments and the "odd" segments, and then takes the bin-by-bin average of these two median averages. Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. noverlap : `int` number of samples to overlap between segments, defaults to 50%. window : `tuple`, `str`, optional window parameters to apply to timeseries prior to FFT plan : `REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` See also -------- lal.REAL8AverageSpectrumMedianMean
[ "Calculate", "a", "PSD", "of", "this", "TimeSeries", "using", "a", "median", "-", "mean", "average", "method" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_lal.py#L343-L379
train
211,473
gwpy/gwpy
gwpy/timeseries/io/gwf/lalframe.py
open_data_source
def open_data_source(source): """Open a GWF file source into a `lalframe.XLALFrStream` object Parameters ---------- source : `str`, `file`, `list` Data source to read. Returns ------- stream : `lalframe.FrStream` An open `FrStream`. Raises ------ ValueError If the input format cannot be identified. """ if isinstance(source, FILE_LIKE): source = source.name if isinstance(source, CacheEntry): source = source.path # read cache file if (isinstance(source, string_types) and source.endswith(('.lcf', '.cache'))): return lalframe.FrStreamCacheOpen(lal.CacheImport(source)) # read glue cache object if isinstance(source, list) and is_cache(source): cache = lal.Cache() for entry in file_list(source): cache = lal.CacheMerge(cache, lal.CacheGlob(*os.path.split(entry))) return lalframe.FrStreamCacheOpen(cache) # read lal cache object if isinstance(source, lal.Cache): return lalframe.FrStreamCacheOpen(source) # read single file if isinstance(source, string_types): return lalframe.FrStreamOpen(*map(str, os.path.split(source))) raise ValueError("Don't know how to open data source of type %r" % type(source))
python
def open_data_source(source): """Open a GWF file source into a `lalframe.XLALFrStream` object Parameters ---------- source : `str`, `file`, `list` Data source to read. Returns ------- stream : `lalframe.FrStream` An open `FrStream`. Raises ------ ValueError If the input format cannot be identified. """ if isinstance(source, FILE_LIKE): source = source.name if isinstance(source, CacheEntry): source = source.path # read cache file if (isinstance(source, string_types) and source.endswith(('.lcf', '.cache'))): return lalframe.FrStreamCacheOpen(lal.CacheImport(source)) # read glue cache object if isinstance(source, list) and is_cache(source): cache = lal.Cache() for entry in file_list(source): cache = lal.CacheMerge(cache, lal.CacheGlob(*os.path.split(entry))) return lalframe.FrStreamCacheOpen(cache) # read lal cache object if isinstance(source, lal.Cache): return lalframe.FrStreamCacheOpen(source) # read single file if isinstance(source, string_types): return lalframe.FrStreamOpen(*map(str, os.path.split(source))) raise ValueError("Don't know how to open data source of type %r" % type(source))
[ "def", "open_data_source", "(", "source", ")", ":", "if", "isinstance", "(", "source", ",", "FILE_LIKE", ")", ":", "source", "=", "source", ".", "name", "if", "isinstance", "(", "source", ",", "CacheEntry", ")", ":", "source", "=", "source", ".", "path",...
Open a GWF file source into a `lalframe.XLALFrStream` object Parameters ---------- source : `str`, `file`, `list` Data source to read. Returns ------- stream : `lalframe.FrStream` An open `FrStream`. Raises ------ ValueError If the input format cannot be identified.
[ "Open", "a", "GWF", "file", "source", "into", "a", "lalframe", ".", "XLALFrStream", "object" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/gwf/lalframe.py#L50-L94
train
211,474
gwpy/gwpy
gwpy/timeseries/io/gwf/lalframe.py
get_stream_duration
def get_stream_duration(stream): """Find the duration of time stored in a frame stream Parameters ---------- stream : `lal.FrStream` stream of data to search Returns ------- duration : `float` the duration (seconds) of the data for this channel """ epoch = lal.LIGOTimeGPS(stream.epoch.gpsSeconds, stream.epoch.gpsNanoSeconds) # loop over each file in the stream cache and query its duration nfile = stream.cache.length duration = 0 for dummy_i in range(nfile): for dummy_j in range(lalframe.FrFileQueryNFrame(stream.file)): duration += lalframe.FrFileQueryDt(stream.file, 0) lalframe.FrStreamNext(stream) # rewind stream and return lalframe.FrStreamSeek(stream, epoch) return duration
python
def get_stream_duration(stream): """Find the duration of time stored in a frame stream Parameters ---------- stream : `lal.FrStream` stream of data to search Returns ------- duration : `float` the duration (seconds) of the data for this channel """ epoch = lal.LIGOTimeGPS(stream.epoch.gpsSeconds, stream.epoch.gpsNanoSeconds) # loop over each file in the stream cache and query its duration nfile = stream.cache.length duration = 0 for dummy_i in range(nfile): for dummy_j in range(lalframe.FrFileQueryNFrame(stream.file)): duration += lalframe.FrFileQueryDt(stream.file, 0) lalframe.FrStreamNext(stream) # rewind stream and return lalframe.FrStreamSeek(stream, epoch) return duration
[ "def", "get_stream_duration", "(", "stream", ")", ":", "epoch", "=", "lal", ".", "LIGOTimeGPS", "(", "stream", ".", "epoch", ".", "gpsSeconds", ",", "stream", ".", "epoch", ".", "gpsNanoSeconds", ")", "# loop over each file in the stream cache and query its duration",...
Find the duration of time stored in a frame stream Parameters ---------- stream : `lal.FrStream` stream of data to search Returns ------- duration : `float` the duration (seconds) of the data for this channel
[ "Find", "the", "duration", "of", "time", "stored", "in", "a", "frame", "stream" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/gwf/lalframe.py#L97-L121
train
211,475
gwpy/gwpy
gwpy/timeseries/io/gwf/lalframe.py
read
def read(source, channels, start=None, end=None, series_class=TimeSeries, scaled=None): """Read data from one or more GWF files using the LALFrame API """ # scaled must be provided to provide a consistent API with frameCPP if scaled is not None: warnings.warn( "the `scaled` keyword argument is not supported by lalframe, " "if you require ADC scaling, please install " "python-ldas-tools-framecpp", ) stream = open_data_source(source) # parse times and restrict to available data epoch = lal.LIGOTimeGPS(stream.epoch.gpsSeconds, stream.epoch.gpsNanoSeconds) streamdur = get_stream_duration(stream) if start is None: start = epoch else: start = max(epoch, lalutils.to_lal_ligotimegps(start)) if end is None: offset = float(start - epoch) duration = streamdur - offset else: end = min(epoch + streamdur, lalutils.to_lal_ligotimegps(end)) duration = float(end - start) # read data out = series_class.DictClass() for name in channels: out[name] = series_class.from_lal( _read_channel(stream, str(name), start=start, duration=duration), copy=False) lalframe.FrStreamSeek(stream, epoch) return out
python
def read(source, channels, start=None, end=None, series_class=TimeSeries, scaled=None): """Read data from one or more GWF files using the LALFrame API """ # scaled must be provided to provide a consistent API with frameCPP if scaled is not None: warnings.warn( "the `scaled` keyword argument is not supported by lalframe, " "if you require ADC scaling, please install " "python-ldas-tools-framecpp", ) stream = open_data_source(source) # parse times and restrict to available data epoch = lal.LIGOTimeGPS(stream.epoch.gpsSeconds, stream.epoch.gpsNanoSeconds) streamdur = get_stream_duration(stream) if start is None: start = epoch else: start = max(epoch, lalutils.to_lal_ligotimegps(start)) if end is None: offset = float(start - epoch) duration = streamdur - offset else: end = min(epoch + streamdur, lalutils.to_lal_ligotimegps(end)) duration = float(end - start) # read data out = series_class.DictClass() for name in channels: out[name] = series_class.from_lal( _read_channel(stream, str(name), start=start, duration=duration), copy=False) lalframe.FrStreamSeek(stream, epoch) return out
[ "def", "read", "(", "source", ",", "channels", ",", "start", "=", "None", ",", "end", "=", "None", ",", "series_class", "=", "TimeSeries", ",", "scaled", "=", "None", ")", ":", "# scaled must be provided to provide a consistent API with frameCPP", "if", "scaled", ...
Read data from one or more GWF files using the LALFrame API
[ "Read", "data", "from", "one", "or", "more", "GWF", "files", "using", "the", "LALFrame", "API" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/gwf/lalframe.py#L126-L162
train
211,476
gwpy/gwpy
gwpy/timeseries/io/gwf/lalframe.py
write
def write(tsdict, outfile, start=None, end=None, name='gwpy', run=0): """Write data to a GWF file using the LALFrame API """ if not start: start = list(tsdict.values())[0].xspan[0] if not end: end = list(tsdict.values())[0].xspan[1] duration = end - start # get ifos list detectors = 0 for series in tsdict.values(): try: idx = list(lalutils.LAL_DETECTORS.keys()).index(series.channel.ifo) detectors |= 1 << 2*idx except (KeyError, AttributeError): continue # create new frame frame = lalframe.FrameNew(start, duration, name, run, 0, detectors) for series in tsdict.values(): # convert to LAL lalseries = series.to_lal() # find adder add_ = lalutils.find_typed_function( series.dtype, 'FrameAdd', 'TimeSeriesProcData', module=lalframe) # add time series to frame add_(frame, lalseries) # write frame lalframe.FrameWrite(frame, outfile)
python
def write(tsdict, outfile, start=None, end=None, name='gwpy', run=0): """Write data to a GWF file using the LALFrame API """ if not start: start = list(tsdict.values())[0].xspan[0] if not end: end = list(tsdict.values())[0].xspan[1] duration = end - start # get ifos list detectors = 0 for series in tsdict.values(): try: idx = list(lalutils.LAL_DETECTORS.keys()).index(series.channel.ifo) detectors |= 1 << 2*idx except (KeyError, AttributeError): continue # create new frame frame = lalframe.FrameNew(start, duration, name, run, 0, detectors) for series in tsdict.values(): # convert to LAL lalseries = series.to_lal() # find adder add_ = lalutils.find_typed_function( series.dtype, 'FrameAdd', 'TimeSeriesProcData', module=lalframe) # add time series to frame add_(frame, lalseries) # write frame lalframe.FrameWrite(frame, outfile)
[ "def", "write", "(", "tsdict", ",", "outfile", ",", "start", "=", "None", ",", "end", "=", "None", ",", "name", "=", "'gwpy'", ",", "run", "=", "0", ")", ":", "if", "not", "start", ":", "start", "=", "list", "(", "tsdict", ".", "values", "(", "...
Write data to a GWF file using the LALFrame API
[ "Write", "data", "to", "a", "GWF", "file", "using", "the", "LALFrame", "API" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/gwf/lalframe.py#L174-L208
train
211,477
gwpy/gwpy
gwpy/segments/io/ligolw.py
segment_content_handler
def segment_content_handler(): """Build a `~xml.sax.handlers.ContentHandler` to read segment XML tables """ from ligo.lw.lsctables import (SegmentTable, SegmentDefTable, SegmentSumTable) from ligo.lw.ligolw import PartialLIGOLWContentHandler def _filter(name, attrs): return reduce( operator.or_, [table_.CheckProperties(name, attrs) for table_ in (SegmentTable, SegmentDefTable, SegmentSumTable)]) return build_content_handler(PartialLIGOLWContentHandler, _filter)
python
def segment_content_handler(): """Build a `~xml.sax.handlers.ContentHandler` to read segment XML tables """ from ligo.lw.lsctables import (SegmentTable, SegmentDefTable, SegmentSumTable) from ligo.lw.ligolw import PartialLIGOLWContentHandler def _filter(name, attrs): return reduce( operator.or_, [table_.CheckProperties(name, attrs) for table_ in (SegmentTable, SegmentDefTable, SegmentSumTable)]) return build_content_handler(PartialLIGOLWContentHandler, _filter)
[ "def", "segment_content_handler", "(", ")", ":", "from", "ligo", ".", "lw", ".", "lsctables", "import", "(", "SegmentTable", ",", "SegmentDefTable", ",", "SegmentSumTable", ")", "from", "ligo", ".", "lw", ".", "ligolw", "import", "PartialLIGOLWContentHandler", "...
Build a `~xml.sax.handlers.ContentHandler` to read segment XML tables
[ "Build", "a", "~xml", ".", "sax", ".", "handlers", ".", "ContentHandler", "to", "read", "segment", "XML", "tables" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/ligolw.py#L35-L48
train
211,478
gwpy/gwpy
gwpy/segments/io/ligolw.py
read_ligolw_dict
def read_ligolw_dict(source, names=None, coalesce=False, **kwargs): """Read segments for the given flag from the LIGO_LW XML file. Parameters ---------- source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`, `list` one (or more) open files or file paths, or LIGO_LW `Document` objects names : `list`, `None`, optional list of names to read or `None` to read all into a single `DataQualityFlag`. coalesce : `bool`, optional if `True`, coalesce all parsed `DataQualityFlag` objects before returning, default: `False` **kwargs other keywords are passed to :meth:`DataQualityDict.from_ligolw_tables` Returns ------- flagdict : `DataQualityDict` a new `DataQualityDict` of `DataQualityFlag` entries with ``active`` and ``known`` segments seeded from the XML tables in the given file ``fp``. """ xmldoc = read_ligolw(source, contenthandler=segment_content_handler()) # parse tables with patch_ligotimegps(type(xmldoc.childNodes[0]).__module__): out = DataQualityDict.from_ligolw_tables( *xmldoc.childNodes, names=names, **kwargs ) # coalesce if coalesce: for flag in out: out[flag].coalesce() return out
python
def read_ligolw_dict(source, names=None, coalesce=False, **kwargs): """Read segments for the given flag from the LIGO_LW XML file. Parameters ---------- source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`, `list` one (or more) open files or file paths, or LIGO_LW `Document` objects names : `list`, `None`, optional list of names to read or `None` to read all into a single `DataQualityFlag`. coalesce : `bool`, optional if `True`, coalesce all parsed `DataQualityFlag` objects before returning, default: `False` **kwargs other keywords are passed to :meth:`DataQualityDict.from_ligolw_tables` Returns ------- flagdict : `DataQualityDict` a new `DataQualityDict` of `DataQualityFlag` entries with ``active`` and ``known`` segments seeded from the XML tables in the given file ``fp``. """ xmldoc = read_ligolw(source, contenthandler=segment_content_handler()) # parse tables with patch_ligotimegps(type(xmldoc.childNodes[0]).__module__): out = DataQualityDict.from_ligolw_tables( *xmldoc.childNodes, names=names, **kwargs ) # coalesce if coalesce: for flag in out: out[flag].coalesce() return out
[ "def", "read_ligolw_dict", "(", "source", ",", "names", "=", "None", ",", "coalesce", "=", "False", ",", "*", "*", "kwargs", ")", ":", "xmldoc", "=", "read_ligolw", "(", "source", ",", "contenthandler", "=", "segment_content_handler", "(", ")", ")", "# par...
Read segments for the given flag from the LIGO_LW XML file. Parameters ---------- source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`, `list` one (or more) open files or file paths, or LIGO_LW `Document` objects names : `list`, `None`, optional list of names to read or `None` to read all into a single `DataQualityFlag`. coalesce : `bool`, optional if `True`, coalesce all parsed `DataQualityFlag` objects before returning, default: `False` **kwargs other keywords are passed to :meth:`DataQualityDict.from_ligolw_tables` Returns ------- flagdict : `DataQualityDict` a new `DataQualityDict` of `DataQualityFlag` entries with ``active`` and ``known`` segments seeded from the XML tables in the given file ``fp``.
[ "Read", "segments", "for", "the", "given", "flag", "from", "the", "LIGO_LW", "XML", "file", "." ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/ligolw.py#L53-L94
train
211,479
gwpy/gwpy
gwpy/segments/io/ligolw.py
read_ligolw_flag
def read_ligolw_flag(source, name=None, **kwargs): """Read a single `DataQualityFlag` from a LIGO_LW XML file """ name = [name] if name is not None else None return list(read_ligolw_dict(source, names=name, **kwargs).values())[0]
python
def read_ligolw_flag(source, name=None, **kwargs): """Read a single `DataQualityFlag` from a LIGO_LW XML file """ name = [name] if name is not None else None return list(read_ligolw_dict(source, names=name, **kwargs).values())[0]
[ "def", "read_ligolw_flag", "(", "source", ",", "name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "name", "=", "[", "name", "]", "if", "name", "is", "not", "None", "else", "None", "return", "list", "(", "read_ligolw_dict", "(", "source", ",", "n...
Read a single `DataQualityFlag` from a LIGO_LW XML file
[ "Read", "a", "single", "DataQualityFlag", "from", "a", "LIGO_LW", "XML", "file" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/ligolw.py#L97-L101
train
211,480
gwpy/gwpy
gwpy/segments/io/ligolw.py
write_ligolw
def write_ligolw(flags, target, attrs=None, ilwdchar_compat=None, **kwargs): """Write this `DataQualityFlag` to the given LIGO_LW Document Parameters ---------- flags : `DataQualityFlag`, `DataQualityDict` `gwpy.segments` object to write target : `str`, `file`, :class:`~ligo.lw.ligolw.Document` the file or document to write into attrs : `dict`, optional extra attributes to write into segment tables **kwargs keyword arguments to use when writing See also -------- gwpy.io.ligolw.write_ligolw_tables for details of acceptable keyword arguments """ if isinstance(flags, DataQualityFlag): flags = DataQualityDict({flags.name: flags}) return write_tables( target, flags.to_ligolw_tables(ilwdchar_compat=ilwdchar_compat, **attrs or dict()), **kwargs )
python
def write_ligolw(flags, target, attrs=None, ilwdchar_compat=None, **kwargs): """Write this `DataQualityFlag` to the given LIGO_LW Document Parameters ---------- flags : `DataQualityFlag`, `DataQualityDict` `gwpy.segments` object to write target : `str`, `file`, :class:`~ligo.lw.ligolw.Document` the file or document to write into attrs : `dict`, optional extra attributes to write into segment tables **kwargs keyword arguments to use when writing See also -------- gwpy.io.ligolw.write_ligolw_tables for details of acceptable keyword arguments """ if isinstance(flags, DataQualityFlag): flags = DataQualityDict({flags.name: flags}) return write_tables( target, flags.to_ligolw_tables(ilwdchar_compat=ilwdchar_compat, **attrs or dict()), **kwargs )
[ "def", "write_ligolw", "(", "flags", ",", "target", ",", "attrs", "=", "None", ",", "ilwdchar_compat", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "flags", ",", "DataQualityFlag", ")", ":", "flags", "=", "DataQualityDict", "(...
Write this `DataQualityFlag` to the given LIGO_LW Document Parameters ---------- flags : `DataQualityFlag`, `DataQualityDict` `gwpy.segments` object to write target : `str`, `file`, :class:`~ligo.lw.ligolw.Document` the file or document to write into attrs : `dict`, optional extra attributes to write into segment tables **kwargs keyword arguments to use when writing See also -------- gwpy.io.ligolw.write_ligolw_tables for details of acceptable keyword arguments
[ "Write", "this", "DataQualityFlag", "to", "the", "given", "LIGO_LW", "Document" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/ligolw.py#L106-L135
train
211,481
gwpy/gwpy
gwpy/signal/spectral/_utils.py
scale_timeseries_unit
def scale_timeseries_unit(tsunit, scaling='density'): """Scale the unit of a `TimeSeries` to match that of a `FrequencySeries` Parameters ---------- tsunit : `~astropy.units.UnitBase` input unit from `TimeSeries` scaling : `str` type of frequency series, either 'density' for a PSD, or 'spectrum' for a power spectrum. Returns ------- unit : `~astropy.units.Unit` unit to be applied to the resulting `FrequencySeries`. """ # set units if scaling == 'density': baseunit = units.Hertz elif scaling == 'spectrum': baseunit = units.dimensionless_unscaled else: raise ValueError("Unknown scaling: %r" % scaling) if tsunit: specunit = tsunit ** 2 / baseunit else: specunit = baseunit ** -1 return specunit
python
def scale_timeseries_unit(tsunit, scaling='density'): """Scale the unit of a `TimeSeries` to match that of a `FrequencySeries` Parameters ---------- tsunit : `~astropy.units.UnitBase` input unit from `TimeSeries` scaling : `str` type of frequency series, either 'density' for a PSD, or 'spectrum' for a power spectrum. Returns ------- unit : `~astropy.units.Unit` unit to be applied to the resulting `FrequencySeries`. """ # set units if scaling == 'density': baseunit = units.Hertz elif scaling == 'spectrum': baseunit = units.dimensionless_unscaled else: raise ValueError("Unknown scaling: %r" % scaling) if tsunit: specunit = tsunit ** 2 / baseunit else: specunit = baseunit ** -1 return specunit
[ "def", "scale_timeseries_unit", "(", "tsunit", ",", "scaling", "=", "'density'", ")", ":", "# set units", "if", "scaling", "==", "'density'", ":", "baseunit", "=", "units", ".", "Hertz", "elif", "scaling", "==", "'spectrum'", ":", "baseunit", "=", "units", "...
Scale the unit of a `TimeSeries` to match that of a `FrequencySeries` Parameters ---------- tsunit : `~astropy.units.UnitBase` input unit from `TimeSeries` scaling : `str` type of frequency series, either 'density' for a PSD, or 'spectrum' for a power spectrum. Returns ------- unit : `~astropy.units.Unit` unit to be applied to the resulting `FrequencySeries`.
[ "Scale", "the", "unit", "of", "a", "TimeSeries", "to", "match", "that", "of", "a", "FrequencySeries" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_utils.py#L27-L54
train
211,482
gwpy/gwpy
gwpy/io/cache.py
_iter_cache
def _iter_cache(cachefile, gpstype=LIGOTimeGPS): """Internal method that yields a `_CacheEntry` for each line in the file This method supports reading LAL- and (nested) FFL-format cache files. """ try: path = os.path.abspath(cachefile.name) except AttributeError: path = None for line in cachefile: try: yield _CacheEntry.parse(line, gpstype=LIGOTimeGPS) except ValueError: # virgo FFL format (seemingly) supports nested FFL files parts = line.split() if len(parts) == 3 and os.path.abspath(parts[0]) != path: with open(parts[0], 'r') as cache2: for entry in _iter_cache(cache2): yield entry else: raise
python
def _iter_cache(cachefile, gpstype=LIGOTimeGPS): """Internal method that yields a `_CacheEntry` for each line in the file This method supports reading LAL- and (nested) FFL-format cache files. """ try: path = os.path.abspath(cachefile.name) except AttributeError: path = None for line in cachefile: try: yield _CacheEntry.parse(line, gpstype=LIGOTimeGPS) except ValueError: # virgo FFL format (seemingly) supports nested FFL files parts = line.split() if len(parts) == 3 and os.path.abspath(parts[0]) != path: with open(parts[0], 'r') as cache2: for entry in _iter_cache(cache2): yield entry else: raise
[ "def", "_iter_cache", "(", "cachefile", ",", "gpstype", "=", "LIGOTimeGPS", ")", ":", "try", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "cachefile", ".", "name", ")", "except", "AttributeError", ":", "path", "=", "None", "for", "line", "...
Internal method that yields a `_CacheEntry` for each line in the file This method supports reading LAL- and (nested) FFL-format cache files.
[ "Internal", "method", "that", "yields", "a", "_CacheEntry", "for", "each", "line", "in", "the", "file" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/cache.py#L132-L152
train
211,483
gwpy/gwpy
gwpy/io/cache.py
read_cache
def read_cache(cachefile, coltype=LIGOTimeGPS, sort=None, segment=None): """Read a LAL- or FFL-format cache file as a list of file paths Parameters ---------- cachefile : `str`, `file` Input file or file path to read. coltype : `LIGOTimeGPS`, `int`, optional Type for GPS times. sort : `callable`, optional A callable key function by which to sort the output list of file paths segment : `gwpy.segments.Segment`, optional A GPS `[start, stop)` interval, if given only files overlapping this interval will be returned. Returns ------- paths : `list` of `str` A list of file paths as read from the cache file. """ # open file if not isinstance(cachefile, FILE_LIKE): with open(file_path(cachefile), 'r') as fobj: return read_cache(fobj, coltype=coltype, sort=sort, segment=segment) # read file cache = [x.path for x in _iter_cache(cachefile, gpstype=coltype)] # sieve and sort if segment: cache = sieve(cache, segment=segment) if sort: cache.sort(key=sort) # read simple paths return cache
python
def read_cache(cachefile, coltype=LIGOTimeGPS, sort=None, segment=None): """Read a LAL- or FFL-format cache file as a list of file paths Parameters ---------- cachefile : `str`, `file` Input file or file path to read. coltype : `LIGOTimeGPS`, `int`, optional Type for GPS times. sort : `callable`, optional A callable key function by which to sort the output list of file paths segment : `gwpy.segments.Segment`, optional A GPS `[start, stop)` interval, if given only files overlapping this interval will be returned. Returns ------- paths : `list` of `str` A list of file paths as read from the cache file. """ # open file if not isinstance(cachefile, FILE_LIKE): with open(file_path(cachefile), 'r') as fobj: return read_cache(fobj, coltype=coltype, sort=sort, segment=segment) # read file cache = [x.path for x in _iter_cache(cachefile, gpstype=coltype)] # sieve and sort if segment: cache = sieve(cache, segment=segment) if sort: cache.sort(key=sort) # read simple paths return cache
[ "def", "read_cache", "(", "cachefile", ",", "coltype", "=", "LIGOTimeGPS", ",", "sort", "=", "None", ",", "segment", "=", "None", ")", ":", "# open file", "if", "not", "isinstance", "(", "cachefile", ",", "FILE_LIKE", ")", ":", "with", "open", "(", "file...
Read a LAL- or FFL-format cache file as a list of file paths Parameters ---------- cachefile : `str`, `file` Input file or file path to read. coltype : `LIGOTimeGPS`, `int`, optional Type for GPS times. sort : `callable`, optional A callable key function by which to sort the output list of file paths segment : `gwpy.segments.Segment`, optional A GPS `[start, stop)` interval, if given only files overlapping this interval will be returned. Returns ------- paths : `list` of `str` A list of file paths as read from the cache file.
[ "Read", "a", "LAL", "-", "or", "FFL", "-", "format", "cache", "file", "as", "a", "list", "of", "file", "paths" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/cache.py#L155-L194
train
211,484
gwpy/gwpy
gwpy/io/cache.py
write_cache
def write_cache(cache, fobj, format=None): """Write a `list` of cache entries to a file Parameters ---------- cache : `list` of `str` The list of file paths to write fobj : `file`, `str` The open file object, or file path to write to. format : `str`, optional The format to write to, one of - `None` : format each entry using `str` - ``'lal'`` : write a LAL-format cache - ``'ffl'`` : write an FFL-format cache """ # open file if isinstance(fobj, string_types): with open(fobj, 'w') as fobj2: return write_cache(cache, fobj2, format=format) if format is None: formatter = str elif format.lower() == "lal": formatter = _format_entry_lal elif format.lower() == "ffl": formatter = _format_entry_ffl else: raise ValueError("Unrecognised cache format {!r}".format(format)) # write file for line in map(formatter, cache): try: print(line, file=fobj) except TypeError: # bytes-mode fobj.write("{}\n".format(line).encode("utf-8"))
python
def write_cache(cache, fobj, format=None): """Write a `list` of cache entries to a file Parameters ---------- cache : `list` of `str` The list of file paths to write fobj : `file`, `str` The open file object, or file path to write to. format : `str`, optional The format to write to, one of - `None` : format each entry using `str` - ``'lal'`` : write a LAL-format cache - ``'ffl'`` : write an FFL-format cache """ # open file if isinstance(fobj, string_types): with open(fobj, 'w') as fobj2: return write_cache(cache, fobj2, format=format) if format is None: formatter = str elif format.lower() == "lal": formatter = _format_entry_lal elif format.lower() == "ffl": formatter = _format_entry_ffl else: raise ValueError("Unrecognised cache format {!r}".format(format)) # write file for line in map(formatter, cache): try: print(line, file=fobj) except TypeError: # bytes-mode fobj.write("{}\n".format(line).encode("utf-8"))
[ "def", "write_cache", "(", "cache", ",", "fobj", ",", "format", "=", "None", ")", ":", "# open file", "if", "isinstance", "(", "fobj", ",", "string_types", ")", ":", "with", "open", "(", "fobj", ",", "'w'", ")", "as", "fobj2", ":", "return", "write_cac...
Write a `list` of cache entries to a file Parameters ---------- cache : `list` of `str` The list of file paths to write fobj : `file`, `str` The open file object, or file path to write to. format : `str`, optional The format to write to, one of - `None` : format each entry using `str` - ``'lal'`` : write a LAL-format cache - ``'ffl'`` : write an FFL-format cache
[ "Write", "a", "list", "of", "cache", "entries", "to", "a", "file" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/cache.py#L225-L262
train
211,485
gwpy/gwpy
gwpy/io/cache.py
is_cache
def is_cache(cache): """Returns `True` if ``cache`` is a readable cache file or object Parameters ---------- cache : `str`, `file`, `list` Object to detect as cache Returns ------- iscache : `bool` `True` if the input object is a cache, or a file in LAL cache format, otherwise `False` """ if isinstance(cache, string_types + FILE_LIKE): try: return bool(len(read_cache(cache))) except (TypeError, ValueError, UnicodeDecodeError, ImportError): # failed to parse cache return False if HAS_CACHE and isinstance(cache, Cache): return True if (isinstance(cache, (list, tuple)) and cache and all(map(is_cache_entry, cache))): return True return False
python
def is_cache(cache): """Returns `True` if ``cache`` is a readable cache file or object Parameters ---------- cache : `str`, `file`, `list` Object to detect as cache Returns ------- iscache : `bool` `True` if the input object is a cache, or a file in LAL cache format, otherwise `False` """ if isinstance(cache, string_types + FILE_LIKE): try: return bool(len(read_cache(cache))) except (TypeError, ValueError, UnicodeDecodeError, ImportError): # failed to parse cache return False if HAS_CACHE and isinstance(cache, Cache): return True if (isinstance(cache, (list, tuple)) and cache and all(map(is_cache_entry, cache))): return True return False
[ "def", "is_cache", "(", "cache", ")", ":", "if", "isinstance", "(", "cache", ",", "string_types", "+", "FILE_LIKE", ")", ":", "try", ":", "return", "bool", "(", "len", "(", "read_cache", "(", "cache", ")", ")", ")", "except", "(", "TypeError", ",", "...
Returns `True` if ``cache`` is a readable cache file or object Parameters ---------- cache : `str`, `file`, `list` Object to detect as cache Returns ------- iscache : `bool` `True` if the input object is a cache, or a file in LAL cache format, otherwise `False`
[ "Returns", "True", "if", "cache", "is", "a", "readable", "cache", "file", "or", "object" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/cache.py#L265-L291
train
211,486
gwpy/gwpy
gwpy/io/cache.py
is_cache_entry
def is_cache_entry(path): """Returns `True` if ``path`` can be represented as a cache entry In practice this just tests whether the input is |LIGO-T050017|_ compliant. Parameters ---------- path : `str`, :class:`lal.utils.CacheEntry` The input to test Returns ------- isentry : `bool` `True` if ``path`` is an instance of `CacheEntry`, or can be parsed using |LIGO-T050017|_. """ if HAS_CACHEENTRY and isinstance(path, CacheEntry): return True try: file_segment(path) except (ValueError, TypeError, AttributeError): return False return True
python
def is_cache_entry(path): """Returns `True` if ``path`` can be represented as a cache entry In practice this just tests whether the input is |LIGO-T050017|_ compliant. Parameters ---------- path : `str`, :class:`lal.utils.CacheEntry` The input to test Returns ------- isentry : `bool` `True` if ``path`` is an instance of `CacheEntry`, or can be parsed using |LIGO-T050017|_. """ if HAS_CACHEENTRY and isinstance(path, CacheEntry): return True try: file_segment(path) except (ValueError, TypeError, AttributeError): return False return True
[ "def", "is_cache_entry", "(", "path", ")", ":", "if", "HAS_CACHEENTRY", "and", "isinstance", "(", "path", ",", "CacheEntry", ")", ":", "return", "True", "try", ":", "file_segment", "(", "path", ")", "except", "(", "ValueError", ",", "TypeError", ",", "Attr...
Returns `True` if ``path`` can be represented as a cache entry In practice this just tests whether the input is |LIGO-T050017|_ compliant. Parameters ---------- path : `str`, :class:`lal.utils.CacheEntry` The input to test Returns ------- isentry : `bool` `True` if ``path`` is an instance of `CacheEntry`, or can be parsed using |LIGO-T050017|_.
[ "Returns", "True", "if", "path", "can", "be", "represented", "as", "a", "cache", "entry" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/cache.py#L294-L316
train
211,487
gwpy/gwpy
gwpy/io/cache.py
filename_metadata
def filename_metadata(filename): """Return metadata parsed from a filename following LIGO-T050017 This method is lenient with regards to integers in the GPS start time of the file, as opposed to `gwdatafind.utils.filename_metadata`, which is strict. Parameters ---------- filename : `str` the path name of a file Returns ------- obs : `str` the observatory metadata tag : `str` the file tag segment : `gwpy.segments.Segment` the GPS ``[float, float)`` interval for this file Notes ----- `LIGO-T050017 <https://dcc.ligo.org/LIGO-T050017>`__ declares a file naming convention that includes documenting the GPS start integer and integer duration of a file, see that document for more details. Examples -------- >>> from gwpy.io.cache import filename_metadata >>> filename_metadata("A-B-0-1.txt") ('A', 'B', Segment(0, 1)) >>> filename_metadata("A-B-0.456-1.345.txt") ("A", "B", Segment(0.456, 1.801)) """ from ..segments import Segment name = Path(filename).name try: obs, desc, start, dur = name.split('-') except ValueError as exc: exc.args = ('Failed to parse {!r} as LIGO-T050017-compatible ' 'filename'.format(name),) raise start = float(start) dur = dur.rsplit('.', 1)[0] while True: # recursively remove extension components try: dur = float(dur) except ValueError: if '.' not in dur: raise dur = dur.rsplit('.', 1)[0] else: break return obs, desc, Segment(start, start+dur)
python
def filename_metadata(filename): """Return metadata parsed from a filename following LIGO-T050017 This method is lenient with regards to integers in the GPS start time of the file, as opposed to `gwdatafind.utils.filename_metadata`, which is strict. Parameters ---------- filename : `str` the path name of a file Returns ------- obs : `str` the observatory metadata tag : `str` the file tag segment : `gwpy.segments.Segment` the GPS ``[float, float)`` interval for this file Notes ----- `LIGO-T050017 <https://dcc.ligo.org/LIGO-T050017>`__ declares a file naming convention that includes documenting the GPS start integer and integer duration of a file, see that document for more details. Examples -------- >>> from gwpy.io.cache import filename_metadata >>> filename_metadata("A-B-0-1.txt") ('A', 'B', Segment(0, 1)) >>> filename_metadata("A-B-0.456-1.345.txt") ("A", "B", Segment(0.456, 1.801)) """ from ..segments import Segment name = Path(filename).name try: obs, desc, start, dur = name.split('-') except ValueError as exc: exc.args = ('Failed to parse {!r} as LIGO-T050017-compatible ' 'filename'.format(name),) raise start = float(start) dur = dur.rsplit('.', 1)[0] while True: # recursively remove extension components try: dur = float(dur) except ValueError: if '.' not in dur: raise dur = dur.rsplit('.', 1)[0] else: break return obs, desc, Segment(start, start+dur)
[ "def", "filename_metadata", "(", "filename", ")", ":", "from", ".", ".", "segments", "import", "Segment", "name", "=", "Path", "(", "filename", ")", ".", "name", "try", ":", "obs", ",", "desc", ",", "start", ",", "dur", "=", "name", ".", "split", "("...
Return metadata parsed from a filename following LIGO-T050017 This method is lenient with regards to integers in the GPS start time of the file, as opposed to `gwdatafind.utils.filename_metadata`, which is strict. Parameters ---------- filename : `str` the path name of a file Returns ------- obs : `str` the observatory metadata tag : `str` the file tag segment : `gwpy.segments.Segment` the GPS ``[float, float)`` interval for this file Notes ----- `LIGO-T050017 <https://dcc.ligo.org/LIGO-T050017>`__ declares a file naming convention that includes documenting the GPS start integer and integer duration of a file, see that document for more details. Examples -------- >>> from gwpy.io.cache import filename_metadata >>> filename_metadata("A-B-0-1.txt") ('A', 'B', Segment(0, 1)) >>> filename_metadata("A-B-0.456-1.345.txt") ("A", "B", Segment(0.456, 1.801))
[ "Return", "metadata", "parsed", "from", "a", "filename", "following", "LIGO", "-", "T050017" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/cache.py#L321-L377
train
211,488
gwpy/gwpy
gwpy/io/cache.py
file_segment
def file_segment(filename): """Return the data segment for a filename following T050017 Parameters --------- filename : `str`, :class:`~lal.utils.CacheEntry` the path name of a file Returns ------- segment : `~gwpy.segments.Segment` the ``[start, stop)`` GPS segment covered by the given file Notes ----- |LIGO-T050017|_ declares a filenaming convention that includes documenting the GPS start integer and integer duration of a file, see that document for more details. """ from ..segments import Segment try: # CacheEntry return Segment(filename.segment) except AttributeError: # file path (str) return filename_metadata(filename)[2]
python
def file_segment(filename): """Return the data segment for a filename following T050017 Parameters --------- filename : `str`, :class:`~lal.utils.CacheEntry` the path name of a file Returns ------- segment : `~gwpy.segments.Segment` the ``[start, stop)`` GPS segment covered by the given file Notes ----- |LIGO-T050017|_ declares a filenaming convention that includes documenting the GPS start integer and integer duration of a file, see that document for more details. """ from ..segments import Segment try: # CacheEntry return Segment(filename.segment) except AttributeError: # file path (str) return filename_metadata(filename)[2]
[ "def", "file_segment", "(", "filename", ")", ":", "from", ".", ".", "segments", "import", "Segment", "try", ":", "# CacheEntry", "return", "Segment", "(", "filename", ".", "segment", ")", "except", "AttributeError", ":", "# file path (str)", "return", "filename_...
Return the data segment for a filename following T050017 Parameters --------- filename : `str`, :class:`~lal.utils.CacheEntry` the path name of a file Returns ------- segment : `~gwpy.segments.Segment` the ``[start, stop)`` GPS segment covered by the given file Notes ----- |LIGO-T050017|_ declares a filenaming convention that includes documenting the GPS start integer and integer duration of a file, see that document for more details.
[ "Return", "the", "data", "segment", "for", "a", "filename", "following", "T050017" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/cache.py#L380-L403
train
211,489
gwpy/gwpy
gwpy/io/cache.py
flatten
def flatten(*caches): """Flatten a nested list of cache entries Parameters ---------- *caches : `list` One or more lists of file paths (`str` or :class:`~lal.utils.CacheEntry`). Returns ------- flat : `list` A flat `list` containing the unique set of entries across each input. """ return list(OrderedDict.fromkeys(e for c in caches for e in c))
python
def flatten(*caches): """Flatten a nested list of cache entries Parameters ---------- *caches : `list` One or more lists of file paths (`str` or :class:`~lal.utils.CacheEntry`). Returns ------- flat : `list` A flat `list` containing the unique set of entries across each input. """ return list(OrderedDict.fromkeys(e for c in caches for e in c))
[ "def", "flatten", "(", "*", "caches", ")", ":", "return", "list", "(", "OrderedDict", ".", "fromkeys", "(", "e", "for", "c", "in", "caches", "for", "e", "in", "c", ")", ")" ]
Flatten a nested list of cache entries Parameters ---------- *caches : `list` One or more lists of file paths (`str` or :class:`~lal.utils.CacheEntry`). Returns ------- flat : `list` A flat `list` containing the unique set of entries across each input.
[ "Flatten", "a", "nested", "list", "of", "cache", "entries" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/cache.py#L427-L442
train
211,490
gwpy/gwpy
gwpy/io/cache.py
find_contiguous
def find_contiguous(*caches): """Separate one or more cache entry lists into time-contiguous sub-lists Parameters ---------- *caches : `list` One or more lists of file paths (`str` or :class:`~lal.utils.CacheEntry`). Returns ------- caches : `iter` of `list` an interable yielding each contiguous cache """ flat = flatten(*caches) for segment in cache_segments(flat): yield sieve(flat, segment=segment)
python
def find_contiguous(*caches): """Separate one or more cache entry lists into time-contiguous sub-lists Parameters ---------- *caches : `list` One or more lists of file paths (`str` or :class:`~lal.utils.CacheEntry`). Returns ------- caches : `iter` of `list` an interable yielding each contiguous cache """ flat = flatten(*caches) for segment in cache_segments(flat): yield sieve(flat, segment=segment)
[ "def", "find_contiguous", "(", "*", "caches", ")", ":", "flat", "=", "flatten", "(", "*", "caches", ")", "for", "segment", "in", "cache_segments", "(", "flat", ")", ":", "yield", "sieve", "(", "flat", ",", "segment", "=", "segment", ")" ]
Separate one or more cache entry lists into time-contiguous sub-lists Parameters ---------- *caches : `list` One or more lists of file paths (`str` or :class:`~lal.utils.CacheEntry`). Returns ------- caches : `iter` of `list` an interable yielding each contiguous cache
[ "Separate", "one", "or", "more", "cache", "entry", "lists", "into", "time", "-", "contiguous", "sub", "-", "lists" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/cache.py#L445-L461
train
211,491
gwpy/gwpy
gwpy/io/cache.py
sieve
def sieve(cache, segment=None): """Filter the cache to find those entries that overlap ``segment`` Parameters ---------- cache : `list` Input list of file paths segment : `~gwpy.segments.Segment` The ``[start, stop)`` interval to match against. """ return type(cache)(e for e in cache if segment.intersects(file_segment(e)))
python
def sieve(cache, segment=None): """Filter the cache to find those entries that overlap ``segment`` Parameters ---------- cache : `list` Input list of file paths segment : `~gwpy.segments.Segment` The ``[start, stop)`` interval to match against. """ return type(cache)(e for e in cache if segment.intersects(file_segment(e)))
[ "def", "sieve", "(", "cache", ",", "segment", "=", "None", ")", ":", "return", "type", "(", "cache", ")", "(", "e", "for", "e", "in", "cache", "if", "segment", ".", "intersects", "(", "file_segment", "(", "e", ")", ")", ")" ]
Filter the cache to find those entries that overlap ``segment`` Parameters ---------- cache : `list` Input list of file paths segment : `~gwpy.segments.Segment` The ``[start, stop)`` interval to match against.
[ "Filter", "the", "cache", "to", "find", "those", "entries", "that", "overlap", "segment" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/cache.py#L464-L475
train
211,492
gwpy/gwpy
gwpy/cli/qtransform.py
Qtransform.arg_qxform
def arg_qxform(cls, parser): """Add an `~argparse.ArgumentGroup` for Q-transform options """ group = parser.add_argument_group('Q-transform options') group.add_argument('--plot', nargs='+', type=float, default=[.5], help='One or more times to plot') group.add_argument('--frange', nargs=2, type=float, help='Frequency range to plot') group.add_argument('--qrange', nargs=2, type=float, help='Search Q range') group.add_argument('--nowhiten', action='store_true', help='do not whiten input before transform')
python
def arg_qxform(cls, parser): """Add an `~argparse.ArgumentGroup` for Q-transform options """ group = parser.add_argument_group('Q-transform options') group.add_argument('--plot', nargs='+', type=float, default=[.5], help='One or more times to plot') group.add_argument('--frange', nargs=2, type=float, help='Frequency range to plot') group.add_argument('--qrange', nargs=2, type=float, help='Search Q range') group.add_argument('--nowhiten', action='store_true', help='do not whiten input before transform')
[ "def", "arg_qxform", "(", "cls", ",", "parser", ")", ":", "group", "=", "parser", ".", "add_argument_group", "(", "'Q-transform options'", ")", "group", ".", "add_argument", "(", "'--plot'", ",", "nargs", "=", "'+'", ",", "type", "=", "float", ",", "defaul...
Add an `~argparse.ArgumentGroup` for Q-transform options
[ "Add", "an", "~argparse", ".", "ArgumentGroup", "for", "Q", "-", "transform", "options" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/cli/qtransform.py#L92-L104
train
211,493
gwpy/gwpy
gwpy/cli/qtransform.py
Qtransform.get_title
def get_title(self): """Default title for plot """ def fformat(x): # float format if isinstance(x, (list, tuple)): return '[{0}]'.format(', '.join(map(fformat, x))) if isinstance(x, Quantity): x = x.value elif isinstance(x, str): warnings.warn('WARNING: fformat called with a' + ' string. This has ' + 'been depricated and may disappear ' + 'in a future release.') x = float(x) return '{0:.2f}'.format(x) bits = [('Q', fformat(self.result.q))] bits.append(('tres', '{:.3g}'.format(self.qxfrm_args['tres']))) if self.qxfrm_args.get('qrange'): bits.append(('q-range', fformat(self.qxfrm_args['qrange']))) if self.qxfrm_args['whiten']: bits.append(('whitened',)) bits.extend([ ('f-range', fformat(self.result.yspan)), ('e-range', '[{:.3g}, {:.3g}]'.format(self.result.min(), self.result.max())), ]) return ', '.join([': '.join(bit) for bit in bits])
python
def get_title(self): """Default title for plot """ def fformat(x): # float format if isinstance(x, (list, tuple)): return '[{0}]'.format(', '.join(map(fformat, x))) if isinstance(x, Quantity): x = x.value elif isinstance(x, str): warnings.warn('WARNING: fformat called with a' + ' string. This has ' + 'been depricated and may disappear ' + 'in a future release.') x = float(x) return '{0:.2f}'.format(x) bits = [('Q', fformat(self.result.q))] bits.append(('tres', '{:.3g}'.format(self.qxfrm_args['tres']))) if self.qxfrm_args.get('qrange'): bits.append(('q-range', fformat(self.qxfrm_args['qrange']))) if self.qxfrm_args['whiten']: bits.append(('whitened',)) bits.extend([ ('f-range', fformat(self.result.yspan)), ('e-range', '[{:.3g}, {:.3g}]'.format(self.result.min(), self.result.max())), ]) return ', '.join([': '.join(bit) for bit in bits])
[ "def", "get_title", "(", "self", ")", ":", "def", "fformat", "(", "x", ")", ":", "# float format", "if", "isinstance", "(", "x", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "'[{0}]'", ".", "format", "(", "', '", ".", "join", "(", "map"...
Default title for plot
[ "Default", "title", "for", "plot" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/cli/qtransform.py#L163-L190
train
211,494
gwpy/gwpy
gwpy/cli/qtransform.py
Qtransform.get_spectrogram
def get_spectrogram(self): """Worked on a single timesharing and generates a single Q-transform spectrogram""" args = self.args asd = self.timeseries[0].asd().value if (asd.min() == 0): self.log(0, 'Input data has a zero in ASD. ' 'Q-transform not possible.') self.got_error = True qtrans = None else: gps = self.qxfrm_args['gps'] outseg = Segment(gps, gps).protract(args.plot[self.plot_num]) # This section tries to optimize the amount of data that is # processed and the time resolution needed to create a good # image. NB:For each time span specified # NB: the timeseries h enough data for the longest plot inseg = outseg.protract(4) & self.timeseries[0].span proc_ts = self.timeseries[0].crop(*inseg) # time resolution is calculated to provide about 4 times # the number of output pixels for interpolation tres = float(outseg.end - outseg.start) / 4 / self.args.nx self.qxfrm_args['tres'] = tres self.qxfrm_args['search'] = int(len(proc_ts) * proc_ts.dt.value) self.log(3, 'Q-transform arguments:') self.log(3, '{0:>15s} = {1}'.format('outseg', outseg)) for key in sorted(self.qxfrm_args): self.log(3, '{0:>15s} = {1}'.format(key, self.qxfrm_args[key])) qtrans = proc_ts.q_transform(outseg=outseg, **self.qxfrm_args) if args.ymin is None: # set before Spectrogram.make_plot args.ymin = qtrans.yspan[0] return qtrans
python
def get_spectrogram(self): """Worked on a single timesharing and generates a single Q-transform spectrogram""" args = self.args asd = self.timeseries[0].asd().value if (asd.min() == 0): self.log(0, 'Input data has a zero in ASD. ' 'Q-transform not possible.') self.got_error = True qtrans = None else: gps = self.qxfrm_args['gps'] outseg = Segment(gps, gps).protract(args.plot[self.plot_num]) # This section tries to optimize the amount of data that is # processed and the time resolution needed to create a good # image. NB:For each time span specified # NB: the timeseries h enough data for the longest plot inseg = outseg.protract(4) & self.timeseries[0].span proc_ts = self.timeseries[0].crop(*inseg) # time resolution is calculated to provide about 4 times # the number of output pixels for interpolation tres = float(outseg.end - outseg.start) / 4 / self.args.nx self.qxfrm_args['tres'] = tres self.qxfrm_args['search'] = int(len(proc_ts) * proc_ts.dt.value) self.log(3, 'Q-transform arguments:') self.log(3, '{0:>15s} = {1}'.format('outseg', outseg)) for key in sorted(self.qxfrm_args): self.log(3, '{0:>15s} = {1}'.format(key, self.qxfrm_args[key])) qtrans = proc_ts.q_transform(outseg=outseg, **self.qxfrm_args) if args.ymin is None: # set before Spectrogram.make_plot args.ymin = qtrans.yspan[0] return qtrans
[ "def", "get_spectrogram", "(", "self", ")", ":", "args", "=", "self", ".", "args", "asd", "=", "self", ".", "timeseries", "[", "0", "]", ".", "asd", "(", ")", ".", "value", "if", "(", "asd", ".", "min", "(", ")", "==", "0", ")", ":", "self", ...
Worked on a single timesharing and generates a single Q-transform spectrogram
[ "Worked", "on", "a", "single", "timesharing", "and", "generates", "a", "single", "Q", "-", "transform", "spectrogram" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/cli/qtransform.py#L192-L230
train
211,495
gwpy/gwpy
gwpy/io/ligolw.py
_ligotimegps
def _ligotimegps(s, ns=0): """Catch TypeError and cast `s` and `ns` to `int` """ from lal import LIGOTimeGPS try: return LIGOTimeGPS(s, ns) except TypeError: return LIGOTimeGPS(int(s), int(ns))
python
def _ligotimegps(s, ns=0): """Catch TypeError and cast `s` and `ns` to `int` """ from lal import LIGOTimeGPS try: return LIGOTimeGPS(s, ns) except TypeError: return LIGOTimeGPS(int(s), int(ns))
[ "def", "_ligotimegps", "(", "s", ",", "ns", "=", "0", ")", ":", "from", "lal", "import", "LIGOTimeGPS", "try", ":", "return", "LIGOTimeGPS", "(", "s", ",", "ns", ")", "except", "TypeError", ":", "return", "LIGOTimeGPS", "(", "int", "(", "s", ")", ","...
Catch TypeError and cast `s` and `ns` to `int`
[ "Catch", "TypeError", "and", "cast", "s", "and", "ns", "to", "int" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/ligolw.py#L109-L116
train
211,496
gwpy/gwpy
gwpy/io/ligolw.py
patch_ligotimegps
def patch_ligotimegps(module="ligo.lw.lsctables"): """Context manager to on-the-fly patch LIGOTimeGPS to accept all int types """ module = import_module(module) orig = module.LIGOTimeGPS module.LIGOTimeGPS = _ligotimegps try: yield finally: module.LIGOTimeGPS = orig
python
def patch_ligotimegps(module="ligo.lw.lsctables"): """Context manager to on-the-fly patch LIGOTimeGPS to accept all int types """ module = import_module(module) orig = module.LIGOTimeGPS module.LIGOTimeGPS = _ligotimegps try: yield finally: module.LIGOTimeGPS = orig
[ "def", "patch_ligotimegps", "(", "module", "=", "\"ligo.lw.lsctables\"", ")", ":", "module", "=", "import_module", "(", "module", ")", "orig", "=", "module", ".", "LIGOTimeGPS", "module", ".", "LIGOTimeGPS", "=", "_ligotimegps", "try", ":", "yield", "finally", ...
Context manager to on-the-fly patch LIGOTimeGPS to accept all int types
[ "Context", "manager", "to", "on", "-", "the", "-", "fly", "patch", "LIGOTimeGPS", "to", "accept", "all", "int", "types" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/ligolw.py#L120-L129
train
211,497
gwpy/gwpy
gwpy/io/ligolw.py
get_partial_contenthandler
def get_partial_contenthandler(element): """Build a `PartialLIGOLWContentHandler` to read only this element Parameters ---------- element : `type`, subclass of :class:`~ligo.lw.ligolw.Element` the element class to be read, Returns ------- contenthandler : `type` a subclass of :class:`~ligo.lw.ligolw.PartialLIGOLWContentHandler` to read only the given `element` """ from ligo.lw.ligolw import PartialLIGOLWContentHandler from ligo.lw.table import Table if issubclass(element, Table): def _element_filter(name, attrs): return element.CheckProperties(name, attrs) else: def _element_filter(name, _): return name == element.tagName return build_content_handler(PartialLIGOLWContentHandler, _element_filter)
python
def get_partial_contenthandler(element): """Build a `PartialLIGOLWContentHandler` to read only this element Parameters ---------- element : `type`, subclass of :class:`~ligo.lw.ligolw.Element` the element class to be read, Returns ------- contenthandler : `type` a subclass of :class:`~ligo.lw.ligolw.PartialLIGOLWContentHandler` to read only the given `element` """ from ligo.lw.ligolw import PartialLIGOLWContentHandler from ligo.lw.table import Table if issubclass(element, Table): def _element_filter(name, attrs): return element.CheckProperties(name, attrs) else: def _element_filter(name, _): return name == element.tagName return build_content_handler(PartialLIGOLWContentHandler, _element_filter)
[ "def", "get_partial_contenthandler", "(", "element", ")", ":", "from", "ligo", ".", "lw", ".", "ligolw", "import", "PartialLIGOLWContentHandler", "from", "ligo", ".", "lw", ".", "table", "import", "Table", "if", "issubclass", "(", "element", ",", "Table", ")",...
Build a `PartialLIGOLWContentHandler` to read only this element Parameters ---------- element : `type`, subclass of :class:`~ligo.lw.ligolw.Element` the element class to be read, Returns ------- contenthandler : `type` a subclass of :class:`~ligo.lw.ligolw.PartialLIGOLWContentHandler` to read only the given `element`
[ "Build", "a", "PartialLIGOLWContentHandler", "to", "read", "only", "this", "element" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/ligolw.py#L135-L159
train
211,498
gwpy/gwpy
gwpy/io/ligolw.py
get_filtering_contenthandler
def get_filtering_contenthandler(element): """Build a `FilteringLIGOLWContentHandler` to exclude this element Parameters ---------- element : `type`, subclass of :class:`~ligo.lw.ligolw.Element` the element to exclude (and its children) Returns ------- contenthandler : `type` a subclass of :class:`~ligo.lw.ligolw.FilteringLIGOLWContentHandler` to exclude an element and its children """ from ligo.lw.ligolw import FilteringLIGOLWContentHandler from ligo.lw.table import Table if issubclass(element, Table): def _element_filter(name, attrs): return ~element.CheckProperties(name, attrs) else: def _element_filter(name, _): # pylint: disable=unused-argument return name != element.tagName return build_content_handler(FilteringLIGOLWContentHandler, _element_filter)
python
def get_filtering_contenthandler(element): """Build a `FilteringLIGOLWContentHandler` to exclude this element Parameters ---------- element : `type`, subclass of :class:`~ligo.lw.ligolw.Element` the element to exclude (and its children) Returns ------- contenthandler : `type` a subclass of :class:`~ligo.lw.ligolw.FilteringLIGOLWContentHandler` to exclude an element and its children """ from ligo.lw.ligolw import FilteringLIGOLWContentHandler from ligo.lw.table import Table if issubclass(element, Table): def _element_filter(name, attrs): return ~element.CheckProperties(name, attrs) else: def _element_filter(name, _): # pylint: disable=unused-argument return name != element.tagName return build_content_handler(FilteringLIGOLWContentHandler, _element_filter)
[ "def", "get_filtering_contenthandler", "(", "element", ")", ":", "from", "ligo", ".", "lw", ".", "ligolw", "import", "FilteringLIGOLWContentHandler", "from", "ligo", ".", "lw", ".", "table", "import", "Table", "if", "issubclass", "(", "element", ",", "Table", ...
Build a `FilteringLIGOLWContentHandler` to exclude this element Parameters ---------- element : `type`, subclass of :class:`~ligo.lw.ligolw.Element` the element to exclude (and its children) Returns ------- contenthandler : `type` a subclass of :class:`~ligo.lw.ligolw.FilteringLIGOLWContentHandler` to exclude an element and its children
[ "Build", "a", "FilteringLIGOLWContentHandler", "to", "exclude", "this", "element" ]
7a92b917e7dd2d99b15895293a1fa1d66cdb210a
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/ligolw.py#L163-L190
train
211,499