idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
22,600 | def get_ylabel ( self ) : units = self . units if len ( units ) == 1 and str ( units [ 0 ] ) == '' : return '' if len ( units ) == 1 and self . usetex : return units [ 0 ] . to_string ( 'latex' ) elif len ( units ) == 1 : return units [ 0 ] . to_string ( ) elif len ( units ) > 1 : return 'Multiple units' return super ( TimeSeries , self ) . get_ylabel ( ) | Text for y - axis label check if channel defines it |
22,601 | def generate_fft_plan ( length , level = None , dtype = 'float64' , forward = True ) : from . . . utils . lal import ( find_typed_function , to_lal_type_str ) laltype = to_lal_type_str ( dtype ) key = ( length , bool ( forward ) , laltype ) try : return LAL_FFTPLANS [ key ] except KeyError : create = find_typed_function ( dtype , 'Create' , 'FFTPlan' ) if level is None : level = LAL_FFTPLAN_LEVEL LAL_FFTPLANS [ key ] = create ( length , int ( bool ( forward ) ) , level ) return LAL_FFTPLANS [ key ] | Build a REAL8FFTPlan for a fast Fourier transform . |
22,602 | def generate_window ( length , window = None , dtype = 'float64' ) : from . . . utils . lal import ( find_typed_function , to_lal_type_str ) if window is None : window = ( 'kaiser' , 24 ) laltype = to_lal_type_str ( dtype ) key = ( length , str ( window ) , laltype ) try : return LAL_WINDOWS [ key ] except KeyError : if isinstance ( window , ( list , tuple ) ) : window , beta = window else : beta = 0 window = canonical_name ( window ) create = find_typed_function ( dtype , 'CreateNamed' , 'Window' ) LAL_WINDOWS [ key ] = create ( window , beta , length ) return LAL_WINDOWS [ key ] | Generate a time - domain window for use in a LAL FFT |
22,603 | def window_from_array ( array ) : from . . . utils . lal import ( find_typed_function ) dtype = array . dtype seq = find_typed_function ( dtype , 'Create' , 'Sequence' ) ( array . size ) seq . data = array return find_typed_function ( dtype , 'Create' , 'WindowFromSequence' ) ( seq ) | Convert a numpy . ndarray into a LAL Window object |
22,604 | def _lal_spectrum ( timeseries , segmentlength , noverlap = None , method = 'welch' , window = None , plan = None ) : import lal from . . . utils . lal import find_typed_function if noverlap is None : noverlap = int ( segmentlength // 2 ) stride = segmentlength - noverlap if window is None : window = generate_window ( segmentlength , dtype = timeseries . dtype ) if plan is None : plan = generate_fft_plan ( segmentlength , dtype = timeseries . dtype ) method = method . lower ( ) size = timeseries . size numsegs = 1 + int ( ( size - segmentlength ) / stride ) if method == 'median-mean' and numsegs % 2 : numsegs -= 1 if not numsegs : raise ValueError ( "Cannot calculate median-mean spectrum with " "this small a TimeSeries." ) required = int ( ( numsegs - 1 ) * stride + segmentlength ) if size != required : warnings . warn ( "Data array is the wrong size for the correct number " "of averages given the input parameters. The trailing " "%d samples will not be used in this calculation." % ( size - required ) ) timeseries = timeseries [ : required ] create = find_typed_function ( timeseries . dtype , 'Create' , 'FrequencySeries' ) lalfs = create ( timeseries . name , lal . LIGOTimeGPS ( timeseries . epoch . gps ) , 0 , 1 / segmentlength , lal . StrainUnit , int ( segmentlength // 2 + 1 ) ) methodname = '' . join ( map ( str . title , re . split ( '[-_]' , method ) ) ) spec_func = find_typed_function ( timeseries . dtype , '' , 'AverageSpectrum{}' . format ( methodname ) ) spec_func ( lalfs , timeseries . to_lal ( ) , segmentlength , stride , window , plan ) spec = FrequencySeries . from_lal ( lalfs ) spec . name = timeseries . name spec . channel = timeseries . channel spec . override_unit ( scale_timeseries_unit ( timeseries . unit , scaling = 'density' ) ) return spec | Generate a PSD FrequencySeries using |lal|_ |
22,605 | def welch ( timeseries , segmentlength , noverlap = None , window = None , plan = None ) : return _lal_spectrum ( timeseries , segmentlength , noverlap = noverlap , method = 'welch' , window = window , plan = plan ) | Calculate an PSD of this TimeSeries using Welch s method |
22,606 | def bartlett ( timeseries , segmentlength , noverlap = None , window = None , plan = None ) : return _lal_spectrum ( timeseries , segmentlength , noverlap = 0 , method = 'welch' , window = window , plan = plan ) | Calculate an PSD of this TimeSeries using Bartlett s method |
22,607 | def median ( timeseries , segmentlength , noverlap = None , window = None , plan = None ) : return _lal_spectrum ( timeseries , segmentlength , noverlap = noverlap , method = 'median' , window = window , plan = plan ) | Calculate a PSD of this TimeSeries using a median average method |
22,608 | def median_mean ( timeseries , segmentlength , noverlap = None , window = None , plan = None ) : return _lal_spectrum ( timeseries , segmentlength , noverlap = noverlap , method = 'median-mean' , window = window , plan = plan ) | Calculate a PSD of this TimeSeries using a median - mean average method |
22,609 | def open_data_source ( source ) : if isinstance ( source , FILE_LIKE ) : source = source . name if isinstance ( source , CacheEntry ) : source = source . path if ( isinstance ( source , string_types ) and source . endswith ( ( '.lcf' , '.cache' ) ) ) : return lalframe . FrStreamCacheOpen ( lal . CacheImport ( source ) ) if isinstance ( source , list ) and is_cache ( source ) : cache = lal . Cache ( ) for entry in file_list ( source ) : cache = lal . CacheMerge ( cache , lal . CacheGlob ( * os . path . split ( entry ) ) ) return lalframe . FrStreamCacheOpen ( cache ) if isinstance ( source , lal . Cache ) : return lalframe . FrStreamCacheOpen ( source ) if isinstance ( source , string_types ) : return lalframe . FrStreamOpen ( * map ( str , os . path . split ( source ) ) ) raise ValueError ( "Don't know how to open data source of type %r" % type ( source ) ) | Open a GWF file source into a lalframe . XLALFrStream object |
22,610 | def get_stream_duration ( stream ) : epoch = lal . LIGOTimeGPS ( stream . epoch . gpsSeconds , stream . epoch . gpsNanoSeconds ) nfile = stream . cache . length duration = 0 for dummy_i in range ( nfile ) : for dummy_j in range ( lalframe . FrFileQueryNFrame ( stream . file ) ) : duration += lalframe . FrFileQueryDt ( stream . file , 0 ) lalframe . FrStreamNext ( stream ) lalframe . FrStreamSeek ( stream , epoch ) return duration | Find the duration of time stored in a frame stream |
22,611 | def read ( source , channels , start = None , end = None , series_class = TimeSeries , scaled = None ) : if scaled is not None : warnings . warn ( "the `scaled` keyword argument is not supported by lalframe, " "if you require ADC scaling, please install " "python-ldas-tools-framecpp" , ) stream = open_data_source ( source ) epoch = lal . LIGOTimeGPS ( stream . epoch . gpsSeconds , stream . epoch . gpsNanoSeconds ) streamdur = get_stream_duration ( stream ) if start is None : start = epoch else : start = max ( epoch , lalutils . to_lal_ligotimegps ( start ) ) if end is None : offset = float ( start - epoch ) duration = streamdur - offset else : end = min ( epoch + streamdur , lalutils . to_lal_ligotimegps ( end ) ) duration = float ( end - start ) out = series_class . DictClass ( ) for name in channels : out [ name ] = series_class . from_lal ( _read_channel ( stream , str ( name ) , start = start , duration = duration ) , copy = False ) lalframe . FrStreamSeek ( stream , epoch ) return out | Read data from one or more GWF files using the LALFrame API |
22,612 | def write ( tsdict , outfile , start = None , end = None , name = 'gwpy' , run = 0 ) : if not start : start = list ( tsdict . values ( ) ) [ 0 ] . xspan [ 0 ] if not end : end = list ( tsdict . values ( ) ) [ 0 ] . xspan [ 1 ] duration = end - start detectors = 0 for series in tsdict . values ( ) : try : idx = list ( lalutils . LAL_DETECTORS . keys ( ) ) . index ( series . channel . ifo ) detectors |= 1 << 2 * idx except ( KeyError , AttributeError ) : continue frame = lalframe . FrameNew ( start , duration , name , run , 0 , detectors ) for series in tsdict . values ( ) : lalseries = series . to_lal ( ) add_ = lalutils . find_typed_function ( series . dtype , 'FrameAdd' , 'TimeSeriesProcData' , module = lalframe ) add_ ( frame , lalseries ) lalframe . FrameWrite ( frame , outfile ) | Write data to a GWF file using the LALFrame API |
22,613 | def segment_content_handler ( ) : from ligo . lw . lsctables import ( SegmentTable , SegmentDefTable , SegmentSumTable ) from ligo . lw . ligolw import PartialLIGOLWContentHandler def _filter ( name , attrs ) : return reduce ( operator . or_ , [ table_ . CheckProperties ( name , attrs ) for table_ in ( SegmentTable , SegmentDefTable , SegmentSumTable ) ] ) return build_content_handler ( PartialLIGOLWContentHandler , _filter ) | Build a ~xml . sax . handlers . ContentHandler to read segment XML tables |
22,614 | def read_ligolw_dict ( source , names = None , coalesce = False , ** kwargs ) : xmldoc = read_ligolw ( source , contenthandler = segment_content_handler ( ) ) with patch_ligotimegps ( type ( xmldoc . childNodes [ 0 ] ) . __module__ ) : out = DataQualityDict . from_ligolw_tables ( * xmldoc . childNodes , names = names , ** kwargs ) if coalesce : for flag in out : out [ flag ] . coalesce ( ) return out | Read segments for the given flag from the LIGO_LW XML file . |
22,615 | def read_ligolw_flag ( source , name = None , ** kwargs ) : name = [ name ] if name is not None else None return list ( read_ligolw_dict ( source , names = name , ** kwargs ) . values ( ) ) [ 0 ] | Read a single DataQualityFlag from a LIGO_LW XML file |
22,616 | def write_ligolw ( flags , target , attrs = None , ilwdchar_compat = None , ** kwargs ) : if isinstance ( flags , DataQualityFlag ) : flags = DataQualityDict ( { flags . name : flags } ) return write_tables ( target , flags . to_ligolw_tables ( ilwdchar_compat = ilwdchar_compat , ** attrs or dict ( ) ) , ** kwargs ) | Write this DataQualityFlag to the given LIGO_LW Document |
22,617 | def scale_timeseries_unit ( tsunit , scaling = 'density' ) : if scaling == 'density' : baseunit = units . Hertz elif scaling == 'spectrum' : baseunit = units . dimensionless_unscaled else : raise ValueError ( "Unknown scaling: %r" % scaling ) if tsunit : specunit = tsunit ** 2 / baseunit else : specunit = baseunit ** - 1 return specunit | Scale the unit of a TimeSeries to match that of a FrequencySeries |
22,618 | def _iter_cache ( cachefile , gpstype = LIGOTimeGPS ) : try : path = os . path . abspath ( cachefile . name ) except AttributeError : path = None for line in cachefile : try : yield _CacheEntry . parse ( line , gpstype = LIGOTimeGPS ) except ValueError : parts = line . split ( ) if len ( parts ) == 3 and os . path . abspath ( parts [ 0 ] ) != path : with open ( parts [ 0 ] , 'r' ) as cache2 : for entry in _iter_cache ( cache2 ) : yield entry else : raise | Internal method that yields a _CacheEntry for each line in the file |
22,619 | def read_cache ( cachefile , coltype = LIGOTimeGPS , sort = None , segment = None ) : if not isinstance ( cachefile , FILE_LIKE ) : with open ( file_path ( cachefile ) , 'r' ) as fobj : return read_cache ( fobj , coltype = coltype , sort = sort , segment = segment ) cache = [ x . path for x in _iter_cache ( cachefile , gpstype = coltype ) ] if segment : cache = sieve ( cache , segment = segment ) if sort : cache . sort ( key = sort ) return cache | Read a LAL - or FFL - format cache file as a list of file paths |
22,620 | def write_cache ( cache , fobj , format = None ) : if isinstance ( fobj , string_types ) : with open ( fobj , 'w' ) as fobj2 : return write_cache ( cache , fobj2 , format = format ) if format is None : formatter = str elif format . lower ( ) == "lal" : formatter = _format_entry_lal elif format . lower ( ) == "ffl" : formatter = _format_entry_ffl else : raise ValueError ( "Unrecognised cache format {!r}" . format ( format ) ) for line in map ( formatter , cache ) : try : print ( line , file = fobj ) except TypeError : fobj . write ( "{}\n" . format ( line ) . encode ( "utf-8" ) ) | Write a list of cache entries to a file |
22,621 | def is_cache ( cache ) : if isinstance ( cache , string_types + FILE_LIKE ) : try : return bool ( len ( read_cache ( cache ) ) ) except ( TypeError , ValueError , UnicodeDecodeError , ImportError ) : return False if HAS_CACHE and isinstance ( cache , Cache ) : return True if ( isinstance ( cache , ( list , tuple ) ) and cache and all ( map ( is_cache_entry , cache ) ) ) : return True return False | Returns True if cache is a readable cache file or object |
22,622 | def is_cache_entry ( path ) : if HAS_CACHEENTRY and isinstance ( path , CacheEntry ) : return True try : file_segment ( path ) except ( ValueError , TypeError , AttributeError ) : return False return True | Returns True if path can be represented as a cache entry |
22,623 | def filename_metadata ( filename ) : from . . segments import Segment name = Path ( filename ) . name try : obs , desc , start , dur = name . split ( '-' ) except ValueError as exc : exc . args = ( 'Failed to parse {!r} as LIGO-T050017-compatible ' 'filename' . format ( name ) , ) raise start = float ( start ) dur = dur . rsplit ( '.' , 1 ) [ 0 ] while True : try : dur = float ( dur ) except ValueError : if '.' not in dur : raise dur = dur . rsplit ( '.' , 1 ) [ 0 ] else : break return obs , desc , Segment ( start , start + dur ) | Return metadata parsed from a filename following LIGO - T050017 |
22,624 | def file_segment ( filename ) : from . . segments import Segment try : return Segment ( filename . segment ) except AttributeError : return filename_metadata ( filename ) [ 2 ] | Return the data segment for a filename following T050017 |
22,625 | def flatten ( * caches ) : return list ( OrderedDict . fromkeys ( e for c in caches for e in c ) ) | Flatten a nested list of cache entries |
22,626 | def find_contiguous ( * caches ) : flat = flatten ( * caches ) for segment in cache_segments ( flat ) : yield sieve ( flat , segment = segment ) | Separate one or more cache entry lists into time - contiguous sub - lists |
22,627 | def sieve ( cache , segment = None ) : return type ( cache ) ( e for e in cache if segment . intersects ( file_segment ( e ) ) ) | Filter the cache to find those entries that overlap segment |
22,628 | def arg_qxform ( cls , parser ) : group = parser . add_argument_group ( 'Q-transform options' ) group . add_argument ( '--plot' , nargs = '+' , type = float , default = [ .5 ] , help = 'One or more times to plot' ) group . add_argument ( '--frange' , nargs = 2 , type = float , help = 'Frequency range to plot' ) group . add_argument ( '--qrange' , nargs = 2 , type = float , help = 'Search Q range' ) group . add_argument ( '--nowhiten' , action = 'store_true' , help = 'do not whiten input before transform' ) | Add an ~argparse . ArgumentGroup for Q - transform options |
22,629 | def get_title ( self ) : def fformat ( x ) : if isinstance ( x , ( list , tuple ) ) : return '[{0}]' . format ( ', ' . join ( map ( fformat , x ) ) ) if isinstance ( x , Quantity ) : x = x . value elif isinstance ( x , str ) : warnings . warn ( 'WARNING: fformat called with a' + ' string. This has ' + 'been depricated and may disappear ' + 'in a future release.' ) x = float ( x ) return '{0:.2f}' . format ( x ) bits = [ ( 'Q' , fformat ( self . result . q ) ) ] bits . append ( ( 'tres' , '{:.3g}' . format ( self . qxfrm_args [ 'tres' ] ) ) ) if self . qxfrm_args . get ( 'qrange' ) : bits . append ( ( 'q-range' , fformat ( self . qxfrm_args [ 'qrange' ] ) ) ) if self . qxfrm_args [ 'whiten' ] : bits . append ( ( 'whitened' , ) ) bits . extend ( [ ( 'f-range' , fformat ( self . result . yspan ) ) , ( 'e-range' , '[{:.3g}, {:.3g}]' . format ( self . result . min ( ) , self . result . max ( ) ) ) , ] ) return ', ' . join ( [ ': ' . join ( bit ) for bit in bits ] ) | Default title for plot |
22,630 | def get_spectrogram ( self ) : args = self . args asd = self . timeseries [ 0 ] . asd ( ) . value if ( asd . min ( ) == 0 ) : self . log ( 0 , 'Input data has a zero in ASD. ' 'Q-transform not possible.' ) self . got_error = True qtrans = None else : gps = self . qxfrm_args [ 'gps' ] outseg = Segment ( gps , gps ) . protract ( args . plot [ self . plot_num ] ) inseg = outseg . protract ( 4 ) & self . timeseries [ 0 ] . span proc_ts = self . timeseries [ 0 ] . crop ( * inseg ) tres = float ( outseg . end - outseg . start ) / 4 / self . args . nx self . qxfrm_args [ 'tres' ] = tres self . qxfrm_args [ 'search' ] = int ( len ( proc_ts ) * proc_ts . dt . value ) self . log ( 3 , 'Q-transform arguments:' ) self . log ( 3 , '{0:>15s} = {1}' . format ( 'outseg' , outseg ) ) for key in sorted ( self . qxfrm_args ) : self . log ( 3 , '{0:>15s} = {1}' . format ( key , self . qxfrm_args [ key ] ) ) qtrans = proc_ts . q_transform ( outseg = outseg , ** self . qxfrm_args ) if args . ymin is None : args . ymin = qtrans . yspan [ 0 ] return qtrans | Worked on a single timesharing and generates a single Q - transform spectrogram |
22,631 | def _ligotimegps ( s , ns = 0 ) : from lal import LIGOTimeGPS try : return LIGOTimeGPS ( s , ns ) except TypeError : return LIGOTimeGPS ( int ( s ) , int ( ns ) ) | Catch TypeError and cast s and ns to int |
22,632 | def patch_ligotimegps ( module = "ligo.lw.lsctables" ) : module = import_module ( module ) orig = module . LIGOTimeGPS module . LIGOTimeGPS = _ligotimegps try : yield finally : module . LIGOTimeGPS = orig | Context manager to on - the - fly patch LIGOTimeGPS to accept all int types |
22,633 | def get_partial_contenthandler ( element ) : from ligo . lw . ligolw import PartialLIGOLWContentHandler from ligo . lw . table import Table if issubclass ( element , Table ) : def _element_filter ( name , attrs ) : return element . CheckProperties ( name , attrs ) else : def _element_filter ( name , _ ) : return name == element . tagName return build_content_handler ( PartialLIGOLWContentHandler , _element_filter ) | Build a PartialLIGOLWContentHandler to read only this element |
22,634 | def get_filtering_contenthandler ( element ) : from ligo . lw . ligolw import FilteringLIGOLWContentHandler from ligo . lw . table import Table if issubclass ( element , Table ) : def _element_filter ( name , attrs ) : return ~ element . CheckProperties ( name , attrs ) else : def _element_filter ( name , _ ) : return name != element . tagName return build_content_handler ( FilteringLIGOLWContentHandler , _element_filter ) | Build a FilteringLIGOLWContentHandler to exclude this element |
22,635 | def build_content_handler ( parent , filter_func ) : from ligo . lw . lsctables import use_in class _ContentHandler ( parent ) : def __init__ ( self , document ) : super ( _ContentHandler , self ) . __init__ ( document , filter_func ) return use_in ( _ContentHandler ) | Build a ~xml . sax . handler . ContentHandler with a given filter |
22,636 | def read_ligolw ( source , contenthandler = LIGOLWContentHandler , ** kwargs ) : from ligo . lw . ligolw import Document from ligo . lw import types from ligo . lw . lsctables import use_in from ligo . lw . utils import ( load_url , ligolw_add ) topytype = types . ToPyType . copy ( ) for key in types . ToPyType : if key in types . ToNumPyType : types . ToPyType [ key ] = numpy . dtype ( types . ToNumPyType [ key ] ) . type contenthandler = use_in ( contenthandler ) source = file_list ( source ) try : if len ( source ) == 1 : return load_url ( source [ 0 ] , contenthandler = contenthandler , ** kwargs ) return ligolw_add . ligolw_add ( Document ( ) , source , contenthandler = contenthandler , ** kwargs ) except LigolwElementError as exc : if LIGO_LW_COMPAT_ERROR . search ( str ( exc ) ) : try : return read_ligolw ( source , contenthandler = contenthandler , ilwdchar_compat = True , ** kwargs ) except Exception : pass raise finally : types . ToPyType = topytype | Read one or more LIGO_LW format files |
22,637 | def with_read_ligolw ( func = None , contenthandler = None ) : def decorator ( func_ ) : @ wraps ( func_ ) def decorated_func ( source , * args , ** kwargs ) : from ligo . lw . ligolw import Document from glue . ligolw . ligolw import Document as GlueDocument if not isinstance ( source , ( Document , GlueDocument ) ) : read_kw = { 'contenthandler' : kwargs . pop ( 'contenthandler' , contenthandler ) , 'verbose' : kwargs . pop ( 'verbose' , False ) , } return func_ ( read_ligolw ( source , ** read_kw ) , * args , ** kwargs ) return func_ ( source , * args , ** kwargs ) return decorated_func if func is not None : return decorator ( func ) return decorator | Decorate a LIGO_LW - reading function to open a filepath if needed |
22,638 | def open_xmldoc ( fobj , ** kwargs ) : from ligo . lw . ligolw import ( Document , LIGOLWContentHandler ) from ligo . lw . lsctables import use_in from ligo . lw . utils import ( load_filename , load_fileobj ) use_in ( kwargs . setdefault ( 'contenthandler' , LIGOLWContentHandler ) ) try : if isinstance ( fobj , string_types ) : return load_filename ( fobj , ** kwargs ) if isinstance ( fobj , FILE_LIKE ) : return load_fileobj ( fobj , ** kwargs ) [ 0 ] except ( OSError , IOError ) : return Document ( ) except LigolwElementError as exc : if LIGO_LW_COMPAT_ERROR . search ( str ( exc ) ) : try : return open_xmldoc ( fobj , ilwdchar_compat = True , ** kwargs ) except Exception : pass raise | Try and open an existing LIGO_LW - format file or create a new Document |
22,639 | def write_tables ( target , tables , append = False , overwrite = False , ** kwargs ) : from ligo . lw . ligolw import ( Document , LIGO_LW , LIGOLWContentHandler ) from ligo . lw import utils as ligolw_utils if isinstance ( target , ( Document , LIGO_LW ) ) : xmldoc = target elif append : xmldoc = open_xmldoc ( target , contenthandler = kwargs . pop ( 'contenthandler' , LIGOLWContentHandler ) ) elif ( not overwrite and isinstance ( target , string_types ) and os . path . isfile ( target ) ) : raise IOError ( "File exists: {}" . format ( target ) ) else : xmldoc = Document ( ) write_tables_to_document ( xmldoc , tables , overwrite = overwrite ) if isinstance ( target , string_types ) : kwargs . setdefault ( 'gz' , target . endswith ( '.gz' ) ) ligolw_utils . write_filename ( xmldoc , target , ** kwargs ) elif isinstance ( target , FILE_LIKE ) : kwargs . setdefault ( 'gz' , target . name . endswith ( '.gz' ) ) ligolw_utils . write_fileobj ( xmldoc , target , ** kwargs ) | Write an LIGO_LW table to file |
22,640 | def to_table_type ( val , cls , colname ) : from ligo . lw . types import ( ToNumPyType as numpytypes , ToPyType as pytypes , ) if val is None or colname not in cls . validcolumns : return val llwtype = cls . validcolumns [ colname ] if llwtype == 'ilwd:char' : return _to_ilwd ( val , cls . tableName , colname , ilwdchar_compat = _is_glue_ligolw_object ( cls ) ) try : return numpy . typeDict [ numpytypes [ llwtype ] ] ( val ) except KeyError : return pytypes [ llwtype ] ( val ) | Cast a value to the correct type for inclusion in a LIGO_LW table |
22,641 | def is_ligolw ( origin , filepath , fileobj , * args , ** kwargs ) : if fileobj is not None : loc = fileobj . tell ( ) fileobj . seek ( 0 ) try : line1 = fileobj . readline ( ) . lower ( ) line2 = fileobj . readline ( ) . lower ( ) try : return ( line1 . startswith ( XML_SIGNATURE ) and line2 . startswith ( ( LIGOLW_SIGNATURE , LIGOLW_ELEMENT ) ) ) except TypeError : return ( line1 . startswith ( XML_SIGNATURE . decode ( 'utf-8' ) ) and line2 . startswith ( ( LIGOLW_SIGNATURE . decode ( 'utf-8' ) , LIGOLW_ELEMENT . decode ( 'utf-8' ) ) ) ) finally : fileobj . seek ( loc ) try : from ligo . lw . ligolw import Element except ImportError : return False try : from glue . ligolw . ligolw import Element as GlueElement except ImportError : element_types = ( Element , ) else : element_types = ( Element , GlueElement ) return len ( args ) > 0 and isinstance ( args [ 0 ] , element_types ) | Identify a file object as LIGO_LW - format XML |
22,642 | def read_hdf5_timeseries ( h5f , path = None , start = None , end = None , ** kwargs ) : kwargs . setdefault ( 'array_type' , TimeSeries ) series = read_hdf5_array ( h5f , path = path , ** kwargs ) if start is not None or end is not None : return series . crop ( start , end ) return series | Read a TimeSeries from HDF5 |
22,643 | def read_hdf5_dict ( h5f , names = None , group = None , ** kwargs ) : if group : h5g = h5f [ group ] else : h5g = h5f if names is None : names = [ key for key in h5g if _is_timeseries_dataset ( h5g [ key ] ) ] out = kwargs . pop ( 'dict_type' , TimeSeriesDict ) ( ) kwargs . setdefault ( 'array_type' , out . EntryClass ) for name in names : out [ name ] = read_hdf5_timeseries ( h5g [ name ] , ** kwargs ) return out | Read a TimeSeriesDict from HDF5 |
22,644 | def write_hdf5_dict ( tsdict , h5f , group = None , ** kwargs ) : if group and group not in h5f : h5g = h5f . create_group ( group ) elif group : h5g = h5f [ group ] else : h5g = h5f kwargs . setdefault ( 'format' , 'hdf5' ) for key , series in tsdict . items ( ) : series . write ( h5g , path = str ( key ) , ** kwargs ) | Write a TimeSeriesBaseDict to HDF5 |
22,645 | def welch ( timeseries , segmentlength , noverlap = None , scheme = None , ** kwargs ) : from pycbc . psd import welch as pycbc_welch kwargs . setdefault ( 'avg_method' , 'mean' ) if scheme is None : scheme = null_context ( ) with scheme : pycbc_fseries = pycbc_welch ( timeseries . to_pycbc ( copy = False ) , seg_len = segmentlength , seg_stride = segmentlength - noverlap , ** kwargs ) fseries = FrequencySeries . from_pycbc ( pycbc_fseries , copy = False ) fseries . name = timeseries . name fseries . override_unit ( scale_timeseries_unit ( timeseries . unit , scaling = 'density' ) ) return fseries | Calculate a PSD using Welch s method with a mean average |
22,646 | def _ordinal ( n ) : idx = int ( ( n // 10 % 10 != 1 ) * ( n % 10 < 4 ) * n % 10 ) return '{}{}' . format ( n , "tsnrhtdd" [ idx : : 4 ] ) | Returns the ordinal string for a given integer |
22,647 | def ratio ( self , operand ) : if isinstance ( operand , string_types ) : if operand == 'mean' : operand = self . mean ( axis = 0 ) elif operand == 'median' : operand = self . median ( axis = 0 ) else : raise ValueError ( "operand %r unrecognised, please give a " "Quantity or one of: 'mean', 'median'" % operand ) out = self / operand return out | Calculate the ratio of this Spectrogram against a reference |
22,648 | def plot ( self , figsize = ( 12 , 6 ) , xscale = 'auto-gps' , ** kwargs ) : if 'imshow' in kwargs : warnings . warn ( 'the imshow keyword for Spectrogram.plot was ' 'removed, please pass method=\'imshow\' instead' , DeprecationWarning ) kwargs . setdefault ( 'method' , 'imshow' if kwargs . pop ( 'imshow' ) else 'pcolormesh' ) kwargs . update ( figsize = figsize , xscale = xscale ) return super ( Spectrogram , self ) . plot ( ** kwargs ) | Plot the data for this Spectrogram |
22,649 | def from_spectra ( cls , * spectra , ** kwargs ) : data = numpy . vstack ( [ s . value for s in spectra ] ) spec1 = list ( spectra ) [ 0 ] if not all ( s . f0 == spec1 . f0 for s in spectra ) : raise ValueError ( "Cannot stack spectra with different f0" ) if not all ( s . df == spec1 . df for s in spectra ) : raise ValueError ( "Cannot stack spectra with different df" ) kwargs . setdefault ( 'name' , spec1 . name ) kwargs . setdefault ( 'channel' , spec1 . channel ) kwargs . setdefault ( 'epoch' , spec1 . epoch ) kwargs . setdefault ( 'f0' , spec1 . f0 ) kwargs . setdefault ( 'df' , spec1 . df ) kwargs . setdefault ( 'unit' , spec1 . unit ) if not ( 'dt' in kwargs or 'times' in kwargs ) : try : kwargs . setdefault ( 'dt' , spectra [ 1 ] . epoch . gps - spec1 . epoch . gps ) except ( AttributeError , IndexError ) : raise ValueError ( "Cannot determine dt (time-spacing) for " "Spectrogram from inputs" ) return Spectrogram ( data , ** kwargs ) | Build a new Spectrogram from a list of spectra . |
22,650 | def percentile ( self , percentile ) : out = scipy . percentile ( self . value , percentile , axis = 0 ) if self . name is not None : name = '{}: {} percentile' . format ( self . name , _ordinal ( percentile ) ) else : name = None return FrequencySeries ( out , epoch = self . epoch , channel = self . channel , name = name , f0 = self . f0 , df = self . df , frequencies = ( hasattr ( self , '_frequencies' ) and self . frequencies or None ) ) | Calculate a given spectral percentile for this Spectrogram . |
22,651 | def variance ( self , bins = None , low = None , high = None , nbins = 500 , log = False , norm = False , density = False ) : from . . frequencyseries import SpectralVariance return SpectralVariance . from_spectrogram ( self , bins = bins , low = low , high = high , nbins = nbins , log = log , norm = norm , density = density ) | Calculate the SpectralVariance of this Spectrogram . |
22,652 | def crop_frequencies ( self , low = None , high = None , copy = False ) : if low is not None : low = units . Quantity ( low , self . _default_yunit ) if high is not None : high = units . Quantity ( high , self . _default_yunit ) if low is not None and low == self . f0 : low = None elif low is not None and low < self . f0 : warnings . warn ( 'Spectrogram.crop_frequencies given low frequency ' 'cutoff below f0 of the input Spectrogram. Low ' 'frequency crop will have no effect.' ) if high is not None and high . value == self . band [ 1 ] : high = None elif high is not None and high . value > self . band [ 1 ] : warnings . warn ( 'Spectrogram.crop_frequencies given high frequency ' 'cutoff above cutoff of the input Spectrogram. High ' 'frequency crop will have no effect.' ) if low is None : idx0 = None else : idx0 = int ( float ( low . value - self . f0 . value ) // self . df . value ) if high is None : idx1 = None else : idx1 = int ( float ( high . value - self . f0 . value ) // self . df . value ) if copy : return self [ : , idx0 : idx1 ] . copy ( ) return self [ : , idx0 : idx1 ] | Crop this Spectrogram to the specified frequencies |
22,653 | def find_mappable ( * axes ) : for ax in axes : for aset in ( 'collections' , 'images' ) : try : return getattr ( ax , aset ) [ - 1 ] except ( AttributeError , IndexError ) : continue raise ValueError ( "Cannot determine mappable layer on any axes " "for this colorbar" ) | Find the most recently added mappable layer in the given axes |
22,654 | def get_read_format ( cls , source , args , kwargs ) : ctx = None if isinstance ( source , FILE_LIKE ) : fileobj = source filepath = source . name if hasattr ( source , 'name' ) else None else : filepath = source try : ctx = get_readable_fileobj ( filepath , encoding = 'binary' ) fileobj = ctx . __enter__ ( ) except IOError : raise except Exception : fileobj = None try : return get_format ( 'read' , cls , filepath , fileobj , args , kwargs ) finally : if ctx is not None : ctx . __exit__ ( * sys . exc_info ( ) ) | Determine the read format for a given input source |
22,655 | def read ( fobj , ** kwargs ) : fsamp , arr = wavfile . read ( fobj , ** kwargs ) return TimeSeries ( arr , sample_rate = fsamp ) | Read a WAV file into a TimeSeries |
22,656 | def write ( series , output , scale = None ) : fsamp = int ( series . sample_rate . decompose ( ) . value ) if scale is None : scale = 1 / numpy . abs ( series . value ) . max ( ) data = ( series . value * scale ) . astype ( 'float32' ) return wavfile . write ( output , fsamp , data ) | Write a TimeSeries to a WAV file |
22,657 | def is_wav ( origin , filepath , fileobj , * args , ** kwargs ) : if origin == 'read' and fileobj is not None : loc = fileobj . tell ( ) fileobj . seek ( 0 ) try : riff , _ , fmt = struct . unpack ( '<4sI4s' , fileobj . read ( 12 ) ) if isinstance ( riff , bytes ) : riff = riff . decode ( 'utf-8' ) fmt = fmt . decode ( 'utf-8' ) return riff == WAV_SIGNATURE [ 0 ] and fmt == WAV_SIGNATURE [ 1 ] except ( UnicodeDecodeError , struct . error ) : return False finally : fileobj . seek ( loc ) elif filepath is not None : return filepath . endswith ( ( '.wav' , '.wave' ) ) else : try : wave . open ( args [ 0 ] ) except ( wave . Error , AttributeError ) : return False else : return True | Identify a file as WAV |
22,658 | def parse_column_filter ( definition ) : parts = list ( generate_tokens ( StringIO ( definition . strip ( ) ) . readline ) ) while parts [ - 1 ] [ 0 ] in ( token . ENDMARKER , token . NEWLINE ) : parts = parts [ : - 1 ] if len ( parts ) == 3 : a , b , c = parts if a [ 0 ] in [ token . NAME , token . STRING ] : name = QUOTE_REGEX . sub ( '' , a [ 1 ] ) oprtr = OPERATORS [ b [ 1 ] ] value = _float_or_str ( c [ 1 ] ) return [ ( name , oprtr , value ) ] elif b [ 0 ] in [ token . NAME , token . STRING ] : name = QUOTE_REGEX . sub ( '' , b [ 1 ] ) oprtr = OPERATORS_INV [ b [ 1 ] ] value = _float_or_str ( a [ 1 ] ) return [ ( name , oprtr , value ) ] elif len ( parts ) == 5 : a , b , c , d , e = list ( zip ( * parts ) ) [ 1 ] name = QUOTE_REGEX . sub ( '' , c ) return [ ( name , OPERATORS_INV [ b ] , _float_or_str ( a ) ) , ( name , OPERATORS [ d ] , _float_or_str ( e ) ) ] raise ValueError ( "Cannot parse filter definition from %r" % definition ) | Parse a str of the form column > 50 |
22,659 | def parse_column_filters ( * definitions ) : fltrs = [ ] for def_ in _flatten ( definitions ) : if is_filter_tuple ( def_ ) : fltrs . append ( def_ ) else : for splitdef in DELIM_REGEX . split ( def_ ) [ : : 2 ] : fltrs . extend ( parse_column_filter ( splitdef ) ) return fltrs | Parse multiple compound column filter definitions |
22,660 | def _flatten ( container ) : if isinstance ( container , string_types ) : container = [ container ] for elem in container : if isinstance ( elem , string_types ) or is_filter_tuple ( elem ) : yield elem else : for elem2 in _flatten ( elem ) : yield elem2 | Flatten arbitrary nested list of filters into a 1 - D list |
22,661 | def is_filter_tuple ( tup ) : return isinstance ( tup , ( tuple , list ) ) and ( len ( tup ) == 3 and isinstance ( tup [ 0 ] , string_types ) and callable ( tup [ 1 ] ) ) | Return whether a tuple matches the format for a column filter |
22,662 | def filter_table ( table , * column_filters ) : keep = numpy . ones ( len ( table ) , dtype = bool ) for name , op_func , operand in parse_column_filters ( * column_filters ) : col = table [ name ] . view ( numpy . ndarray ) keep &= op_func ( col , operand ) return table [ keep ] | Apply one or more column slice filters to a Table |
22,663 | def read_hdf5_array ( source , path = None , array_type = Array ) : dataset = io_hdf5 . find_dataset ( source , path = path ) attrs = dict ( dataset . attrs ) try : attrs [ 'channel' ] = _unpickle_channel ( attrs [ 'channel' ] ) except KeyError : pass for key in attrs : if isinstance ( attrs [ key ] , bytes ) : attrs [ key ] = attrs [ key ] . decode ( 'utf-8' ) return array_type ( dataset [ ( ) ] , ** attrs ) | Read an Array from the given HDF5 object |
22,664 | def _unpickle_channel ( raw ) : try : return pickle . loads ( raw ) except ( ValueError , pickle . UnpicklingError , EOFError , TypeError , IndexError ) as exc : if isinstance ( raw , bytes ) : raw = raw . decode ( 'utf-8' ) try : Channel . MATCH . match ( raw ) except ValueError : raise exc return raw | Try and unpickle a channel with sensible error handling |
22,665 | def _format_metadata_attribute ( value ) : if ( value is None or ( isinstance ( value , Index ) and value . regular ) ) : raise IgnoredAttribute for typekey , func in ATTR_TYPE_MAP . items ( ) : if issubclass ( type ( value ) , typekey ) : return func ( value ) return value | Format a value for writing to HDF5 as a h5py . Dataset attribute |
22,666 | def write_array_metadata ( dataset , array ) : for attr in ( 'unit' , ) + array . _metadata_slots : try : value = _format_metadata_attribute ( getattr ( array , '_%s' % attr , None ) ) except IgnoredAttribute : continue try : dataset . attrs [ attr ] = value except ( TypeError , ValueError , RuntimeError ) as exc : exc . args = ( "Failed to store {} ({}) for {}: {}" . format ( attr , type ( value ) . __name__ , type ( array ) . __name__ , str ( exc ) ) ) raise | Write metadata for array into the h5py . Dataset |
22,667 | def write_hdf5_array ( array , h5g , path = None , attrs = None , append = False , overwrite = False , compression = 'gzip' , ** kwargs ) : if path is None : path = array . name if path is None : raise ValueError ( "Cannot determine HDF5 path for %s, " "please set ``name`` attribute, or pass ``path=`` " "keyword when writing" % type ( array ) . __name__ ) dset = io_hdf5 . create_dataset ( h5g , path , overwrite = overwrite , data = array . value , compression = compression , ** kwargs ) write_array_metadata ( dset , array ) if attrs : for key in attrs : dset . attrs [ key ] = attrs [ key ] return dset | Write the array to an h5py . Dataset |
22,668 | def format_index_array_attrs ( series ) : attrs = { } for i , axis in zip ( range ( series . ndim ) , ( 'x' , 'y' ) ) : unit = '{}unit' . format ( axis ) origin = '{}0' . format ( axis ) delta = 'd{}' . format ( axis ) aunit = getattr ( series , unit ) attrs . update ( { unit : str ( aunit ) , origin : getattr ( series , origin ) . to ( aunit ) . value , delta : getattr ( series , delta ) . to ( aunit ) . value , } ) return attrs | Format metadata attributes for an indexed array
22,669 | def write_hdf5_series ( series , output , path = None , attrs = None , ** kwargs ) : if attrs is None : attrs = format_index_array_attrs ( series ) return write_hdf5_array ( series , output , path = path , attrs = attrs , ** kwargs ) | Write a Series to HDF5 . |
22,670 | def read_multi ( flatten , cls , source , * args , ** kwargs ) : verbose = kwargs . pop ( 'verbose' , False ) try : files = file_list ( source ) except ValueError : files = [ source ] path = None else : path = files [ 0 ] if files else None if kwargs . get ( 'format' , None ) is None : kwargs [ 'format' ] = get_read_format ( cls , path , ( source , ) + args , kwargs ) nproc = min ( kwargs . pop ( 'nproc' , 1 ) , len ( files ) ) def _read_single_file ( fobj ) : try : return fobj , io_read ( cls , fobj , * args , ** kwargs ) except Exception as exc : if nproc == 1 : raise if isinstance ( exc , SAXException ) : return fobj , exc . getException ( ) return fobj , exc if verbose is True : verbose = 'Reading ({})' . format ( kwargs [ 'format' ] ) output = mp_utils . multiprocess_with_queues ( nproc , _read_single_file , files , verbose = verbose , unit = 'files' ) for fobj , exc in output : if isinstance ( exc , Exception ) : exc . args = ( 'Failed to read %s: %s' % ( fobj , str ( exc ) ) , ) raise exc _ , out = zip ( * output ) return flatten ( out ) | Read sources into a cls with multiprocessing |
22,671 | def read_json_flag ( fobj ) : if isinstance ( fobj , string_types ) : with open ( fobj , 'r' ) as fobj2 : return read_json_flag ( fobj2 ) txt = fobj . read ( ) if isinstance ( txt , bytes ) : txt = txt . decode ( 'utf-8' ) data = json . loads ( txt ) name = '{ifo}:{name}:{version}' . format ( ** data ) out = DataQualityFlag ( name , active = data [ 'active' ] , known = data [ 'known' ] ) try : out . description = data [ 'metadata' ] . get ( 'flag_description' , None ) except KeyError : pass else : out . isgood = not data [ 'metadata' ] . get ( 'active_indicates_ifo_badness' , False ) return out | Read a DataQualityFlag from a segments - web . ligo . org JSON file |
22,672 | def write_json_flag ( flag , fobj , ** kwargs ) : if isinstance ( fobj , string_types ) : with open ( fobj , 'w' ) as fobj2 : return write_json_flag ( flag , fobj2 , ** kwargs ) data = { } data [ 'ifo' ] = flag . ifo data [ 'name' ] = flag . tag data [ 'version' ] = flag . version data [ 'active' ] = flag . active data [ 'known' ] = flag . known data [ 'metadata' ] = { } data [ 'metadata' ] [ 'active_indicates_ifo_badness' ] = not flag . isgood data [ 'metadata' ] [ 'flag_description' ] = flag . description json . dump ( data , fobj , ** kwargs ) | Write a DataQualityFlag to a JSON file |
22,673 | def in_segmentlist ( column , segmentlist ) : segmentlist = type ( segmentlist ) ( segmentlist ) . coalesce ( ) idx = column . argsort ( ) contains = numpy . zeros ( column . shape [ 0 ] , dtype = bool ) j = 0 try : segstart , segend = segmentlist [ j ] except IndexError : return contains i = 0 while i < contains . shape [ 0 ] : x = idx [ i ] time = column [ x ] if time < segstart : i += 1 continue if time >= segend : j += 1 try : segstart , segend = segmentlist [ j ] continue except IndexError : break contains [ x ] = True i += 1 return contains | Return the index of values lying inside the given segmentlist |
22,674 | def fft ( self , nfft = None ) : from . . frequencyseries import FrequencySeries if nfft is None : nfft = self . size dft = npfft . rfft ( self . value , n = nfft ) / nfft dft [ 1 : ] *= 2.0 new = FrequencySeries ( dft , epoch = self . epoch , unit = self . unit , name = self . name , channel = self . channel ) try : new . frequencies = npfft . rfftfreq ( nfft , d = self . dx . value ) except AttributeError : new . frequencies = numpy . arange ( new . size ) / ( nfft * self . dx . value ) return new | Compute the one - dimensional discrete Fourier transform of this TimeSeries . |
22,675 | def average_fft ( self , fftlength = None , overlap = 0 , window = None ) : from gwpy . spectrogram import Spectrogram if fftlength is None : fftlength = self . duration if isinstance ( fftlength , units . Quantity ) : fftlength = fftlength . value nfft = int ( ( fftlength * self . sample_rate ) . decompose ( ) . value ) noverlap = int ( ( overlap * self . sample_rate ) . decompose ( ) . value ) navg = divmod ( self . size - noverlap , ( nfft - noverlap ) ) [ 0 ] if window is None : window = 'boxcar' if isinstance ( window , ( str , tuple ) ) : win = signal . get_window ( window , nfft ) else : win = numpy . asarray ( window ) if len ( win . shape ) != 1 : raise ValueError ( 'window must be 1-D' ) elif win . shape [ 0 ] != nfft : raise ValueError ( 'Window is the wrong size.' ) win = win . astype ( self . dtype ) scaling = 1. / numpy . absolute ( win ) . mean ( ) if nfft % 2 : nfreqs = ( nfft + 1 ) // 2 else : nfreqs = nfft // 2 + 1 ffts = Spectrogram ( numpy . zeros ( ( navg , nfreqs ) , dtype = numpy . complex ) , channel = self . channel , epoch = self . epoch , f0 = 0 , df = 1 / fftlength , dt = 1 , copy = True ) idx = 0 for i in range ( navg ) : idx_end = idx + nfft if idx_end > self . size : continue stepseries = self [ idx : idx_end ] . detrend ( ) * win fft_ = stepseries . fft ( nfft = nfft ) * scaling ffts . value [ i , : ] = fft_ . value idx += ( nfft - noverlap ) mean = ffts . mean ( 0 ) mean . name = self . name mean . epoch = self . epoch mean . channel = self . channel return mean | Compute the averaged one - dimensional DFT of this TimeSeries . |
22,676 | def psd ( self , fftlength = None , overlap = None , window = 'hann' , method = DEFAULT_FFT_METHOD , ** kwargs ) : method_func = spectral . get_method ( method ) return spectral . psd ( self , method_func , fftlength = fftlength , overlap = overlap , window = window , ** kwargs ) | Calculate the PSD FrequencySeries for this TimeSeries |
22,677 | def asd ( self , fftlength = None , overlap = None , window = 'hann' , method = DEFAULT_FFT_METHOD , ** kwargs ) : return self . psd ( method = method , fftlength = fftlength , overlap = overlap , window = window , ** kwargs ) ** ( 1 / 2. ) | Calculate the ASD FrequencySeries of this TimeSeries |
22,678 | def csd ( self , other , fftlength = None , overlap = None , window = 'hann' , ** kwargs ) : return spectral . psd ( ( self , other ) , spectral . csd , fftlength = fftlength , overlap = overlap , window = window , ** kwargs ) | Calculate the CSD FrequencySeries for two TimeSeries |
22,679 | def spectrogram ( self , stride , fftlength = None , overlap = None , window = 'hann' , method = DEFAULT_FFT_METHOD , nproc = 1 , ** kwargs ) : method_func = spectral . get_method ( method ) return spectral . average_spectrogram ( self , method_func , stride , fftlength = fftlength , overlap = overlap , window = window , ** kwargs ) | Calculate the average power spectrogram of this TimeSeries using the specified average spectrum method . |
22,680 | def spectrogram2 ( self , fftlength , overlap = None , window = 'hann' , ** kwargs ) : kwargs . setdefault ( 'fs' , self . sample_rate . to ( 'Hz' ) . value ) return spectral . spectrogram ( self , signal . periodogram , fftlength = fftlength , overlap = overlap , window = window , ** kwargs ) | Calculate the non - averaged power Spectrogram of this TimeSeries |
22,681 | def fftgram ( self , fftlength , overlap = None , window = 'hann' , ** kwargs ) : from . . spectrogram import Spectrogram try : from scipy . signal import spectrogram except ImportError : raise ImportError ( "Must have scipy>=0.16 to utilize " "this method." ) if isinstance ( fftlength , units . Quantity ) : fftlength = fftlength . value nfft = int ( ( fftlength * self . sample_rate ) . decompose ( ) . value ) if not overlap : noverlap = nfft // 8 else : noverlap = int ( ( overlap * self . sample_rate ) . decompose ( ) . value ) [ frequencies , times , sxx ] = spectrogram ( self , fs = self . sample_rate . value , window = window , nperseg = nfft , noverlap = noverlap , mode = 'complex' , ** kwargs ) return Spectrogram ( sxx . T , name = self . name , unit = self . unit , xindex = self . t0 . value + times , yindex = frequencies ) | Calculate the Fourier - gram of this TimeSeries . |
22,682 | def spectral_variance ( self , stride , fftlength = None , overlap = None , method = DEFAULT_FFT_METHOD , window = 'hann' , nproc = 1 , filter = None , bins = None , low = None , high = None , nbins = 500 , log = False , norm = False , density = False ) : specgram = self . spectrogram ( stride , fftlength = fftlength , overlap = overlap , method = method , window = window , nproc = nproc ) ** ( 1 / 2. ) if filter : specgram = specgram . filter ( * filter ) return specgram . variance ( bins = bins , low = low , high = high , nbins = nbins , log = log , norm = norm , density = density ) | Calculate the SpectralVariance of this TimeSeries . |
22,683 | def rayleigh_spectrum ( self , fftlength = None , overlap = None ) : return spectral . psd ( self , spectral . rayleigh , fftlength = fftlength , overlap = overlap , ) | Calculate the Rayleigh FrequencySeries for this TimeSeries . |
22,684 | def rayleigh_spectrogram ( self , stride , fftlength = None , overlap = 0 , nproc = 1 , ** kwargs ) : specgram = spectral . average_spectrogram ( self , spectral . rayleigh , stride , fftlength = fftlength , overlap = overlap , nproc = nproc , ** kwargs ) specgram . override_unit ( '' ) return specgram | Calculate the Rayleigh statistic spectrogram of this TimeSeries |
22,685 | def csd_spectrogram ( self , other , stride , fftlength = None , overlap = 0 , window = 'hann' , nproc = 1 , ** kwargs ) : return spectral . average_spectrogram ( ( self , other ) , spectral . csd , stride , fftlength = fftlength , overlap = overlap , window = window , nproc = nproc , ** kwargs ) | Calculate the cross spectral density spectrogram of this TimeSeries with other . |
22,686 | def highpass ( self , frequency , gpass = 2 , gstop = 30 , fstop = None , type = 'iir' , filtfilt = True , ** kwargs ) : filt = filter_design . highpass ( frequency , self . sample_rate , fstop = fstop , gpass = gpass , gstop = gstop , analog = False , type = type , ** kwargs ) return self . filter ( * filt , filtfilt = filtfilt ) | Filter this TimeSeries with a high - pass filter . |
22,687 | def bandpass ( self , flow , fhigh , gpass = 2 , gstop = 30 , fstop = None , type = 'iir' , filtfilt = True , ** kwargs ) : filt = filter_design . bandpass ( flow , fhigh , self . sample_rate , fstop = fstop , gpass = gpass , gstop = gstop , analog = False , type = type , ** kwargs ) return self . filter ( * filt , filtfilt = filtfilt ) | Filter this TimeSeries with a band - pass filter . |
22,688 | def resample ( self , rate , window = 'hamming' , ftype = 'fir' , n = None ) : if n is None and ftype == 'iir' : n = 8 elif n is None : n = 60 if isinstance ( rate , units . Quantity ) : rate = rate . value factor = ( self . sample_rate . value / rate ) if numpy . isclose ( factor , 1. , rtol = 1e-09 , atol = 0. ) : warnings . warn ( "resample() rate matches current sample_rate ({}), returning " "input data unmodified; please double-check your " "parameters" . format ( self . sample_rate ) , UserWarning , ) return self if factor . is_integer ( ) : if ftype == 'iir' : filt = signal . cheby1 ( n , 0.05 , 0.8 / factor , output = 'zpk' ) else : filt = signal . firwin ( n + 1 , 1. / factor , window = window ) return self . filter ( filt , filtfilt = True ) [ : : int ( factor ) ] else : nsamp = int ( self . shape [ 0 ] * self . dx . value * rate ) new = signal . resample ( self . value , nsamp , window = window ) . view ( self . __class__ ) new . __metadata_finalize__ ( self ) new . _unit = self . unit new . sample_rate = rate return new | Resample this Series to a new rate |
22,689 | def zpk ( self , zeros , poles , gain , analog = True , ** kwargs ) : return self . filter ( zeros , poles , gain , analog = analog , ** kwargs ) | Filter this TimeSeries by applying a zero - pole - gain filter |
22,690 | def filter ( self , * filt , ** kwargs ) : filtfilt = kwargs . pop ( 'filtfilt' , False ) form , filt = filter_design . parse_filter ( filt , analog = kwargs . pop ( 'analog' , False ) , sample_rate = self . sample_rate . to ( 'Hz' ) . value , ) if form == 'zpk' : try : sos = signal . zpk2sos ( * filt ) except AttributeError : sos = None b , a = signal . zpk2tf ( * filt ) else : sos = None b , a = filt kwargs . setdefault ( 'axis' , 0 ) if sos is not None and filtfilt : out = signal . sosfiltfilt ( sos , self , ** kwargs ) elif sos is not None : out = signal . sosfilt ( sos , self , ** kwargs ) elif filtfilt : out = signal . filtfilt ( b , a , self , ** kwargs ) else : out = signal . lfilter ( b , a , self , ** kwargs ) new = out . view ( type ( self ) ) new . __metadata_finalize__ ( self ) new . _unit = self . unit return new | Filter this TimeSeries with an IIR or FIR filter |
22,691 | def coherence ( self , other , fftlength = None , overlap = None , window = 'hann' , ** kwargs ) : from matplotlib import mlab from . . frequencyseries import FrequencySeries if self . sample_rate . to ( 'Hertz' ) != other . sample_rate . to ( 'Hertz' ) : sampling = min ( self . sample_rate . value , other . sample_rate . value ) if self . sample_rate . value == sampling : other = other . resample ( sampling ) self_ = self else : self_ = self . resample ( sampling ) else : sampling = self . sample_rate . value self_ = self if overlap is None : overlap = 0 else : overlap = int ( ( overlap * self_ . sample_rate ) . decompose ( ) . value ) if fftlength is None : fftlength = int ( self_ . size / 2. + overlap / 2. ) else : fftlength = int ( ( fftlength * self_ . sample_rate ) . decompose ( ) . value ) if window is not None : kwargs [ 'window' ] = signal . get_window ( window , fftlength ) coh , freqs = mlab . cohere ( self_ . value , other . value , NFFT = fftlength , Fs = sampling , noverlap = overlap , ** kwargs ) out = coh . view ( FrequencySeries ) out . xindex = freqs out . epoch = self . epoch out . name = 'Coherence between %s and %s' % ( self . name , other . name ) out . unit = 'coherence' return out | Calculate the frequency - coherence between this TimeSeries and another . |
22,692 | def auto_coherence ( self , dt , fftlength = None , overlap = None , window = 'hann' , ** kwargs ) : dt = abs ( dt ) self_ = self . crop ( self . span [ 0 ] , self . span [ 1 ] - dt ) other = self . crop ( self . span [ 0 ] + dt , self . span [ 1 ] ) return self_ . coherence ( other , fftlength = fftlength , overlap = overlap , window = window , ** kwargs ) | Calculate the frequency - coherence between this TimeSeries and a time - shifted copy of itself . |
22,693 | def coherence_spectrogram ( self , other , stride , fftlength = None , overlap = None , window = 'hann' , nproc = 1 ) : from . . spectrogram . coherence import from_timeseries return from_timeseries ( self , other , stride , fftlength = fftlength , overlap = overlap , window = window , nproc = nproc ) | Calculate the coherence spectrogram between this TimeSeries and other . |
22,694 | def rms ( self , stride = 1 ) : stridesamp = int ( stride * self . sample_rate . value ) nsteps = int ( self . size // stridesamp ) data = numpy . zeros ( nsteps ) for step in range ( nsteps ) : idx = int ( stridesamp * step ) idx_end = idx + stridesamp stepseries = self [ idx : idx_end ] rms_ = numpy . sqrt ( numpy . mean ( numpy . abs ( stepseries . value ) ** 2 ) ) data [ step ] = rms_ name = '%s %.2f-second RMS' % ( self . name , stride ) return self . __class__ ( data , channel = self . channel , t0 = self . t0 , name = name , sample_rate = ( 1 / float ( stride ) ) ) | Calculate the root - mean - square value of this TimeSeries once per stride . |
22,695 | def demodulate ( self , f , stride = 1 , exp = False , deg = True ) : stridesamp = int ( stride * self . sample_rate . value ) nsteps = int ( self . size // stridesamp ) out = type ( self ) ( numpy . zeros ( nsteps , dtype = complex ) ) out . __array_finalize__ ( self ) out . sample_rate = 1 / float ( stride ) w = 2 * numpy . pi * f * self . dt . decompose ( ) . value for step in range ( nsteps ) : istart = int ( stridesamp * step ) iend = istart + stridesamp idx = numpy . arange ( istart , iend ) mixed = 2 * numpy . exp ( - 1j * w * idx ) * self . value [ idx ] out . value [ step ] = mixed . mean ( ) if exp : return out mag = out . abs ( ) phase = type ( mag ) ( numpy . angle ( out , deg = deg ) ) phase . __array_finalize__ ( out ) phase . override_unit ( 'deg' if deg else 'rad' ) return ( mag , phase ) | Compute the average magnitude and phase of this TimeSeries once per stride at a given frequency . |
22,696 | def taper ( self , side = 'leftright' ) : if side not in ( 'left' , 'right' , 'leftright' ) : raise ValueError ( "side must be one of 'left', 'right', " "or 'leftright'" ) out = self . copy ( ) nleft , nright = 0 , 0 mini , = signal . argrelmin ( out . value ) maxi , = signal . argrelmax ( out . value ) if 'left' in side : nleft = max ( mini [ 0 ] , maxi [ 0 ] ) nleft = min ( nleft , self . size / 2 ) if 'right' in side : nright = out . size - min ( mini [ - 1 ] , maxi [ - 1 ] ) nright = min ( nright , self . size / 2 ) out *= planck ( out . size , nleft = nleft , nright = nright ) return out | Taper the ends of this TimeSeries smoothly to zero . |
22,697 | def whiten ( self , fftlength = None , overlap = 0 , method = DEFAULT_FFT_METHOD , window = 'hanning' , detrend = 'constant' , asd = None , fduration = 2 , highpass = None , ** kwargs ) : fftlength = fftlength if fftlength else _fft_length_default ( self . dt ) if asd is None : asd = self . asd ( fftlength , overlap = overlap , method = method , window = window , ** kwargs ) asd = asd . interpolate ( 1. / self . duration . decompose ( ) . value ) ncorner = int ( highpass / asd . df . decompose ( ) . value ) if highpass else 0 ntaps = int ( ( fduration * self . sample_rate ) . decompose ( ) . value ) tdw = filter_design . fir_from_transfer ( 1 / asd . value , ntaps = ntaps , window = window , ncorner = ncorner ) in_ = self . copy ( ) . detrend ( detrend ) out = in_ . convolve ( tdw , window = window ) return out * numpy . sqrt ( 2 * in_ . dt . decompose ( ) . value ) | Whiten this TimeSeries using inverse spectrum truncation |
22,698 | def gate ( self , tzero = 1.0 , tpad = 0.5 , whiten = True , threshold = 50. , cluster_window = 0.5 , ** whiten_kwargs ) : try : from scipy . signal import find_peaks except ImportError as exc : exc . args = ( "Must have scipy>=1.1.0 to utilize this method." , ) raise data = self . whiten ( ** whiten_kwargs ) if whiten else self window_samples = cluster_window * data . sample_rate . value gates = find_peaks ( abs ( data . value ) , height = threshold , distance = window_samples ) [ 0 ] out = self . copy ( ) nzero = int ( abs ( tzero ) * self . sample_rate . value ) npad = int ( abs ( tpad ) * self . sample_rate . value ) half = nzero + npad ntotal = 2 * half for gate in gates : left_idx = max ( 0 , gate - half ) right_idx = min ( gate + half , len ( self . value ) - 1 ) left_idx_window = half - ( gate - left_idx ) right_idx_window = half + ( right_idx - gate ) window = 1 - planck ( ntotal , nleft = npad , nright = npad ) window = window [ left_idx_window : right_idx_window ] out [ left_idx : right_idx ] *= window return out | Removes high amplitude peaks from data using inverse Planck window . Points will be discovered automatically using a provided threshold and clustered within a provided time window . |
22,699 | def convolve ( self , fir , window = 'hanning' ) : pad = int ( numpy . ceil ( fir . size / 2 ) ) nfft = min ( 8 * fir . size , self . size ) in_ = self . copy ( ) window = signal . get_window ( window , fir . size ) in_ . value [ : pad ] *= window [ : pad ] in_ . value [ - pad : ] *= window [ - pad : ] if nfft >= self . size / 2 : conv = signal . fftconvolve ( in_ . value , fir , mode = 'same' ) else : nstep = nfft - 2 * pad conv = numpy . zeros ( self . size ) conv [ : nfft - pad ] = signal . fftconvolve ( in_ . value [ : nfft ] , fir , mode = 'same' ) [ : nfft - pad ] k = nfft - pad while k < self . size - nfft + pad : yk = signal . fftconvolve ( in_ . value [ k - pad : k + nstep + pad ] , fir , mode = 'same' ) conv [ k : k + yk . size - 2 * pad ] = yk [ pad : - pad ] k += nstep conv [ - nfft + pad : ] = signal . fftconvolve ( in_ . value [ - nfft : ] , fir , mode = 'same' ) [ - nfft + pad : ] out = type ( self ) ( conv ) out . __array_finalize__ ( self ) return out | Convolve this TimeSeries with an FIR filter using the overlap - save method |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.