idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
22,400
def _overlapping(files):
    """Return `True` if any two files in ``files`` span overlapping GPS times."""
    seen = set()
    for name in files:
        span = file_segment(name)
        # compare against every span collected so far
        if any(span.intersects(other) for other in seen):
            return True
        seen.add(span)
    return False
Quick check of whether a list of files contains any overlapping files
22,401
def fetch_losc_data(detector, start, end, cls=TimeSeries, **kwargs):
    """Fetch LOSC (GWOSC) data for ``detector`` over the GPS span
    ``[start, end)``, returning a single ``cls`` instance.

    Each hosting URL is downloaded in turn, cropped to the requested
    span, and appended into one output series.
    """
    start = to_gps(start)
    end = to_gps(end)
    span = Segment(start, end)
    kwargs.update({
        'start': start,
        'end': end,
    })
    # separate URL-discovery keywords from the read keywords
    url_kw = {key: kwargs.pop(key) for key in GWOSC_LOCATE_KWARGS if
              key in kwargs}
    if 'sample_rate' in url_kw:
        # normalise sample_rate to a plain number of Hertz
        url_kw['sample_rate'] = Quantity(url_kw['sample_rate'], 'Hz').value
    # find URLs covering the integer-GPS span
    cache = get_urls(detector, int(start), int(ceil(end)), **url_kw)
    # if multiple overlapping files were returned, prefer the single
    # shortest file that fully covers the request
    if len(cache) and _overlapping(cache):
        cache.sort(key=lambda x: abs(file_segment(x)))
        for url in cache:
            a, b = file_segment(url)
            if a <= start and b >= end:
                cache = [url]
                break
    if kwargs.get('verbose', False):
        host = urlparse(cache[0]).netloc
        print("Fetched {0} URLs from {1} for [{2} .. {3}))".format(
            len(cache), host, int(start), int(ceil(end))))
    is_gwf = cache[0].endswith('.gwf')
    # GWF reads need a channel name as a positional argument
    if is_gwf and len(cache):
        args = (kwargs.pop('channel', None),)
    else:
        args = ()
    # read each URL in turn, cropping to the requested span
    out = None
    kwargs['cls'] = cls
    for url in cache:
        keep = file_segment(url) & span
        new = _fetch_losc_data_file(url, *args, **kwargs).crop(
            *keep, copy=False)
        # after the first GWF read, reuse the discovered channel name
        if is_gwf and (not args or args[0] is None):
            args = (new.name,)
        if out is None:
            out = new.copy()
        else:
            out.append(new, resize=True)
    return out
Fetch LOSC data for a given detector
22,402
def read_losc_hdf5(h5f, path='strain/Strain',
                   start=None, end=None, copy=False):
    """Read a `TimeSeries` from a LOSC-format HDF file.

    The sample rate and epoch are reconstructed from the dataset's
    ``Xstart``/``Xspacing``/``Xunits`` attributes, and the result is
    cropped to ``[start, end)``.
    """
    dataset = io_hdf5.find_dataset(h5f, path)
    # read the raw data array
    nddata = dataset[()]
    # read metadata from the dataset attributes
    xunit = parse_unit(dataset.attrs['Xunits'])
    epoch = dataset.attrs['Xstart']
    dt = Quantity(dataset.attrs['Xspacing'], xunit)
    unit = dataset.attrs['Yunits']
    # build the series, naming it after the last path component
    return TimeSeries(nddata, epoch=epoch, sample_rate=(1 / dt).to('Hertz'),
                      unit=unit, name=path.rsplit('/', 1)[1],
                      copy=copy).crop(start=start, end=end)
Read a TimeSeries from a LOSC - format HDF file .
22,403
def read_losc_hdf5_state(f, path='quality/simple', start=None, end=None,
                         copy=False):
    """Read a `StateVector` from a LOSC-format HDF file.

    The bit mask values come from ``<path>/DQmask`` and the bit labels
    from ``<path>/DQDescriptions``.
    """
    # find the mask and description datasets
    dataset = io_hdf5.find_dataset(f, '%s/DQmask' % path)
    maskset = io_hdf5.find_dataset(f, '%s/DQDescriptions' % path)
    # read data, decoding bit labels to str
    nddata = dataset[()]
    bits = [bytes.decode(bytes(b), 'utf-8') for b in maskset[()]]
    # read metadata
    epoch = dataset.attrs['Xstart']
    try:
        dt = dataset.attrs['Xspacing']
    except KeyError:
        # no spacing recorded: presume 1-second samples
        dt = Quantity(1, 's')
    else:
        xunit = parse_unit(dataset.attrs['Xunits'])
        dt = Quantity(dt, xunit)
    return StateVector(nddata, bits=bits, t0=epoch, name='Data quality',
                       dx=dt, copy=copy).crop(start=start, end=end)
Read a StateVector from a LOSC - format HDF file .
22,404
def _gwf_channel(path, series_class=TimeSeries, verbose=False):
    """Find the right channel name for a LOSC GWF file.

    Exactly one channel in the file must match the expected pattern
    for the requested series type.
    """
    names = list(io_gwf.iter_channel_names(file_path(path)))
    if issubclass(series_class, StateVector):
        pattern = DQMASK_CHANNEL_REGEX
    else:
        pattern = STRAIN_CHANNEL_REGEX
    matches = [name for name in names if pattern.match(name)]
    # unpacking enforces that exactly one channel matched
    found, = matches
    if verbose:
        print("Using channel {0!r}".format(found))
    return found
Find the right channel name for a LOSC GWF file
22,405
def from_segwizard(source, gpstype=LIGOTimeGPS, strict=True):
    """Read segments from a segwizard-format file into a `SegmentList`.

    ``source`` may be a file path or an open file; the column layout is
    inferred from the first non-comment line.
    """
    # read from a filename by recursing with the open file
    if isinstance(source, string_types):
        with open(source, 'r') as fobj:
            return from_segwizard(fobj, gpstype=gpstype, strict=strict)
    # read from an open file
    out = SegmentList()
    fmt_pat = None
    for line in source:
        # skip comment lines
        if line.startswith(('#', ';')):
            continue
        # determine the column format from the first data line
        if fmt_pat is None:
            fmt_pat = _line_format(line)
        # parse the line; the last three tokens are (start, end, duration)
        tokens, = fmt_pat.findall(line)
        out.append(_format_segment(tokens[-3:], gpstype=gpstype,
                                   strict=strict))
    return out
Read segments from a segwizard format file into a SegmentList
22,406
def _line_format(line):
    """Determine the column format pattern for a line in an ASCII
    segment file, trying four-, three-, then two-column layouts."""
    matched = next(
        (pat for pat in (FOUR_COL_REGEX, THREE_COL_REGEX, TWO_COL_REGEX)
         if pat.match(line)),
        None,
    )
    if matched is None:
        raise ValueError(
            "unable to parse segment from line {!r}".format(line))
    return matched
Determine the column format pattern for a line in an ASCII segment file .
22,407
def _format_segment(tokens, strict=True, gpstype=LIGOTimeGPS):
    """Format a list of tokens parsed from an ASCII file into a segment.

    A three-token row is treated as ``(start, end, duration)`` and, when
    ``strict``, the duration is checked against ``end - start``.
    """
    if len(tokens) != 3:
        # not (start, end, duration): just map everything through gpstype
        return Segment(*map(gpstype, tokens))
    start, end, dur = tokens
    seg = Segment(gpstype(start), gpstype(end))
    if strict and not float(abs(seg)) == float(dur):
        raise ValueError(
            "segment {0!r} has incorrect duration {1!r}".format(seg, dur),
        )
    return seg
Format a list of tokens parsed from an ASCII file into a segment .
22,408
def to_segwizard(segs, target, header=True, coltype=LIGOTimeGPS):
    """Write the given `SegmentList` to a file in SegWizard format.

    ``target`` may be a file path or an open file object.
    """
    # accept a path: open it and recurse with the open file object
    if isinstance(target, string_types):
        with open(target, 'w') as fileobj:
            return to_segwizard(segs, fileobj, header=header,
                                coltype=coltype)
    if header:
        print('# seg\tstart\tstop\tduration', file=target)
    for row, segment in enumerate(segs):
        begin = coltype(segment[0])
        stop = coltype(segment[1])
        duration = float(stop - begin)
        print(
            '\t'.join(map(str, (row, begin, stop, duration))),
            file=target,
        )
Write the given SegmentList to a file in SegWizard format .
22,409
def gopen(name, *args, **kwargs):
    """Open a file, handling optional gzipping.

    If ``name`` ends with ``'.gz'``, or the file starts with the gzip
    magic signature, the file is opened with `gzip.open`, otherwise the
    builtin `open` is used.

    Parameters
    ----------
    name : `str`
        path of the file to open
    *args, **kwargs
        other arguments to pass to either `open` or `gzip.open`
    """
    # gzip suffix: no need to sniff the content
    if name.endswith('.gz'):
        return gzip.open(name, *args, **kwargs)
    # sniff the magic bytes with a dedicated BINARY handle, so the
    # comparison against the (bytes) GZIP_SIGNATURE works even when the
    # caller requested a text mode, and the probe handle never leaks
    try:
        with open(name, 'rb') as sniff:
            sig = sniff.read(3)
    except OSError:
        # file may not exist yet (e.g. opening for write)
        sig = None
    if sig == GZIP_SIGNATURE:
        return gzip.open(name, *args, **kwargs)
    return open(name, *args, **kwargs)
Open a file handling optional gzipping
22,410
def file_list(flist):
    """Parse a number of possible input types into a list of filepaths.

    Accepts a cache-file path, a comma-separated string of names, a
    list/tuple of file-like inputs, or a single file-like input.
    """
    if isinstance(flist, string_types):
        # cache-format files get parsed by the dedicated reader
        if flist.endswith(('.cache', '.lcf', '.ffl')):
            from .cache import read_cache
            return read_cache(flist)
        # otherwise presume a comma-separated list of names
        return flist.split(',')
    if isinstance(flist, (list, tuple)):
        return list(map(file_path, flist))
    try:
        return [file_path(flist)]
    except ValueError as exc:
        exc.args = ("Could not parse input {!r} as one or more "
                    "file-like objects".format(flist),)
        raise
Parse a number of possible input types into a list of filepaths .
22,411
def file_path(fobj):
    """Determine the path of a file.

    Handles ``file:`` URLs, plain path strings, named file objects,
    and anything exposing a ``.path`` attribute.
    """
    if isinstance(fobj, string_types):
        if fobj.startswith("file:"):
            # strip the file: scheme
            return urlparse(fobj).path
        return fobj
    if isinstance(fobj, FILE_LIKE) and hasattr(fobj, "name"):
        return fobj.name
    try:
        return fobj.path
    except AttributeError:
        raise ValueError("Cannot parse file name for {!r}".format(fobj))
Determine the path of a file .
22,412
def process_in_out_queues(func, q_in, q_out):
    """Drain ``q_in``, applying ``func`` to each payload and posting results.

    Runs until a ``(None, ...)`` sentinel arrives; each result is posted
    to ``q_out`` as ``(idx, func(arg))`` so the caller can restore order.
    """
    while True:
        idx, payload = q_in.get()
        if idx is None:  # sentinel: stop processing
            return
        q_out.put((idx, func(payload)))
Iterate through an input queue, calling func on each item and putting the result on the output queue
22,413
def multiprocess_with_queues(nproc, func, inputs, verbose=False,
                             **progress_kw):
    """Map ``func`` over ``inputs`` using up to ``nproc`` processes.

    Results are returned in input order; any exception raised by a
    worker is re-raised in the parent after all workers finish.
    """
    # multiprocessing via Queue is broken on Windows: fall back to serial
    if nproc != 1 and os.name == 'nt':
        warnings.warn(
            "multiprocessing is currently not supported on Windows, see "
            "https://github.com/gwpy/gwpy/issues/880, will continue with "
            "serial procesing (nproc=1)")
        nproc = 1
    if progress_kw.pop('raise_exceptions', None) is not None:
        warnings.warn("the `raise_exceptions` keyword to "
                      "multiprocess_with_queues is deprecated, and will be "
                      "removed in a future release, all exceptions will be "
                      "raised if they occur", DeprecationWarning)
    # set up a progress bar when verbose output was requested
    if bool(verbose):
        if not isinstance(verbose, bool):
            # a non-bool truthy value doubles as the progress-bar label
            progress_kw['desc'] = str(verbose)
        if isinstance(inputs, (list, tuple)):
            progress_kw.setdefault('total', len(inputs))
        pbar = progress_bar(**progress_kw)
    else:
        pbar = None

    def _inner(x):
        # wrap func so worker exceptions come back as values
        try:
            return func(x)
        except Exception as exc:  # noqa: broad catch is deliberate here
            if nproc == 1:
                raise
            return exc
        finally:
            # in serial mode the progress bar is updated here
            if pbar and nproc == 1:
                pbar.update()

    # shortcut: serial processing
    if nproc == 1:
        return list(map(_inner, inputs))

    # create communication queues
    q_in = Queue()
    q_out = Queue()

    # start worker processes
    proclist = [Process(target=process_in_out_queues,
                        args=(_inner, q_in, q_out)) for _ in range(nproc)]
    for proc in proclist:
        proc.daemon = True
        proc.start()

    # populate the input queue, indexing each item to restore order later
    sent = [q_in.put(x, block=False) for x in enumerate(inputs)]
    # one sentinel per worker so every process terminates
    for _ in range(nproc):
        q_in.put((None, None))

    # collect results (arrival order is arbitrary)
    res = []
    for _ in range(len(sent)):
        x = q_out.get()
        if pbar:
            pbar.update()
        res.append(x)

    # wait for workers to finish
    for proc in proclist:
        proc.join()

    if pbar:
        pbar.close()

    # restore input order and strip the indices
    results = [out for _, out in sorted(res, key=itemgetter(0))]

    # re-raise the first exception returned by a worker, if any
    for res in results:
        if isinstance(res, Exception):
            raise res

    return results
Map a function over a list of inputs using multiprocessing
22,414
def epoch(self):
    """GPS epoch associated with these data.

    Returns `None` if no epoch has been set.
    """
    try:
        gps = self._epoch
    except AttributeError:
        # no epoch attribute yet: initialise it to None
        self._epoch = None
        return self._epoch
    if gps is None:
        return None
    # split into integer and fractional seconds to keep precision
    frac, whole = modf(gps)
    return Time(whole, frac, format='gps', scale='utc')
GPS epoch associated with these data
22,415
def override_unit(self, unit, parse_strict='raise'):
    """Forcefully reset the unit of these data.

    The ``unit`` is parsed (per ``parse_strict``) and stored directly,
    without scaling the data values themselves.
    """
    self._unit = parse_unit(unit, parse_strict=parse_strict)
Forcefully reset the unit of these data
22,416
def flatten(self, order='C'):
    """Return a copy of the array collapsed into one dimension.

    The result is viewed as a plain `Quantity`, discarding the extra
    array metadata (name, indices, ...).

    Parameters
    ----------
    order : `str`, optional
        memory layout order for the flattening ('C', 'F', 'A', or 'K').
    """
    return super(Array, self).flatten(order=order).view(Quantity)
Return a copy of the array collapsed into one dimension .
22,417
def from_timeseries(ts1, ts2, stride, fftlength=None, overlap=None,
                    window=None, nproc=1, **kwargs):
    """Calculate the coherence spectrogram between two `TimeSeries`.

    The computation is optionally distributed over ``nproc`` processes,
    each handling a contiguous chunk of the inputs; the partial
    spectrograms are then sorted by epoch and joined.
    """
    # format FFT parameters
    if fftlength is None:
        fftlength = stride / 2.

    nsteps = int(ts1.size // (stride * ts1.sample_rate.value))
    nproc = min(nsteps, nproc)

    # single-process shortcut
    if nsteps == 0 or nproc == 1:
        return _from_timeseries(ts1, ts2, stride, fftlength=fftlength,
                                overlap=overlap, window=window, **kwargs)

    # wrap spectrogram generator so errors come back through the queue
    def _specgram(queue_, tsa, tsb):
        try:
            queue_.put(_from_timeseries(tsa, tsb, stride,
                                        fftlength=fftlength,
                                        overlap=overlap, window=window,
                                        **kwargs))
        except Exception as exc:
            queue_.put(exc)

    # build process list
    stepperproc = int(ceil(nsteps / nproc))
    # cast to int: sample_rate.value is a float, and slice indices
    # must be integers
    nsamp = [int(stepperproc * ts.sample_rate.value * stride)
             for ts in (ts1, ts2)]

    queue = ProcessQueue(nproc)
    processlist = []
    for i in range(nproc):
        process = Process(target=_specgram,
                          args=(queue,
                                ts1[i * nsamp[0]:(i + 1) * nsamp[0]],
                                ts2[i * nsamp[1]:(i + 1) * nsamp[1]]))
        process.daemon = True
        processlist.append(process)
        process.start()
        # stop spawning once the chunks cover all of ts1
        if ((i + 1) * nsamp[0]) >= ts1.size:
            break

    # collect results, re-raising any worker error
    data = []
    for process in processlist:
        result = queue.get()
        if isinstance(result, Exception):
            raise result
        data.append(result)

    # wait for workers
    for process in processlist:
        process.join()

    # combine the chunks into a single spectrogram, ordered by epoch
    out = SpectrogramList(*data)
    out.sort(key=lambda spec: spec.epoch.gps)
    return out.join()
Calculate the coherence Spectrogram between two TimeSeries .
22,418
def read_channel_list_file(*source):
    """Read a `ChannelList` from one or more Channel List Files (CLF).

    Each INI section is a channel group; its ``channels`` option lists
    one channel per line, parsed against ``CHANNEL_DEFINITION``.
    """
    # read file(s) into a single config
    config = configparser.ConfigParser(dict_type=OrderedDict)
    source = file_list(source)
    success_ = config.read(*source)
    if len(success_) != len(source):
        raise IOError("Failed to read one or more CLF files")
    # create the output channel list
    out = ChannelList()
    out.source = source
    append = out.append
    # loop over all groups and channels
    for group in config.sections():
        params = OrderedDict(config.items(group))
        channels = params.pop('channels').strip('\n').split('\n')
        # parse the group-wide frequency range, if given
        if 'flow' in params or 'fhigh' in params:
            low = params.pop('flow', 0)
            high = params.pop('fhigh', inf)
            if isinstance(high, string_types) and high.lower() == 'nyquist':
                high = inf
            frange = float(low), float(high)
        else:
            frange = None
        for channel in channels:
            try:
                match = CHANNEL_DEFINITION.match(channel).groupdict()
            except AttributeError as exc:
                # .match() returned None: entry didn't parse
                exc.args = ('Cannot parse %r as channel list entry'
                            % channel,)
                raise
            # remove empty matches, then apply defaults
            match = dict((k, v) for k, v in match.items() if v is not None)
            match.setdefault('safe', 'safe')
            match.setdefault('fidelity', 'clean')
            # create the Channel and attach its params
            safe = match.get('safe', 'safe').lower() != 'unsafe'
            channel = Channel(match.pop('name'), frequency_range=frange,
                              safe=safe,
                              sample_rate=match.pop('sample_rate'))
            channel.params = params.copy()
            channel.params.update(match)
            channel.group = group
            # promote selected params to Channel attributes
            for key in ['frametype']:
                setattr(channel, key, channel.params.pop(key, None))
            append(channel)
    return out
Read a ~gwpy . detector . ChannelList from a Channel List File
22,419
def write_channel_list_file(channels, fobj):
    """Write a `ChannelList` to an INI-format channel list file.

    ``fobj`` may be an open file or a path; each channel's group
    becomes an INI section, with the channel entries appended to the
    section's ``channels`` option.
    """
    # accept a path: open it and recurse with the open file object
    if not isinstance(fobj, FILE_LIKE):
        with open(fobj, "w") as fobj:
            return write_channel_list_file(channels, fobj)
    out = configparser.ConfigParser(dict_type=OrderedDict)
    for channel in channels:
        group = channel.group
        if not out.has_section(group):
            out.add_section(group)
        for param, value in channel.params.items():
            out.set(group, param, value)
        # format the entry as '<name> [<sample_rate>] <safe> <fidelity>'
        if channel.sample_rate is not None:
            entry = '%s %s' % (str(channel),
                               str(channel.sample_rate.to('Hz').value))
        else:
            entry = str(channel)
        entry += ' %s' % channel.params.get('safe', 'safe')
        entry += ' %s' % channel.params.get('fidelity', 'clean')
        # append the entry to the group's 'channels' option
        try:
            clist = out.get(group, 'channels')
        except configparser.NoOptionError:
            out.set(group, 'channels', '\n%s' % entry)
        else:
            out.set(group, 'channels', clist + '\n%s' % entry)
    out.write(fobj)
Write a ~gwpy . detector . ChannelList to a INI - format channel list file
22,420
def register_method(func, name=None, deprecated=False):
    """Register a method of calculating an average spectrogram.

    Parameters
    ----------
    func : `callable`
        the method to register
    name : `str`, optional
        registration name, defaults to ``func.__name__``
    deprecated : `bool`, optional
        if `True`, wrap ``func`` to emit a deprecation warning on use

    Returns
    -------
    name : `str`
        the formatted name under which the method was registered
    """
    # resolve the name BEFORE building the deprecation message;
    # previously `name.split` was reached with `name=None` when only
    # `deprecated=True` was given, raising AttributeError
    if name is None:
        name = func.__name__
    if deprecated:
        func = deprecated_function(
            func,
            "the {0!r} PSD methods is deprecated, and will be removed "
            "in a future release, please consider using {1!r} instead".format(
                name, name.split('-', 1)[1],
            ),
        )
    name = _format_name(name)
    METHODS[name] = func
    return name
Register a method of calculating an average spectrogram .
22,421
def get_method(name):
    """Return the PSD method registered with the given name."""
    # canonicalise the requested name before lookup
    key = _format_name(name)
    try:
        return METHODS[key]
    except KeyError as exc:
        exc.args = ("no PSD method registered with name {0!r}".format(key),)
        raise
Return the PSD method registered with the given name .
22,422
def inspiral_range_psd(psd, snr=8, mass1=1.4, mass2=1.4, horizon=False):
    """Compute the inspiral sensitive-distance PSD from a GW strain PSD.

    Frequencies at and above the ISCO of the binary are excluded; the
    result is returned in Mpc^2 / Hz.
    """
    # compute component and chirp masses in SI units
    mass1 = units.Quantity(mass1, 'solMass').to('kg')
    mass2 = units.Quantity(mass2, 'solMass').to('kg')
    mtotal = mass1 + mass2
    mchirp = (mass1 * mass2) ** (3 / 5.) / mtotal ** (1 / 5.)

    # compute the ISCO frequency for this total mass
    fisco = (constants.c ** 3 / (constants.G * 6 ** 1.5 * pi *
                                 mtotal)).to('Hz')

    # calculate the integral pre-factor
    # NOTE(review): the 1.77 factor is presumably the angle-average
    # correction -- confirm against the range-derivation reference
    prefactor = (
        (1.77 ** 2 * 5 * constants.c ** (1 / 3.) *
         (mchirp * constants.G / constants.c ** 2) ** (5 / 3.)) /
        (96 * pi ** (4 / 3.) * snr ** 2)
    )

    # form the integrand and restrict to frequencies below ISCO
    integrand = 1 / psd * psd.frequencies ** (-7 / 3.) * prefactor
    integrand = integrand[psd.frequencies.value < fisco.value]

    # zero the DC bin, which would otherwise diverge
    if integrand.f0.value == 0.0:
        integrand[0] = 0.0
    # horizon distance scaling (optimally-oriented source)
    if horizon:
        integrand *= 2.26 ** 2
    return integrand.to('Mpc^2 / Hz')
Compute the inspiral sensitive distance PSD from a GW strain PSD
22,423
def inspiral_range(psd, snr=8, mass1=1.4, mass2=1.4, fmin=None, fmax=None,
                   horizon=False):
    """Calculate the inspiral sensitive distance from a GW strain PSD.

    Integrates `inspiral_range_psd` over ``[fmin, fmax)`` and returns
    the square-root of the result, in Mpc.
    """
    mass1 = units.Quantity(mass1, 'solMass').to('kg')
    mass2 = units.Quantity(mass2, 'solMass').to('kg')
    mtotal = mass1 + mass2

    # compute the ISCO frequency, the hard upper limit for the integral
    fisco = (constants.c ** 3 / (constants.G * 6 ** 1.5 * pi *
                                 mtotal)).to('Hz')

    # format the frequency limits
    fmax = units.Quantity(fmax or fisco, 'Hz')
    if fmax > fisco:
        warnings.warn("Upper frequency bound greater than %s-%s ISCO "
                      "frequency of %s, using ISCO" % (mass1, mass2, fisco))
        fmax = fisco
    if fmin is None:
        fmin = psd.df  # avoid using 0 as the lower limit
    fmin = units.Quantity(fmin, 'Hz')

    # integrate the range PSD over the restricted band
    f = psd.frequencies.to('Hz')
    condition = (f >= fmin) & (f < fmax)
    integrand = inspiral_range_psd(psd[condition], snr=snr, mass1=mass1,
                                   mass2=mass2, horizon=horizon)
    result = units.Quantity(
        integrate.trapz(integrand.value, f.value[condition]),
        unit=integrand.unit * units.Hertz)

    return (result ** (1 / 2.)).to('Mpc')
Calculate the inspiral sensitive distance from a GW strain PSD
22,424
def burst_range_spectrum(psd, snr=8, energy=1e-2):
    """Calculate the frequency-dependent GW burst range from a strain PSD.

    Returns the range spectrum in Mpc, with any DC bin zeroed.
    """
    # calculate the frequency-dependent range amplitude
    a = (constants.G * energy * constants.M_sun * 0.4 /
         (pi ** 2 * constants.c)) ** (1 / 2.)
    dspec = psd ** (-1 / 2.) * a / (snr * psd.frequencies)

    # convert to the output unit
    rspec = dspec.to('Mpc')

    # the DC bin always has zero range
    if rspec.f0.value == 0.0:
        rspec[0] = 0.0

    return rspec
Calculate the frequency - dependent GW burst range from a strain PSD
22,425
def burst_range(psd, snr=8, energy=1e-2, fmin=100, fmax=500):
    """Calculate the integrated GRB-like GW burst range from a strain PSD.

    Averages the cubed range spectrum over ``[fmin, fmax)`` and returns
    the cube root, in Mpc.
    """
    freqs = psd.frequencies.value
    # restrict the integral band, falling back to the PSD extent
    if not fmin:
        fmin = psd.f0
    if not fmax:
        fmax = psd.span[1]
    condition = (freqs >= fmin) & (freqs < fmax)
    # integrate the cubed range spectrum
    integrand = burst_range_spectrum(
        psd[condition], snr=snr, energy=energy) ** 3
    result = integrate.trapz(integrand.value, freqs[condition])
    # normalise by the bandwidth and take the cube root
    r = units.Quantity(result / (fmax - fmin),
                       unit=integrand.unit) ** (1 / 3.)
    return r.to('Mpc')
Calculate the integrated GRB - like GW burst range from a strain PSD
22,426
def format_norm(kwargs, current=None):
    """Format a `~matplotlib.colors.Normalize` from a set of kwargs.

    Pops ``norm``, ``vmin``, ``vmax``, ``clim``, and ``clip`` from
    ``kwargs`` and returns ``(norm, kwargs)``.
    """
    norm = kwargs.pop('norm', current) or 'linear'
    vmin = kwargs.pop('vmin', None)
    vmax = kwargs.pop('vmax', None)
    clim = kwargs.pop('clim', (vmin, vmax)) or (None, None)
    clip = kwargs.pop('clip', None)

    # resolve string shortcuts into Normalize instances
    if norm == 'linear':
        norm = colors.Normalize()
    elif norm == 'log':
        norm = colors.LogNorm()
    elif not isinstance(norm, colors.Normalize):
        raise ValueError("unrecognised value for norm {!r}".format(norm))

    # apply explicit limits/clip onto the norm object
    updates = {'vmin': clim[0], 'vmax': clim[1], 'clip': clip}
    for attr, value in updates.items():
        if value is not None:
            setattr(norm, attr, value)

    return norm, kwargs
Format a ~matplotlib . colors . Normalize from a set of kwargs
22,427
def identify_gwf(origin, filepath, fileobj, *args, **kwargs):
    """Identify a filename or file object as GWF.

    Returns `True` on a positive identification; otherwise falls
    through (returning `False` or, implicitly, `None`).
    """
    # sniff the open file descriptor, if given
    if fileobj is not None:
        loc = fileobj.tell()
        fileobj.seek(0)
        try:
            if fileobj.read(4) == GWF_SIGNATURE:
                return True
        finally:
            # always restore the stream position
            fileobj.seek(loc)
    if filepath is not None:
        # match by file extension
        if filepath.endswith('.gwf'):
            return True
        # or by the first entry of a cache file
        if filepath.endswith(('.lcf', '.cache')):
            try:
                cache = read_cache(filepath)
            except IOError:
                return False
            else:
                if cache[0].path.endswith('.gwf'):
                    return True
Identify a filename or file object as GWF
22,428
def open_gwf(filename, mode='r'):
    """Open a filename for reading or writing GWF-format data.

    Returns a frameCPP input or output stream according to ``mode``.
    """
    if mode not in ('r', 'w'):
        raise ValueError("mode must be either 'r' or 'w'")
    from LDAStools import frameCPP
    # strip any file:// prefix from the name
    target = str(urlparse(filename).path)
    if mode == 'w':
        return frameCPP.OFrameFStream(target)
    return frameCPP.IFrameFStream(target)
Open a filename for reading or writing GWF format data
22,429
def write_frames(filename, frames, compression=257, compression_level=6):
    """Write a list of frame objects to a GWF file."""
    from LDAStools import frameCPP
    # open an output stream
    stream = open_gwf(filename, 'w')
    # allow a single frame to be passed bare
    if isinstance(frames, frameCPP.FrameH):
        frames = [frames]
    # write each frame with the requested compression settings
    for fr in frames:
        stream.WriteFrame(fr, compression, compression_level)
Write a list of frame objects to a file
22,430
def get_channel_type(channel, framefile):
    """Find the channel type in a given GWF file.

    Raises `ValueError` if the channel is not in the file's TOC.
    """
    target = str(channel)
    for name, ctype in _iter_channels(framefile):
        if name == target:
            return ctype
    raise ValueError("%s not found in table-of-contents for %s"
                     % (target, framefile))
Find the channel type in a given GWF file
22,431
def channel_in_frame(channel, framefile):
    """Determine whether a channel is stored in this framefile."""
    target = str(channel)
    return any(name == target
               for name in iter_channel_names(framefile))
Determine whether a channel is stored in this framefile
22,432
def _iter_channels(framefile):
    """Yield the (name, type) of each channel in a GWF file TOC."""
    from LDAStools import frameCPP
    # accept either an open input stream or a file path
    if not isinstance(framefile, frameCPP.IFrameFStream):
        framefile = open_gwf(framefile, 'r')
    toc = framefile.GetTOC()
    for typename in ('Sim', 'Proc', 'ADC'):
        label = typename.lower()
        getter = getattr(toc, 'Get{0}'.format(typename))
        for name in getter():
            yield name, label
Yields the name and type of each channel in a GWF file TOC
22,433
def data_segments(paths, channel, warn=True):
    """Return the coalesced segments containing data for a channel."""
    out = SegmentList()
    for target in paths:
        out.extend(_gwf_channel_segments(target, channel, warn=warn))
    return out.coalesce()
Returns the segments containing data for a channel
22,434
def _gwf_channel_segments(path, channel, warn=True):
    """Yield the segments containing data for ``channel`` in this GWF path."""
    stream = open_gwf(path)
    # read frame times from the table of contents
    toc = stream.GetTOC()
    secs = toc.GetGTimeS()
    nano = toc.GetGTimeN()
    dur = toc.GetDt()

    # one reader per channel family (proc, sim, adc)
    readers = [getattr(stream, 'ReadFr{0}Data'.format(type_.title()))
               for type_ in ("proc", "sim", "adc")]

    # for each frame, try to read this channel with each reader
    for i, (s, ns, dt) in enumerate(zip(secs, nano, dur)):
        for read in readers:
            try:
                read(i, channel)
            except (IndexError, ValueError):
                continue
            # success: restrict subsequent frames to this reader only
            readers = [read]
            epoch = LIGOTimeGPS(s, ns)
            yield Segment(epoch, epoch + dt)
            break
        else:  # no reader found the channel in this frame
            if warn:
                warnings.warn(
                    "{0!r} not found in frame {1} of {2}".format(
                        channel, i, path),
                )
Yields the segments containing data for channel in this GWF path
22,435
def fdfilter(data, *filt, **kwargs):
    """Filter a frequency-domain data object.

    The filter's magnitude response is evaluated at the data's
    frequencies and applied multiplicatively, either in place or on a
    new object.
    """
    # parse keyword arguments
    inplace = kwargs.pop('inplace', False)
    analog = kwargs.pop('analog', False)
    fs = kwargs.pop('sample_rate', None)
    if kwargs:
        raise TypeError("filter() got an unexpected keyword argument '%s'"
                        % list(kwargs.keys())[0])

    # parse filter; default the sample rate from the data extent
    if fs is None:
        fs = 2 * (data.shape[-1] * data.df).to('Hz').value
    form, filt = parse_filter(filt, analog=analog, sample_rate=fs)
    lti = signal.lti(*filt)

    # generate the magnitude frequency response (NaNs zeroed)
    freqs = data.frequencies.value.copy()
    fresp = numpy.nan_to_num(abs(lti.freqresp(w=freqs)[1]))

    # apply the response to the array
    if inplace:
        data *= fresp
        return data
    new = data * fresp
    return new
Filter a frequency - domain data object
22,436
def main(args=None):
    """Parse command-line arguments, tconvert the input, and print the result."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("-V", "--version", action="version",
                        version=__version__,
                        help="show version number and exit")
    parser.add_argument("-l", "--local", action="store_true", default=False,
                        help="print datetimes in local timezone")
    parser.add_argument("-f", "--format", type=str, action="store",
                        default=r"%Y-%m-%d %H:%M:%S.%f %Z",
                        help="output datetime format (default: %(default)r)")
    parser.add_argument("input", help="GPS or datetime string to convert",
                        nargs="*")
    args = parser.parse_args(args)
    # join all positional arguments into one input string
    input_ = " ".join(args.input)
    output = tconvert(input_)
    if isinstance(output, datetime.datetime):
        # tconvert returns UTC: attach tzinfo explicitly before
        # optionally converting to the local timezone
        output = output.replace(tzinfo=tz.tzutc())
        if args.local:
            output = output.astimezone(tz.tzlocal())
        print(output.strftime(args.format))
    else:
        print(output)
Parse command-line arguments, tconvert the inputs, and print the result
22,437
def timer(func):
    """Time a method and print its duration after it returns."""
    label = func.__name__

    @wraps(func)
    def timed_func(self, *args, **kwargs):
        started = time.time()
        result = func(self, *args, **kwargs)
        elapsed = time.time() - started
        # report at log level 2
        self.log(2, '{0} took {1:.1f} sec'.format(label, elapsed))
        return result

    return timed_func
Time a method and print its duration after return
22,438
def to_float(unit):
    """Factory to build a converter from quantity string to float.

    Parameters
    ----------
    unit : `str` or unit
        the unit in which the returned converter expresses its result

    Returns
    -------
    converter : `callable`
        a function converting its input to a `float` in ``unit``
    """
    def converter(x):
        """Convert the input to a `float` in %s"""
        return Quantity(x, unit).value

    # fill the unit into the docstring; the inner function MUST carry a
    # docstring, otherwise `__doc__` is None and the in-place `%=`
    # below raises TypeError
    converter.__doc__ %= str(unit)
    return converter
Factory to build a converter from quantity string to float
22,439
def log_norm(func):
    """Wrap ``func`` to handle custom gwpy keywords for LogNorm colouring."""
    @wraps(func)
    def decorated_func(*args, **kwargs):
        # pull colour-normalisation keywords out and pass the norm through
        norm, remaining = format_norm(kwargs)
        remaining['norm'] = norm
        return func(*args, **remaining)
    return decorated_func
Wrap func to handle custom gwpy keywords for a LogNorm colouring
22,440
def xlim_as_gps(func):
    """Wrap ``func`` to pass limit inputs through `gwpy.time.to_gps`."""
    @wraps(func)
    def wrapped_func(self, left=None, right=None, **kw):
        # support a single (left, right) pair as the first argument
        if right is None and numpy.iterable(left):
            left, right = left
        kw['left'] = left
        kw['right'] = right
        gpsscale = self.get_xscale() in GPS_SCALES
        for key in ('left', 'right'):
            if gpsscale:
                try:
                    # convert date/time-like input into a GPS number,
                    # via str to preserve precision in longdouble
                    kw[key] = numpy.longdouble(str(to_gps(kw[key])))
                except TypeError:
                    # not a convertible value: pass through as given
                    pass
        return func(self, **kw)
    return wrapped_func
Wrap func to pass limit inputs through gwpy.time.to_gps
22,441
def restore_grid(func):
    """Wrap ``func`` to preserve the Axes' current grid settings."""
    @wraps(func)
    def wrapped_func(self, *args, **kwargs):
        # record the current grid state; NOTE(review): this reads
        # private matplotlib attributes and is therefore sensitive to
        # the installed matplotlib version
        grid = (self.xaxis._gridOnMinor, self.xaxis._gridOnMajor,
                self.yaxis._gridOnMinor, self.yaxis._gridOnMajor)
        try:
            return func(self, *args, **kwargs)
        finally:
            # restore the recorded grid state, even on error
            self.xaxis.grid(grid[0], which="minor")
            self.xaxis.grid(grid[1], which="major")
            self.yaxis.grid(grid[2], which="minor")
            self.yaxis.grid(grid[3], which="major")
    return wrapped_func
Wrap func to preserve the Axes current grid settings .
22,442
def set_epoch(self, epoch):
    """Set the epoch for the current GPS scale.

    Re-applies the current x-axis scale with the new ``epoch``.
    """
    return self.set_xscale(self.get_xscale(), epoch=epoch)
Set the epoch for the current GPS scale .
22,443
def imshow(self, array, *args, **kwargs):
    """Display an image, i.e. data on a 2D regular raster.

    `Array2D` inputs are dispatched to a metadata-aware handler;
    anything else goes to the stock matplotlib ``imshow``, followed by
    an autoscale so the axes fit the new image.
    """
    if isinstance(array, Array2D):
        return self._imshow_array2d(array, *args, **kwargs)
    image = super(Axes, self).imshow(array, *args, **kwargs)
    # rescale the axes to include the new image
    self.autoscale(enable=None, axis='both', tight=None)
    return image
Display an image i . e . data on a 2D regular raster .
22,444
def _imshow_array2d ( self , array , origin = 'lower' , interpolation = 'none' , aspect = 'auto' , ** kwargs ) : extent = tuple ( array . xspan ) + tuple ( array . yspan ) if self . get_xscale ( ) == 'log' and extent [ 0 ] == 0. : extent = ( 1e-300 , ) + extent [ 1 : ] if self . get_yscale ( ) == 'log' and extent [ 2 ] == 0. : extent = extent [ : 2 ] + ( 1e-300 , ) + extent [ 3 : ] kwargs . setdefault ( 'extent' , extent ) return self . imshow ( array . value . T , origin = origin , aspect = aspect , interpolation = interpolation , ** kwargs )
Render an ~gwpy . types . Array2D using Axes . imshow
22,445
def pcolormesh(self, *args, **kwargs):
    """Create a pseudocolor plot with a non-regular rectangular grid.

    A single `Array2D` positional argument is dispatched to a
    metadata-aware handler; otherwise defer to matplotlib.
    """
    if len(args) == 1 and isinstance(args[0], Array2D):
        return self._pcolormesh_array2d(*args, **kwargs)
    return super(Axes, self).pcolormesh(*args, **kwargs)
Create a pseudocolor plot with a non - regular rectangular grid .
22,446
def _pcolormesh_array2d ( self , array , * args , ** kwargs ) : x = numpy . concatenate ( ( array . xindex . value , array . xspan [ - 1 : ] ) ) y = numpy . concatenate ( ( array . yindex . value , array . yspan [ - 1 : ] ) ) xcoord , ycoord = numpy . meshgrid ( x , y , copy = False , sparse = True ) return self . pcolormesh ( xcoord , ycoord , array . value . T , * args , ** kwargs )
Render an ~gwpy . types . Array2D using Axes . pcolormesh
22,447
def plot_mmm(self, data, lower=None, upper=None, **kwargs):
    """Plot a `Series` as a line with a shaded region around it.

    Returns the list of created artists: the central line, any bound
    lines, and the filled region.
    """
    alpha = kwargs.pop('alpha', .1)

    # plot the central line
    line, = self.plot(data, **kwargs)
    out = [line]

    # style the bounds: thinner, same colour, more translucent
    kwargs.update({
        'label': '',
        'linewidth': line.get_linewidth() / 2,
        'color': line.get_color(),
        'alpha': alpha * 2,
    })

    # plot the lower and upper bound series (where given)
    fill = [data.xindex.value, data.value, data.value]
    for i, bound in enumerate((lower, upper)):
        if bound is not None:
            out.extend(self.plot(bound, **kwargs))
            fill[i + 1] = bound.value

    # fill between the extremes
    out.append(self.fill_between(
        *fill, alpha=alpha, color=kwargs['color'],
        rasterized=kwargs.get('rasterized', True)))

    return out
Plot a Series as a line with a shaded region around it .
22,448
def tile(self, x, y, w, h, color=None,
         anchor='center', edgecolors='face', linewidth=0.8,
         **kwargs):
    """Plot rectangular tiles onto these Axes.

    Each tile is defined by an (``x``, ``y``) coordinate plus a width
    and height; ``anchor`` selects which point of the rectangle the
    coordinate refers to ('ll', 'lr', 'ul', 'ur', or 'center').
    """
    # sort the tiles by colour so the brightest are drawn on top
    if color is not None and kwargs.get('c_sort', True):
        sortidx = color.argsort()
        x = x[sortidx]
        y = y[sortidx]
        w = w[sortidx]
        h = h[sortidx]
        color = color[sortidx]

    # define how to build each polygon from its anchor point
    if anchor == 'll':  # lower-left corner
        def _poly(x, y, w, h):
            return ((x, y), (x, y + h), (x + w, y + h), (x + w, y))
    elif anchor == 'lr':  # lower-right corner
        def _poly(x, y, w, h):
            return ((x - w, y), (x - w, y + h), (x, y + h), (x, y))
    elif anchor == 'ul':  # upper-left corner
        def _poly(x, y, w, h):
            return ((x, y - h), (x, y), (x + w, y), (x + w, y - h))
    elif anchor == 'ur':  # upper-right corner
        def _poly(x, y, w, h):
            return ((x - w, y - h), (x - w, y), (x, y), (x, y - h))
    elif anchor == 'center':
        def _poly(x, y, w, h):
            return ((x - w / 2., y - h / 2.), (x - w / 2., y + h / 2.),
                    (x + w / 2., y + h / 2.), (x + w / 2., y - h / 2.))
    else:
        raise ValueError("Unrecognised tile anchor {!r}".format(anchor))

    # build the polygon collection
    cmap = kwargs.pop('cmap', rcParams['image.cmap'])
    coll = PolyCollection((_poly(*tile) for tile in zip(x, y, w, h)),
                          edgecolors=edgecolors, linewidth=linewidth,
                          **kwargs)
    if color is not None:
        coll.set_array(color)
        coll.set_cmap(cmap)

    out = self.add_collection(coll)
    self.autoscale_view()
    return out
Plot rectangular tiles onto these Axes.
22,449
def colorbar(self, mappable=None, **kwargs):
    """Add a `~matplotlib.colorbar.Colorbar` to these Axes."""
    fig = self.get_figure()
    # by default pin the colorbar to the Axes with zero padding,
    # using the axesgrid toolkit when no fraction was given
    if kwargs.get('use_axesgrid', True):
        kwargs.setdefault('fraction', 0.)
    if kwargs.get('fraction', 0.) == 0.:
        kwargs.setdefault('use_axesgrid', True)
    mappable, kwargs = gcbar.process_colorbar_kwargs(
        fig, mappable=mappable, ax=self, **kwargs)
    if isinstance(fig, Plot):
        # the colorbar axes were already created above; don't let the
        # figure-level colorbar create them again
        kwargs['use_axesgrid'] = False
    return fig.colorbar(mappable, **kwargs)
Add a ~matplotlib . colorbar . Colorbar to these Axes
22,450
def tconvert(gpsordate='now'):
    """Convert GPS times to ISO-format date-times and vice-versa.

    Numeric input is treated as a GPS time; anything else is parsed as
    a date/time string.
    """
    try:
        float(gpsordate)
    except (TypeError, ValueError):
        # not a number: parse it as a date
        return to_gps(gpsordate)
    return from_gps(gpsordate)
Convert GPS times to ISO - format date - times and vice - versa .
22,451
def from_gps(gps):
    """Convert a GPS time into a `datetime.datetime`.

    The nanosecond part is carried separately so precision beyond a
    float GPS value is preserved.
    """
    try:
        gps = LIGOTimeGPS(gps)
    except (ValueError, TypeError, RuntimeError):
        # fall back to casting through float (e.g. for Decimal input)
        gps = LIGOTimeGPS(float(gps))
    sec, nano = gps.gpsSeconds, gps.gpsNanoSeconds
    date = Time(sec, format='gps', scale='utc').datetime
    # add the nanoseconds back on as (fractional) microseconds
    return date + datetime.timedelta(microseconds=nano * 1e-3)
Convert a GPS time into a datetime . datetime .
22,452
def _str_to_datetime(datestr):
    """Convert `str` to `datetime.datetime`."""
    # try known special strings first (e.g. 'now', 'today')
    try:
        return DATE_STRINGS[str(datestr).lower()]()
    except KeyError:  # not a special string
        pass

    # use maya (if installed), which handles timezone-aware strings
    try:
        import maya
        return maya.when(datestr).datetime()
    except ImportError:
        pass

    # otherwise use dateutil, upgrading its timezone warning to an error
    with warnings.catch_warnings():
        warnings.simplefilter("error", RuntimeWarning)
        try:
            return dateparser.parse(datestr)
        except RuntimeWarning:
            raise ValueError("Cannot parse date string with timezone "
                             "without maya, please install maya")
        except (ValueError, TypeError) as exc:  # improve error reporting
            exc.args = ("Cannot parse date string {0!r}: {1}".format(
                datestr, exc.args[0]),)
            raise
Convert str to datetime . datetime .
22,453
def _time_to_gps(time):
    """Convert a `Time` into `LIGOTimeGPS`, preserving microseconds."""
    utc = time.utc
    date = utc.datetime
    # plain `date` objects carry no sub-second information
    if isinstance(date, datetime.datetime):
        micro = date.microsecond
    else:
        micro = 0
    return LIGOTimeGPS(int(utc.gps), int(micro * 1e3))
Convert a Time into LIGOTimeGPS .
22,454
def with_read_hdf5(func):
    """Decorate an HDF5-reading function to open a filepath if needed.

    ``func`` can then be called with an open `h5py` object, a named
    file object, or a file path.
    """
    @wraps(func)
    def decorated_func(fobj, *args, **kwargs):
        if isinstance(fobj, h5py.HLObject):
            # already an open HDF5 object: pass straight through
            return func(fobj, *args, **kwargs)
        if isinstance(fobj, FILE_LIKE):
            # use the name of the underlying file
            fobj = fobj.name
        with h5py.File(fobj, 'r') as h5f:
            return func(h5f, *args, **kwargs)
    return decorated_func
Decorate an HDF5 - reading function to open a filepath if needed
22,455
def find_dataset(h5o, path=None):
    """Find and return the relevant dataset inside the given H5 object.

    Without a ``path``, the object must contain exactly one member.
    """
    # datasets pass straight through
    if isinstance(h5o, h5py.Dataset):
        return h5o
    if path is None:
        if len(h5o) != 1:
            raise ValueError("Please specify the HDF5 path via the "
                             "``path=`` keyword argument")
        # single member: use it
        path = list(h5o.keys())[0]
    return h5o[path]
Find and return the relevant dataset inside the given H5 object
22,456
def with_write_hdf5(func):
    """Decorate an HDF5-writing function to open a file path if needed.

    Existing files are refused unless ``overwrite`` or ``append`` is
    given in the keywords.
    """
    @wraps(func)
    def wrapper(obj, fobj, *args, **kwargs):
        if isinstance(fobj, h5py.HLObject):
            return func(obj, fobj, *args, **kwargs)
        append = kwargs.get('append', False)
        overwrite = kwargs.get('overwrite', False)
        # refuse to clobber an existing file unless asked to
        if os.path.exists(fobj) and not (overwrite or append):
            raise IOError("File exists: %s" % fobj)
        with h5py.File(fobj, 'a' if append else 'w') as h5f:
            return func(obj, h5f, *args, **kwargs)
    return wrapper
Decorate an HDF5 - writing function to open a filepath if needed
22,457
def create_dataset(parent, path, overwrite=False, **kwargs):
    """Create a new dataset inside the parent HDF5 object.

    With ``overwrite=True`` an existing dataset at ``path`` is deleted
    first; otherwise a clash raises with an informative message.
    """
    if overwrite and path in parent:
        del parent[path]
    try:
        return parent.create_dataset(path, **kwargs)
    except RuntimeError as exc:
        # annotate the h5py name-clash error with a hint
        if str(exc) == 'Unable to create link (Name already exists)':
            exc.args = ('{0}: {1!r}, pass overwrite=True '
                        'to ignore existing datasets'.format(str(exc), path),)
        raise
Create a new dataset inside the parent HDF5 object
22,458
def format_db_selection(selection, engine=None):
    """Format a column filter selection as an SQL 'WHERE' string.

    Parameters
    ----------
    selection : `str`, `list`, `None`
        the column filter(s) to format; `None` returns ``''``
    engine : `sqlalchemy.engine.Engine`, optional
        the database engine, used to quote columns for postgresql

    Returns
    -------
    whereclause : `str`
        a 'WHERE ...' clause, or the empty string for no selection

    Raises
    ------
    ValueError
        if an operator has no known SQL representation
    """
    if selection is None:
        return ''
    selections = []
    for col, op_, value in parse_column_filters(selection):
        if engine and engine.name == 'postgresql':
            col = '"%s"' % col
        # map the operator function back to its string form
        # FIX: indexing an empty list raises IndexError, not KeyError,
        # so the old ``except KeyError`` could never fire
        try:
            opstr = [key for key in OPERATORS if OPERATORS[key] is op_][0]
        except (IndexError, KeyError):
            raise ValueError("Cannot format database 'WHERE' command with "
                             "selection operator %r" % op_)
        selections.append('{0} {1} {2!r}'.format(col, opstr, value))
    if selections:
        return 'WHERE %s' % ' AND '.join(selections)
    return ''
Format a column filter selection as a SQL database WHERE string
22,459
def fetch(engine, tablename, columns=None, selection=None, **kwargs):
    """Fetch data from an SQL table into an `EventTable`.

    ``columns`` restricts the selected columns; ``selection`` is
    formatted into a WHERE clause.
    """
    import pandas as pd
    # build the SELECT statement
    columnstr = ('*' if columns is None else
                 ', '.join('"%s"' % c for c in columns))
    selectionstr = format_db_selection(selection, engine=engine)
    qstr = 'SELECT %s FROM %s %s' % (columnstr, tablename, selectionstr)
    tab = pd.read_sql(qstr, engine, **kwargs)
    # convert unicode columns to plain str for astropy compatibility
    types = tab.apply(lambda x: pd.api.types.infer_dtype(x.values))
    if not tab.empty:
        for col in types[types == 'unicode'].index:
            tab[col] = tab[col].astype(str)
    return Table.from_pandas(tab).filled()
Fetch data from an SQL table into an EventTable
22,460
def q_scan(data, mismatch=DEFAULT_MISMATCH, qrange=DEFAULT_QRANGE,
           frange=DEFAULT_FRANGE, duration=None, sampling=None, **kwargs):
    """Transform data by scanning over a `QTiling`.

    Returns ``(qgram, far)``: the loudest Q-gram and an estimate of its
    false-alarm rate.
    """
    from gwpy.timeseries import TimeSeries
    # a TimeSeries carries its own duration/sampling metadata
    if isinstance(data, TimeSeries):
        duration = abs(data.span)
        sampling = data.sample_rate.to('Hz').value
        kwargs.update({'epoch': data.t0.value})
        data = data.fft().value
    tiling = QTiling(duration, sampling, mismatch=mismatch,
                     qrange=qrange, frange=frange)
    qgram, ntiles = tiling.transform(data, **kwargs)
    # estimate the false-alarm rate of the peak tile energy
    far = 1.5 * ntiles * numpy.exp(-qgram.peak['energy']) / duration
    return (qgram, far)
Transform data by scanning over a QTiling
22,461
def _iter_qs(self):
    """Iterate over the Q values for this tiling."""
    # geometrically spaced Qs spanning self.qrange
    qspan = log(self.qrange[1] / self.qrange[0]) / 2 ** (1 / 2.)
    nplanes = int(max(ceil(qspan / self.deltam), 1))
    dq = qspan / nplanes
    for idx in xrange(nplanes):
        yield self.qrange[0] * exp(2 ** (1 / 2.) * dq * (idx + .5))
Iterate over the Q values
22,462
def transform(self, fseries, **kwargs):
    """Compute the Q-transform of each plane in this tiling and return
    the plane holding the most significant tile.

    Returns ``(qgram, nind)`` where ``nind`` is a weighted estimate of
    the number of independent tiles searched.
    """
    # weight by the relative bandwidth of the Q range
    weight = 1 + numpy.log10(self.qrange[1] / self.qrange[0]) / numpy.sqrt(2)
    nind, nplanes, peak, result = (0, 0, 0, None)
    for plane in self:
        nplanes += 1
        # accumulate the (approximate) tile count for this plane
        nind += sum([1 + row.ntiles * row.deltam for row in plane])
        result = plane.transform(fseries, **kwargs)
        # keep the plane with the loudest single tile
        # NOTE(review): `out` is unbound if no plane beats energy 0 —
        # presumably peak energies are always positive; confirm
        if result.peak['energy'] > peak:
            out = result
            peak = out.peak['energy']
    return (out, nind * weight / nplanes)
Compute the time - frequency plane at fixed Q with the most significant tile
22,463
def farray(self):
    """Array of frequencies for the lower edge of each frequency bin."""
    # each bin's half-bandwidth scales as sqrt(pi) * f / Q
    halfband = 2 * pi ** (1 / 2.) * self.frequencies / self.q / 2.
    return self.frequencies - halfband
Array of frequencies for the lower - edge of each frequency bin
22,464
def ntiles(self):
    """The number of tiles in this row."""
    # cumulative mismatch accumulated across the row duration
    tcum = self.duration * 2 * pi * self.frequency / self.q
    return next_power_of_two(tcum / self.deltam)
The number of tiles in this row
22,465
def get_window(self):
    """Generate the bi-square window for this row."""
    wfreq = self._get_indices() / self.duration
    # frequency offsets normalised by the row's central frequency
    xfreq = wfreq * self.qprime / self.frequency
    norm = self.ntiles / (self.duration * self.sampling) * (
        315 * self.qprime / (128 * self.frequency)) ** (1 / 2.)
    return (1 - xfreq ** 2) ** 2 * norm
Generate the bi - square window for this row
22,466
def get_data_indices(self):
    """Return the index array of interesting frequencies for this row."""
    # shift the window offsets onto the frequency-series index grid
    offsets = self._get_indices()
    return numpy.round(
        offsets + 1 + self.frequency * self.duration).astype(int)
Returns the index array of interesting frequencies for this row
22,467
def interpolate(self, tres="<default>", fres="<default>", logf=False,
                outseg=None):
    """Interpolate this QGram over a regularly-gridded `Spectrogram`.

    ``tres``/``fres`` set the output time/frequency resolution
    (``"<default>"`` picks sensible values; ``fres=None`` skips the
    frequency interpolation); ``logf`` switches to log-spaced output
    frequencies; ``outseg`` restricts the output time span.
    """
    from scipy.interpolate import (interp2d, InterpolatedUnivariateSpline)
    from ..spectrogram import Spectrogram
    if outseg is None:
        outseg = self.energies[0].span
    frequencies = self.plane.frequencies
    dtype = self.energies[0].dtype
    # default time resolution: 1000 points across the output segment
    if tres == "<default>":
        tres = abs(Segment(outseg)) / 1000.
    xout = numpy.arange(*outseg, step=tres)
    nx = xout.size
    ny = frequencies.size
    out = Spectrogram(numpy.empty((nx, ny), dtype=dtype),
                      t0=outseg[0], dt=tres, frequencies=frequencies)
    out.q = self.plane.q
    # interpolate each frequency row onto the common time grid
    for i, row in enumerate(self.energies):
        xrow = numpy.arange(row.x0.value, (row.x0 + row.duration).value,
                            row.dx.value)
        interp = InterpolatedUnivariateSpline(xrow, row.value)
        out[:, i] = interp(xout).astype(dtype, casting="same_kind",
                                        copy=False)
    if fres is None:
        # no frequency interpolation requested
        return out
    # 2-D cubic interpolation onto the requested frequency grid
    interp = interp2d(xout, frequencies, out.value.T, kind='cubic')
    if not logf:
        if fres == "<default>":
            fres = .5
        outfreq = numpy.arange(self.plane.frange[0], self.plane.frange[1],
                               fres, dtype=dtype)
    else:
        if fres == "<default>":
            fres = 500
        logfmin = numpy.log10(self.plane.frange[0])
        logfmax = numpy.log10(self.plane.frange[1])
        outfreq = numpy.logspace(logfmin, logfmax, num=int(fres))
    new = type(out)(
        interp(xout, outfreq).T.astype(dtype, casting="same_kind",
                                       copy=False),
        t0=outseg[0], dt=tres, frequencies=outfreq,
    )
    new.q = self.plane.q
    return new
Interpolate this QGram over a regularly - gridded spectrogram
22,468
def table(self, snrthresh=5.5):
    """Represent this QPlane as an `EventTable` of significant tiles.

    Tiles with energy >= ``snrthresh**2 / 2`` are kept; the Q of the
    plane is stored in the table metadata.
    """
    from ..table import EventTable
    freqs = self.plane.frequencies
    # full bandwidths: twice the distance from centre to lower edge
    bws = 2 * (freqs - self.plane.farray)
    names = ('time', 'frequency', 'duration', 'bandwidth', 'energy')
    rec = numpy.recarray((0,), names=names, formats=['f8'] * len(names))
    for f, bw, row in zip(freqs, bws, self.energies):
        # energy threshold equivalent to the requested SNR threshold
        ind, = (row.value >= snrthresh ** 2 / 2.).nonzero()
        new = ind.size
        if new > 0:
            # grow the recarray in place and fill the trailing rows
            rec.resize((rec.size + new,), refcheck=False)
            rec['time'][-new:] = row.times.value[ind]
            rec['frequency'][-new:] = f
            rec['duration'][-new:] = row.dt.to('s').value
            rec['bandwidth'][-new:] = bw
            rec['energy'][-new:] = row.value[ind]
    out = EventTable(rec, copy=False)
    out.meta['q'] = self.plane.q
    return out
Represent this QPlane as an EventTable
22,469
def define(cls, start, step, num, dtype=None):
    """Define a new `Index` as ``start + [0 .. num-1] * step``.

    ``dtype`` defaults to the wider of the ``start``/``step`` dtypes.
    """
    if dtype is None:
        # promote to the wider of the two input dtypes
        dtype = max(
            numpy.array(start, subok=True, copy=False).dtype,
            numpy.array(step, subok=True, copy=False).dtype,
        )
    first = start.astype(dtype, copy=False)
    stride = step.astype(dtype, copy=False)
    return cls(first + numpy.arange(num, dtype=dtype) * stride, copy=False)
Define a new Index .
22,470
def regular(self):
    """`True` if this index is linearly increasing.

    The result is cached on the ``info.meta`` dict.
    """
    try:
        return self.info.meta['regular']
    except (TypeError, KeyError):
        # not cached yet: compute and store, creating the dict if needed
        if self.info.meta is None:
            self.info.meta = {}
        self.info.meta['regular'] = self.is_regular()
        return self.info.meta['regular']
True if this index is linearly increasing
22,471
def is_regular(self):
    """Determine whether this `Index` contains linearly-spaced samples.

    Requires at least two samples; shorter indexes return `False`.
    """
    if self.size <= 1:
        return False
    # a regular index has a vanishing second difference
    return numpy.isclose(numpy.diff(self.value, n=2), 0).all()
Determine whether this Index contains linearly increasing samples
22,472
def table_from_omicron(source, *args, **kwargs):
    """Read an `EventTable` from an Omicron ROOT file.

    Defaults to the 'triggers' tree when no positional tree arguments
    are given.
    """
    if not args:
        kwargs.setdefault('treename', 'triggers')
    return EventTable.read(source, *args, format='root', **kwargs)
Read an EventTable from an Omicron ROOT file
22,473
def plot(self, *args, **kwargs):
    """Plot data onto these axes.

    Leading arguments that have a dedicated segment plotter (flag,
    segmentlist, ...) are drawn with it; any remaining arguments are
    handed to the standard ``Axes.plot``.
    """
    out = []
    args = list(args)
    # consume arguments that have a dedicated segment plotter
    while args:
        try:
            plotter = self._plot_method(args[0])
        except TypeError:
            # not a segment-like object: defer the rest to Axes.plot
            break
        out.append(plotter(args[0], **kwargs))
        args.pop(0)
    if args:
        out.extend(super(SegmentAxes, self).plot(*args, **kwargs))
    self.autoscale(enable=None, axis='both', tight=False)
    return out
Plot data onto these axes
22,474
def plot_dict(self, flags, label='key', known='x', **kwargs):
    """Plot a `~gwpy.segments.DataQualityDict` onto these axes.

    ``label`` selects how each flag is labelled: ``'key'`` (dict key),
    ``'name'`` (flag name), or any fixed string.
    """
    artists = []
    for key, flag in flags.items():
        if label.lower() == 'name':
            text = flag.name
        elif label.lower() == 'key':
            text = key
        else:
            text = label
        artists.append(self.plot_flag(flag, label=to_string(text),
                                      known=known, **kwargs))
    return artists
Plot a ~gwpy . segments . DataQualityDict onto these axes
22,475
def plot_flag(self, flag, y=None, **kwargs):
    """Plot a `~gwpy.segments.DataQualityFlag` onto these axes.

    'Good' flags draw active segments in green over red known time,
    'bad' flags the reverse; ``known`` controls the known-time styling
    (colour, hatch name, 'fancy', a dict of properties, or None/False
    to skip).
    """
    if y is None:
        y = self.get_next_y()
    # default colours depend on the flag polarity
    if flag.isgood:
        kwargs.setdefault('facecolor', '#33cc33')
        kwargs.setdefault('known', '#ff0000')
    else:
        kwargs.setdefault('facecolor', '#ff0000')
        kwargs.setdefault('known', '#33cc33')
    known = kwargs.pop('known')
    name = kwargs.pop('label', flag.label or flag.name)
    kwargs.setdefault('zorder', 0)
    # draw the active segments
    coll = self.plot_segmentlist(flag.active, y=y, label=name, **kwargs)
    # then the known segments underneath, excluded from legends
    if known not in (None, False):
        known_kw = {
            'facecolor': coll.get_facecolor()[0],
            'collection': 'ignore',
            'zorder': -1000,
        }
        if isinstance(known, dict):
            known_kw.update(known)
        elif known == 'fancy':
            # thin bar along the row centre
            known_kw.update(height=kwargs.get('height', .8) * .05)
        elif known in HATCHES:
            known_kw.update(fill=False, hatch=known)
        else:
            # treat ``known`` as a colour
            known_kw.update(fill=True, facecolor=known,
                            height=kwargs.get('height', .8) * .5)
        self.plot_segmentlist(flag.known, y=y, label=name, **known_kw)
    return coll
Plot a ~gwpy . segments . DataQualityFlag onto these axes .
22,476
def plot_segmentlist(self, segmentlist, y=None, height=.8, label=None,
                     collection=True, rasterized=None, **kwargs):
    """Plot a `~gwpy.segments.SegmentList` onto these axes.

    Each segment becomes a rectangle at row ``y``; the patches are
    grouped into a single `PatchCollection` unless ``collection`` is
    false.
    """
    # accept 'color' as an alias for 'facecolor'
    facecolor = kwargs.pop('facecolor', kwargs.pop('color', '#629fca'))
    if is_color_like(facecolor):
        # darken the edge relative to the face
        kwargs.setdefault('edgecolor', tint(facecolor, factor=.5))
    if y is None:
        y = self.get_next_y()
    patches = [SegmentRectangle(seg, y, height=height,
                                facecolor=facecolor, **kwargs)
               for seg in segmentlist]
    if collection:
        coll = PatchCollection(patches, match_original=patches,
                               zorder=kwargs.get('zorder', 1))
        coll.set_rasterized(rasterized)
        # collections created with collection='ignore' are skipped by
        # get_collections(ignore=...)
        coll._ignore = collection == 'ignore'
        coll._ypos = y
        out = self.add_collection(coll)
        if label is None:
            label = coll.get_label()
        coll.set_label(to_string(label))
    else:
        out = []
        for patch in patches:
            patch.set_label(label)
            patch.set_rasterized(rasterized)
            # only the first patch carries the legend label
            label = ''
            out.append(self.add_patch(patch))
    self.autoscale(enable=None, axis='both', tight=False)
    return out
Plot a ~gwpy . segments . SegmentList onto these axes
22,477
def plot_segmentlistdict(self, segmentlistdict, y=None, dy=1, **kwargs):
    """Plot a `~gwpy.segments.SegmentListDict` onto these axes.

    Each entry is drawn on its own row, stepping by ``dy``.
    """
    if y is None:
        y = self.get_next_y()
    artists = []
    for name, seglist in segmentlistdict.items():
        artists.append(self.plot_segmentlist(seglist, y=y, label=name,
                                             **kwargs))
        y += dy
    return artists
Plot a ~gwpy . segments . SegmentListDict onto these axes
22,478
def get_collections(self, ignore=None):
    """Return the collections matching the given ``_ignore`` value.

    With ``ignore=None`` every collection is returned.
    """
    if ignore is None:
        return self.collections
    return [coll for coll in self.collections
            if getattr(coll, '_ignore', None) == ignore]
Return the collections matching the given _ignore value
22,479
def parse_keytab(keytab):
    """Read a KRB5 keytab file, returning the credentials listed within.

    Returns an ordered, de-duplicated list of
    ``(principal, realm, kvno)`` tuples.
    """
    try:
        out = subprocess.check_output(['klist', '-k', keytab],
                                      stderr=subprocess.PIPE)
    except OSError:
        raise KerberosError("Failed to locate klist, cannot read keytab")
    except subprocess.CalledProcessError:
        raise KerberosError("Cannot read keytab {!r}".format(keytab))
    principals = []
    for line in out.splitlines():
        if isinstance(line, bytes):
            line = line.decode('utf-8')
        # credential lines read '<kvno> <principal>@<realm>'
        try:
            kvno, principal, = re.split(r'\s+', line.strip(' '), 1)
        except ValueError:
            # header/blank line
            continue
        if not kvno.isdigit():
            continue
        principals.append(tuple(principal.split('@')) + (int(kvno),))
    # preserve order while removing duplicates
    return list(OrderedDict.fromkeys(principals).keys())
Read the contents of a KRB5 keytab file returning a list of credentials listed within
22,480
def y0(self):
    """Y-axis coordinate of the first data point.

    Lazily initialised to zero in the y-axis unit.
    """
    if not hasattr(self, '_y0'):
        self._y0 = Quantity(0, self.yunit)
    return self._y0
Y - axis coordinate of the first data point
22,481
def dy(self):
    """Y-axis sample separation.

    Derived from the y-index when one is set; defaults to 1 in the
    y-axis unit. Irregular y-indexes raise `AttributeError`.
    """
    if hasattr(self, '_dy'):
        return self._dy
    if not hasattr(self, '_yindex'):
        # no index set: default to unit spacing
        self._dy = Quantity(1, self.yunit)
        return self._dy
    if not self.yindex.regular:
        raise AttributeError("This series has an irregular y-axis "
                             "index, so 'dy' is not well defined")
    self._dy = self.yindex[1] - self.yindex[0]
    return self._dy
Y - axis sample separation
22,482
def yunit(self):
    """Unit of the y-axis index."""
    # prefer the sample-spacing unit, then the origin unit, then default
    for attr in ('_dy', '_y0'):
        try:
            return getattr(self, attr).unit
        except AttributeError:
            continue
    return self._default_yunit
Unit of Y - axis index
22,483
def yindex(self):
    """Positions of the data on the y-axis.

    Built lazily from ``y0``, ``dy`` and the array shape.
    """
    if not hasattr(self, '_yindex'):
        self._yindex = Index.define(self.y0, self.dy, self.shape[1])
    return self._yindex
Positions of the data on the y - axis
22,484
def is_compatible(self, other):
    """Check whether this array and ``other`` have compatible metadata.

    Extends the parent check with a comparison of y-axis sample sizes.
    """
    super(Array2D, self).is_compatible(other)
    if isinstance(other, type(self)):
        try:
            if self.dy != other.dy:
                raise ValueError("%s sample sizes do not match: "
                                 "%s vs %s." % (type(self).__name__,
                                                self.dy, other.dy))
        except AttributeError:
            # dy raises AttributeError for irregular y-indexes
            raise ValueError("Series with irregular y-indexes cannot "
                             "be compatible")
    return True
Check whether this array and other have compatible metadata
22,485
def find_flag_groups(h5group, strict=True):
    """Return the 'name' of each flag group under the given HDF5 group.

    With ``strict=False``, members without a 'name' attribute are
    skipped instead of raising `KeyError`.
    """
    names = []
    for key in h5group:
        try:
            names.append(h5group[key].attrs['name'])
        except KeyError:
            if strict:
                raise
    return names
Returns all HDF5 Groups under the given group that contain a flag
22,486
def _is_flag_group(obj):
    """Return `True` if ``obj`` is an `h5py.Group` shaped like a flag.

    A flag group carries 'active' and 'known' datasets.
    """
    if not isinstance(obj, h5py.Group):
        return False
    return (isinstance(obj.get("active"), h5py.Dataset)
            and isinstance(obj.get("known"), h5py.Dataset))
Returns True if obj is an h5py . Group that looks like it contains a flag
22,487
def _find_flag_groups(h5f):
    """Return the path of every group in ``h5f`` that looks like a flag."""
    found = []

    def _visit(name, obj):
        if _is_flag_group(obj):
            found.append(name)

    h5f.visititems(_visit)
    return found
Return all groups in h5f that look like flags
22,488
def _get_flag_group(h5f, path):
    """Determine the HDF5 group from which to read a flag."""
    if path:
        return h5f[path]
    if _is_flag_group(h5f):
        return h5f
    # a single flag group anywhere in the file is unambiguous
    groups = _find_flag_groups(h5f)
    if len(groups) == 1:
        return h5f[groups[0]]
    raise ValueError(
        "please pass a valid HDF5 Group, or specify the HDF5 Group "
        "path via the ``path=`` keyword argument",
    )
Determine the group to use in order to read a flag
22,489
def read_hdf5_flag(h5f, path=None, gpstype=LIGOTimeGPS):
    """Read a `DataQualityFlag` from an HDF5 file or group.

    The 'valid' dataset is accepted as a legacy alias for 'known'.
    """
    group = _get_flag_group(h5f, path)
    active = SegmentList.read(group['active'], format='hdf5',
                              gpstype=gpstype)
    try:
        known = SegmentList.read(group['known'], format='hdf5',
                                 gpstype=gpstype)
    except KeyError as first_keyerror:
        try:  # legacy dataset name
            known = SegmentList.read(group['valid'], format='hdf5',
                                     gpstype=gpstype)
        except KeyError:
            # re-raise the original error about 'known'
            raise first_keyerror
    return DataQualityFlag(active=active, known=known,
                           **dict(group.attrs))
Read a DataQualityFlag object from an HDF5 file or group .
22,490
def read_hdf5_segmentlist(h5f, path=None, gpstype=LIGOTimeGPS, **kwargs):
    """Read a `SegmentList` from an HDF5 file or group."""
    dataset = io_hdf5.find_dataset(h5f, path=path)
    segtable = Table.read(dataset, format='hdf5', **kwargs)
    out = SegmentList()
    for row in segtable:
        start = LIGOTimeGPS(int(row['start_time']),
                            int(row['start_time_ns']))
        end = LIGOTimeGPS(int(row['end_time']), int(row['end_time_ns']))
        # convert to the requested GPS type only when necessary
        if gpstype is not LIGOTimeGPS:
            start = gpstype(start)
            end = gpstype(end)
        out.append(Segment(start, end))
    return out
Read a SegmentList object from an HDF5 file or group .
22,491
def read_hdf5_dict(h5f, names=None, path=None, on_missing='error',
                   **kwargs):
    """Read a `DataQualityDict` from an HDF5 file.

    ``on_missing`` controls the reaction to a missing flag group:
    'ignore', 'warn', or anything else to raise.
    """
    if path:
        h5f = h5f[path]
    if names is None:
        names = kwargs.pop('flags', None)  # legacy keyword
    if names is None:
        # try to discover the flag names automatically
        try:
            names = find_flag_groups(h5f, strict=True)
        except KeyError:
            names = None
    if not names:
        raise ValueError("Failed to automatically parse available flag "
                         "names from HDF5, please give a list of names "
                         "to read via the ``names=`` keyword")
    out = DataQualityDict()
    for name in names:
        try:
            out[name] = read_hdf5_flag(h5f, name, **kwargs)
        except KeyError as exc:
            if on_missing == 'ignore':
                continue
            if on_missing == 'warn':
                warnings.warn(str(exc))
            else:
                raise ValueError('no H5Group found for flag '
                                 '{0!r}'.format(name))
    return out
Read a DataQualityDict from an HDF5 file
22,492
def write_hdf5_flag_group(flag, h5group, **kwargs):
    """Write a `DataQualityFlag` into the given HDF5 group."""
    # segment lists first
    flag.active.write(h5group, 'active', **kwargs)
    kwargs['append'] = True
    flag.known.write(h5group, 'known', **kwargs)
    # then scalar metadata as group attributes
    for attr in ['name', 'label', 'category', 'description', 'isgood',
                 'padding']:
        value = getattr(flag, attr)
        if value is None:
            continue
        if isinstance(value, Quantity):
            h5group.attrs[attr] = value.value
        elif isinstance(value, UnitBase):
            h5group.attrs[attr] = str(value)
        else:
            h5group.attrs[attr] = value
    return h5group
Write a DataQualityFlag into the given HDF5 group
22,493
def write_hdf5_dict(flags, output, path=None, append=False,
                    overwrite=False, **kwargs):
    """Write a `DataQualityDict` to an `h5py.Group`.

    Replacing an existing flag group requires BOTH ``append=True`` and
    ``overwrite=True``.
    """
    if path:
        try:
            parent = output[path]
        except KeyError:
            parent = output.create_group(path)
    else:
        parent = output
    for name in flags:
        if name in parent:
            if not (overwrite and append):
                raise IOError(
                    "Group '%s' already exists, give ``append=True, "
                    "overwrite=True`` to overwrite it" % os.path.join(
                        parent.name, name))
            del parent[name]
        group = parent.create_group(name)
        write_hdf5_flag_group(flags[name], group, **kwargs)
Write this DataQualityFlag to a h5py . Group .
22,494
def float_to_latex(x, format="%.2g"):
    r"""Convert a floating-point number to a LaTeX representation.

    Numbers rendered in scientific notation become
    ``m \times 10^{e}``; a mantissa of 1 is dropped entirely.
    """
    if x == 0.:
        return '0'
    rendered = format % x
    if "e" not in rendered:
        return rendered
    mantissa, exponent = rendered.split("e")
    # drop a redundant '.0' from integral mantissas
    if float(mantissa).is_integer():
        mantissa = int(float(mantissa))
    # tidy the exponent: no leading '+' or zeros
    exponent = exponent.lstrip("0+")
    if exponent.startswith('-0'):
        exponent = '-' + exponent[2:]
    if float(mantissa) == 1.0:
        return r"10^{%s}" % exponent
    return r"%s\!\!\times\!\!10^{%s}" % (mantissa, exponent)
r Convert a floating point number to a latex representation .
22,495
def label_to_latex(text):
    r"""Convert text into a LaTeX-passable representation.

    Each LaTeX control character found in ``text`` is escaped.
    """
    if text is None:
        return ''
    pieces = []
    cursor = None
    for match in re_latex_control.finditer(text):
        start, end = match.span()
        pieces.append(text[cursor:start])
        # escape the control character itself
        pieces.append(r'\%s' % match.group()[0])
        cursor = end
    if not cursor:
        # no control characters found: return the input unchanged
        return text
    pieces.append(text[cursor:])
    return ''.join(pieces)
r Convert text into a latex - passable representation .
22,496
def preformat_cache(cache, start=None, end=None):
    """Preprocess a list of file paths for reading.

    File-like or string inputs are read as a cache file; lists are
    copied, sorted, and sieved to the ``[start, end)`` interval when
    the file names can be parsed for GPS segments.
    """
    # read a cache file directly, sorting and sieving on-the-fly
    if isinstance(cache, FILE_LIKE + string_types):
        return read_cache(cache, sort=file_segment,
                          segment=Segment(start, end))
    # copy so we don't modify the caller's list
    cache = type(cache)(cache)
    try:
        cache.sort(key=file_segment)
    except ValueError:
        # cannot parse GPS segments from the names: return unsieved
        return cache
    if start is None:
        start = file_segment(cache[0])[0]
    if end is None:
        end = file_segment(cache[-1])[-1]
    return sieve(cache, segment=Segment(start, end))
Preprocess a list of file paths for reading .
22,497
def progress_bar(**kwargs):
    """Create a `tqdm.tqdm` progress bar with package defaults."""
    opts = {
        'desc': 'Processing',
        'file': sys.stdout,
        'bar_format': TQDM_BAR_FORMAT,
    }
    opts.update(kwargs)
    bar = tqdm(**opts)
    if not bar.disable:
        # strip the ': ' suffix from the description for our bar format
        bar.desc = bar.desc.rstrip(': ')
        bar.refresh()
    return bar
Create a tqdm . tqdm progress bar
22,498
def num_taps(sample_rate, transitionwidth, gpass, gstop):
    """Return the number of taps for an FIR filter with the given shape.

    ``gpass``/``gstop`` are the pass-band/stop-band losses in dB,
    ``transitionwidth`` the transition-band width in Hz.
    """
    # convert dB losses to linear ripple amplitudes
    ripple_pass = 10 ** (-gpass / 10.)
    ripple_stop = 10 ** (-gstop / 10.)
    return int(2 / 3. * log10(1 / (10 * ripple_pass * ripple_stop)) *
               sample_rate / transitionwidth)
Returns the number of taps for an FIR filter with the given shape
22,499
def is_zpk(zpktup):
    """Determine whether the given tuple is a ZPK-format filter definition.

    A ZPK is a 3-tuple (or list) of ``(zeros, poles, gain)``, where
    zeros and poles are array-like and gain is a real scalar.

    Returns
    -------
    iszpk : `bool`
        `True` if ``zpktup`` looks like a ZPK definition
    """
    import numbers
    arraylike = (list, tuple, numpy.ndarray)
    return (isinstance(zpktup, (tuple, list)) and
            len(zpktup) == 3 and
            isinstance(zpktup[0], arraylike) and
            isinstance(zpktup[1], arraylike) and
            # accept any real scalar gain, not only `float`
            isinstance(zpktup[2], numbers.Real))
Determine whether the given tuple is a ZPK - format filter definition