22,700
def correlate(self, mfilter, window='hanning', detrend='linear',
              whiten=False, wduration=2, highpass=None, **asd_kw):
    self.is_compatible(mfilter)
    if whiten is True:
        fftlength = asd_kw.pop('fftlength', _fft_length_default(self.dt))
        overlap = asd_kw.pop('overlap', None)
        if overlap is None:
            overlap = recommended_overlap(window) * fftlength
        asd = self.asd(fftlength, overlap, window=window, **asd_kw)
        npad = int(wduration * mfilter.sample_rate.decompose().value / 2)
        mfilter = mfilter.pad(npad)
        with numpy.errstate(all='raise'):
            in_ = self.whiten(window=window, fduration=wduration, asd=asd,
                              highpass=highpass, detrend=detrend)
            mfilter = mfilter.whiten(window=window, fduration=wduration,
                                     asd=asd, highpass=highpass,
                                     detrend=detrend)[npad:-npad]
    else:
        in_ = self.detrend(detrend)
        mfilter = mfilter.detrend(detrend)
    stdev = numpy.sqrt((mfilter.value ** 2).sum())
    snr = in_.convolve(mfilter[::-1], window=window) / stdev
    snr.__array_finalize__(self)
    return snr
Cross-correlate this TimeSeries with another signal
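The method above is a matched filter: the data and template are (optionally) whitened, then convolved, and the result is normalised to an SNR-like series. A minimal, illustrative usage sketch follows, assuming gwpy is installed and this method is available as TimeSeries.correlate; the white-noise data and one-second template are arbitrary placeholder values and may need tuning.

# Illustrative sketch only; data and template values are made up.
import numpy
from gwpy.timeseries import TimeSeries

data = TimeSeries(numpy.random.normal(size=16 * 4096), sample_rate=4096)
template = TimeSeries(numpy.sin(numpy.linspace(0, 20, 4096)),
                      sample_rate=4096)
snr = data.correlate(template, whiten=True)  # matched-filter SNR series
print(snr.abs().max())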
22,701
def detrend(self, detrend='constant'):
    data = signal.detrend(self.value, type=detrend).view(type(self))
    data.__metadata_finalize__(self)
    data._unit = self.unit
    return data
Remove the trend from this TimeSeries
22,702
def notch(self, frequency, type='iir', filtfilt=True, **kwargs):
    zpk = filter_design.notch(frequency, self.sample_rate.value,
                              type=type, **kwargs)
    return self.filter(*zpk, filtfilt=filtfilt)
Notch out a frequency in this TimeSeries.
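A short usage sketch, assuming gwpy's TimeSeries.notch; the 60 Hz mains line and its 120 Hz harmonic are typical targets, and the fake white-noise data here is purely illustrative.

import numpy
from gwpy.timeseries import TimeSeries

data = TimeSeries(numpy.random.normal(size=8 * 1024), sample_rate=1024)
clean = data.notch(60)     # zero-phase filtering by default (filtfilt=True)
clean = clean.notch(120)   # calls can be chained for harmonics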
22,703
def q_gram(self, qrange=qtransform.DEFAULT_QRANGE,
           frange=qtransform.DEFAULT_FRANGE,
           mismatch=qtransform.DEFAULT_MISMATCH,
           snrthresh=5.5, **kwargs):
    qscan, _ = qtransform.q_scan(self, mismatch=mismatch, qrange=qrange,
                                 frange=frange, **kwargs)
    qgram = qscan.table(snrthresh=snrthresh)
    return qgram
Scan a TimeSeries using the multi-Q transform and return an EventTable of the most significant tiles
22,704
def q_transform(self, qrange=qtransform.DEFAULT_QRANGE,
                frange=qtransform.DEFAULT_FRANGE, gps=None, search=.5,
                tres="<default>", fres="<default>", logf=False,
                norm='median', mismatch=qtransform.DEFAULT_MISMATCH,
                outseg=None, whiten=True, fduration=2, highpass=None,
                **asd_kw):
    from ..frequencyseries import FrequencySeries
    if whiten is True:
        window = asd_kw.pop('window', 'hann')
        fftlength = asd_kw.pop('fftlength', _fft_length_default(self.dt))
        overlap = asd_kw.pop('overlap', None)
        if overlap is None and fftlength == self.duration.value:
            asd_kw['method'] = DEFAULT_FFT_METHOD
            overlap = 0
        elif overlap is None:
            overlap = recommended_overlap(window) * fftlength
        whiten = self.asd(fftlength, overlap, window=window, **asd_kw)
    if isinstance(whiten, FrequencySeries):
        with numpy.errstate(all='raise'):
            data = self.whiten(asd=whiten, fduration=fduration,
                               highpass=highpass)
    else:
        data = self
    if gps is None:
        search = None
    elif search is not None:
        search = Segment(gps - search / 2, gps + search / 2) & self.span
    qgram, _ = qtransform.q_scan(data, frange=frange, qrange=qrange,
                                 norm=norm, mismatch=mismatch,
                                 search=search)
    return qgram.interpolate(tres=tres, fres=fres, logf=logf,
                             outseg=outseg)
Scan a TimeSeries using the multi-Q transform and return an interpolated high-resolution spectrogram
22,705
def _update_index(self, axis, key, value):
    if value is None:
        return delattr(self, key)
    _key = "_{}".format(key)
    index = "{[0]}index".format(axis)
    unit = "{[0]}unit".format(axis)
    if not isinstance(value, Quantity):
        try:
            value = Quantity(value, getattr(self, unit))
        except TypeError:
            value = Quantity(float(value), getattr(self, unit))
    try:
        curr = getattr(self, _key)
    except AttributeError:
        delattr(self, index)
    else:
        if (value is None or getattr(self, key) is None
                or not value.unit.is_equivalent(curr.unit)
                or value != curr):
            delattr(self, index)
    setattr(self, _key, value)
    return value
Update the current axis index based on a given key or value
22,706
def _set_index(self, key, index):
    axis = key[0]
    origin = "{}0".format(axis)
    delta = "d{}".format(axis)
    if index is None:
        return delattr(self, key)
    if not isinstance(index, Index):
        try:
            unit = index.unit
        except AttributeError:
            unit = getattr(self, "_default_{}unit".format(axis))
        index = Index(index, unit=unit, copy=False)
    setattr(self, origin, index[0])
    if index.regular:
        setattr(self, delta, index[1] - index[0])
    else:
        delattr(self, delta)
    setattr(self, "_{}".format(key), index)
Set a new index array for this series
22,707
def x0(self):
    try:
        return self._x0
    except AttributeError:
        self._x0 = Quantity(0, self.xunit)
        return self._x0
X-axis coordinate of the first data point
22,708
def dx(self):
    try:
        return self._dx
    except AttributeError:
        try:
            self._xindex
        except AttributeError:
            self._dx = Quantity(1, self.xunit)
        else:
            if not self.xindex.regular:
                raise AttributeError("This series has an irregular x-axis "
                                     "index, so 'dx' is not well defined")
            self._dx = self.xindex[1] - self.xindex[0]
        return self._dx
X-axis sample separation
22,709
def xindex(self):
    try:
        return self._xindex
    except AttributeError:
        self._xindex = Index.define(self.x0, self.dx, self.shape[0])
        return self._xindex
Positions of the data on the x-axis
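The x0/dx/xindex trio above is lazy bookkeeping: the full index array is only built on first access, from the origin and step. A small sketch, assuming gwpy's Series type; the numbers are arbitrary.

from gwpy.types import Series

s = Series([1, 2, 3, 4], x0=10, dx=0.5)
print(s.x0)      # 10.0 (dimensionless by default)
print(s.dx)      # 0.5
print(s.xindex)  # roughly [10.0, 10.5, 11.0, 11.5], built on demand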
22,710
def xunit(self):
    try:
        return self._dx.unit
    except AttributeError:
        try:
            return self._x0.unit
        except AttributeError:
            return self._default_xunit
Unit of the x-axis index
22,711
def plot(self, method='plot', **kwargs):
    from ..plot import Plot
    from ..plot.text import default_unit_label
    if kwargs.get('xscale') == 'log' and self.x0.value == 0:
        kwargs.setdefault('xlim', (self.dx.value, self.xspan[1]))
    plot = Plot(self, method=method, **kwargs)
    default_unit_label(plot.gca().yaxis, self.unit)
    return plot
Plot the data for this series
22,712
def step(self, **kwargs):
    kwargs.setdefault('linestyle', kwargs.pop('where', 'steps-post'))
    data = self.append(self.value[-1:], inplace=False)
    return data.plot(**kwargs)
Create a step plot of this series
22,713
def shift(self, delta):
    self.x0 = self.x0 + Quantity(delta, self.xunit)
Shift this Series forward on the X-axis by delta
22,714
def value_at(self, x):
    x = Quantity(x, self.xindex.unit).value
    try:
        idx = (self.xindex.value == x).nonzero()[0][0]
    except IndexError as e:
        e.args = ("Value %r not found in array index" % x,)
        raise
    return self[idx]
Return the value of this Series at the given xindex value
22,715
def is_contiguous(self, other, tol=1 / 2. ** 18):
    self.is_compatible(other)
    if isinstance(other, type(self)):
        if abs(float(self.xspan[1] - other.xspan[0])) < tol:
            return 1
        elif abs(float(other.xspan[1] - self.xspan[0])) < tol:
            return -1
        return 0
    elif type(other) in [list, tuple, numpy.ndarray]:
        return 1
Check whether other is contiguous with self.
22,716
def is_compatible(self, other):
    if isinstance(other, type(self)):
        try:
            if not self.dx == other.dx:
                raise ValueError("%s sample sizes do not match: %s vs %s."
                                 % (type(self).__name__, self.dx,
                                    other.dx))
        except AttributeError:
            raise ValueError("Series with irregular xindexes cannot "
                             "be compatible")
        if not self.unit == other.unit and not (
                self.unit in [dimensionless_unscaled, None]
                and other.unit in [dimensionless_unscaled, None]):
            raise ValueError("%s units do not match: %s vs %s." % (
                type(self).__name__, str(self.unit), str(other.unit)))
    else:
        arr = numpy.asarray(other)
        if arr.ndim != self.ndim:
            raise ValueError("Dimensionality does not match")
        if arr.dtype != self.dtype:
            warn("Array data types do not match: %s vs %s"
                 % (self.dtype, other.dtype))
    return True
Check whether this series and other have compatible metadata
22,717
def prepend(self, other, inplace=True, pad=None, gap=None, resize=True):
    out = other.append(self, inplace=False, gap=gap, pad=pad,
                       resize=resize)
    if inplace:
        self.resize(out.shape, refcheck=False)
        self[:] = out[:]
        self.x0 = out.x0.copy()
        del out
        return self
    return out
Connect another series onto the start of the current one.
22,718
def update(self, other, inplace=True):
    return self.append(other, inplace=inplace, resize=False)
Update this series by appending new data from another and dropping the same amount of data off the start.
22,719
def crop(self, start=None, end=None, copy=False):
    x0, x1 = self.xspan
    xtype = type(x0)
    if isinstance(start, Quantity):
        start = start.to(self.xunit).value
    if isinstance(end, Quantity):
        end = end.to(self.xunit).value
    if start == x0:
        start = None
    elif start is not None and xtype(start) < x0:
        warn('%s.crop given start smaller than current start, '
             'crop will begin when the Series actually starts.'
             % type(self).__name__)
        start = None
    if end == x1:
        end = None
    if end is not None and xtype(end) > x1:
        warn('%s.crop given end larger than current end, '
             'crop will end when the Series actually ends.'
             % type(self).__name__)
        end = None
    if start is None:
        idx0 = None
    else:
        idx0 = int((xtype(start) - x0) // self.dx.value)
    if end is None:
        idx1 = None
    else:
        idx1 = int((xtype(end) - x0) // self.dx.value)
        if idx1 >= self.size:
            idx1 = None
    if copy:
        return self[idx0:idx1].copy()
    return self[idx0:idx1]
Crop this series to the given x-axis extent.
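A hypothetical crop usage on the same Series type; note how the start/end values are interpreted in the series' x-axis units, and out-of-range bounds only trigger a warning.

from gwpy.types import Series

s = Series(range(100), x0=0, dx=1)
mid = s.crop(start=25, end=75)  # keep samples with 25 <= x < 75
print(mid.xspan)                # expected roughly (25.0, 75.0)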
22,720
def pad(self, pad_width, **kwargs):
    kwargs.setdefault('mode', 'constant')
    if isinstance(pad_width, int):
        pad_width = (pad_width,)
    new = numpy.pad(self, pad_width, **kwargs).view(type(self))
    new.__metadata_finalize__(self)
    new._unit = self.unit
    new.x0 -= self.dx * pad_width[0]
    return new
Pad this series to a new size
22,721
def inject(self, other):
    self.is_compatible(other)
    if (self.xunit == second) and (other.xspan[0] < self.xspan[0]):
        other = other.crop(start=self.xspan[0])
    if (self.xunit == second) and (other.xspan[1] > self.xspan[1]):
        other = other.crop(end=self.xspan[1])
    ox0 = other.x0.to(self.x0.unit)
    idx = ((ox0 - self.x0) / self.dx).value
    if not idx.is_integer():
        warn('Series have overlapping xspan but their x-axis values are '
             'uniformly offset. Returning a copy of the original Series.')
        return self.copy()
    slice_ = slice(int(idx), int(idx) + other.size)
    out = self.copy()
    out.value[slice_] += other.value
    return out
Add two compatible Series along their shared x-axis values.
22,722
def _select_query_method(cls, url):
    if urlparse(url).netloc.startswith('geosegdb.'):
        return cls.query_segdb
    return cls.query_dqsegdb
Select the correct query method based on the URL
22,723
def query(cls, flag, *args, **kwargs):
    query_ = _select_query_method(
        cls, kwargs.get('url', DEFAULT_SEGMENT_SERVER))
    return query_(flag, *args, **kwargs)
Query for segments of a given flag
22,724
def query_segdb(cls, flag, *args, **kwargs):
    warnings.warn("query_segdb is deprecated and will be removed in a "
                  "future release", DeprecationWarning)
    qsegs = _parse_query_segments(args, cls.query_segdb)
    try:
        flags = DataQualityDict.query_segdb([flag], qsegs, **kwargs)
    except TypeError as exc:
        if 'DataQualityDict' in str(exc):
            raise TypeError(str(exc).replace('DataQualityDict',
                                             cls.__name__))
        else:
            raise
    if len(flags) > 1:
        raise RuntimeError("Multiple flags returned for single query, "
                           "something went wrong:\n %s"
                           % '\n '.join(flags.keys()))
    elif len(flags) == 0:
        raise RuntimeError("No flags returned for single query, "
                           "something went wrong.")
    return flags[flag]
Query the initial LIGO segment database for the given flag
22,725
def query_dqsegdb(cls, flag, *args, **kwargs):
    qsegs = _parse_query_segments(args, cls.query_dqsegdb)
    url = kwargs.pop('url', DEFAULT_SEGMENT_SERVER)
    out = cls(name=flag)
    if out.ifo is None or out.tag is None:
        raise ValueError("Cannot parse ifo or tag (name) for flag %r"
                         % flag)
    for start, end in qsegs:
        if float(end) == +inf:
            end = to_gps('now').seconds
        try:
            data = query_segments(flag, int(start), int(end), host=url)
        except HTTPError as exc:
            if exc.code == 404:
                exc.msg += ' [{0}]'.format(flag)
            raise
        new = cls.read(BytesIO(json.dumps(data).encode('utf-8')),
                       format='json')
        segl = SegmentList([Segment(start, end)])
        new.known &= segl
        new.active &= segl
        out += new
    out.description = new.description
    out.isgood = new.isgood
    return out
Query the advanced LIGO DQSegDB for the given flag
22,726
def fetch_open_data(cls, flag, start, end, **kwargs):
    start = to_gps(start).gpsSeconds
    end = to_gps(end).gpsSeconds
    known = [(start, end)]
    active = timeline.get_segments(flag, start, end, **kwargs)
    return cls(flag.replace('_', ':', 1), known=known, active=active,
               label=flag)
Fetch Open Data timeline segments into a flag.
22,727
def read(cls, source, *args, **kwargs):
    if 'flag' in kwargs:
        warnings.warn('\'flag\' keyword was renamed \'name\', this '
                      'warning will result in an error in the future')
        kwargs.setdefault('name', kwargs.pop('flag'))
    coalesce = kwargs.pop('coalesce', False)

    def combiner(flags):
        out = flags[0]
        for flag in flags[1:]:
            out.known += flag.known
            out.active += flag.active
        if coalesce:
            return out.coalesce()
        return out

    return io_read_multi(combiner, cls, source, *args, **kwargs)
Read segments from file into a DataQualityFlag.
22,728
def from_veto_def(cls, veto):
    name = '%s:%s' % (veto.ifo, veto.name)
    try:
        name += ':%d' % int(veto.version)
    except TypeError:
        pass
    if veto.end_time == 0:
        veto.end_time = +inf
    known = Segment(veto.start_time, veto.end_time)
    pad = (veto.start_pad, veto.end_pad)
    return cls(name=name, known=[known], category=veto.category,
               description=veto.comment, padding=pad)
Define a DataQualityFlag from a VetoDef
22,729
def populate(self, source=DEFAULT_SEGMENT_SERVER, segments=None,
             pad=True, **kwargs):
    tmp = DataQualityDict()
    tmp[self.name] = self
    tmp.populate(source=source, segments=segments, pad=pad, **kwargs)
    return tmp[self.name]
Query the segment database for this flag's active segments.
22,730
def contract(self, x):
    self.active = self.active.contract(x)
    return self.active
Contract each of the active Segments by x seconds.
22,731
def protract(self, x):
    self.active = self.active.protract(x)
    return self.active
Protract each of the active Segments by x seconds.
22,732
def pad(self, *args, **kwargs):
    if not args:
        start, end = self.padding
    else:
        start, end = args
    if kwargs.pop('inplace', False):
        new = self
    else:
        new = self.copy()
    if kwargs:
        raise TypeError("unexpected keyword argument %r"
                        % list(kwargs.keys())[0])
    new.known = [(s[0] + start, s[1] + end) for s in self.known]
    new.active = [(s[0] + start, s[1] + end) for s in self.active]
    return new
Apply a padding to each segment in this DataQualityFlag
22,733
def round(self, contract=False):
    def _round(seg):
        if contract:
            a = type(seg[0])(ceil(seg[0]))
            b = type(seg[1])(floor(seg[1]))
        else:
            a = type(seg[0])(floor(seg[0]))
            b = type(seg[1])(ceil(seg[1]))
        if a >= b:
            return type(seg)(0, 0)
        return type(seg)(a, b)

    new = self.copy()
    new.active = type(new.active)(map(_round, new.active))
    new.known = type(new.known)(map(_round, new.known))
    return new.coalesce()
Round this flag to integer segments.
22,734
def coalesce(self):
    self.known = self.known.coalesce()
    self.active = self.active.coalesce()
    self.active = (self.known & self.active).coalesce()
    return self
Coalesce the segments for this flag.
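Note the last line above: active segments are clipped to the known segments, so coalescing also enforces the invariant that a flag can only be active where its state is known. A sketch, assuming gwpy's DataQualityFlag; the segment values are arbitrary.

from gwpy.segments import DataQualityFlag

flag = DataQualityFlag('X1:TEST-FLAG:1',
                       known=[(0, 10), (5, 20)],
                       active=[(2, 4), (3, 8), (15, 30)])
flag.coalesce()
print(flag.known)   # merged: [0, 20)
print(flag.active)  # merged and clipped to known: [2, 8), [15, 20)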
22,735
def _parse_name(self, name):
    if name is None:
        self.ifo = None
        self.tag = None
        self.version = None
    elif re_IFO_TAG_VERSION.match(name):
        match = re_IFO_TAG_VERSION.match(name).groupdict()
        self.ifo = match['ifo']
        self.tag = match['tag']
        self.version = int(match['version'])
    elif re_IFO_TAG.match(name):
        match = re_IFO_TAG.match(name).groupdict()
        self.ifo = match['ifo']
        self.tag = match['tag']
        self.version = None
    elif re_TAG_VERSION.match(name):
        match = re_TAG_VERSION.match(name).groupdict()
        self.ifo = None
        self.tag = match['tag']
        self.version = int(match['version'])
    else:
        raise ValueError("No flag name structure detected in '%s', flags "
                         "should be named as '{ifo}:{tag}:{version}'. "
                         "For arbitrary strings, use the "
                         "`DataQualityFlag.label` attribute" % name)
    return self.ifo, self.tag, self.version
Internal method to parse a string name into its constituent ifo, name, and version components.
22,736
def query_segdb(cls, flags, *args, **kwargs):
    warnings.warn("query_segdb is deprecated and will be removed in a "
                  "future release", DeprecationWarning)
    qsegs = _parse_query_segments(args, cls.query_segdb)
    url = kwargs.pop('url', DEFAULT_SEGMENT_SERVER)
    if kwargs.pop('on_error', None) is not None:
        warnings.warn("DataQualityDict.query_segdb doesn't accept the "
                      "on_error keyword argument")
    if kwargs.keys():
        raise TypeError("DataQualityDict.query_segdb has no keyword "
                        "argument '%s'" % list(kwargs.keys())[0])
    from glue.segmentdb import (segmentdb_utils as segdb_utils,
                                query_engine as segdb_engine)
    connection = segdb_utils.setup_database(url)
    engine = segdb_engine.LdbdQueryEngine(connection)
    segdefs = []
    for flag in flags:
        dqflag = DataQualityFlag(name=flag)
        ifo = dqflag.ifo
        name = dqflag.tag
        if dqflag.version is None:
            vers = '*'
        else:
            vers = dqflag.version
        for gpsstart, gpsend in qsegs:
            if float(gpsend) == +inf:
                gpsend = to_gps('now').seconds
            gpsstart = float(gpsstart)
            if not gpsstart.is_integer():
                raise ValueError("Segment database queries can only "
                                 "operate on integer GPS times")
            gpsend = float(gpsend)
            if not gpsend.is_integer():
                raise ValueError("Segment database queries can only "
                                 "operate on integer GPS times")
            segdefs += segdb_utils.expand_version_number(
                engine, (ifo, name, vers, gpsstart, gpsend, 0, 0))
    segs = segdb_utils.query_segments(engine, 'segment', segdefs)
    segsum = segdb_utils.query_segments(engine, 'segment_summary',
                                        segdefs)
    out = cls()
    for definition, segments, summary in zip(segdefs, segs, segsum):
        flag = ':'.join(map(str, definition[:3]))
        name = flag.rsplit(':', 1)[0]
        if flag.endswith('*'):
            flag = name
            key = name
        elif flag not in flags and name in flags:
            key = name
        else:
            key = flag
        if key not in out:
            out[key] = DataQualityFlag(name=flag)
        out[key].known.extend(summary)
        out[key].active.extend(segments)
    return out
Query the initial LIGO segment database for a list of flags.
22,737
def query_dqsegdb(cls, flags, *args, **kwargs):
    on_error = kwargs.pop('on_error', 'raise').lower()
    if on_error not in ['raise', 'warn', 'ignore']:
        raise ValueError("on_error must be one of 'raise', 'warn', "
                         "or 'ignore'")
    qsegs = _parse_query_segments(args, cls.query_dqsegdb)
    inq = Queue()
    outq = Queue()
    for i in range(len(flags)):
        t = _QueryDQSegDBThread(inq, outq, qsegs, **kwargs)
        t.setDaemon(True)
        t.start()
    for i, flag in enumerate(flags):
        inq.put((i, flag))
    inq.join()
    outq.join()
    new = cls()
    results = list(zip(*sorted([outq.get() for i in range(len(flags))],
                               key=lambda x: x[0])))[1]
    for result, flag in zip(results, flags):
        if isinstance(result, Exception):
            result.args = ('%s [%s]' % (str(result), str(flag)),)
            if on_error == 'ignore':
                pass
            elif on_error == 'warn':
                warnings.warn(str(result))
            else:
                raise result
        else:
            new[flag] = result
    return new
Query the advanced LIGO DQSegDB for a list of flags.
22,738
def read(cls, source, names=None, format=None, **kwargs):
    on_missing = kwargs.pop('on_missing', 'error')
    coalesce = kwargs.pop('coalesce', False)
    if 'flags' in kwargs:
        warnings.warn('\'flags\' keyword was renamed \'names\', this '
                      'warning will result in an error in the future')
        names = kwargs.pop('flags')

    def combiner(inputs):
        out = cls()
        required = set(names or [])
        found = set(name for dqdict in inputs for name in dqdict)
        for name in required - found:
            msg = '{!r} not found in any input file'.format(name)
            if on_missing == 'ignore':
                continue
            if on_missing == 'warn':
                warnings.warn(msg)
            else:
                raise ValueError(msg)
        for dqdict in inputs:
            for flag in dqdict:
                try:
                    out[flag].known.extend(dqdict[flag].known)
                    out[flag].active.extend(dqdict[flag].active)
                except KeyError:
                    out[flag] = dqdict[flag]
        if coalesce:
            return out.coalesce()
        return out

    return io_read_multi(combiner, cls, source, names=names,
                         format=format, on_missing='ignore', **kwargs)
Read segments from file into a DataQualityDict
22,739
def from_veto_definer_file(cls, fp, start=None, end=None, ifo=None,
                           format='ligolw'):
    if format != 'ligolw':
        raise NotImplementedError("Reading veto definer from non-ligolw "
                                  "format file is not currently "
                                  "supported")
    with get_readable_fileobj(fp, show_progress=False) as fobj:
        from ..io.ligolw import read_table as read_ligolw_table
        veto_def_table = read_ligolw_table(fobj, 'veto_definer')
    if start is not None:
        start = to_gps(start)
    if end is not None:
        end = to_gps(end)
    out = cls()
    for row in veto_def_table:
        if ifo and row.ifo != ifo:
            continue
        if start and 0 < row.end_time <= start:
            continue
        elif start:
            row.start_time = max(row.start_time, start)
        if end and row.start_time >= end:
            continue
        elif end and not row.end_time:
            row.end_time = end
        elif end:
            row.end_time = min(row.end_time, end)
        flag = DataQualityFlag.from_veto_def(row)
        if flag.name in out:
            out[flag.name].known.extend(flag.known)
            out[flag.name].known.coalesce()
        else:
            out[flag.name] = flag
    return out
Read a DataQualityDict from a LIGO_LW XML VetoDefinerTable.
22,740
def from_ligolw_tables(cls, segmentdeftable, segmentsumtable,
                       segmenttable, names=None, gpstype=LIGOTimeGPS,
                       on_missing='error'):
    out = cls()
    id_ = dict()
    for row in segmentdeftable:
        ifos = sorted(row.instruments)
        ifo = ''.join(ifos) if ifos else None
        tag = row.name
        version = row.version
        name = ':'.join([str(k) for k in (ifo, tag, version)
                         if k is not None])
        if names is None or name in names:
            out[name] = DataQualityFlag(name)
            thisid = int(row.segment_def_id)
            try:
                id_[name].append(thisid)
            except (AttributeError, KeyError):
                id_[name] = [thisid]
    for flag in names or []:
        if flag not in out and on_missing != 'ignore':
            msg = ("no segment definition found for flag={0!r} in "
                   "file".format(flag))
            if on_missing == 'warn':
                warnings.warn(msg)
            else:
                raise ValueError(msg)

    def _parse_segments(table, listattr):
        for row in table:
            for flag in out:
                if int(row.segment_def_id) in id_[flag]:
                    getattr(out[flag], listattr).append(
                        Segment(*map(gpstype, row.segment)))
                    break

    _parse_segments(segmentsumtable, "known")
    _parse_segments(segmenttable, "active")
    return out
Build a DataQualityDict from a set of LIGO_LW segment tables
22,741
def to_ligolw_tables(self, ilwdchar_compat=None, **attrs):
    if ilwdchar_compat is None:
        warnings.warn("ilwdchar_compat currently defaults to `True`, "
                      "but this will change to `False` in the future, to "
                      "maintain compatibility in future releases, "
                      "manually specify `ilwdchar_compat=True`",
                      PendingDeprecationWarning)
        ilwdchar_compat = True
    if ilwdchar_compat:
        from glue.ligolw import lsctables
    else:
        from ligo.lw import lsctables
    from ..io.ligolw import to_table_type as to_ligolw_table_type
    SegmentDefTable = lsctables.SegmentDefTable
    SegmentSumTable = lsctables.SegmentSumTable
    SegmentTable = lsctables.SegmentTable
    segdeftab = lsctables.New(SegmentDefTable)
    segsumtab = lsctables.New(SegmentSumTable)
    segtab = lsctables.New(SegmentTable)

    def _write_attrs(table, row):
        for key, val in attrs.items():
            setattr(row, key, to_ligolw_table_type(val, table, key))

    for flag in self.values():
        segdef = segdeftab.RowType()
        for col in segdeftab.columnnames:
            setattr(segdef, col, None)
        segdef.instruments = {flag.ifo}
        segdef.name = flag.tag
        segdef.version = flag.version
        segdef.comment = flag.description
        segdef.insertion_time = to_gps(
            datetime.datetime.now()).gpsSeconds
        segdef.segment_def_id = SegmentDefTable.get_next_id()
        _write_attrs(segdeftab, segdef)
        segdeftab.append(segdef)
        for vseg in flag.known:
            segsum = segsumtab.RowType()
            for col in segsumtab.columnnames:
                setattr(segsum, col, None)
            segsum.segment_def_id = segdef.segment_def_id
            segsum.segment = map(LIGOTimeGPS, vseg)
            segsum.comment = None
            segsum.segment_sum_id = SegmentSumTable.get_next_id()
            _write_attrs(segsumtab, segsum)
            segsumtab.append(segsum)
        for aseg in flag.active:
            seg = segtab.RowType()
            for col in segtab.columnnames:
                setattr(seg, col, None)
            seg.segment_def_id = segdef.segment_def_id
            seg.segment = map(LIGOTimeGPS, aseg)
            seg.segment_id = SegmentTable.get_next_id()
            _write_attrs(segtab, seg)
            segtab.append(seg)
    return segdeftab, segsumtab, segtab
Convert this DataQualityDict into a trio of LIGO_LW segment tables
22,742
def populate(self, source=DEFAULT_SEGMENT_SERVER, segments=None,
             pad=True, on_error='raise', **kwargs):
    if on_error not in ['raise', 'warn', 'ignore']:
        raise ValueError("on_error must be one of 'raise', 'warn', "
                         "or 'ignore'")
    source = urlparse(source)
    if source.netloc and segments is not None:
        segments = SegmentList(map(Segment, segments))
        tmp = type(self).query(self.keys(), segments,
                               url=source.geturl(), on_error=on_error,
                               **kwargs)
    elif not source.netloc:
        tmp = type(self).read(source.geturl(), **kwargs)
    for key in self:
        if segments is None and source.netloc:
            try:
                tmp = {key: self[key].query(self[key].name,
                                            self[key].known, **kwargs)}
            except URLError as exc:
                if on_error == 'ignore':
                    pass
                elif on_error == 'warn':
                    warnings.warn('Error querying for %s: %s'
                                  % (key, exc))
                else:
                    raise
                continue
        self[key].known &= tmp[key].known
        self[key].active = tmp[key].active
        if pad:
            self[key] = self[key].pad(inplace=True)
            if segments is not None:
                self[key].known &= segments
                self[key].active &= segments
    return self
Query the segment database for each flag's active segments.
22,743
def copy(self, deep=False):
    if deep:
        return deepcopy(self)
    return super(DataQualityDict, self).copy()
Build a copy of this dictionary.
22,744
def union(self):
    usegs = reduce(operator.or_, self.values())
    usegs.name = ' | '.join(self.keys())
    return usegs
Return the union of all flags in this dict
22,745
def intersection(self):
    isegs = reduce(operator.and_, self.values())
    isegs.name = ' & '.join(self.keys())
    return isegs
Return the intersection of all flags in this dict
22,746
def rc_params(usetex=None):
    if usetex is None:
        usetex = bool_env(
            'GWPY_USETEX',
            default=rcParams['text.usetex'] or tex.has_tex())
    rcp = GWPY_RCPARAMS.copy()
    if usetex:
        rcp.update(GWPY_TEX_RCPARAMS)
    return rcp
Returns a new matplotlib.RcParams with updated GWpy parameters
22,747
def get_subplot_params(figsize):
    width, height = figsize
    try:
        left, right = SUBPLOT_WIDTH[width]
    except KeyError:
        left = right = None
    try:
        bottom, top = SUBPLOT_HEIGHT[height]
    except KeyError:
        bottom = top = None
    return SubplotParams(left=left, bottom=bottom, right=right, top=top)
Return sensible default SubplotParams for a figure of the given size
22,748
def to_string(input_):
    usetex = rcParams['text.usetex']
    if isinstance(input_, units.UnitBase):
        return input_.to_string('latex_inline')
    if isinstance(input_, (float, int)) and usetex:
        return tex.float_to_latex(input_)
    if usetex:
        return tex.label_to_latex(input_)
    return str(input_)
Format an input for representation as text
22,749
def default_unit_label(axis, unit):
    if not axis.isDefault_label:
        return
    label = axis.set_label_text(
        unit.to_string('latex_inline_dimensional'))
    axis.isDefault_label = True
    return label.get_text()
Set the default label for an axis from a ~astropy.units.Unit
22,750
def initialize(self):
    try:
        conn = ldap.initialize('{0}://{1}:{2}'.format(
            current_app.config['LDAP_SCHEMA'],
            current_app.config['LDAP_HOST'],
            current_app.config['LDAP_PORT']))
        conn.set_option(ldap.OPT_NETWORK_TIMEOUT,
                        current_app.config['LDAP_TIMEOUT'])
        conn = self._set_custom_options(conn)
        conn.protocol_version = ldap.VERSION3
        if current_app.config['LDAP_USE_TLS']:
            conn.start_tls_s()
        return conn
    except ldap.LDAPError as e:
        raise LDAPException(self.error(e.args))
Initialize a connection to the LDAP server.
22,751
def bind(self):
    conn = self.initialize
    try:
        conn.simple_bind_s(current_app.config['LDAP_USERNAME'],
                           current_app.config['LDAP_PASSWORD'])
        return conn
    except ldap.LDAPError as e:
        raise LDAPException(self.error(e.args))
Attempts to bind to the LDAP server using the credentials of the service account.
22,752
def bind_user(self, username, password):
    user_dn = self.get_object_details(user=username, dn_only=True)
    if user_dn is None:
        return
    try:
        conn = self.initialize
        conn.simple_bind_s(user_dn.decode('utf-8'), password)
        return True
    except ldap.LDAPError:
        return
Attempts to bind a user to the LDAP server using the credentials supplied.
22,753
def get_user_groups(self, user):
    conn = self.bind
    try:
        if current_app.config['LDAP_OPENLDAP']:
            fields = [str(
                current_app.config['LDAP_GROUP_MEMBER_FILTER_FIELD'])]
            records = conn.search_s(
                current_app.config['LDAP_BASE_DN'], ldap.SCOPE_SUBTREE,
                ldap_filter.filter_format(
                    current_app.config['LDAP_GROUP_MEMBER_FILTER'],
                    (self.get_object_details(user, dn_only=True),)),
                fields)
        else:
            records = conn.search_s(
                current_app.config['LDAP_BASE_DN'], ldap.SCOPE_SUBTREE,
                ldap_filter.filter_format(
                    current_app.config['LDAP_USER_OBJECT_FILTER'],
                    (user,)),
                [current_app.config['LDAP_USER_GROUPS_FIELD']])
        conn.unbind_s()
        if records:
            if current_app.config['LDAP_OPENLDAP']:
                group_member_filter = \
                    current_app.config['LDAP_GROUP_MEMBER_FILTER_FIELD']
                if sys.version_info[0] > 2:
                    groups = [
                        record[1][group_member_filter][0].decode('utf-8')
                        for record in records]
                else:
                    groups = [record[1][group_member_filter][0]
                              for record in records]
                return groups
            else:
                if current_app.config['LDAP_USER_GROUPS_FIELD'] in \
                        records[0][1]:
                    groups = records[0][1][
                        current_app.config['LDAP_USER_GROUPS_FIELD']]
                    result = [re.findall(b'(?:cn=|CN=)(.*?),', group)[0]
                              for group in groups]
                    if sys.version_info[0] > 2:
                        result = [r.decode('utf-8') for r in result]
                    return result
    except ldap.LDAPError as e:
        raise LDAPException(self.error(e.args))
Returns a list with the user's groups or None if unsuccessful.
22,754
def get_group_members(self, group):
    conn = self.bind
    try:
        records = conn.search_s(
            current_app.config['LDAP_BASE_DN'], ldap.SCOPE_SUBTREE,
            ldap_filter.filter_format(
                current_app.config['LDAP_GROUP_OBJECT_FILTER'],
                (group,)),
            [current_app.config['LDAP_GROUP_MEMBERS_FIELD']])
        conn.unbind_s()
        if records:
            if current_app.config['LDAP_GROUP_MEMBERS_FIELD'] in \
                    records[0][1]:
                members = records[0][1][
                    current_app.config['LDAP_GROUP_MEMBERS_FIELD']]
                if sys.version_info[0] > 2:
                    members = [m.decode('utf-8') for m in members]
                return members
    except ldap.LDAPError as e:
        raise LDAPException(self.error(e.args))
Returns a list with the group's members or None if unsuccessful.
22,755
def login_required(func):
    @wraps(func)
    def wrapped(*args, **kwargs):
        if g.user is None:
            return redirect(url_for(
                current_app.config['LDAP_LOGIN_VIEW'],
                next=request.path))
        return func(*args, **kwargs)
    return wrapped
When applied to a view function, any unauthenticated requests will be redirected to the view named in LDAP_LOGIN_VIEW. Authenticated requests do NOT require membership from a specific group.
22,756
def group_required(groups=None):
    def wrapper(func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            if g.user is None:
                return redirect(url_for(
                    current_app.config['LDAP_LOGIN_VIEW'],
                    next=request.path))
            match = [group for group in groups
                     if group in g.ldap_groups]
            if not match:
                abort(401)
            return func(*args, **kwargs)
        return wrapped
    return wrapper
When applied to a view function, any unauthenticated requests will be redirected to the view named in LDAP_LOGIN_VIEW. Authenticated requests are only permitted if they belong to one of the listed groups.
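A hypothetical Flask sketch of both decorators in use, assuming they are importable from the extension module above; the route names and the 'admins' group are placeholder values.

from flask import Flask

app = Flask(__name__)
app.config['LDAP_LOGIN_VIEW'] = 'login'  # view to redirect to

@app.route('/dashboard')
@login_required
def dashboard():
    return 'any authenticated user'

@app.route('/admin')
@group_required(['admins'])
def admin():
    return 'admins only'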
22,757
def buy_streak_freeze(self):
    lang = self.get_abbreviation_of(
        self.get_user_info()['learning_language_string'])
    if lang is None:
        raise Exception('No learning language found')
    try:
        self.buy_item('streak_freeze', lang)
        return True
    except AlreadyHaveStoreItemException:
        return False
Figure out the user's current learning language and use it as the parameter for the shop.
22,758
def _compute_dependency_order(skills):
    dependency_to_skill = MultiDict(
        [(skill['dependencies_name'][0]
          if skill['dependencies_name'] else '', skill)
         for skill in skills])
    index = 0
    previous_skill = ''
    while True:
        for skill in dependency_to_skill.getlist(previous_skill):
            skill['dependency_order'] = index
        index += 1
        skill_names = set(
            [skill['name']
             for skill in dependency_to_skill.getlist(previous_skill)])
        canonical_dependency = skill_names.intersection(
            set(dependency_to_skill.keys()))
        if canonical_dependency:
            previous_skill = canonical_dependency.pop()
        else:
            break
    return skills
Add a field to each skill indicating the order it was learned, based on the skill's dependencies. Multiple skills will have the same position if they have the same dependencies.
22,759
def get_languages(self, abbreviations=False):
    data = []
    for lang in self.user_data.languages:
        if lang['learning']:
            if abbreviations:
                data.append(lang['language'])
            else:
                data.append(lang['language_string'])
    return data
Get practiced languages.
22,760
def get_language_from_abbr(self, abbr):
    for language in self.user_data.languages:
        if language['language'] == abbr:
            return language['language_string']
    return None
Get a language's full name from its abbreviation.
22,761
def get_abbreviation_of(self, name):
    for language in self.user_data.languages:
        if language['language_string'] == name:
            return language['language']
    return None
Get the abbreviation of a language.
22,762
def get_language_details(self, language):
    for lang in self.user_data.languages:
        if language == lang['language_string']:
            return lang
    return {}
Get the user's status for a language.
22,763
def get_certificates(self):
    for certificate in self.user_data.certificates:
        certificate['datetime'] = certificate['datetime'].strip()
    return self.user_data.certificates
Get the user's certificates.
22,764
def get_calendar(self, language_abbr=None):
    if language_abbr:
        if not self._is_current_language(language_abbr):
            self._switch_language(language_abbr)
        return self.user_data.language_data[language_abbr]['calendar']
    else:
        return self.user_data.calendar
Get the user's last actions.
22,765
def get_language_progress(self, lang):
    if not self._is_current_language(lang):
        self._switch_language(lang)
    fields = ['streak', 'language_string', 'level_progress',
              'num_skills_learned', 'level_percent', 'level_points',
              'points_rank', 'next_level', 'level_left', 'language',
              'points', 'fluency_score', 'level']
    return self._make_dict(fields, self.user_data.language_data[lang])
Get information about the user's progression in a language.
22,766
def get_friends(self):
    for k, v in iter(self.user_data.language_data.items()):
        data = []
        for friend in v['points_ranking_data']:
            temp = {'username': friend['username'],
                    'id': friend['id'],
                    'points': friend['points_data']['total'],
                    'languages': [i['language_string'] for i in
                                  friend['points_data']['languages']]}
            data.append(temp)
        return data
Get the user's friends.
22,767
def get_known_words(self, lang):
    words = []
    for topic in self.user_data.language_data[lang]['skills']:
        if topic['learned']:
            words += topic['words']
    return set(words)
Get a list of all words learned by the user in a language.
22,768
def get_learned_skills(self, lang):
    skills = [skill for skill in
              self.user_data.language_data[lang]['skills']]
    self._compute_dependency_order(skills)
    return [skill for skill in
            sorted(skills, key=lambda skill: skill['dependency_order'])
            if skill['learned']]
Return the learned skill objects sorted by the order they were learned in.
22,769
def get_known_topics(self, lang):
    return [topic['title']
            for topic in self.user_data.language_data[lang]['skills']
            if topic['learned']]
Return the topics learned by a user in a language.
22,770
def get_unknown_topics(self, lang):
    return [topic['title']
            for topic in self.user_data.language_data[lang]['skills']
            if not topic['learned']]
Return the topics remaining to be learned by a user in a language.
22,771
def get_reviewable_topics(self, lang):
    return [topic['title']
            for topic in self.user_data.language_data[lang]['skills']
            if topic['learned'] and topic['strength'] < 1.0]
Return the topics learned but not golden by a user in a language.
22,772
def get_vocabulary(self, language_abbr=None):
    if not self.password:
        raise Exception("You must provide a password for this function")
    if language_abbr and not self._is_current_language(language_abbr):
        self._switch_language(language_abbr)
    overview_url = "https://www.duolingo.com/vocabulary/overview"
    overview_request = self._make_req(overview_url)
    overview = overview_request.json()
    return overview
Get an overview of the user's vocabulary in a language.
22,773
def fire_update_event(self, *args, **kwargs):
    for _handler in self._on_update:
        _handler(*args, **kwargs)
Trigger the method tied to _on_update
22,774
def download_configuration(self) -> str:
    return self._restCall(
        "home/getCurrentState",
        json.dumps(self._connection.clientCharacteristics))
Downloads the current configuration from the cloud. Returns the downloaded configuration or an errorCode.
22,775
def get_security_zones_activation(self) -> (bool, bool):
    internal_active = False
    external_active = False
    for g in self.groups:
        if isinstance(g, SecurityZoneGroup):
            if g.label == "EXTERNAL":
                external_active = g.active
            elif g.label == "INTERNAL":
                internal_active = g.active
    return internal_active, external_active
Returns whether the security zones are armed or not: internal is True if the internal zone is armed, external is True if the external zone is armed.
22,776
async def enable_events(self) -> asyncio.Task:
    return await self._connection.ws_connect(
        on_message=self._ws_on_message, on_error=self._ws_on_error)
Connects to the websocket. Returns a listening task.
22,777
def load_functionalChannels(self, groups: Iterable[Group]):
    self.functionalChannels = []
    for channel in self._rawJSONData["functionalChannels"].values():
        fc = self._parse_functionalChannel(channel, groups)
        self.functionalChannels.append(fc)
    self.functionalChannelCount = Counter(
        x.functionalChannelType for x in self.functionalChannels)
This function will load the functionalChannels into the device.
22,778
def set_shutter_level(self, level=0.0):
    data = {"channelIndex": 1, "deviceId": self.id,
            "shutterLevel": level}
    return self._restCall("device/control/setShutterLevel",
                          body=json.dumps(data))
Sets the shutter level.
22,779
def set_slats_level(self, slatsLevel=0.0, shutterLevel=None):
    if shutterLevel is None:
        shutterLevel = self.shutterLevel
    data = {
        "channelIndex": 1,
        "deviceId": self.id,
        "slatsLevel": slatsLevel,
        "shutterLevel": shutterLevel,
    }
    return self._restCall("device/control/setSlatsLevel",
                          json.dumps(data))
Sets the slats and shutter level.
22,780
def set_label(self, label):
    data = {"ruleId": self.id, "label": label}
    return self._restCall("rule/setRuleLabel", json.dumps(data))
Sets the label of the rule.
22,781
async def api_call(self, path, body=None, full_url=False):
    result = None
    if not full_url:
        path = self.full_url(path)
    for i in range(self._restCallRequestCounter):
        try:
            with async_timeout.timeout(self._restCallTimout,
                                       loop=self._loop):
                result = await self._websession.post(
                    path, data=body, headers=self.headers)
                if result.status == 200:
                    if result.content_type == "application/json":
                        ret = await result.json()
                    else:
                        ret = True
                    return ret
                else:
                    raise HmipWrongHttpStatusError
        except (asyncio.TimeoutError, aiohttp.ClientConnectionError):
            logger.debug("Connection timed out or another error "
                         "occurred %s" % path)
        except JSONDecodeError as err:
            logger.exception(err)
        finally:
            if result is not None:
                await result.release()
    raise HmipConnectionError("Failed to connect to HomeMaticIp server")
Make the actual call to the HMIP server. Throws HmipWrongHttpStatusError or HmipConnectionError if the connection has failed or the response is not correct.
22,782
def perform_master_login(email, password, android_id, service='ac2dm',
                         device_country='us', operatorCountry='us',
                         lang='en', sdk_version=17):
    data = {'accountType': 'HOSTED_OR_GOOGLE',
            'Email': email,
            'has_permission': 1,
            'add_account': 1,
            'EncryptedPasswd': google.signature(email, password,
                                                android_key_7_3_29),
            'service': service,
            'source': 'android',
            'androidId': android_id,
            'device_country': device_country,
            'operatorCountry': device_country,
            'lang': lang,
            'sdk_version': sdk_version}
    return _perform_auth_request(data)
Perform a master login, which is what Android does when you first add a Google account.
22,783
def perform_oauth(email, master_token, android_id, service, app,
                  client_sig, device_country='us', operatorCountry='us',
                  lang='en', sdk_version=17):
    data = {'accountType': 'HOSTED_OR_GOOGLE',
            'Email': email,
            'has_permission': 1,
            'EncryptedPasswd': master_token,
            'service': service,
            'source': 'android',
            'androidId': android_id,
            'app': app,
            'client_sig': client_sig,
            'device_country': device_country,
            'operatorCountry': device_country,
            'lang': lang,
            'sdk_version': sdk_version}
    return _perform_auth_request(data)
Use a master token from master_login to perform OAuth to a specific Google service.
22,784
def is_prime(n, rnd=default_pseudo_random, k=DEFAULT_ITERATION,
             algorithm=None):
    if algorithm is None:
        algorithm = PRIME_ALGO
    if algorithm == 'gmpy-miller-rabin':
        if not gmpy:
            raise NotImplementedError
        return gmpy.is_prime(n, k)
    elif algorithm == 'miller-rabin':
        return miller_rabin(n, k, rnd=rnd)
    elif algorithm == 'solovay-strassen':
        return randomized_primality_testing(n, rnd=rnd, k=k * 2)
    else:
        raise NotImplementedError
Test if n is a prime number
22,785
def jacobi_witness(x, n):
    j = jacobi(x, n) % n
    f = pow(x, n >> 1, n)
    return j != f
Returns False if n is an Euler pseudo-prime with base x, and True otherwise.
22,786
def miller_rabin(n, k, rnd=default_pseudo_random):
    s = 0
    d = n - 1
    s = primitives.integer_bit_size(n)
    s = fractions.gcd(2 ** s, n - 1)
    d = (n - 1) // s
    s = primitives.integer_bit_size(s) - 1
    while k:
        k = k - 1
        a = rnd.randint(2, n - 2)
        x = pow(a, d, n)
        if x == 1 or x == n - 1:
            continue
        for r in range(1, s - 1):
            x = pow(x, 2, n)
            if x == 1:
                return False
            if x == n - 1:
                break
        else:
            return False
    return True
Pure Python implementation of the Miller-Rabin algorithm.
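For contrast, here is a self-contained stdlib-only sketch of the same test, written with the textbook decomposition n - 1 = d * 2**s and Python's for/else idiom (the else fires only when no witness round found n - 1); this is a reference sketch, not the module's implementation above.

import random

def mr_probably_prime(n, k=20):
    # Miller-Rabin: probabilistic, error probability <= 4**-k.
    if n < 4:
        return n in (2, 3)
    d, s = n - 1, 0
    while d % 2 == 0:      # factor out powers of two
        d //= 2
        s += 1
    for _ in range(k):
        a = random.randint(2, n - 2)
        x = pow(a, d, n)
        if x in (1, n - 1):
            continue
        for _ in range(s - 1):
            x = pow(x, 2, n)
            if x == n - 1:
                break
        else:
            return False   # a is a witness of compositeness
    return True

print(mr_probably_prime(2 ** 61 - 1))  # True: a Mersenne prime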
22,787
def find_closest_match(target_track, tracks):
    track = None
    tracks_with_match_ratio = [
        (track,
         get_similarity(target_track.artist, track.artist),
         get_similarity(target_track.name, track.name))
        for track in tracks]
    sorted_tracks = sorted(tracks_with_match_ratio,
                           key=lambda t: (t[1], t[2]), reverse=True)
    if sorted_tracks:
        track = sorted_tracks[0][0]
    return track
Return closest match to target track
22,788
def integer_ceil(a, b):
    quanta, mod = divmod(a, b)
    if mod:
        quanta += 1
    return quanta
Return the integer ceiling of a divided by b.
22,789
def integer_byte_size(n):
    quanta, mod = divmod(integer_bit_size(n), 8)
    if mod or n == 0:
        quanta += 1
    return quanta
Returns the number of bytes necessary to store the integer n.
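Worked examples of the two helpers above, using stdlib equivalents so they run standalone: integer_ceil(7, 2) is 4 because divmod(7, 2) gives (3, 1) and the non-zero remainder bumps the quotient; integer_byte_size(65537) is 3 because 65537 needs 17 bits and ceil(17 / 8) is 3.

assert -(-7 // 2) == 4                       # same result as integer_ceil(7, 2)
assert ((65537).bit_length() + 7) // 8 == 3  # same as integer_byte_size(65537)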
22,790
def i2osp(x, x_len):
    # PKCS#1 requires x < 256 ** x_len, else the encoding cannot fit.
    if x >= 256 ** x_len:
        raise exceptions.IntegerTooLarge
    h = hex(x)[2:]
    if h[-1] == 'L':
        h = h[:-1]
    if len(h) & 1 == 1:
        h = '0%s' % h
    x = binascii.unhexlify(h)
    return b'\x00' * int(x_len - len(x)) + x
Converts the integer x to its big-endian representation of length x_len.
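A round-trip sketch: os2ip is the inverse conversion, and in modern Python both primitives reduce to int.to_bytes / int.from_bytes.

x = 65537
encoded = x.to_bytes(4, 'big')               # equivalent to i2osp(65537, 4)
assert encoded == b'\x00\x01\x00\x01'
assert int.from_bytes(encoded, 'big') == x   # os2ip round-trip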
22,791
def string_xor(a, b):
    if sys.version_info[0] < 3:
        return ''.join((chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)))
    else:
        return bytes(x ^ y for (x, y) in zip(a, b))
Computes the XOR operator between two byte strings. If the strings are of different lengths, the result is as long as the shorter.
22,792
def get_nonzero_random_bytes(length, rnd=default_crypto_random):
    result = []
    i = 0
    while i < length:
        # use a separate name so the generator is not clobbered on the
        # next iteration, and operate on bytes throughout
        bits = rnd.getrandbits(12 * length)
        s = i2osp(bits, 3 * length)
        s = s.replace(b'\x00', b'')
        result.append(s)
        i += len(s)
    return (b''.join(result))[:length]
Accumulate a random bit string and remove \0 bytes until the needed length is obtained.
22,793
def constant_time_cmp(a, b):
    result = True
    for x, y in zip(a, b):
        result &= (x == y)
    return result
Compare two strings using constant time.
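Usage note: zip() stops at the shorter input, so unequal-length inputs compare only their common prefix; callers should check lengths separately. The standard library also ships a vetted constant-time comparison, shown here for reference.

import hmac

assert hmac.compare_digest(b'secret', b'secret')
assert not hmac.compare_digest(b'secret', b'secrex')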
22,794
def encrypt(public_key, message, label=b'', hash_class=hashlib.sha1,
            mgf=mgf.mgf1, seed=None, rnd=default_crypto_random):
    hash = hash_class()
    h_len = hash.digest_size
    k = public_key.byte_size
    max_message_length = k - 2 * h_len - 2
    if len(message) > max_message_length:
        raise exceptions.MessageTooLong
    hash.update(label)
    label_hash = hash.digest()
    ps = b'\0' * int(max_message_length - len(message))
    db = b''.join((label_hash, ps, b'\x01', message))
    if not seed:
        seed = primitives.i2osp(rnd.getrandbits(h_len * 8), h_len)
    db_mask = mgf(seed, k - h_len - 1, hash_class=hash_class)
    masked_db = primitives.string_xor(db, db_mask)
    seed_mask = mgf(masked_db, h_len, hash_class=hash_class)
    masked_seed = primitives.string_xor(seed, seed_mask)
    em = b''.join((b'\x00', masked_seed, masked_db))
    m = primitives.os2ip(em)
    c = public_key.rsaep(m)
    output = primitives.i2osp(c, k)
    return output
Encrypt a byte message using an RSA public key and the OAEP wrapping algorithm
22,795
def decrypt(private_key, message, label=b'', hash_class=hashlib.sha1,
            mgf=mgf.mgf1):
    hash = hash_class()
    h_len = hash.digest_size
    k = private_key.byte_size
    if len(message) != k or k < 2 * h_len + 2:
        raise ValueError('decryption error')
    c = primitives.os2ip(message)
    m = private_key.rsadp(c)
    em = primitives.i2osp(m, k)
    hash.update(label)
    label_hash = hash.digest()
    y, masked_seed, masked_db = em[0], em[1:h_len + 1], em[1 + h_len:]
    if y != b'\x00' and y != 0:
        raise ValueError('decryption error')
    seed_mask = mgf(masked_db, h_len)
    seed = primitives.string_xor(masked_seed, seed_mask)
    db_mask = mgf(seed, k - h_len - 1)
    db = primitives.string_xor(masked_db, db_mask)
    label_hash_prime, rest = db[:h_len], db[h_len:]
    i = rest.find(b'\x01')
    if i == -1:
        raise exceptions.DecryptionError
    if rest[:i].strip(b'\x00') != b'':
        raise exceptions.DecryptionError
    m = rest[i + 1:]
    if label_hash_prime != label_hash:
        raise exceptions.DecryptionError
    return m
Decrypt a byte message using an RSA private key and the OAEP wrapping algorithm
22,796
def generate_key_pair(size=512, number=2, rnd=default_crypto_random,
                      k=DEFAULT_ITERATION, primality_algorithm=None,
                      strict_size=True, e=0x10001):
    primes = []
    lbda = 1
    bits = size // number + 1
    n = 1
    while len(primes) < number:
        if number - len(primes) == 1:
            bits = size - primitives.integer_bit_size(n) + 1
        prime = get_prime(bits, rnd, k, algorithm=primality_algorithm)
        if prime in primes:
            continue
        if e is not None and fractions.gcd(e, lbda) != 1:
            continue
        if (strict_size and number - len(primes) == 1
                and primitives.integer_bit_size(n * prime) != size):
            continue
        primes.append(prime)
        n *= prime
        lbda *= prime - 1
    if e is None:
        e = 0x10001
        while e < lbda:
            if fractions.gcd(e, lbda) == 1:
                break
            e += 2
    assert 3 <= e <= n - 1
    public = RsaPublicKey(n, e)
    private = MultiPrimeRsaPrivateKey(primes, e, blind=True, rnd=rnd)
    return public, private
Generates an RSA key pair.
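A hypothetical end-to-end usage, assuming generate_key_pair and the OAEP encrypt/decrypt defined above are importable from the same module; the key size and message are placeholder values.

public, private = generate_key_pair(size=1024)
ciphertext = encrypt(public, b'attack at dawn')
assert decrypt(private, ciphertext) == b'attack at dawn'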
22,797
def do_zsh_complete(cli, prog_name):
    commandline = os.environ['COMMANDLINE']
    args = split_args(commandline)[1:]
    if args and not commandline.endswith(' '):
        incomplete = args[-1]
        args = args[:-1]
    else:
        incomplete = ''

    def escape(s):
        return (s.replace('"', '""')
                .replace("'", "''")
                .replace('$', '\\$')
                .replace('`', '\\`'))

    res = []
    for item, help in get_choices(cli, prog_name, args, incomplete):
        if help:
            res.append(r'"%s"\:"%s"' % (escape(item), escape(help)))
        else:
            res.append('"%s"' % escape(item))
    if res:
        echo("_arguments '*: :((%s))'" % '\n'.join(res))
    else:
        echo("_files")
    return True
Do the zsh completion
22,798
def setup(app):
    app.connect('html-page-context', add_html_link)
    app.connect('build-finished', create_sitemap)
    app.set_translator('html', HTMLTranslator)
    app.sitemap_links = []
Setup connects events to the sitemap builder
22,799
def get_translated_url(context, lang_code, object=None):
    view = context.get('view', None)
    request = context['request']
    if object is not None:
        qs = ''
    else:
        object = (context.get('object', None)
                  or context.get('current_page', None)
                  or context.get('page', None))
        qs = request.META.get('QUERY_STRING', '')
    try:
        if view is not None:
            get_view_url = getattr(view, 'get_view_url', None)
            if get_view_url:
                with smart_override(lang_code):
                    return _url_qs(view.get_view_url(), qs)
        if object is None:
            object = getattr(view, 'object', None)
        if object is not None and hasattr(object, 'get_absolute_url'):
            if isinstance(object, TranslatableModel):
                with switch_language(object, lang_code):
                    return _url_qs(object.get_absolute_url(), qs)
            else:
                with smart_override(lang_code):
                    return _url_qs(object.get_absolute_url(), qs)
    except TranslationDoesNotExist:
        return ''
    resolver_match = request.resolver_match
    if resolver_match is None:
        return ''
    with smart_override(lang_code):
        clean_kwargs = _cleanup_urlpattern_kwargs(resolver_match.kwargs)
        return _url_qs(reverse(resolver_match.view_name,
                               args=resolver_match.args,
                               kwargs=clean_kwargs,
                               current_app=resolver_match.app_name), qs)
Get the proper URL for this page in a different language.