idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
42,900 | def should_retry_on ( self , exception_class , logger = None ) : for n in ( self . retry_on or [ ] ) : try : if issubclass ( exception_class , import_attribute ( n ) ) : return True except TaskImportError : if logger : logger . error ( 'should_retry_on could not import class' , exception_name = n ) return False | Whether this task should be retried when the given exception occurs . |
42,901 | def update_scheduled_time ( self , when ) : tiger = self . tiger ts = get_timestamp ( when ) assert ts pipeline = tiger . connection . pipeline ( ) key = tiger . _key ( SCHEDULED , self . queue ) tiger . scripts . zadd ( key , ts , self . id , mode = 'xx' , client = pipeline ) pipeline . zscore ( key , self . id ) _ , score = pipeline . execute ( ) if not score : raise TaskNotFound ( 'Task {} not found in queue "{}" in state "{}".' . format ( self . id , self . queue , SCHEDULED ) ) self . _ts = ts | Updates a scheduled task's date to the given date . If the task is not scheduled a TaskNotFound exception is raised . |
42,902 | def n_executions ( self ) : pipeline = self . tiger . connection . pipeline ( ) pipeline . exists ( self . tiger . _key ( 'task' , self . id ) ) pipeline . llen ( self . tiger . _key ( 'task' , self . id , 'executions' ) ) exists , n_executions = pipeline . execute ( ) if not exists : raise TaskNotFound ( 'Task {} not found.' . format ( self . id ) ) return n_executions | Queries and returns the number of past task executions . |
42,903 | def set_input ( self , nr = 2 , qd = 1 , b = 0 ) : self . nr = nr self . qd = qd self . b = b | Set inputs after initialization |
42,904 | def generateNoise ( self ) : wfb = np . zeros ( self . nr * 2 ) wfb [ : self . nr ] = np . random . normal ( 0 , np . sqrt ( self . qd ) , self . nr ) mhb = - self . b / 2.0 hfb = np . zeros ( self . nr * 2 ) hfb = np . zeros ( self . nr * 2 ) hfb [ 0 ] = 1.0 indices = np . arange ( self . nr - 1 ) hfb [ 1 : self . nr ] = ( mhb + indices ) / ( indices + 1.0 ) hfb [ : self . nr ] = np . multiply . accumulate ( hfb [ : self . nr ] ) wfb_fft = np . fft . rfft ( wfb ) hfb_fft = np . fft . rfft ( hfb ) time_series = np . fft . irfft ( wfb_fft * hfb_fft ) [ : self . nr ] self . time_series = time_series | Generate noise time series based on input parameters |
42,905 | def adev ( self , tau0 , tau ) : prefactor = self . adev_from_qd ( tau0 = tau0 , tau = tau ) c = self . c_avar ( ) avar = pow ( prefactor , 2 ) * pow ( tau , c ) return np . sqrt ( avar ) | return predicted ADEV of noise - type at given tau |
42,906 | def mdev ( self , tau0 , tau ) : prefactor = self . mdev_from_qd ( tau0 = tau0 , tau = tau ) c = self . c_mvar ( ) mvar = pow ( prefactor , 2 ) * pow ( tau , c ) return np . sqrt ( mvar ) | return predicted MDEV of noise - type at given tau |
42,907 | def scipy_psd ( x , f_sample = 1.0 , nr_segments = 4 ) : f_axis , psd_of_x = scipy . signal . welch ( x , f_sample , nperseg = len ( x ) / nr_segments ) return f_axis , psd_of_x | PSD routine from scipy we can compare our own numpy result against this one |
42,908 | def iterpink ( depth = 20 ) : values = numpy . random . randn ( depth ) smooth = numpy . random . randn ( depth ) source = numpy . random . randn ( depth ) sumvals = values . sum ( ) i = 0 while True : yield sumvals + smooth [ i ] i += 1 if i == depth : i = 0 smooth = numpy . random . randn ( depth ) source = numpy . random . randn ( depth ) continue c = 0 while not ( i >> c ) & 1 : c += 1 sumvals += source [ i ] - values [ c ] values [ c ] = source [ i ] | Generate a sequence of samples of pink noise . |
42,909 | def plotline ( plt , alpha , taus , style , label = "" ) : y = [ pow ( tt , alpha ) for tt in taus ] plt . loglog ( taus , y , style , label = label ) | plot a line with the slope alpha |
42,910 | def b1_noise_id ( x , af , rate ) : ( taus , devs , errs , ns ) = at . adev ( x , taus = [ af * rate ] , data_type = "phase" , rate = rate ) oadev_x = devs [ 0 ] y = np . diff ( x ) y_cut = np . array ( y [ : len ( y ) - ( len ( y ) % af ) ] ) assert len ( y_cut ) % af == 0 y_shaped = y_cut . reshape ( ( int ( len ( y_cut ) / af ) , af ) ) y_averaged = np . average ( y_shaped , axis = 1 ) var = np . var ( y_averaged , ddof = 1 ) return var / pow ( oadev_x , 2.0 ) | B1 ratio for noise identification : ratio of Standard Variance to AVAR |
42,911 | def plot ( self , atDataset , errorbars = False , grid = False ) : if errorbars : self . ax . errorbar ( atDataset . out [ "taus" ] , atDataset . out [ "stat" ] , yerr = atDataset . out [ "stat_err" ] , ) else : self . ax . plot ( atDataset . out [ "taus" ] , atDataset . out [ "stat" ] , ) self . ax . set_xlabel ( "Tau" ) self . ax . set_ylabel ( atDataset . out [ "stat_id" ] ) self . ax . grid ( grid , which = "minor" , ls = "-" , color = '0.65' ) self . ax . grid ( grid , which = "major" , ls = "-" , color = '0.25' ) | use matplotlib methods for plotting |
42,912 | def greenhall_table2 ( alpha , d ) : row_idx = int ( - alpha + 2 ) assert ( row_idx in [ 0 , 1 , 2 , 3 , 4 , 5 ] ) col_idx = int ( d - 1 ) table2 = [ [ ( 3.0 / 2.0 , 1.0 / 2.0 ) , ( 35.0 / 18.0 , 1.0 ) , ( 231.0 / 100.0 , 3.0 / 2.0 ) ] , [ ( 78.6 , 25.2 ) , ( 790.0 , 410.0 ) , ( 9950.0 , 6520.0 ) ] , [ ( 2.0 / 3.0 , 1.0 / 6.0 ) , ( 2.0 / 3.0 , 1.0 / 3.0 ) , ( 7.0 / 9.0 , 1.0 / 2.0 ) ] , [ ( - 1 , - 1 ) , ( 0.852 , 0.375 ) , ( 0.997 , 0.617 ) ] , [ ( - 1 , - 1 ) , ( 1.079 , 0.368 ) , ( 1.033 , 0.607 ) ] , [ ( - 1 , - 1 ) , ( - 1 , - 1 ) , ( 1.053 , 0.553 ) ] , [ ( - 1 , - 1 ) , ( - 1 , - 1 ) , ( 1.302 , 0.535 ) ] , ] return table2 [ row_idx ] [ col_idx ] | Table 2 from Greenhall 2004 |
42,913 | def greenhall_table1 ( alpha , d ) : row_idx = int ( - alpha + 2 ) col_idx = int ( d - 1 ) table1 = [ [ ( 2.0 / 3.0 , 1.0 / 3.0 ) , ( 7.0 / 9.0 , 1.0 / 2.0 ) , ( 22.0 / 25.0 , 2.0 / 3.0 ) ] , [ ( 0.840 , 0.345 ) , ( 0.997 , 0.616 ) , ( 1.141 , 0.843 ) ] , [ ( 1.079 , 0.368 ) , ( 1.033 , 0.607 ) , ( 1.184 , 0.848 ) ] , [ ( - 1 , - 1 ) , ( 1.048 , 0.534 ) , ( 1.180 , 0.816 ) ] , [ ( - 1 , - 1 ) , ( 1.302 , 0.535 ) , ( 1.175 , 0.777 ) ] , [ ( - 1 , - 1 ) , ( - 1 , - 1 ) , ( 1.194 , 0.703 ) ] , [ ( - 1 , - 1 ) , ( - 1 , - 1 ) , ( 1.489 , 0.702 ) ] , ] return table1 [ row_idx ] [ col_idx ] | Table 1 from Greenhall 2004 |
42,914 | def edf_mtotdev ( N , m , alpha ) : assert ( alpha in [ 2 , 1 , 0 , - 1 , - 2 ] ) NIST_SP1065_table8 = [ ( 1.90 , 2.1 ) , ( 1.20 , 1.40 ) , ( 1.10 , 1.2 ) , ( 0.85 , 0.50 ) , ( 0.75 , 0.31 ) ] ( b , c ) = NIST_SP1065_table8 [ abs ( alpha - 2 ) ] edf = b * ( float ( N ) / float ( m ) ) - c print ( "mtotdev b,c= " , ( b , c ) , " edf=" , edf ) return edf | Equivalent degrees of freedom for Modified Total Deviation NIST SP1065 page 41 Table 8 |
42,915 | def edf_simple ( N , m , alpha ) : N = float ( N ) m = float ( m ) if alpha in [ 2 , 1 , 0 , - 1 , - 2 ] : if alpha == + 2 : edf = ( N + 1 ) * ( N - 2 * m ) / ( 2 * ( N - m ) ) if alpha == 0 : edf = ( ( ( 3 * ( N - 1 ) / ( 2 * m ) ) - ( 2 * ( N - 2 ) / N ) ) * ( ( 4 * pow ( m , 2 ) ) / ( ( 4 * pow ( m , 2 ) ) + 5 ) ) ) if alpha == 1 : a = ( N - 1 ) / ( 2 * m ) b = ( 2 * m + 1 ) * ( N - 1 ) / 4 edf = np . exp ( np . sqrt ( np . log ( a ) * np . log ( b ) ) ) if alpha == - 1 : if m == 1 : edf = 2 * ( N - 2 ) / ( 2.3 * N - 4.9 ) if m >= 2 : edf = 5 * N ** 2 / ( 4 * m * ( N + ( 3 * m ) ) ) if alpha == - 2 : a = ( N - 2 ) / ( m * ( N - 3 ) ** 2 ) b = ( N - 1 ) ** 2 c = 3 * m * ( N - 1 ) d = 4 * m ** 2 edf = a * ( b - c + d ) else : edf = ( N - 1 ) print ( "Noise type not recognized. Defaulting to N - 1 degrees of freedom." ) return edf | Equivalent degrees of freedom . Simple approximate formulae . |
42,916 | def example1 ( ) : N = 1000 f = 1 y = np . random . randn ( 1 , N ) [ 0 , : ] x = [ xx for xx in np . linspace ( 1 , len ( y ) , len ( y ) ) ] x_ax , y_ax , ( err_l , err_h ) , ns = allan . gradev ( y , data_type = 'phase' , rate = f , taus = x ) plt . errorbar ( x_ax , y_ax , yerr = [ err_l , err_h ] , label = 'GRADEV, no gaps' ) y [ int ( np . floor ( 0.4 * N ) ) : int ( np . floor ( 0.6 * N ) ) ] = np . NaN x_ax , y_ax , ( err_l , err_h ) , ns = allan . gradev ( y , data_type = 'phase' , rate = f , taus = x ) plt . errorbar ( x_ax , y_ax , yerr = [ err_l , err_h ] , label = 'GRADEV, with gaps' ) plt . xscale ( 'log' ) plt . yscale ( 'log' ) plt . grid ( ) plt . legend ( ) plt . xlabel ( 'Tau / s' ) plt . ylabel ( 'Overlapping Allan deviation' ) plt . show ( ) | Compute the GRADEV of a white phase noise . Compares two different scenarios . 1 ) The original data and 2 ) ADEV estimate with gap robust ADEV . |
42,917 | def example2 ( ) : N = 1000 f = 1 s = 1 + 5 / N * np . arange ( 0 , N ) y = s * np . random . randn ( 1 , N ) [ 0 , : ] x = [ xx for xx in np . linspace ( 1 , len ( y ) , len ( y ) ) ] x_ax , y_ax , ( err_l , err_h ) , ns = allan . gradev ( y , data_type = 'phase' , rate = f , taus = x ) plt . loglog ( x_ax , y_ax , 'b.' , label = "No gaps" ) y [ int ( 0.4 * N ) : int ( 0.6 * N , ) ] = np . NaN x_ax , y_ax , ( err_l , err_h ) , ns = allan . gradev ( y , data_type = 'phase' , rate = f , taus = x ) plt . loglog ( x_ax , y_ax , 'g.' , label = "With gaps" ) plt . grid ( ) plt . legend ( ) plt . xlabel ( 'Tau / s' ) plt . ylabel ( 'Overlapping Allan deviation' ) plt . show ( ) | Compute the GRADEV of a nonstationary white phase noise . |
42,918 | def tdev ( data , rate = 1.0 , data_type = "phase" , taus = None ) : phase = input_to_phase ( data , rate , data_type ) ( taus , md , mde , ns ) = mdev ( phase , rate = rate , taus = taus ) td = taus * md / np . sqrt ( 3.0 ) tde = td / np . sqrt ( ns ) return taus , td , tde , ns | Time deviation . Based on modified Allan variance . |
42,919 | def mdev ( data , rate = 1.0 , data_type = "phase" , taus = None ) : phase = input_to_phase ( data , rate , data_type ) ( phase , ms , taus_used ) = tau_generator ( phase , rate , taus = taus ) data , taus = np . array ( phase ) , np . array ( taus ) md = np . zeros_like ( ms ) mderr = np . zeros_like ( ms ) ns = np . zeros_like ( ms ) for idx , m in enumerate ( ms ) : m = int ( m ) tau = taus_used [ idx ] d0 = phase [ 0 : m ] d1 = phase [ m : 2 * m ] d2 = phase [ 2 * m : 3 * m ] e = min ( len ( d0 ) , len ( d1 ) , len ( d2 ) ) v = np . sum ( d2 [ : e ] - 2 * d1 [ : e ] + d0 [ : e ] ) s = v * v d3 = phase [ 3 * m : ] d2 = phase [ 2 * m : ] d1 = phase [ 1 * m : ] d0 = phase [ 0 : ] e = min ( len ( d0 ) , len ( d1 ) , len ( d2 ) , len ( d3 ) ) n = e + 1 v_arr = v + np . cumsum ( d3 [ : e ] - 3 * d2 [ : e ] + 3 * d1 [ : e ] - d0 [ : e ] ) s = s + np . sum ( v_arr * v_arr ) s /= 2.0 * m * m * tau * tau * n s = np . sqrt ( s ) md [ idx ] = s mderr [ idx ] = ( s / np . sqrt ( n ) ) ns [ idx ] = n return remove_small_ns ( taus_used , md , mderr , ns ) | Modified Allan deviation . Used to distinguish between White and Flicker Phase Modulation . |
42,920 | def adev ( data , rate = 1.0 , data_type = "phase" , taus = None ) : phase = input_to_phase ( data , rate , data_type ) ( phase , m , taus_used ) = tau_generator ( phase , rate , taus ) ad = np . zeros_like ( taus_used ) ade = np . zeros_like ( taus_used ) adn = np . zeros_like ( taus_used ) for idx , mj in enumerate ( m ) : ( ad [ idx ] , ade [ idx ] , adn [ idx ] ) = calc_adev_phase ( phase , rate , mj , mj ) return remove_small_ns ( taus_used , ad , ade , adn ) | Allan deviation . Classic - use only if required - relatively poor confidence . |
42,921 | def ohdev ( data , rate = 1.0 , data_type = "phase" , taus = None ) : phase = input_to_phase ( data , rate , data_type ) ( phase , m , taus_used ) = tau_generator ( phase , rate , taus ) hdevs = np . zeros_like ( taus_used ) hdeverrs = np . zeros_like ( taus_used ) ns = np . zeros_like ( taus_used ) for idx , mj in enumerate ( m ) : ( hdevs [ idx ] , hdeverrs [ idx ] , ns [ idx ] ) = calc_hdev_phase ( phase , rate , mj , 1 ) return remove_small_ns ( taus_used , hdevs , hdeverrs , ns ) | Overlapping Hadamard deviation . Better confidence than normal Hadamard . |
42,922 | def calc_hdev_phase ( phase , rate , mj , stride ) : tau0 = 1.0 / float ( rate ) mj = int ( mj ) stride = int ( stride ) d3 = phase [ 3 * mj : : stride ] d2 = phase [ 2 * mj : : stride ] d1 = phase [ 1 * mj : : stride ] d0 = phase [ : : stride ] n = min ( len ( d0 ) , len ( d1 ) , len ( d2 ) , len ( d3 ) ) v_arr = d3 [ : n ] - 3 * d2 [ : n ] + 3 * d1 [ : n ] - d0 [ : n ] s = np . sum ( v_arr * v_arr ) if n == 0 : n = 1 h = np . sqrt ( s / 6.0 / float ( n ) ) / float ( tau0 * mj ) e = h / np . sqrt ( n ) return h , e , n | main calculation function for HDEV and OHDEV |
42,923 | def totdev ( data , rate = 1.0 , data_type = "phase" , taus = None ) : phase = input_to_phase ( data , rate , data_type ) ( phase , m , taus_used ) = tau_generator ( phase , rate , taus ) N = len ( phase ) x1 = 2.0 * phase [ 0 ] * np . ones ( ( N - 2 , ) ) x1 = x1 - phase [ 1 : - 1 ] x1 = x1 [ : : - 1 ] x2 = 2.0 * phase [ - 1 ] * np . ones ( ( N - 2 , ) ) x2 = x2 - phase [ 1 : - 1 ] [ : : - 1 ] assert len ( x1 ) + len ( phase ) + len ( x2 ) == 3 * N - 4 x = np . zeros ( ( 3 * N - 4 ) ) x [ 0 : N - 2 ] = x1 x [ N - 2 : 2 * ( N - 2 ) + 2 ] = phase x [ 2 * ( N - 2 ) + 2 : ] = x2 devs = np . zeros_like ( taus_used ) deverrs = np . zeros_like ( taus_used ) ns = np . zeros_like ( taus_used ) mid = len ( x1 ) for idx , mj in enumerate ( m ) : mj = int ( mj ) d0 = x [ mid + 1 : ] d1 = x [ mid + mj + 1 : ] d1n = x [ mid - mj + 1 : ] e = min ( len ( d0 ) , len ( d1 ) , len ( d1n ) ) v_arr = d1n [ : e ] - 2.0 * d0 [ : e ] + d1 [ : e ] dev = np . sum ( v_arr [ : mid ] * v_arr [ : mid ] ) dev /= float ( 2 * pow ( mj / rate , 2 ) * ( N - 2 ) ) dev = np . sqrt ( dev ) devs [ idx ] = dev deverrs [ idx ] = dev / np . sqrt ( mid ) ns [ idx ] = mid return remove_small_ns ( taus_used , devs , deverrs , ns ) | Total deviation . Better confidence at long averages for Allan . |
42,924 | def mtotdev ( data , rate = 1.0 , data_type = "phase" , taus = None ) : phase = input_to_phase ( data , rate , data_type ) ( phase , ms , taus_used ) = tau_generator ( phase , rate , taus , maximum_m = float ( len ( phase ) ) / 3.0 ) devs = np . zeros_like ( taus_used ) deverrs = np . zeros_like ( taus_used ) ns = np . zeros_like ( taus_used ) for idx , mj in enumerate ( ms ) : devs [ idx ] , deverrs [ idx ] , ns [ idx ] = calc_mtotdev_phase ( phase , rate , mj ) return remove_small_ns ( taus_used , devs , deverrs , ns ) | PRELIMINARY - REQUIRES FURTHER TESTING . Modified Total deviation . Better confidence at long averages for modified Allan |
42,925 | def htotdev ( data , rate = 1.0 , data_type = "phase" , taus = None ) : if data_type == "phase" : phase = data freq = phase2frequency ( phase , rate ) elif data_type == "freq" : phase = frequency2phase ( data , rate ) freq = data else : raise Exception ( "unknown data_type: " + data_type ) rate = float ( rate ) ( freq , ms , taus_used ) = tau_generator ( freq , rate , taus , maximum_m = float ( len ( freq ) ) / 3.0 ) phase = np . array ( phase ) freq = np . array ( freq ) devs = np . zeros_like ( taus_used ) deverrs = np . zeros_like ( taus_used ) ns = np . zeros_like ( taus_used ) for idx , mj in enumerate ( ms ) : if int ( mj ) == 1 : ( devs [ idx ] , deverrs [ idx ] , ns [ idx ] ) = calc_hdev_phase ( phase , rate , mj , 1 ) else : ( devs [ idx ] , deverrs [ idx ] , ns [ idx ] ) = calc_htotdev_freq ( freq , mj ) return remove_small_ns ( taus_used , devs , deverrs , ns ) | PRELIMINARY - REQUIRES FURTHER TESTING . Hadamard Total deviation . Better confidence at long averages for Hadamard deviation |
42,926 | def theo1 ( data , rate = 1.0 , data_type = "phase" , taus = None ) : phase = input_to_phase ( data , rate , data_type ) tau0 = 1.0 / rate ( phase , ms , taus_used ) = tau_generator ( phase , rate , taus , even = True ) devs = np . zeros_like ( taus_used ) deverrs = np . zeros_like ( taus_used ) ns = np . zeros_like ( taus_used ) N = len ( phase ) for idx , m in enumerate ( ms ) : m = int ( m ) assert m % 2 == 0 dev = 0 n = 0 for i in range ( int ( N - m ) ) : s = 0 for d in range ( int ( m / 2 ) ) : pre = 1.0 / ( float ( m ) / 2 - float ( d ) ) s += pre * pow ( phase [ i ] - phase [ i - d + int ( m / 2 ) ] + phase [ i + m ] - phase [ i + d + int ( m / 2 ) ] , 2 ) n = n + 1 dev += s assert n == ( N - m ) * m / 2 dev = dev / ( 0.75 * ( N - m ) * pow ( m * tau0 , 2 ) ) devs [ idx ] = np . sqrt ( dev ) deverrs [ idx ] = devs [ idx ] / np . sqrt ( N - m ) ns [ idx ] = n return remove_small_ns ( taus_used , devs , deverrs , ns ) | PRELIMINARY - REQUIRES FURTHER TESTING . Theo1 is a two - sample variance with improved confidence and extended averaging factor range . |
42,927 | def tierms ( data , rate = 1.0 , data_type = "phase" , taus = None ) : phase = input_to_phase ( data , rate , data_type ) ( data , m , taus_used ) = tau_generator ( phase , rate , taus ) count = len ( phase ) devs = np . zeros_like ( taus_used ) deverrs = np . zeros_like ( taus_used ) ns = np . zeros_like ( taus_used ) for idx , mj in enumerate ( m ) : mj = int ( mj ) phases = np . column_stack ( ( phase [ : - mj ] , phase [ mj : ] ) ) p_max = np . max ( phases , axis = 1 ) p_min = np . min ( phases , axis = 1 ) phases = p_max - p_min tie = np . sqrt ( np . mean ( phases * phases ) ) ncount = count - mj devs [ idx ] = tie deverrs [ idx ] = 0 / np . sqrt ( ncount ) ns [ idx ] = ncount return remove_small_ns ( taus_used , devs , deverrs , ns ) | Time Interval Error RMS . |
42,928 | def mtie ( data , rate = 1.0 , data_type = "phase" , taus = None ) : phase = input_to_phase ( data , rate , data_type ) ( phase , m , taus_used ) = tau_generator ( phase , rate , taus ) devs = np . zeros_like ( taus_used ) deverrs = np . zeros_like ( taus_used ) ns = np . zeros_like ( taus_used ) for idx , mj in enumerate ( m ) : rw = mtie_rolling_window ( phase , int ( mj + 1 ) ) win_max = np . max ( rw , axis = 1 ) win_min = np . min ( rw , axis = 1 ) tie = win_max - win_min dev = np . max ( tie ) ncount = phase . shape [ 0 ] - mj devs [ idx ] = dev deverrs [ idx ] = dev / np . sqrt ( ncount ) ns [ idx ] = ncount return remove_small_ns ( taus_used , devs , deverrs , ns ) | Maximum Time Interval Error . |
42,929 | def mtie_phase_fast ( phase , rate = 1.0 , data_type = "phase" , taus = None ) : rate = float ( rate ) phase = np . asarray ( phase ) k_max = int ( np . floor ( np . log2 ( len ( phase ) ) ) ) phase = phase [ 0 : pow ( 2 , k_max ) ] assert len ( phase ) == pow ( 2 , k_max ) taus = [ pow ( 2 , k ) for k in range ( k_max ) ] print ( "taus N=" , len ( taus ) , " " , taus ) devs = np . zeros ( len ( taus ) ) deverrs = np . zeros ( len ( taus ) ) ns = np . zeros ( len ( taus ) ) taus_used = np . array ( taus ) mtie_max = np . zeros ( ( len ( phase ) - 1 , k_max ) ) mtie_min = np . zeros ( ( len ( phase ) - 1 , k_max ) ) for kidx in range ( k_max ) : k = kidx + 1 imax = len ( phase ) - pow ( 2 , k ) + 1 tie = np . zeros ( imax ) ns [ kidx ] = imax for i in range ( imax ) : if k == 1 : mtie_max [ i , kidx ] = max ( phase [ i ] , phase [ i + 1 ] ) mtie_min [ i , kidx ] = min ( phase [ i ] , phase [ i + 1 ] ) else : p = int ( pow ( 2 , k - 1 ) ) mtie_max [ i , kidx ] = max ( mtie_max [ i , kidx - 1 ] , mtie_max [ i + p , kidx - 1 ] ) mtie_min [ i , kidx ] = min ( mtie_min [ i , kidx - 1 ] , mtie_min [ i + p , kidx - 1 ] ) tie [ i ] = mtie_max [ i , kidx ] - mtie_min [ i , kidx ] devs [ kidx ] = np . amax ( tie ) devs = np . array ( devs ) print ( "devs N=" , len ( devs ) , " " , devs ) print ( "taus N=" , len ( taus_used ) , " " , taus_used ) return remove_small_ns ( taus_used , devs , deverrs , ns ) | fast binary decomposition algorithm for MTIE |
42,930 | def gradev ( data , rate = 1.0 , data_type = "phase" , taus = None , ci = 0.9 , noisetype = 'wp' ) : if ( data_type == "freq" ) : print ( "Warning : phase data is preferred as input to gradev()" ) phase = input_to_phase ( data , rate , data_type ) ( data , m , taus_used ) = tau_generator ( phase , rate , taus ) ad = np . zeros_like ( taus_used ) ade_l = np . zeros_like ( taus_used ) ade_h = np . zeros_like ( taus_used ) adn = np . zeros_like ( taus_used ) for idx , mj in enumerate ( m ) : ( dev , deverr , n ) = calc_gradev_phase ( data , rate , mj , 1 , ci , noisetype ) ad [ idx ] = dev ade_l [ idx ] = deverr [ 0 ] ade_h [ idx ] = deverr [ 1 ] adn [ idx ] = n return remove_small_ns ( taus_used , ad , [ ade_l , ade_h ] , adn ) | gap resistant overlapping Allan deviation |
42,931 | def input_to_phase ( data , rate , data_type ) : if data_type == "phase" : return data elif data_type == "freq" : return frequency2phase ( data , rate ) else : raise Exception ( "unknown data_type: " + data_type ) | Take either phase or frequency as input and return phase |
42,932 | def trim_data ( x ) : first = 0 while np . isnan ( x [ first ] ) : first += 1 last = len ( x ) while np . isnan ( x [ last - 1 ] ) : last -= 1 return x [ first : last ] | Trim leading and trailing NaNs from dataset . This is done by browsing the array from each end and storing the index of the first non - NaN in each case , then returning the appropriate slice of the array |
42,933 | def three_cornered_hat_phase ( phasedata_ab , phasedata_bc , phasedata_ca , rate , taus , function ) : ( tau_ab , dev_ab , err_ab , ns_ab ) = function ( phasedata_ab , data_type = 'phase' , rate = rate , taus = taus ) ( tau_bc , dev_bc , err_bc , ns_bc ) = function ( phasedata_bc , data_type = 'phase' , rate = rate , taus = taus ) ( tau_ca , dev_ca , err_ca , ns_ca ) = function ( phasedata_ca , data_type = 'phase' , rate = rate , taus = taus ) var_ab = dev_ab * dev_ab var_bc = dev_bc * dev_bc var_ca = dev_ca * dev_ca assert len ( var_ab ) == len ( var_bc ) == len ( var_ca ) var_a = 0.5 * ( var_ab + var_ca - var_bc ) var_a [ var_a < 0 ] = 0 dev_a = np . sqrt ( var_a ) err_a = [ d / np . sqrt ( nn ) for ( d , nn ) in zip ( dev_a , ns_ab ) ] return tau_ab , dev_a , err_a , ns_ab | Three Cornered Hat Method |
42,934 | def frequency2phase ( freqdata , rate ) : dt = 1.0 / float ( rate ) freqdata = trim_data ( freqdata ) phasedata = np . cumsum ( freqdata ) * dt phasedata = np . insert ( phasedata , 0 , 0 ) return phasedata | integrate fractional frequency data and output phase data |
42,935 | def phase2radians ( phasedata , v0 ) : fi = [ 2 * np . pi * v0 * xx for xx in phasedata ] return fi | Convert phase in seconds to phase in radians |
42,936 | def frequency2fractional ( frequency , mean_frequency = - 1 ) : if mean_frequency == - 1 : mu = np . mean ( frequency ) else : mu = mean_frequency y = [ ( x - mu ) / mu for x in frequency ] return y | Convert frequency in Hz to fractional frequency |
42,937 | def set_input ( self , data , rate = 1.0 , data_type = "phase" , taus = None ) : self . inp [ "data" ] = data self . inp [ "rate" ] = rate self . inp [ "data_type" ] = data_type self . inp [ "taus" ] = taus | Optional method if you chose not to set inputs on init |
42,938 | def compute ( self , function ) : try : func = getattr ( allantools , function ) except AttributeError : raise AttributeError ( "function must be defined in allantools" ) whitelisted = [ "theo1" , "mtie" , "tierms" ] if function [ - 3 : ] != "dev" and function not in whitelisted : raise RuntimeError ( "function must be one of the 'dev' functions" ) result = func ( self . inp [ "data" ] , rate = self . inp [ "rate" ] , data_type = self . inp [ "data_type" ] , taus = self . inp [ "taus" ] ) keys = [ "taus" , "stat" , "stat_err" , "stat_n" ] result = { key : result [ i ] for i , key in enumerate ( keys ) } self . out = result . copy ( ) self . out [ "stat_id" ] = function return result | Evaluate the passed function with the supplied data . |
42,939 | def many_psds ( k = 2 , fs = 1.0 , b0 = 1.0 , N = 1024 ) : psd = [ ] for j in range ( k ) : print j x = noise . white ( N = 2 * 4096 , b0 = b0 , fs = fs ) f , tmp = noise . numpy_psd ( x , fs ) if j == 0 : psd = tmp else : psd = psd + tmp return f , psd / k | compute average of many PSDs |
42,940 | def list_my ( self ) : org_list = self . call_contract_command ( "Registry" , "listOrganizations" , [ ] ) rez_owner = [ ] rez_member = [ ] for idx , org_id in enumerate ( org_list ) : ( found , org_id , org_name , owner , members , serviceNames , repositoryNames ) = self . call_contract_command ( "Registry" , "getOrganizationById" , [ org_id ] ) if ( not found ) : raise Exception ( "Organization was removed during this call. Please retry." ) if self . ident . address == owner : rez_owner . append ( ( org_name , bytes32_to_str ( org_id ) ) ) if self . ident . address in members : rez_member . append ( ( org_name , bytes32_to_str ( org_id ) ) ) if ( rez_owner ) : self . _printout ( "# Organizations you are the owner of" ) self . _printout ( "# OrgName OrgId" ) for n , i in rez_owner : self . _printout ( "%s %s" % ( n , i ) ) if ( rez_member ) : self . _printout ( "# Organizations you are the member of" ) self . _printout ( "# OrgName OrgId" ) for n , i in rez_member : self . _printout ( "%s %s" % ( n , i ) ) | Find organization that has the current identity as the owner or as the member |
42,941 | def add_group ( self , group_name , payment_address ) : if ( self . is_group_name_exists ( group_name ) ) : raise Exception ( "the group \"%s\" is already present" % str ( group_name ) ) group_id_base64 = base64 . b64encode ( secrets . token_bytes ( 32 ) ) self . m [ "groups" ] += [ { "group_name" : group_name , "group_id" : group_id_base64 . decode ( "ascii" ) , "payment_address" : payment_address } ] return group_id_base64 | Return new group_id in base64 |
42,942 | def is_group_name_exists ( self , group_name ) : groups = self . m [ "groups" ] for g in groups : if ( g [ "group_name" ] == group_name ) : return True return False | check if a group with the given name already exists |
42,943 | def get_group_name_nonetrick ( self , group_name = None ) : groups = self . m [ "groups" ] if ( len ( groups ) == 0 ) : raise Exception ( "Cannot find any groups in metadata" ) if ( not group_name ) : if ( len ( groups ) > 1 ) : raise Exception ( "We have more than one payment group in metadata, so group_name should be specified" ) return groups [ 0 ] [ "group_name" ] return group_name | In all getter function in case of single payment group group_name can be None |
42,944 | def get_from_ipfs_and_checkhash ( ipfs_client , ipfs_hash_base58 , validate = True ) : if validate : from snet_cli . resources . proto . unixfs_pb2 import Data from snet_cli . resources . proto . merckledag_pb2 import MerkleNode block_data = ipfs_client . block_get ( ipfs_hash_base58 ) mn = MerkleNode ( ) mn . ParseFromString ( block_data ) unixfs_data = Data ( ) unixfs_data . ParseFromString ( mn . Data ) assert unixfs_data . Type == unixfs_data . DataType . Value ( 'File' ) , "IPFS hash must be a file" data = unixfs_data . Data multihash . CodecReg . register ( 'base58' , base58 . b58encode , base58 . b58decode ) mh = multihash . decode ( ipfs_hash_base58 . encode ( 'ascii' ) , 'base58' ) if not mh . verify ( block_data ) : raise Exception ( "IPFS hash mismatch with data" ) else : data = ipfs_client . cat ( ipfs_hash_base58 ) return data | Get file from ipfs . We must check the hash because we cannot assume that ipfs_client hasn't been compromised |
42,945 | def hash_to_bytesuri ( s ) : s = "ipfs://" + s return s . encode ( "ascii" ) . ljust ( 32 * ( len ( s ) // 32 + 1 ) , b"\0" ) | Convert in and from bytes uri format used in Registry contract |
42,946 | def _get_stub_and_request_classes ( self , service_name ) : codegen_dir = Path . home ( ) . joinpath ( ".snet" , "mpe_client" , "control_service" ) proto_dir = Path ( __file__ ) . absolute ( ) . parent . joinpath ( "resources" , "proto" ) if ( not codegen_dir . joinpath ( "control_service_pb2.py" ) . is_file ( ) ) : compile_proto ( proto_dir , codegen_dir , proto_file = "control_service.proto" ) stub_class , request_class , _ = import_protobuf_from_dir ( codegen_dir , service_name ) return stub_class , request_class | import protobuf and return stub and request class |
42,947 | def _start_claim_channels ( self , grpc_channel , channels_ids ) : unclaimed_payments = self . _call_GetListUnclaimed ( grpc_channel ) unclaimed_payments_dict = { p [ "channel_id" ] : p for p in unclaimed_payments } to_claim = [ ] for channel_id in channels_ids : if ( channel_id not in unclaimed_payments_dict or unclaimed_payments_dict [ channel_id ] [ "amount" ] == 0 ) : self . _printout ( "There is nothing to claim for channel %i, we skip it" % channel_id ) continue blockchain = self . _get_channel_state_from_blockchain ( channel_id ) if ( unclaimed_payments_dict [ channel_id ] [ "nonce" ] != blockchain [ "nonce" ] ) : self . _printout ( "Old payment for channel %i is still in progress. Please run claim for this channel later." % channel_id ) continue to_claim . append ( ( channel_id , blockchain [ "nonce" ] ) ) payments = [ self . _call_StartClaim ( grpc_channel , channel_id , nonce ) for channel_id , nonce in to_claim ] return payments | Safely run StartClaim for given channels |
42,948 | def _claim_in_progress_and_claim_channels ( self , grpc_channel , channels ) : payments = self . _call_GetListInProgress ( grpc_channel ) if ( len ( payments ) > 0 ) : self . _printout ( "There are %i payments in 'progress' (they haven't been claimed in blockchain). We will claim them." % len ( payments ) ) self . _blockchain_claim ( payments ) payments = self . _start_claim_channels ( grpc_channel , channels ) self . _blockchain_claim ( payments ) | Claim all pending payments in progress and after we claim given channels |
42,949 | def create_default_config ( self ) : self . _config_file . parent . mkdir ( mode = 0o700 , exist_ok = True ) self [ "network.kovan" ] = { "default_eth_rpc_endpoint" : "https://kovan.infura.io" , "default_gas_price" : "medium" } self [ "network.mainnet" ] = { "default_eth_rpc_endpoint" : "https://mainnet.infura.io" , "default_gas_price" : "medium" } self [ "network.ropsten" ] = { "default_eth_rpc_endpoint" : "https://ropsten.infura.io" , "default_gas_price" : "medium" } self [ "network.rinkeby" ] = { "default_eth_rpc_endpoint" : "https://rinkeby.infura.io" , "default_gas_price" : "medium" } self [ "ipfs" ] = { "default_ipfs_endpoint" : "http://ipfs.singularitynet.io:80" } self [ "session" ] = { "network" : "kovan" } self . _persist ( ) print ( "We've created configuration file with default values in: %s\n" % str ( self . _config_file ) ) | Create default configuration if config file does not exist |
42,950 | def switch_to_json_payload_encoding ( call_fn , response_class ) : def json_serializer ( * args , ** kwargs ) : return bytes ( json_format . MessageToJson ( args [ 0 ] , True , preserving_proto_field_name = True ) , "utf-8" ) def json_deserializer ( * args , ** kwargs ) : resp = response_class ( ) json_format . Parse ( args [ 0 ] , resp , True ) return resp call_fn . _request_serializer = json_serializer call_fn . _response_deserializer = json_deserializer | Switch payload encoding to JSON for GRPC call |
42,951 | def print_agi_and_mpe_balances ( self ) : if ( self . args . account ) : account = self . args . account else : account = self . ident . address eth_wei = self . w3 . eth . getBalance ( account ) agi_cogs = self . call_contract_command ( "SingularityNetToken" , "balanceOf" , [ account ] ) mpe_cogs = self . call_contract_command ( "MultiPartyEscrow" , "balances" , [ account ] ) self . _printout ( " account: %s" % account ) self . _printout ( " ETH: %s" % self . w3 . fromWei ( eth_wei , 'ether' ) ) self . _printout ( " AGI: %s" % cogs2stragi ( agi_cogs ) ) self . _printout ( " MPE: %s" % cogs2stragi ( mpe_cogs ) ) | Print balance of ETH AGI and MPE wallet |
42,952 | def publish_proto_in_ipfs ( self ) : ipfs_hash_base58 = utils_ipfs . publish_proto_in_ipfs ( self . _get_ipfs_client ( ) , self . args . protodir ) self . _printout ( ipfs_hash_base58 ) | Publish proto files in ipfs and print hash |
42,953 | def publish_proto_metadata_update ( self ) : metadata = load_mpe_service_metadata ( self . args . metadata_file ) ipfs_hash_base58 = utils_ipfs . publish_proto_in_ipfs ( self . _get_ipfs_client ( ) , self . args . protodir ) metadata . set_simple_field ( "model_ipfs_hash" , ipfs_hash_base58 ) metadata . save_pretty ( self . args . metadata_file ) | Publish protobuf model in ipfs and update existing metadata file |
42,954 | def _get_persistent_mpe_dir ( self ) : mpe_address = self . get_mpe_address ( ) . lower ( ) registry_address = self . get_registry_address ( ) . lower ( ) return Path . home ( ) . joinpath ( ".snet" , "mpe_client" , "%s_%s" % ( mpe_address , registry_address ) ) | get persistent storage for mpe |
42,955 | def _check_mpe_address_metadata ( self , metadata ) : mpe_address = self . get_mpe_address ( ) if ( str ( mpe_address ) . lower ( ) != str ( metadata [ "mpe_address" ] ) . lower ( ) ) : raise Exception ( "MultiPartyEscrow contract address from metadata %s do not correspond to current MultiPartyEscrow address %s" % ( metadata [ "mpe_address" ] , mpe_address ) ) | we make sure that MultiPartyEscrow address from metadata is correct |
42,956 | def _init_or_update_registered_service_if_needed ( self ) : if ( self . is_service_initialized ( ) ) : old_reg = self . _read_service_info ( self . args . org_id , self . args . service_id ) if ( "metadataURI" not in old_reg ) : return service_registration = self . _get_service_registration ( ) if ( not self . is_metadataURI_has_changed ( service_registration ) ) : return else : service_registration = self . _get_service_registration ( ) service_metadata = self . _get_service_metadata_from_registry ( ) self . _init_or_update_service_if_needed ( service_metadata , service_registration ) | similar to _init_or_update_service_if_needed but we get service_registraion from registry so we can update only registered services |
42,957 | def _smart_get_initialized_channel_for_service ( self , metadata , filter_by , is_try_initailize = True ) : channels = self . _get_initialized_channels_for_service ( self . args . org_id , self . args . service_id ) group_id = metadata . get_group_id ( self . args . group_name ) channels = [ c for c in channels if c [ filter_by ] . lower ( ) == self . ident . address . lower ( ) and c [ "groupId" ] == group_id ] if ( len ( channels ) == 0 and is_try_initailize ) : self . _initialize_already_opened_channel ( metadata , self . ident . address , self . ident . address ) return self . _smart_get_initialized_channel_for_service ( metadata , filter_by , is_try_initailize = False ) if ( len ( channels ) == 0 ) : raise Exception ( "Cannot find initialized channel for service with org_id=%s service_id=%s and signer=%s" % ( self . args . org_id , self . args . service_id , self . ident . address ) ) if ( self . args . channel_id is None ) : if ( len ( channels ) > 1 ) : channel_ids = [ channel [ "channelId" ] for channel in channels ] raise Exception ( "We have several initialized channel: %s. You should use --channel-id to select one" % str ( channel_ids ) ) return channels [ 0 ] for channel in channels : if ( channel [ "channelId" ] == self . args . channel_id ) : return channel raise Exception ( "Channel %i has not been initialized or your are not the sender/signer of it" % self . args . channel_id ) | - filter_by can be sender or signer |
42,958 | def _get_all_filtered_channels ( self , topics_without_signature ) : mpe_address = self . get_mpe_address ( ) event_signature = self . ident . w3 . sha3 ( text = "ChannelOpen(uint256,uint256,address,address,address,bytes32,uint256,uint256)" ) . hex ( ) topics = [ event_signature ] + topics_without_signature logs = self . ident . w3 . eth . getLogs ( { "fromBlock" : self . args . from_block , "address" : mpe_address , "topics" : topics } ) abi = get_contract_def ( "MultiPartyEscrow" ) event_abi = abi_get_element_by_name ( abi , "ChannelOpen" ) channels_ids = [ get_event_data ( event_abi , l ) [ "args" ] [ "channelId" ] for l in logs ] return channels_ids | get all filtered chanels from blockchain logs |
42,959 | def list_repo ( self ) : req = proto . ListRepoRequest ( ) res = self . stub . ListRepo ( req , metadata = self . metadata ) if hasattr ( res , 'repo_info' ) : return res . repo_info return [ ] | Returns info about all Repos . |
42,960 | def delete_repo ( self , repo_name = None , force = False , all = False ) : if not all : if repo_name : req = proto . DeleteRepoRequest ( repo = proto . Repo ( name = repo_name ) , force = force ) self . stub . DeleteRepo ( req , metadata = self . metadata ) else : raise ValueError ( "Either a repo_name or all=True needs to be provided" ) else : if not repo_name : req = proto . DeleteRepoRequest ( force = force , all = all ) self . stub . DeleteRepo ( req , metadata = self . metadata ) else : raise ValueError ( "Cannot specify a repo_name if all=True" ) | Deletes a repo and reclaims the storage space it was using . |
42,961 | def start_commit ( self , repo_name , branch = None , parent = None , description = None ) : req = proto . StartCommitRequest ( parent = proto . Commit ( repo = proto . Repo ( name = repo_name ) , id = parent ) , branch = branch , description = description ) res = self . stub . StartCommit ( req , metadata = self . metadata ) return res | Begins the process of committing data to a Repo . Once started you can write to the Commit with PutFile and when all the data has been written you must finish the Commit with FinishCommit . NOTE data is not persisted until FinishCommit is called . A Commit object is returned . |
42,962 | def finish_commit ( self , commit ) : req = proto . FinishCommitRequest ( commit = commit_from ( commit ) ) res = self . stub . FinishCommit ( req , metadata = self . metadata ) return res | Ends the process of committing data to a Repo and persists the Commit . Once a Commit is finished the data becomes immutable and future attempts to write to it with PutFile will error . |
42,963 | def commit ( self , repo_name , branch = None , parent = None , description = None ) : commit = self . start_commit ( repo_name , branch , parent , description ) try : yield commit except Exception as e : print ( "An exception occurred during an open commit. " "Trying to finish it (Currently a commit can't be cancelled)" ) raise e finally : self . finish_commit ( commit ) | A context manager for doing stuff inside a commit . |
42,964 | def inspect_commit ( self , commit ) : req = proto . InspectCommitRequest ( commit = commit_from ( commit ) ) return self . stub . InspectCommit ( req , metadata = self . metadata ) | Returns info about a specific Commit . |
42,965 | def list_commit ( self , repo_name , to_commit = None , from_commit = None , number = 0 ) : req = proto . ListCommitRequest ( repo = proto . Repo ( name = repo_name ) , number = number ) if to_commit is not None : req . to . CopyFrom ( commit_from ( to_commit ) ) if from_commit is not None : getattr ( req , 'from' ) . CopyFrom ( commit_from ( from_commit ) ) res = self . stub . ListCommit ( req , metadata = self . metadata ) if hasattr ( res , 'commit_info' ) : return res . commit_info return [ ] | Gets a list of CommitInfo objects . |
42,966 | def delete_commit ( self , commit ) : req = proto . DeleteCommitRequest ( commit = commit_from ( commit ) ) self . stub . DeleteCommit ( req , metadata = self . metadata ) | Deletes a commit . |
42,967 | def flush_commit ( self , commits , repos = tuple ( ) ) : req = proto . FlushCommitRequest ( commit = [ commit_from ( c ) for c in commits ] , to_repo = [ proto . Repo ( name = r ) for r in repos ] ) res = self . stub . FlushCommit ( req , metadata = self . metadata ) return res | Blocks until all of the commits which have a set of commits as provenance have finished . For commits to be considered they must have all of the specified commits as provenance . This in effect waits for all of the jobs that are triggered by a set of commits to complete . It returns an error if any of the commits it s waiting on are cancelled due to one of the jobs encountering an error during runtime . Note that it s never necessary to call FlushCommit to run jobs they ll run no matter what FlushCommit just allows you to wait for them to complete and see their output once they do . This returns an iterator of CommitInfo objects . |
42,968 | def subscribe_commit ( self , repo_name , branch , from_commit_id = None ) : repo = proto . Repo ( name = repo_name ) req = proto . SubscribeCommitRequest ( repo = repo , branch = branch ) if from_commit_id is not None : getattr ( req , 'from' ) . CopyFrom ( proto . Commit ( repo = repo , id = from_commit_id ) ) res = self . stub . SubscribeCommit ( req , metadata = self . metadata ) return res | SubscribeCommit is like ListCommit but it keeps listening for commits as they come in . This returns an iterator Commit objects . |
42,969 | def list_branch ( self , repo_name ) : req = proto . ListBranchRequest ( repo = proto . Repo ( name = repo_name ) ) res = self . stub . ListBranch ( req , metadata = self . metadata ) if hasattr ( res , 'branch_info' ) : return res . branch_info return [ ] | Lists the active Branch objects on a Repo . |
42,970 | def set_branch ( self , commit , branch_name ) : res = proto . SetBranchRequest ( commit = commit_from ( commit ) , branch = branch_name ) self . stub . SetBranch ( res , metadata = self . metadata ) | Sets a commit and its ancestors as a branch . |
42,971 | def delete_branch ( self , repo_name , branch_name ) : res = proto . DeleteBranchRequest ( repo = Repo ( name = repo_name ) , branch = branch_name ) self . stub . DeleteBranch ( res , metadata = self . metadata ) | Deletes a branch but leaves the commits themselves intact . In other words those commits can still be accessed via commit IDs and other branches they happen to be on . |
42,972 | def put_file_url ( self , commit , path , url , recursive = False ) : req = iter ( [ proto . PutFileRequest ( file = proto . File ( commit = commit_from ( commit ) , path = path ) , url = url , recursive = recursive ) ] ) self . stub . PutFile ( req , metadata = self . metadata ) | Puts a file using the content found at a URL . The URL is sent to the server which performs the request . |
42,973 | def get_file ( self , commit , path , offset_bytes = 0 , size_bytes = 0 , extract_value = True ) : req = proto . GetFileRequest ( file = proto . File ( commit = commit_from ( commit ) , path = path ) , offset_bytes = offset_bytes , size_bytes = size_bytes ) res = self . stub . GetFile ( req , metadata = self . metadata ) if extract_value : return ExtractValueIterator ( res ) return res | Returns an iterator of the contents contents of a file at a specific Commit . |
42,974 | def get_files ( self , commit , paths , recursive = False ) : filtered_file_infos = [ ] for path in paths : fi = self . inspect_file ( commit , path ) if fi . file_type == proto . FILE : filtered_file_infos . append ( fi ) else : filtered_file_infos += self . list_file ( commit , path , recursive = recursive ) filtered_paths = [ fi . file . path for fi in filtered_file_infos if fi . file_type == proto . FILE ] return { path : b'' . join ( self . get_file ( commit , path ) ) for path in filtered_paths } | Returns the contents of a list of files at a specific Commit as a dictionary of file paths to data . |
42,975 | def inspect_file ( self , commit , path ) : req = proto . InspectFileRequest ( file = proto . File ( commit = commit_from ( commit ) , path = path ) ) res = self . stub . InspectFile ( req , metadata = self . metadata ) return res | Returns info about a specific file . |
42,976 | def list_file ( self , commit , path , recursive = False ) : req = proto . ListFileRequest ( file = proto . File ( commit = commit_from ( commit ) , path = path ) ) res = self . stub . ListFile ( req , metadata = self . metadata ) file_infos = res . file_info if recursive : dirs = [ f for f in file_infos if f . file_type == proto . DIR ] files = [ f for f in file_infos if f . file_type == proto . FILE ] return sum ( [ self . list_file ( commit , d . file . path , recursive ) for d in dirs ] , files ) return list ( file_infos ) | Lists the files in a directory . |
42,977 | def delete_file ( self , commit , path ) : req = proto . DeleteFileRequest ( file = proto . File ( commit = commit_from ( commit ) , path = path ) ) self . stub . DeleteFile ( req , metadata = self . metadata ) | Deletes a file from a Commit . DeleteFile leaves a tombstone in the Commit assuming the file isn t written to later attempting to get the file from the finished commit will result in not found error . The file will of course remain intact in the Commit s parent . |
42,978 | def handle_authn_request ( self , context , binding_in ) : return self . _handle_authn_request ( context , binding_in , self . idp ) | This method is bound to the starting endpoint of the authentication . |
42,979 | def _create_state_data ( self , context , resp_args , relay_state ) : if "name_id_policy" in resp_args and resp_args [ "name_id_policy" ] is not None : resp_args [ "name_id_policy" ] = resp_args [ "name_id_policy" ] . to_string ( ) . decode ( "utf-8" ) return { "resp_args" : resp_args , "relay_state" : relay_state } | Returns a dict containing the state needed in the response flow . |
42,980 | def _handle_authn_request ( self , context , binding_in , idp ) : req_info = idp . parse_authn_request ( context . request [ "SAMLRequest" ] , binding_in ) authn_req = req_info . message satosa_logging ( logger , logging . DEBUG , "%s" % authn_req , context . state ) try : resp_args = idp . response_args ( authn_req ) except SAMLError as e : satosa_logging ( logger , logging . ERROR , "Could not find necessary info about entity: %s" % e , context . state ) return ServiceError ( "Incorrect request from requester: %s" % e ) requester = resp_args [ "sp_entity_id" ] context . state [ self . name ] = self . _create_state_data ( context , idp . response_args ( authn_req ) , context . request . get ( "RelayState" ) ) subject = authn_req . subject name_id_value = subject . name_id . text if subject else None nameid_formats = { "from_policy" : authn_req . name_id_policy and authn_req . name_id_policy . format , "from_response" : subject and subject . name_id and subject . name_id . format , "from_metadata" : ( idp . metadata [ requester ] . get ( "spsso_descriptor" , [ { } ] ) [ 0 ] . get ( "name_id_format" , [ { } ] ) [ 0 ] . get ( "text" ) ) , "default" : NAMEID_FORMAT_TRANSIENT , } name_id_format = ( nameid_formats [ "from_policy" ] or ( nameid_formats [ "from_response" ] != NAMEID_FORMAT_UNSPECIFIED and nameid_formats [ "from_response" ] ) or nameid_formats [ "from_metadata" ] or nameid_formats [ "from_response" ] or nameid_formats [ "default" ] ) requester_name = self . _get_sp_display_name ( idp , requester ) internal_req = InternalData ( subject_id = name_id_value , subject_type = name_id_format , requester = requester , requester_name = requester_name , ) idp_policy = idp . config . getattr ( "policy" , "idp" ) if idp_policy : internal_req . attributes = self . _get_approved_attributes ( idp , idp_policy , requester , context . state ) return self . auth_req_callback_func ( context , internal_req ) | See doc for handle_authn_request method . |
42,981 | def _get_approved_attributes ( self , idp , idp_policy , sp_entity_id , state ) : name_format = idp_policy . get_name_form ( sp_entity_id ) attrconvs = idp . config . attribute_converters idp_policy . acs = attrconvs attribute_filter = [ ] for aconv in attrconvs : if aconv . name_format == name_format : all_attributes = { v : None for v in aconv . _fro . values ( ) } attribute_filter = list ( idp_policy . restrict ( all_attributes , sp_entity_id , idp . metadata ) . keys ( ) ) break attribute_filter = self . converter . to_internal_filter ( self . attribute_profile , attribute_filter ) satosa_logging ( logger , logging . DEBUG , "Filter: %s" % attribute_filter , state ) return attribute_filter | Returns a list of approved attributes |
42,982 | def _build_idp_config_endpoints ( self , config , providers ) : idp_endpoints = [ ] for endp_category in self . endpoints : for func , endpoint in self . endpoints [ endp_category ] . items ( ) : for provider in providers : _endpoint = "{base}/{provider}/{endpoint}" . format ( base = self . base_url , provider = provider , endpoint = endpoint ) idp_endpoints . append ( ( _endpoint , func ) ) config [ "service" ] [ "idp" ] [ "endpoints" ] [ endp_category ] = idp_endpoints return config | Builds the final frontend module config |
42,983 | def _load_endpoints_to_config ( self , provider , target_entity_id , config = None ) : idp_conf = copy . deepcopy ( config or self . idp_config ) for service , endpoint in self . endpoints . items ( ) : idp_endpoints = [ ] for binding , path in endpoint . items ( ) : url = "{base}/{provider}/{target_id}/{path}" . format ( base = self . base_url , provider = provider , target_id = target_entity_id , path = path ) idp_endpoints . append ( ( url , binding ) ) idp_conf [ "service" ] [ "idp" ] [ "endpoints" ] [ service ] = idp_endpoints return idp_conf | Loads approved endpoints to the config . |
42,984 | def _load_idp_dynamic_entity_id ( self , state ) : idp_config_file = copy . deepcopy ( self . idp_config ) idp_config_file [ "entityid" ] = "{}/{}" . format ( self . idp_config [ "entityid" ] , state [ self . name ] [ "target_entity_id" ] ) idp_config = IdPConfig ( ) . load ( idp_config_file , metadata_construction = False ) return Server ( config = idp_config ) | Loads an idp server with the entity id saved in state |
42,985 | def _get_co_name_from_path ( self , context ) : url_encoded_co_name = context . path . split ( "/" ) [ 1 ] co_name = unquote_plus ( url_encoded_co_name ) return co_name | The CO name is URL encoded and obtained from the request path for a request coming into one of the standard binding endpoints . For example the HTTP - Redirect binding request path will have the format |
42,986 | def _get_co_name ( self , context ) : try : co_name = context . state [ self . name ] [ self . KEY_CO_NAME ] logger . debug ( "Found CO {} from state" . format ( co_name ) ) except KeyError : co_name = self . _get_co_name_from_path ( context ) logger . debug ( "Found CO {} from request path" . format ( co_name ) ) return co_name | Obtain the CO name previously saved in the request state or if not set use the request path obtained from the current context to determine the target CO . |
42,987 | def _add_endpoints_to_config ( self , config , co_name , backend_name ) : for service , endpoint in self . endpoints . items ( ) : idp_endpoints = [ ] for binding , path in endpoint . items ( ) : url = "{base}/{backend}/{co_name}/{path}" . format ( base = self . base_url , backend = backend_name , co_name = quote_plus ( co_name ) , path = path ) mapping = ( url , binding ) idp_endpoints . append ( mapping ) config [ "service" ] [ "idp" ] [ "endpoints" ] [ service ] = idp_endpoints return config | Use the request path from the context to determine the target backend then construct mappings from bindings to endpoints for the virtual IdP for the CO . |
42,988 | def _add_entity_id ( self , config , co_name ) : base_entity_id = config [ 'entityid' ] co_entity_id = "{}/{}" . format ( base_entity_id , quote_plus ( co_name ) ) config [ 'entityid' ] = co_entity_id return config | Use the CO name to construct the entity ID for the virtual IdP for the CO . |
42,989 | def _overlay_for_saml_metadata ( self , config , co_name ) : for co in self . config [ self . KEY_CO ] : if co [ self . KEY_ENCODEABLE_NAME ] == co_name : break key = self . KEY_ORGANIZATION if key in co : if key not in config : config [ key ] = { } for org_key in self . KEY_ORGANIZATION_KEYS : if org_key in co [ key ] : config [ key ] [ org_key ] = co [ key ] [ org_key ] key = self . KEY_CONTACT_PERSON if key in co : config [ key ] = co [ key ] return config | Overlay configuration details like organization and contact person from the front end configuration onto the IdP configuration to support SAML metadata generation . |
42,990 | def _co_names_from_config ( self ) : co_names = [ co [ self . KEY_ENCODEABLE_NAME ] for co in self . config [ self . KEY_CO ] ] return co_names | Parse the configuration for the names of the COs for which to construct virtual IdPs . |
42,991 | def _create_co_virtual_idp ( self , context ) : co_name = self . _get_co_name ( context ) context . decorate ( self . KEY_CO_NAME , co_name ) co_names = self . _co_names_from_config ( ) if co_name not in co_names : msg = "CO {} not in configured list of COs {}" . format ( co_name , co_names ) satosa_logging ( logger , logging . WARN , msg , context . state ) raise SATOSAError ( msg ) backend_name = context . target_backend idp_config = copy . deepcopy ( self . idp_config ) idp_config = self . _add_endpoints_to_config ( idp_config , co_name , backend_name ) idp_config = self . _add_entity_id ( idp_config , co_name ) pysaml2_idp_config = IdPConfig ( ) . load ( idp_config , metadata_construction = False ) server = Server ( config = pysaml2_idp_config ) return server | Create a virtual IdP to represent the CO . |
42,992 | def _authn_response ( self , context ) : state_data = context . state [ self . name ] aresp = self . consumer . parse_response ( AuthorizationResponse , info = json . dumps ( context . request ) ) self . _verify_state ( aresp , state_data , context . state ) rargs = { "code" : aresp [ "code" ] , "redirect_uri" : self . redirect_url , "state" : state_data [ "state" ] } atresp = self . consumer . do_access_token_request ( request_args = rargs , state = aresp [ "state" ] ) if "verify_accesstoken_state" not in self . config or self . config [ "verify_accesstoken_state" ] : self . _verify_state ( atresp , state_data , context . state ) user_info = self . user_information ( atresp [ "access_token" ] ) internal_response = InternalData ( auth_info = self . auth_info ( context . request ) ) internal_response . attributes = self . converter . to_internal ( self . external_type , user_info ) internal_response . subject_id = user_info [ self . user_id_attr ] del context . state [ self . name ] return self . auth_callback_func ( context , internal_response ) | Handles the authentication response from the AS . |
42,993 | def hash_data ( salt , value , hash_alg = None ) : hash_alg = hash_alg or 'sha512' hasher = hashlib . new ( hash_alg ) hasher . update ( value . encode ( 'utf-8' ) ) hasher . update ( salt . encode ( 'utf-8' ) ) value_hashed = hasher . hexdigest ( ) return value_hashed | Hashes a value together with a salt with the given hash algorithm . |
42,994 | def _construct_filter_value ( self , candidate , data ) : context = self . context attributes = data . attributes satosa_logging ( logger , logging . DEBUG , "Input attributes {}" . format ( attributes ) , context . state ) values = [ ] for identifier_name in candidate [ 'attribute_names' ] : v = attributes . get ( identifier_name , None ) if isinstance ( v , list ) : v = v [ 0 ] values . append ( v ) satosa_logging ( logger , logging . DEBUG , "Found candidate values {}" . format ( values ) , context . state ) if 'name_id' in candidate [ 'attribute_names' ] : candidate_nameid_value = None candidate_name_id_format = candidate . get ( 'name_id_format' ) name_id_value = data . subject_id name_id_format = data . subject_type if ( name_id_value and candidate_name_id_format and candidate_name_id_format == name_id_format ) : satosa_logging ( logger , logging . DEBUG , "IdP asserted NameID {}" . format ( name_id_value ) , context . state ) candidate_nameid_value = name_id_value if candidate_nameid_value not in values : satosa_logging ( logger , logging . DEBUG , "Added NameID {} to candidate values" . format ( candidate_nameid_value ) , context . state ) values . append ( candidate_nameid_value ) else : satosa_logging ( logger , logging . WARN , "NameID {} value also asserted as attribute value" . format ( candidate_nameid_value ) , context . state ) if None in values : satosa_logging ( logger , logging . DEBUG , "Candidate is missing value so skipping" , context . state ) return None if 'add_scope' in candidate : if candidate [ 'add_scope' ] == 'issuer_entityid' : scope = data . auth_info . issuer else : scope = candidate [ 'add_scope' ] satosa_logging ( logger , logging . DEBUG , "Added scope {} to values" . format ( scope ) , context . state ) values . append ( scope ) value = '' . join ( values ) satosa_logging ( logger , logging . DEBUG , "Constructed filter value {}" . format ( value ) , context . 
state ) return value | Construct and return a LDAP directory search filter value from the candidate identifier . |
42,995 | def _filter_config ( self , config , fields = None ) : filter_fields_default = [ 'bind_password' , 'connection' ] filter_fields = fields or filter_fields_default return dict ( map ( lambda key : ( key , '<hidden>' if key in filter_fields else config [ key ] ) , config . keys ( ) ) ) | Filter sensitive details like passwords from a configuration dictionary . |
42,996 | def _ldap_connection_factory ( self , config ) : ldap_url = config [ 'ldap_url' ] bind_dn = config [ 'bind_dn' ] bind_password = config [ 'bind_password' ] if not ldap_url : raise LdapAttributeStoreError ( "ldap_url is not configured" ) if not bind_dn : raise LdapAttributeStoreError ( "bind_dn is not configured" ) if not bind_password : raise LdapAttributeStoreError ( "bind_password is not configured" ) pool_size = config [ 'pool_size' ] pool_keepalive = config [ 'pool_keepalive' ] server = ldap3 . Server ( config [ 'ldap_url' ] ) satosa_logging ( logger , logging . DEBUG , "Creating a new LDAP connection" , None ) satosa_logging ( logger , logging . DEBUG , "Using LDAP URL {}" . format ( ldap_url ) , None ) satosa_logging ( logger , logging . DEBUG , "Using bind DN {}" . format ( bind_dn ) , None ) satosa_logging ( logger , logging . DEBUG , "Using pool size {}" . format ( pool_size ) , None ) satosa_logging ( logger , logging . DEBUG , "Using pool keep alive {}" . format ( pool_keepalive ) , None ) try : connection = ldap3 . Connection ( server , bind_dn , bind_password , auto_bind = True , client_strategy = ldap3 . REUSABLE , pool_size = pool_size , pool_keepalive = pool_keepalive ) except LDAPException as e : msg = "Caught exception when connecting to LDAP server: {}" . format ( e ) satosa_logging ( logger , logging . ERROR , msg , None ) raise LdapAttributeStoreError ( msg ) satosa_logging ( logger , logging . DEBUG , "Successfully connected to LDAP server" , None ) return connection | Use the input configuration to instantiate and return a ldap3 Connection object . |
42,997 | def _populate_attributes ( self , config , record , context , data ) : search_return_attributes = config [ 'search_return_attributes' ] for attr in search_return_attributes . keys ( ) : if attr in record [ "attributes" ] : if record [ "attributes" ] [ attr ] : data . attributes [ search_return_attributes [ attr ] ] = record [ "attributes" ] [ attr ] satosa_logging ( logger , logging . DEBUG , "Setting internal attribute {} with values {}" . format ( search_return_attributes [ attr ] , record [ "attributes" ] [ attr ] ) , context . state ) else : satosa_logging ( logger , logging . DEBUG , "Not setting internal attribute {} because value {} is null or empty" . format ( search_return_attributes [ attr ] , record [ "attributes" ] [ attr ] ) , context . state ) | Use a record found in LDAP to populate attributes . |
42,998 | def _populate_input_for_name_id ( self , config , record , context , data ) : user_id = "" user_id_from_attrs = config [ 'user_id_from_attrs' ] for attr in user_id_from_attrs : if attr in record [ "attributes" ] : value = record [ "attributes" ] [ attr ] if isinstance ( value , list ) : value . sort ( ) user_id += "" . join ( value ) satosa_logging ( logger , logging . DEBUG , "Added attribute {} with values {} to input for NameID" . format ( attr , value ) , context . state ) else : user_id += value satosa_logging ( logger , logging . DEBUG , "Added attribute {} with value {} to input for NameID" . format ( attr , value ) , context . state ) if not user_id : satosa_logging ( logger , logging . WARNING , "Input for NameID is empty so not overriding default" , context . state ) else : data . subject_id = user_id satosa_logging ( logger , logging . DEBUG , "Input for NameID is {}" . format ( data . subject_id ) , context . state ) | Use a record found in LDAP to populate input for NameID generation . |
42,999 | def _verify_dict ( self , conf ) : if not conf : raise SATOSAConfigurationError ( "Missing configuration or unknown format" ) for key in SATOSAConfig . mandatory_dict_keys : if key not in conf : raise SATOSAConfigurationError ( "Missing key '%s' in config" % key ) for key in SATOSAConfig . sensitive_dict_keys : if key not in conf and "SATOSA_{key}" . format ( key = key ) not in os . environ : raise SATOSAConfigurationError ( "Missing key '%s' from config and ENVIRONMENT" % key ) | Check that the configuration contains all necessary keys . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.