code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
|---|---|---|---|
def default_update_function(self, n: Tuple[str, dict]) -> List[float]:
    """Default update function for a CAG node.

    n: A 2-tuple containing the node name and node data.
    Returns:
        A list of values corresponding to the distribution of the value of
        the real-valued variable representing the node.
    """
    node_name = n[0]
    updated_values = []
    # One matrix-vector product per sampled transition matrix / state pair.
    for sample_index in range(self.res):
        transition_row = self.transition_matrix_collection[sample_index].loc[node_name].values
        updated_values.append(transition_row @ self.s0[sample_index].values)
    return updated_values
|
def function[default_update_function, parameter[self, n]]:
constant[ The default update function for a CAG node.
n: A 2-tuple containing the node name and node data.
Returns:
A list of values corresponding to the distribution of the value of
the real-valued variable representing the node.
]
return[<ast.ListComp object at 0x7da1b044d0f0>]
|
keyword[def] identifier[default_update_function] ( identifier[self] , identifier[n] : identifier[Tuple] [ identifier[str] , identifier[dict] ])-> identifier[List] [ identifier[float] ]:
literal[string]
keyword[return] [
identifier[self] . identifier[transition_matrix_collection] [ identifier[i] ]. identifier[loc] [ identifier[n] [ literal[int] ]]. identifier[values]
@ identifier[self] . identifier[s0] [ identifier[i] ]. identifier[values]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[res] )
]
|
def default_update_function(self, n: Tuple[str, dict]) -> List[float]:
""" The default update function for a CAG node.
n: A 2-tuple containing the node name and node data.
Returns:
A list of values corresponding to the distribution of the value of
the real-valued variable representing the node.
"""
return [self.transition_matrix_collection[i].loc[n[0]].values @ self.s0[i].values for i in range(self.res)]
|
def fasper(x, y, ofac, hifac, n_threads, MACC=4):
    """
    Given abscissas x (which need not be equally spaced) and ordinates
    y, and given a desired oversampling factor ofac (a typical value
    being 4 or larger). this routine creates an array wk1 with a
    sequence of nout increasing frequencies (not angular frequencies)
    up to hifac times the "average" Nyquist frequency, and creates
    an array wk2 with the values of the Lomb normalized periodogram at
    those frequencies. The arrays x and y are not altered. This
    routine also returns jmax such that wk2(jmax) is the maximum
    element in wk2, and prob, an estimate of the significance of that
    maximum against the hypothesis of random noise. A small value of prob
    indicates that a significant periodic signal is present.
    Reference:
    Press, W. H. & Rybicki, G. B. 1989
    ApJ vol. 338, p. 277-280.
    Fast algorithm for spectral analysis of unevenly sampled data
    (1989ApJ...338..277P)
    Arguments:
    X : Abscissas array, (e.g. an array of times).
    Y : Ordinates array, (e.g. corresponding counts).
    Ofac : Oversampling factor.
    Hifac : Hifac * "average" Nyquist frequency = highest frequency
    for which values of the Lomb normalized periodogram will
    be calculated.
    n_threads : number of threads to use.
    Returns:
    Wk1 : An array of Lomb periodogram frequencies.
    Wk2 : An array of corresponding values of the Lomb periodogram.
    Nout : Wk1 & Wk2 dimensions (number of calculated frequencies)
    Jmax : The array index corresponding to the MAX( Wk2 ).
    Prob : False Alarm Probability of the largest Periodogram value
    MACC : Number of interpolation points per 1/4 cycle
    of highest frequency
    History:
    02/23/2009, v1.0, MF
    Translation of IDL code (orig. Numerical Recipes)
    """
    # Check dimensions of input arrays.
    # NOTE(review): Python 2 `long()` replaced with `int()` -- `long` does not
    # exist on Python 3 and would raise NameError here.
    n = int(len(x))
    if n != len(y):
        # NOTE(review): kept the original best-effort behavior (print and
        # return None) so existing callers that check for None still work.
        print('Incompatible arrays.')
        return
    nout = int(0.5*ofac*hifac*n)
    nfreqt = int(ofac*hifac*n*MACC)  # Size the FFT as next power
    nfreq = 64                       # of 2 above nfreqt.
    while nfreq < nfreqt:
        nfreq = 2*nfreq
    ndim = int(2*nfreq)
    # Compute the mean, variance
    ave = y.mean()
    # sample variance because the divisor is N-1
    var = ((y - y.mean())**2).sum()/(len(y) - 1)
    # and range of the data.
    xmin = x.min()
    xmax = x.max()
    xdif = xmax - xmin
    # Extrapolate (spread) the data into the zeroed workspaces.
    if is_pyfftw:
        # Multiplying uninitialized aligned memory by 0 yields a zeroed,
        # FFTW-aligned complex buffer.
        wk1 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex') * 0.
        wk2 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex') * 0.
    else:
        wk1 = zeros(ndim, dtype='complex')
        wk2 = zeros(ndim, dtype='complex')
    fac = ndim/(xdif*ofac)
    fndim = ndim
    # Fractional grid positions of each sample (and twice the frequency for wk2).
    ck = ((x - xmin)*fac) % fndim
    ckk = (2.0*ck) % fndim
    for j in range(0, n):
        __spread__(y[j] - ave, wk1, ndim, ck[j], MACC)
        __spread__(1.0, wk2, ndim, ckk[j], MACC)
    # Take the Fast Fourier Transforms.
    if is_pyfftw:
        fft_wk1 = pyfftw.builders.ifft(wk1, planner_effort='FFTW_ESTIMATE',
                                       threads=n_threads)
        wk1 = fft_wk1() * len(wk1)
        fft_wk2 = pyfftw.builders.ifft(wk2, planner_effort='FFTW_ESTIMATE',
                                       threads=n_threads)
        wk2 = fft_wk2() * len(wk2)
    else:
        wk1 = ifft(wk1)*len(wk1)
        # len(wk1) == len(wk2) == ndim at this point, so this matches the
        # pyfftw branch above.
        wk2 = ifft(wk2)*len(wk1)
    # Keep only the nout positive frequencies (index 0 is the DC term).
    wk1 = wk1[1:nout + 1]
    wk2 = wk2[1:nout + 1]
    rwk1 = wk1.real
    iwk1 = wk1.imag
    rwk2 = wk2.real
    iwk2 = wk2.imag
    df = 1.0/(xdif*ofac)  # frequency resolution
    # Compute the Lomb value for each frequency (Press & Rybicki eqs.).
    hypo2 = 2.0*abs(wk2)
    hc2wt = rwk2/hypo2
    hs2wt = iwk2/hypo2
    cwt = sqrt(0.5 + hc2wt)
    swt = sign(hs2wt)*(sqrt(0.5 - hc2wt))
    den = 0.5*n + hc2wt*rwk2 + hs2wt*iwk2
    cterm = (cwt*rwk1 + swt*iwk1)**2./den
    sterm = (cwt*iwk1 - swt*rwk1)**2./(n - den)
    # wk1 becomes the frequency grid, wk2 the normalized periodogram.
    wk1 = df*(arange(nout, dtype='float') + 1.)
    wk2 = (cterm + sterm)/(2.0*var)
    pmax = wk2.max()
    jmax = wk2.argmax()
    # Estimate significance of largest peak value (false-alarm probability).
    expy = exp(-pmax)
    effm = 2.0*(nout)/ofac  # effective number of independent frequencies
    prob = effm*expy
    if prob > 0.01:
        # Use the exact expression when the linearized estimate is not small.
        prob = 1.0 - (1.0 - expy)**effm
    return wk1, wk2, nout, jmax, prob
|
def function[fasper, parameter[x, y, ofac, hifac, n_threads, MACC]]:
constant[
Given abscissas x (which need not be equally spaced) and ordinates
y, and given a desired oversampling factor ofac (a typical value
being 4 or larger). this routine creates an array wk1 with a
sequence of nout increasing frequencies (not angular frequencies)
up to hifac times the "average" Nyquist frequency, and creates
an array wk2 with the values of the Lomb normalized periodogram at
those frequencies. The arrays x and y are not altered. This
routine also returns jmax such that wk2(jmax) is the maximum
element in wk2, and prob, an estimate of the significance of that
maximum against the hypothesis of random noise. A small value of prob
indicates that a significant periodic signal is present.
Reference:
Press, W. H. & Rybicki, G. B. 1989
ApJ vol. 338, p. 277-280.
Fast algorithm for spectral analysis of unevenly sampled data
(1989ApJ...338..277P)
Arguments:
X : Abscissas array, (e.g. an array of times).
Y : Ordinates array, (e.g. corresponding counts).
Ofac : Oversampling factor.
Hifac : Hifac * "average" Nyquist frequency = highest frequency
for which values of the Lomb normalized periodogram will
be calculated.
n_threads : number of threads to use.
Returns:
Wk1 : An array of Lomb periodogram frequencies.
Wk2 : An array of corresponding values of the Lomb periodogram.
Nout : Wk1 & Wk2 dimensions (number of calculated frequencies)
Jmax : The array index corresponding to the MAX( Wk2 ).
Prob : False Alarm Probability of the largest Periodogram value
MACC : Number of interpolation points per 1/4 cycle
of highest frequency
History:
02/23/2009, v1.0, MF
Translation of IDL code (orig. Numerical recipies)
]
variable[n] assign[=] call[name[long], parameter[call[name[len], parameter[name[x]]]]]
if compare[name[n] not_equal[!=] call[name[len], parameter[name[y]]]] begin[:]
call[name[print], parameter[constant[Incompatible arrays.]]]
return[None]
variable[nout] assign[=] call[name[int], parameter[binary_operation[binary_operation[binary_operation[constant[0.5] * name[ofac]] * name[hifac]] * name[n]]]]
variable[nfreqt] assign[=] call[name[long], parameter[binary_operation[binary_operation[binary_operation[name[ofac] * name[hifac]] * name[n]] * name[MACC]]]]
variable[nfreq] assign[=] constant[64]
while compare[name[nfreq] less[<] name[nfreqt]] begin[:]
variable[nfreq] assign[=] binary_operation[constant[2] * name[nfreq]]
variable[ndim] assign[=] call[name[long], parameter[binary_operation[constant[2] * name[nfreq]]]]
variable[ave] assign[=] call[name[y].mean, parameter[]]
variable[var] assign[=] binary_operation[call[binary_operation[binary_operation[name[y] - call[name[y].mean, parameter[]]] ** constant[2]].sum, parameter[]] / binary_operation[call[name[len], parameter[name[y]]] - constant[1]]]
variable[xmin] assign[=] call[name[x].min, parameter[]]
variable[xmax] assign[=] call[name[x].max, parameter[]]
variable[xdif] assign[=] binary_operation[name[xmax] - name[xmin]]
if name[is_pyfftw] begin[:]
variable[wk1] assign[=] binary_operation[call[name[pyfftw].n_byte_align_empty, parameter[call[name[int], parameter[name[ndim]]], constant[16], constant[complex]]] * constant[0.0]]
variable[wk2] assign[=] binary_operation[call[name[pyfftw].n_byte_align_empty, parameter[call[name[int], parameter[name[ndim]]], constant[16], constant[complex]]] * constant[0.0]]
variable[fac] assign[=] binary_operation[name[ndim] / binary_operation[name[xdif] * name[ofac]]]
variable[fndim] assign[=] name[ndim]
variable[ck] assign[=] binary_operation[binary_operation[binary_operation[name[x] - name[xmin]] * name[fac]] <ast.Mod object at 0x7da2590d6920> name[fndim]]
variable[ckk] assign[=] binary_operation[binary_operation[constant[2.0] * name[ck]] <ast.Mod object at 0x7da2590d6920> name[fndim]]
for taget[name[j]] in starred[call[name[range], parameter[constant[0], name[n]]]] begin[:]
call[name[__spread__], parameter[binary_operation[call[name[y]][name[j]] - name[ave]], name[wk1], name[ndim], call[name[ck]][name[j]], name[MACC]]]
call[name[__spread__], parameter[constant[1.0], name[wk2], name[ndim], call[name[ckk]][name[j]], name[MACC]]]
if name[is_pyfftw] begin[:]
variable[fft_wk1] assign[=] call[name[pyfftw].builders.ifft, parameter[name[wk1]]]
variable[wk1] assign[=] binary_operation[call[name[fft_wk1], parameter[]] * call[name[len], parameter[name[wk1]]]]
variable[fft_wk2] assign[=] call[name[pyfftw].builders.ifft, parameter[name[wk2]]]
variable[wk2] assign[=] binary_operation[call[name[fft_wk2], parameter[]] * call[name[len], parameter[name[wk2]]]]
variable[wk1] assign[=] call[name[wk1]][<ast.Slice object at 0x7da20e9b3c10>]
variable[wk2] assign[=] call[name[wk2]][<ast.Slice object at 0x7da20e9b3130>]
variable[rwk1] assign[=] name[wk1].real
variable[iwk1] assign[=] name[wk1].imag
variable[rwk2] assign[=] name[wk2].real
variable[iwk2] assign[=] name[wk2].imag
variable[df] assign[=] binary_operation[constant[1.0] / binary_operation[name[xdif] * name[ofac]]]
variable[hypo2] assign[=] binary_operation[constant[2.0] * call[name[abs], parameter[name[wk2]]]]
variable[hc2wt] assign[=] binary_operation[name[rwk2] / name[hypo2]]
variable[hs2wt] assign[=] binary_operation[name[iwk2] / name[hypo2]]
variable[cwt] assign[=] call[name[sqrt], parameter[binary_operation[constant[0.5] + name[hc2wt]]]]
variable[swt] assign[=] binary_operation[call[name[sign], parameter[name[hs2wt]]] * call[name[sqrt], parameter[binary_operation[constant[0.5] - name[hc2wt]]]]]
variable[den] assign[=] binary_operation[binary_operation[binary_operation[constant[0.5] * name[n]] + binary_operation[name[hc2wt] * name[rwk2]]] + binary_operation[name[hs2wt] * name[iwk2]]]
variable[cterm] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[cwt] * name[rwk1]] + binary_operation[name[swt] * name[iwk1]]] ** constant[2.0]] / name[den]]
variable[sterm] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[cwt] * name[iwk1]] - binary_operation[name[swt] * name[rwk1]]] ** constant[2.0]] / binary_operation[name[n] - name[den]]]
variable[wk1] assign[=] binary_operation[name[df] * binary_operation[call[name[arange], parameter[name[nout]]] + constant[1.0]]]
variable[wk2] assign[=] binary_operation[binary_operation[name[cterm] + name[sterm]] / binary_operation[constant[2.0] * name[var]]]
variable[pmax] assign[=] call[name[wk2].max, parameter[]]
variable[jmax] assign[=] call[name[wk2].argmax, parameter[]]
variable[expy] assign[=] call[name[exp], parameter[<ast.UnaryOp object at 0x7da207f01390>]]
variable[effm] assign[=] binary_operation[binary_operation[constant[2.0] * name[nout]] / name[ofac]]
variable[prob] assign[=] binary_operation[name[effm] * name[expy]]
if compare[name[prob] greater[>] constant[0.01]] begin[:]
variable[prob] assign[=] binary_operation[constant[1.0] - binary_operation[binary_operation[constant[1.0] - name[expy]] ** name[effm]]]
return[tuple[[<ast.Name object at 0x7da207f01ab0>, <ast.Name object at 0x7da207f007c0>, <ast.Name object at 0x7da207f02530>, <ast.Name object at 0x7da207f03ee0>, <ast.Name object at 0x7da207f01990>]]]
|
keyword[def] identifier[fasper] ( identifier[x] , identifier[y] , identifier[ofac] , identifier[hifac] , identifier[n_threads] , identifier[MACC] = literal[int] ):
literal[string]
identifier[n] = identifier[long] ( identifier[len] ( identifier[x] ))
keyword[if] identifier[n] != identifier[len] ( identifier[y] ):
identifier[print] ( literal[string] )
keyword[return]
identifier[nout] = identifier[int] ( literal[int] * identifier[ofac] * identifier[hifac] * identifier[n] )
identifier[nfreqt] = identifier[long] ( identifier[ofac] * identifier[hifac] * identifier[n] * identifier[MACC] )
identifier[nfreq] = literal[int]
keyword[while] identifier[nfreq] < identifier[nfreqt] :
identifier[nfreq] = literal[int] * identifier[nfreq]
identifier[ndim] = identifier[long] ( literal[int] * identifier[nfreq] )
identifier[ave] = identifier[y] . identifier[mean] ()
identifier[var] =(( identifier[y] - identifier[y] . identifier[mean] ())** literal[int] ). identifier[sum] ()/( identifier[len] ( identifier[y] )- literal[int] )
identifier[xmin] = identifier[x] . identifier[min] ()
identifier[xmax] = identifier[x] . identifier[max] ()
identifier[xdif] = identifier[xmax] - identifier[xmin]
keyword[if] identifier[is_pyfftw] :
identifier[wk1] = identifier[pyfftw] . identifier[n_byte_align_empty] ( identifier[int] ( identifier[ndim] ), literal[int] , literal[string] )* literal[int]
identifier[wk2] = identifier[pyfftw] . identifier[n_byte_align_empty] ( identifier[int] ( identifier[ndim] ), literal[int] , literal[string] )* literal[int]
keyword[else] :
identifier[wk1] = identifier[zeros] ( identifier[ndim] , identifier[dtype] = literal[string] )
identifier[wk2] = identifier[zeros] ( identifier[ndim] , identifier[dtype] = literal[string] )
identifier[fac] = identifier[ndim] /( identifier[xdif] * identifier[ofac] )
identifier[fndim] = identifier[ndim]
identifier[ck] =(( identifier[x] - identifier[xmin] )* identifier[fac] )% identifier[fndim]
identifier[ckk] =( literal[int] * identifier[ck] )% identifier[fndim]
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[n] ):
identifier[__spread__] ( identifier[y] [ identifier[j] ]- identifier[ave] , identifier[wk1] , identifier[ndim] , identifier[ck] [ identifier[j] ], identifier[MACC] )
identifier[__spread__] ( literal[int] , identifier[wk2] , identifier[ndim] , identifier[ckk] [ identifier[j] ], identifier[MACC] )
keyword[if] identifier[is_pyfftw] :
identifier[fft_wk1] = identifier[pyfftw] . identifier[builders] . identifier[ifft] ( identifier[wk1] , identifier[planner_effort] = literal[string] ,
identifier[threads] = identifier[n_threads] )
identifier[wk1] = identifier[fft_wk1] ()* identifier[len] ( identifier[wk1] )
identifier[fft_wk2] = identifier[pyfftw] . identifier[builders] . identifier[ifft] ( identifier[wk2] , identifier[planner_effort] = literal[string] ,
identifier[threads] = identifier[n_threads] )
identifier[wk2] = identifier[fft_wk2] ()* identifier[len] ( identifier[wk2] )
keyword[else] :
identifier[wk1] = identifier[ifft] ( identifier[wk1] )* identifier[len] ( identifier[wk1] )
identifier[wk2] = identifier[ifft] ( identifier[wk2] )* identifier[len] ( identifier[wk1] )
identifier[wk1] = identifier[wk1] [ literal[int] : identifier[nout] + literal[int] ]
identifier[wk2] = identifier[wk2] [ literal[int] : identifier[nout] + literal[int] ]
identifier[rwk1] = identifier[wk1] . identifier[real]
identifier[iwk1] = identifier[wk1] . identifier[imag]
identifier[rwk2] = identifier[wk2] . identifier[real]
identifier[iwk2] = identifier[wk2] . identifier[imag]
identifier[df] = literal[int] /( identifier[xdif] * identifier[ofac] )
identifier[hypo2] = literal[int] * identifier[abs] ( identifier[wk2] )
identifier[hc2wt] = identifier[rwk2] / identifier[hypo2]
identifier[hs2wt] = identifier[iwk2] / identifier[hypo2]
identifier[cwt] = identifier[sqrt] ( literal[int] + identifier[hc2wt] )
identifier[swt] = identifier[sign] ( identifier[hs2wt] )*( identifier[sqrt] ( literal[int] - identifier[hc2wt] ))
identifier[den] = literal[int] * identifier[n] + identifier[hc2wt] * identifier[rwk2] + identifier[hs2wt] * identifier[iwk2]
identifier[cterm] =( identifier[cwt] * identifier[rwk1] + identifier[swt] * identifier[iwk1] )** literal[int] / identifier[den]
identifier[sterm] =( identifier[cwt] * identifier[iwk1] - identifier[swt] * identifier[rwk1] )** literal[int] /( identifier[n] - identifier[den] )
identifier[wk1] = identifier[df] *( identifier[arange] ( identifier[nout] , identifier[dtype] = literal[string] )+ literal[int] )
identifier[wk2] =( identifier[cterm] + identifier[sterm] )/( literal[int] * identifier[var] )
identifier[pmax] = identifier[wk2] . identifier[max] ()
identifier[jmax] = identifier[wk2] . identifier[argmax] ()
identifier[expy] = identifier[exp] (- identifier[pmax] )
identifier[effm] = literal[int] *( identifier[nout] )/ identifier[ofac]
identifier[prob] = identifier[effm] * identifier[expy]
keyword[if] identifier[prob] > literal[int] :
identifier[prob] = literal[int] -( literal[int] - identifier[expy] )** identifier[effm]
keyword[return] identifier[wk1] , identifier[wk2] , identifier[nout] , identifier[jmax] , identifier[prob]
|
def fasper(x, y, ofac, hifac, n_threads, MACC=4):
"""
Given abscissas x (which need not be equally spaced) and ordinates
y, and given a desired oversampling factor ofac (a typical value
being 4 or larger). this routine creates an array wk1 with a
sequence of nout increasing frequencies (not angular frequencies)
up to hifac times the "average" Nyquist frequency, and creates
an array wk2 with the values of the Lomb normalized periodogram at
those frequencies. The arrays x and y are not altered. This
routine also returns jmax such that wk2(jmax) is the maximum
element in wk2, and prob, an estimate of the significance of that
maximum against the hypothesis of random noise. A small value of prob
indicates that a significant periodic signal is present.
Reference:
Press, W. H. & Rybicki, G. B. 1989
ApJ vol. 338, p. 277-280.
Fast algorithm for spectral analysis of unevenly sampled data
(1989ApJ...338..277P)
Arguments:
X : Abscissas array, (e.g. an array of times).
Y : Ordinates array, (e.g. corresponding counts).
Ofac : Oversampling factor.
Hifac : Hifac * "average" Nyquist frequency = highest frequency
for which values of the Lomb normalized periodogram will
be calculated.
n_threads : number of threads to use.
Returns:
Wk1 : An array of Lomb periodogram frequencies.
Wk2 : An array of corresponding values of the Lomb periodogram.
Nout : Wk1 & Wk2 dimensions (number of calculated frequencies)
Jmax : The array index corresponding to the MAX( Wk2 ).
Prob : False Alarm Probability of the largest Periodogram value
MACC : Number of interpolation points per 1/4 cycle
of highest frequency
History:
02/23/2009, v1.0, MF
Translation of IDL code (orig. Numerical recipies)
"""
#Check dimensions of input arrays
n = long(len(x))
if n != len(y):
print('Incompatible arrays.')
return # depends on [control=['if'], data=[]]
#print x, y, hifac, ofac
nout = int(0.5 * ofac * hifac * n)
nfreqt = long(ofac * hifac * n * MACC) #Size the FFT as next power
nfreq = 64 # of 2 above nfreqt.
while nfreq < nfreqt:
nfreq = 2 * nfreq # depends on [control=['while'], data=['nfreq']]
ndim = long(2 * nfreq)
#Compute the mean, variance
ave = y.mean()
##sample variance because the divisor is N-1
var = ((y - y.mean()) ** 2).sum() / (len(y) - 1)
# and range of the data.
xmin = x.min()
xmax = x.max()
xdif = xmax - xmin
#extrapolate the data into the workspaces
if is_pyfftw:
wk1 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex') * 0.0
wk2 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex') * 0.0 # depends on [control=['if'], data=[]]
else:
wk1 = zeros(ndim, dtype='complex')
wk2 = zeros(ndim, dtype='complex')
fac = ndim / (xdif * ofac)
fndim = ndim
ck = (x - xmin) * fac % fndim
ckk = 2.0 * ck % fndim
for j in range(0, n):
__spread__(y[j] - ave, wk1, ndim, ck[j], MACC)
__spread__(1.0, wk2, ndim, ckk[j], MACC) # depends on [control=['for'], data=['j']]
#Take the Fast Fourier Transforms.
if is_pyfftw:
fft_wk1 = pyfftw.builders.ifft(wk1, planner_effort='FFTW_ESTIMATE', threads=n_threads)
wk1 = fft_wk1() * len(wk1)
fft_wk2 = pyfftw.builders.ifft(wk2, planner_effort='FFTW_ESTIMATE', threads=n_threads)
wk2 = fft_wk2() * len(wk2) # depends on [control=['if'], data=[]]
else:
wk1 = ifft(wk1) * len(wk1)
wk2 = ifft(wk2) * len(wk1)
wk1 = wk1[1:nout + 1]
wk2 = wk2[1:nout + 1]
rwk1 = wk1.real
iwk1 = wk1.imag
rwk2 = wk2.real
iwk2 = wk2.imag
df = 1.0 / (xdif * ofac)
#Compute the Lomb value for each frequency
hypo2 = 2.0 * abs(wk2)
hc2wt = rwk2 / hypo2
hs2wt = iwk2 / hypo2
cwt = sqrt(0.5 + hc2wt)
swt = sign(hs2wt) * sqrt(0.5 - hc2wt)
den = 0.5 * n + hc2wt * rwk2 + hs2wt * iwk2
cterm = (cwt * rwk1 + swt * iwk1) ** 2.0 / den
sterm = (cwt * iwk1 - swt * rwk1) ** 2.0 / (n - den)
wk1 = df * (arange(nout, dtype='float') + 1.0)
wk2 = (cterm + sterm) / (2.0 * var)
pmax = wk2.max()
jmax = wk2.argmax()
#Significance estimation
#expy = exp(-wk2)
#effm = 2.0*(nout)/ofac
#sig = effm*expy
#ind = (sig > 0.01).nonzero()
#sig[ind] = 1.0-(1.0-expy[ind])**effm
#Estimate significance of largest peak value
expy = exp(-pmax)
effm = 2.0 * nout / ofac
prob = effm * expy
if prob > 0.01:
prob = 1.0 - (1.0 - expy) ** effm # depends on [control=['if'], data=['prob']]
return (wk1, wk2, nout, jmax, prob)
|
def execute_deploy_from_linked_clone(self, si, logger, vcenter_data_model, reservation_id, deployment_params, cancellation_context, folder_manager):
    """
    Calls the deployer to deploy vm from snapshot
    :param cancellation_context:
    :param str reservation_id:
    :param si:
    :param logger:
    :type deployment_params: DeployFromLinkedClone
    :param vcenter_data_model:
    :return:
    """
    # Make sure the deployed-apps folder exists before handing off to the deployer.
    self._prepare_deployed_apps_folder(deployment_params, si, logger, folder_manager, vcenter_data_model)
    return self.deployer.deploy_from_linked_clone(
        si, logger, deployment_params, vcenter_data_model, reservation_id, cancellation_context)
|
def function[execute_deploy_from_linked_clone, parameter[self, si, logger, vcenter_data_model, reservation_id, deployment_params, cancellation_context, folder_manager]]:
constant[
Calls the deployer to deploy vm from snapshot
:param cancellation_context:
:param str reservation_id:
:param si:
:param logger:
:type deployment_params: DeployFromLinkedClone
:param vcenter_data_model:
:return:
]
call[name[self]._prepare_deployed_apps_folder, parameter[name[deployment_params], name[si], name[logger], name[folder_manager], name[vcenter_data_model]]]
variable[deploy_result] assign[=] call[name[self].deployer.deploy_from_linked_clone, parameter[name[si], name[logger], name[deployment_params], name[vcenter_data_model], name[reservation_id], name[cancellation_context]]]
return[name[deploy_result]]
|
keyword[def] identifier[execute_deploy_from_linked_clone] ( identifier[self] , identifier[si] , identifier[logger] , identifier[vcenter_data_model] , identifier[reservation_id] , identifier[deployment_params] , identifier[cancellation_context] , identifier[folder_manager] ):
literal[string]
identifier[self] . identifier[_prepare_deployed_apps_folder] ( identifier[deployment_params] , identifier[si] , identifier[logger] , identifier[folder_manager] , identifier[vcenter_data_model] )
identifier[deploy_result] = identifier[self] . identifier[deployer] . identifier[deploy_from_linked_clone] ( identifier[si] , identifier[logger] , identifier[deployment_params] , identifier[vcenter_data_model] ,
identifier[reservation_id] , identifier[cancellation_context] )
keyword[return] identifier[deploy_result]
|
def execute_deploy_from_linked_clone(self, si, logger, vcenter_data_model, reservation_id, deployment_params, cancellation_context, folder_manager):
"""
Calls the deployer to deploy vm from snapshot
:param cancellation_context:
:param str reservation_id:
:param si:
:param logger:
:type deployment_params: DeployFromLinkedClone
:param vcenter_data_model:
:return:
"""
self._prepare_deployed_apps_folder(deployment_params, si, logger, folder_manager, vcenter_data_model)
deploy_result = self.deployer.deploy_from_linked_clone(si, logger, deployment_params, vcenter_data_model, reservation_id, cancellation_context)
return deploy_result
|
def coverage(ctx, opts=""):
    """
    Run the full test suite -- slow tests included -- with coverage enabled.
    """
    # Delegate to the regular test task, forcing coverage and slow tests on.
    return test(ctx, opts=opts, coverage=True, include_slow=True)
|
def function[coverage, parameter[ctx, opts]]:
constant[
Execute all tests (normal and slow) with coverage enabled.
]
return[call[name[test], parameter[name[ctx]]]]
|
keyword[def] identifier[coverage] ( identifier[ctx] , identifier[opts] = literal[string] ):
literal[string]
keyword[return] identifier[test] ( identifier[ctx] , identifier[coverage] = keyword[True] , identifier[include_slow] = keyword[True] , identifier[opts] = identifier[opts] )
|
def coverage(ctx, opts=''):
"""
Execute all tests (normal and slow) with coverage enabled.
"""
return test(ctx, coverage=True, include_slow=True, opts=opts)
|
def load(self, path):
    """
    Loads all tensors from a file defined by *path* and adds them to the root set.
    """
    # Resolve ~ and environment variables in the user-supplied path.
    full_path = os.path.expanduser(path)
    full_path = os.path.expandvars(full_path)
    with open(full_path, "rb") as handle:
        stored_roots = pickle.load(handle)
    for name, value in stored_roots.items():
        self.add(value, key=name)
|
def function[load, parameter[self, path]]:
constant[
Loads all tensors from a file defined by *path* and adds them to the root set.
]
variable[path] assign[=] call[name[os].path.expandvars, parameter[call[name[os].path.expanduser, parameter[name[path]]]]]
with call[name[open], parameter[name[path], constant[rb]]] begin[:]
variable[roots] assign[=] call[name[pickle].load, parameter[name[f]]]
for taget[tuple[[<ast.Name object at 0x7da1b053ba60>, <ast.Name object at 0x7da1b053a680>]]] in starred[call[name[roots].items, parameter[]]] begin[:]
call[name[self].add, parameter[name[tensor]]]
|
keyword[def] identifier[load] ( identifier[self] , identifier[path] ):
literal[string]
identifier[path] = identifier[os] . identifier[path] . identifier[expandvars] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[path] ))
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] :
identifier[roots] = identifier[pickle] . identifier[load] ( identifier[f] )
keyword[for] identifier[key] , identifier[tensor] keyword[in] identifier[roots] . identifier[items] ():
identifier[self] . identifier[add] ( identifier[tensor] , identifier[key] = identifier[key] )
|
def load(self, path):
"""
Loads all tensors from a file defined by *path* and adds them to the root set.
"""
path = os.path.expandvars(os.path.expanduser(path))
with open(path, 'rb') as f:
roots = pickle.load(f) # depends on [control=['with'], data=['f']]
for (key, tensor) in roots.items():
self.add(tensor, key=key) # depends on [control=['for'], data=[]]
|
def nested_to_ring(nested_index, nside):
    """
    Convert HEALPix pixel indices from 'nested' to 'ring' ordering.

    Parameters
    ----------
    nested_index : int or `~numpy.ndarray`
        Healpix index using the 'nested' ordering
    nside : int or `~numpy.ndarray`
        Number of pixels along the side of each of the 12 top-level HEALPix tiles

    Returns
    -------
    ring_index : int or `~numpy.ndarray`
        Healpix index using the 'ring' ordering
    """
    # The compiled core expects nside as C ints.
    nside_c = np.asarray(nside, dtype=np.intc)
    return _core.nested_to_ring(nested_index, nside_c)
|
def function[nested_to_ring, parameter[nested_index, nside]]:
constant[
Convert a HEALPix 'nested' index to a HEALPix 'ring' index
Parameters
----------
nested_index : int or `~numpy.ndarray`
Healpix index using the 'nested' ordering
nside : int or `~numpy.ndarray`
Number of pixels along the side of each of the 12 top-level HEALPix tiles
Returns
-------
ring_index : int or `~numpy.ndarray`
Healpix index using the 'ring' ordering
]
variable[nside] assign[=] call[name[np].asarray, parameter[name[nside]]]
return[call[name[_core].nested_to_ring, parameter[name[nested_index], name[nside]]]]
|
keyword[def] identifier[nested_to_ring] ( identifier[nested_index] , identifier[nside] ):
literal[string]
identifier[nside] = identifier[np] . identifier[asarray] ( identifier[nside] , identifier[dtype] = identifier[np] . identifier[intc] )
keyword[return] identifier[_core] . identifier[nested_to_ring] ( identifier[nested_index] , identifier[nside] )
|
def nested_to_ring(nested_index, nside):
"""
Convert a HEALPix 'nested' index to a HEALPix 'ring' index
Parameters
----------
nested_index : int or `~numpy.ndarray`
Healpix index using the 'nested' ordering
nside : int or `~numpy.ndarray`
Number of pixels along the side of each of the 12 top-level HEALPix tiles
Returns
-------
ring_index : int or `~numpy.ndarray`
Healpix index using the 'ring' ordering
"""
nside = np.asarray(nside, dtype=np.intc)
return _core.nested_to_ring(nested_index, nside)
|
def try_call(self, client_data=None, api_data=None, aux_data=None, *args, **kwargs):
    """
    Calls the request catching all exceptions
    :param client_data:
    :param api_data:
    :param aux_data:
    :param args:
    :param kwargs:
    :return:
    """
    try:
        result = self.call(client_data, api_data, aux_data, *args, **kwargs)
    except Exception as exc:
        # Record the failure for later inspection instead of propagating it.
        self.last_exception = RequestFailed(cause=exc)
        return None
    return result
|
def function[try_call, parameter[self, client_data, api_data, aux_data]]:
constant[
Calls the request catching all exceptions
:param client_data:
:param api_data:
:param aux_data:
:param args:
:param kwargs:
:return:
]
<ast.Try object at 0x7da1b2346440>
return[constant[None]]
|
keyword[def] identifier[try_call] ( identifier[self] , identifier[client_data] = keyword[None] , identifier[api_data] = keyword[None] , identifier[aux_data] = keyword[None] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[call] ( identifier[client_data] , identifier[api_data] , identifier[aux_data] ,* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[last_exception] = identifier[RequestFailed] ( identifier[cause] = identifier[e] )
keyword[return] keyword[None]
|
def try_call(self, client_data=None, api_data=None, aux_data=None, *args, **kwargs):
"""
Calls the request catching all exceptions
:param client_data:
:param api_data:
:param aux_data:
:param args:
:param kwargs:
:return:
"""
try:
return self.call(client_data, api_data, aux_data, *args, **kwargs) # depends on [control=['try'], data=[]]
except Exception as e:
self.last_exception = RequestFailed(cause=e) # depends on [control=['except'], data=['e']]
return None
|
def run(query, container=Null):
    """
    THIS FUNCTION IS SIMPLY SWITCHING BASED ON THE query["from"] CONTAINER,
    BUT IT IS ALSO PROCESSING A list CONTAINER; SEPARATE TO A ListContainer
    """
    # NOTE(review): `container == None` presumably also matches the Null
    # default (mo-dot style null equality) -- confirm against the Null type.
    if container == None:
        # No container given: take it from the query's "from" clause.
        container = wrap(query)["from"]
        query_op = QueryOp.wrap(query, container=container, namespace=container.schema)
    else:
        query_op = QueryOp.wrap(query, container, container.namespace)
    # Dispatch on the concrete container type.
    if container == None:
        # Still no container: run against the dummy DUAL container.
        from jx_python.containers.list_usingPythonList import DUAL
        return DUAL.query(query_op)
    elif isinstance(container, Container):
        # Real containers know how to execute the query themselves.
        return container.query(query_op)
    elif is_many(container):
        # Plain iterable: materialize it so the list-processing below applies.
        container = wrap(list(container))
    elif isinstance(container, Cube):
        if is_aggs(query_op):
            return cube_aggs(container, query_op)
        # Non-aggregate cube queries fall through to the list processing below.
    elif is_op(container, QueryOp):
        # The "from" is itself a query: evaluate it first, recursively.
        container = run(container)
    elif is_data(container):
        # The container is a query expressed as a dict: re-dispatch on its "from".
        query = container
        container = query["from"]
        container = run(QueryOp.wrap(query, container, container.namespace), container)
    else:
        Log.error(
            "Do not know how to handle {{type}}", type=container.__class__.__name__
        )
    # At this point `container` is list-like; apply the query clauses in order.
    if is_aggs(query_op):
        container = list_aggs(container, query_op)
    else:  # SETOP: plain where/sort/select pipeline
        if query_op.where is not TRUE:
            container = filter(container, query_op.where)
        if query_op.sort:
            container = sort(container, query_op.sort, already_normalized=True)
        if query_op.select:
            container = select(container, query_op.select)
    if query_op.window:
        if isinstance(container, Cube):
            container = list(container.values())
        # Window functions mutate `container` in place, one pass per clause.
        for param in query_op.window:
            window(container, param)
    # AT THIS POINT frum IS IN LIST FORMAT, NOW PACKAGE RESULT
    if query_op.format == "cube":
        container = convert.list2cube(container)
    elif query_op.format == "table":
        container = convert.list2table(container)
        container.meta.format = "table"
    else:
        # Default format is a wrapped {"meta", "data"} list result.
        container = wrap({"meta": {"format": "list"}, "data": container})
    return container
|
def function[run, parameter[query, container]]:
constant[
THIS FUNCTION IS SIMPLY SWITCHING BASED ON THE query["from"] CONTAINER,
BUT IT IS ALSO PROCESSING A list CONTAINER; SEPARATE TO A ListContainer
]
if compare[name[container] equal[==] constant[None]] begin[:]
variable[container] assign[=] call[call[name[wrap], parameter[name[query]]]][constant[from]]
variable[query_op] assign[=] call[name[QueryOp].wrap, parameter[name[query]]]
if compare[name[container] equal[==] constant[None]] begin[:]
from relative_module[jx_python.containers.list_usingPythonList] import module[DUAL]
return[call[name[DUAL].query, parameter[name[query_op]]]]
if call[name[is_aggs], parameter[name[query_op]]] begin[:]
variable[container] assign[=] call[name[list_aggs], parameter[name[container], name[query_op]]]
if name[query_op].window begin[:]
if call[name[isinstance], parameter[name[container], name[Cube]]] begin[:]
variable[container] assign[=] call[name[list], parameter[call[name[container].values, parameter[]]]]
for taget[name[param]] in starred[name[query_op].window] begin[:]
call[name[window], parameter[name[container], name[param]]]
if compare[name[query_op].format equal[==] constant[cube]] begin[:]
variable[container] assign[=] call[name[convert].list2cube, parameter[name[container]]]
return[name[container]]
|
keyword[def] identifier[run] ( identifier[query] , identifier[container] = identifier[Null] ):
literal[string]
keyword[if] identifier[container] == keyword[None] :
identifier[container] = identifier[wrap] ( identifier[query] )[ literal[string] ]
identifier[query_op] = identifier[QueryOp] . identifier[wrap] ( identifier[query] , identifier[container] = identifier[container] , identifier[namespace] = identifier[container] . identifier[schema] )
keyword[else] :
identifier[query_op] = identifier[QueryOp] . identifier[wrap] ( identifier[query] , identifier[container] , identifier[container] . identifier[namespace] )
keyword[if] identifier[container] == keyword[None] :
keyword[from] identifier[jx_python] . identifier[containers] . identifier[list_usingPythonList] keyword[import] identifier[DUAL]
keyword[return] identifier[DUAL] . identifier[query] ( identifier[query_op] )
keyword[elif] identifier[isinstance] ( identifier[container] , identifier[Container] ):
keyword[return] identifier[container] . identifier[query] ( identifier[query_op] )
keyword[elif] identifier[is_many] ( identifier[container] ):
identifier[container] = identifier[wrap] ( identifier[list] ( identifier[container] ))
keyword[elif] identifier[isinstance] ( identifier[container] , identifier[Cube] ):
keyword[if] identifier[is_aggs] ( identifier[query_op] ):
keyword[return] identifier[cube_aggs] ( identifier[container] , identifier[query_op] )
keyword[elif] identifier[is_op] ( identifier[container] , identifier[QueryOp] ):
identifier[container] = identifier[run] ( identifier[container] )
keyword[elif] identifier[is_data] ( identifier[container] ):
identifier[query] = identifier[container]
identifier[container] = identifier[query] [ literal[string] ]
identifier[container] = identifier[run] ( identifier[QueryOp] . identifier[wrap] ( identifier[query] , identifier[container] , identifier[container] . identifier[namespace] ), identifier[container] )
keyword[else] :
identifier[Log] . identifier[error] (
literal[string] , identifier[type] = identifier[container] . identifier[__class__] . identifier[__name__]
)
keyword[if] identifier[is_aggs] ( identifier[query_op] ):
identifier[container] = identifier[list_aggs] ( identifier[container] , identifier[query_op] )
keyword[else] :
keyword[if] identifier[query_op] . identifier[where] keyword[is] keyword[not] identifier[TRUE] :
identifier[container] = identifier[filter] ( identifier[container] , identifier[query_op] . identifier[where] )
keyword[if] identifier[query_op] . identifier[sort] :
identifier[container] = identifier[sort] ( identifier[container] , identifier[query_op] . identifier[sort] , identifier[already_normalized] = keyword[True] )
keyword[if] identifier[query_op] . identifier[select] :
identifier[container] = identifier[select] ( identifier[container] , identifier[query_op] . identifier[select] )
keyword[if] identifier[query_op] . identifier[window] :
keyword[if] identifier[isinstance] ( identifier[container] , identifier[Cube] ):
identifier[container] = identifier[list] ( identifier[container] . identifier[values] ())
keyword[for] identifier[param] keyword[in] identifier[query_op] . identifier[window] :
identifier[window] ( identifier[container] , identifier[param] )
keyword[if] identifier[query_op] . identifier[format] == literal[string] :
identifier[container] = identifier[convert] . identifier[list2cube] ( identifier[container] )
keyword[elif] identifier[query_op] . identifier[format] == literal[string] :
identifier[container] = identifier[convert] . identifier[list2table] ( identifier[container] )
identifier[container] . identifier[meta] . identifier[format] = literal[string]
keyword[else] :
identifier[container] = identifier[wrap] ({ literal[string] :{ literal[string] : literal[string] }, literal[string] : identifier[container] })
keyword[return] identifier[container]
|
def run(query, container=Null):
"""
THIS FUNCTION IS SIMPLY SWITCHING BASED ON THE query["from"] CONTAINER,
BUT IT IS ALSO PROCESSING A list CONTAINER; SEPARATE TO A ListContainer
"""
if container == None:
container = wrap(query)['from']
query_op = QueryOp.wrap(query, container=container, namespace=container.schema) # depends on [control=['if'], data=['container']]
else:
query_op = QueryOp.wrap(query, container, container.namespace)
if container == None:
from jx_python.containers.list_usingPythonList import DUAL
return DUAL.query(query_op) # depends on [control=['if'], data=[]]
elif isinstance(container, Container):
return container.query(query_op) # depends on [control=['if'], data=[]]
elif is_many(container):
container = wrap(list(container)) # depends on [control=['if'], data=[]]
elif isinstance(container, Cube):
if is_aggs(query_op):
return cube_aggs(container, query_op) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif is_op(container, QueryOp):
container = run(container) # depends on [control=['if'], data=[]]
elif is_data(container):
query = container
container = query['from']
container = run(QueryOp.wrap(query, container, container.namespace), container) # depends on [control=['if'], data=[]]
else:
Log.error('Do not know how to handle {{type}}', type=container.__class__.__name__)
if is_aggs(query_op):
container = list_aggs(container, query_op) # depends on [control=['if'], data=[]]
else: # SETOP
if query_op.where is not TRUE:
container = filter(container, query_op.where) # depends on [control=['if'], data=[]]
if query_op.sort:
container = sort(container, query_op.sort, already_normalized=True) # depends on [control=['if'], data=[]]
if query_op.select:
container = select(container, query_op.select) # depends on [control=['if'], data=[]]
if query_op.window:
if isinstance(container, Cube):
container = list(container.values()) # depends on [control=['if'], data=[]]
for param in query_op.window:
window(container, param) # depends on [control=['for'], data=['param']] # depends on [control=['if'], data=[]]
# AT THIS POINT frum IS IN LIST FORMAT, NOW PACKAGE RESULT
if query_op.format == 'cube':
container = convert.list2cube(container) # depends on [control=['if'], data=[]]
elif query_op.format == 'table':
container = convert.list2table(container)
container.meta.format = 'table' # depends on [control=['if'], data=[]]
else:
container = wrap({'meta': {'format': 'list'}, 'data': container})
return container
|
def pre_change_receiver(self, instance: Model, action: Action):
    """
    Entry point for triggering the old_binding from save signals.

    Records, per observer (``self``), the set of group names the instance
    belonged to *before* the change, so a post-change handler can diff old
    vs. new group membership.

    :param instance: the model instance about to change
    :param action: the kind of change; CREATE means there is no prior state
    """
    if action == Action.CREATE:
        # A brand-new instance belonged to no groups before the change.
        group_names = set()
    else:
        group_names = set(self.group_names(instance))
    # use a thread local dict to be safe...
    # NOTE(review): if this function lives inside a class, the
    # `instance.__instance_groups` assignments undergo name mangling while
    # the string literal in hasattr() does not — the first hasattr() check
    # would then always be False and the observers dict re-created on every
    # call. Confirm against the enclosing class definition.
    if not hasattr(instance, '__instance_groups'):
        instance.__instance_groups = threading.local()
        instance.__instance_groups.observers = {}
    if not hasattr(instance.__instance_groups, 'observers'):
        instance.__instance_groups.observers = {}
    instance.__instance_groups.observers[self] = group_names
|
def function[pre_change_receiver, parameter[self, instance, action]]:
constant[
Entry point for triggering the old_binding from save signals.
]
if compare[name[action] equal[==] name[Action].CREATE] begin[:]
variable[group_names] assign[=] call[name[set], parameter[]]
if <ast.UnaryOp object at 0x7da18f812410> begin[:]
name[instance].__instance_groups assign[=] call[name[threading].local, parameter[]]
name[instance].__instance_groups.observers assign[=] dictionary[[], []]
if <ast.UnaryOp object at 0x7da18f811ba0> begin[:]
name[instance].__instance_groups.observers assign[=] dictionary[[], []]
call[name[instance].__instance_groups.observers][name[self]] assign[=] name[group_names]
|
keyword[def] identifier[pre_change_receiver] ( identifier[self] , identifier[instance] : identifier[Model] , identifier[action] : identifier[Action] ):
literal[string]
keyword[if] identifier[action] == identifier[Action] . identifier[CREATE] :
identifier[group_names] = identifier[set] ()
keyword[else] :
identifier[group_names] = identifier[set] ( identifier[self] . identifier[group_names] ( identifier[instance] ))
keyword[if] keyword[not] identifier[hasattr] ( identifier[instance] , literal[string] ):
identifier[instance] . identifier[__instance_groups] = identifier[threading] . identifier[local] ()
identifier[instance] . identifier[__instance_groups] . identifier[observers] ={}
keyword[if] keyword[not] identifier[hasattr] ( identifier[instance] . identifier[__instance_groups] , literal[string] ):
identifier[instance] . identifier[__instance_groups] . identifier[observers] ={}
identifier[instance] . identifier[__instance_groups] . identifier[observers] [ identifier[self] ]= identifier[group_names]
|
def pre_change_receiver(self, instance: Model, action: Action):
"""
Entry point for triggering the old_binding from save signals.
"""
if action == Action.CREATE:
group_names = set() # depends on [control=['if'], data=[]]
else:
group_names = set(self.group_names(instance))
# use a thread local dict to be safe...
if not hasattr(instance, '__instance_groups'):
instance.__instance_groups = threading.local()
instance.__instance_groups.observers = {} # depends on [control=['if'], data=[]]
if not hasattr(instance.__instance_groups, 'observers'):
instance.__instance_groups.observers = {} # depends on [control=['if'], data=[]]
instance.__instance_groups.observers[self] = group_names
|
def generate_dylib_load_command(header, libary_install_name):
    """ Generates a LC_LOAD_DYLIB command for the given header and a library install path.
    Note: the header must already contain at least one LC_LOAD_DYLIB command (see code comments).
    Returns a (load_command, dylib_command, aligned_name) triple ready for use with macholib.
    Raises an Exception when the header contains no LC_LOAD_DYLIB command to clone.
    """
    # One can not simply create instances of `dylib_command` and `load_command` classes,
    # because that's just not the way macholib works. If we try then all we'll get is a bunch
    # of endian (big/little) issues when these objects are serialized into a file.
    # So instead we iterate *existing* load commands, find a dylib_command, copy it
    # and modify the copy — the existing command is already fully initialized.
    lc = None
    cmd = None
    for (command, internal_cmd, data) in header.commands:
        if (command.cmd == LC_LOAD_DYLIB) and isinstance(internal_cmd, dylib_command):
            lc = deepcopy(command)
            cmd = deepcopy(internal_cmd)
            break
    if not lc or not cmd:
        # BUG FIX: the original had an unreachable `return None` immediately
        # after this raise; callers can rely on the exception alone.
        raise Exception("Invalid Mach-O file. I mean, there must be at least one LC_LOAD_DYLIB load command.")
    # Now replace the copied command's fields with our own values.
    cmd.timestamp = 0
    cmd.current_version = cmd.compatibility_version = 0x1000
    # The library path is stored just after the load command itself, so its
    # offset is the combined size of the two fixed-size structures.
    base = sizeof(load_command) + sizeof(dylib_command)
    # `name` is a misleading field name: it actually holds the path string offset.
    cmd.name = base
    # The whole command must be aligned: 4 bytes on 32-bit arches, 8 on 64-bit.
    align = 4 if header.header.magic == MH_MAGIC else 8
    # Pad with NULs up to the alignment boundary (always at least one NUL,
    # which also terminates the C string).
    aligned_name = libary_install_name + (b'\x00' * (align - (len(libary_install_name) % align)))
    # Only now is the final size of this load command known.
    lc.cmdsize = base + len(aligned_name)
    return (lc, cmd, aligned_name)
|
def function[generate_dylib_load_command, parameter[header, libary_install_name]]:
constant[ Generates a LC_LOAD_DYLIB command for the given header and a library install path.
Note: the header must already contain at least one LC_LOAD_DYLIB command (see code comments).
Returns a ready-for-use load_command in terms of macholib.
]
variable[lc] assign[=] constant[None]
variable[cmd] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da207f00c10>, <ast.Name object at 0x7da207f02710>, <ast.Name object at 0x7da207f012a0>]]] in starred[name[header].commands] begin[:]
if <ast.BoolOp object at 0x7da207f025c0> begin[:]
variable[lc] assign[=] call[name[deepcopy], parameter[name[command]]]
variable[cmd] assign[=] call[name[deepcopy], parameter[name[internal_cmd]]]
break
if <ast.BoolOp object at 0x7da207f00d30> begin[:]
<ast.Raise object at 0x7da207f03490>
return[constant[None]]
name[cmd].timestamp assign[=] constant[0]
name[cmd].current_version assign[=] constant[4096]
variable[base] assign[=] binary_operation[call[name[sizeof], parameter[name[load_command]]] + call[name[sizeof], parameter[name[dylib_command]]]]
name[cmd].name assign[=] name[base]
variable[align] assign[=] <ast.IfExp object at 0x7da20c7951b0>
variable[aligned_name] assign[=] binary_operation[name[libary_install_name] + binary_operation[constant[b'\x00'] * binary_operation[name[align] - binary_operation[call[name[len], parameter[name[libary_install_name]]] <ast.Mod object at 0x7da2590d6920> name[align]]]]]
name[lc].cmdsize assign[=] binary_operation[name[base] + call[name[len], parameter[name[aligned_name]]]]
return[tuple[[<ast.Name object at 0x7da20c7960b0>, <ast.Name object at 0x7da20c7940d0>, <ast.Name object at 0x7da20c796080>]]]
|
keyword[def] identifier[generate_dylib_load_command] ( identifier[header] , identifier[libary_install_name] ):
literal[string]
identifier[lc] = keyword[None]
identifier[cmd] = keyword[None]
keyword[for] ( identifier[command] , identifier[internal_cmd] , identifier[data] ) keyword[in] identifier[header] . identifier[commands] :
keyword[if] ( identifier[command] . identifier[cmd] == identifier[LC_LOAD_DYLIB] ) keyword[and] identifier[isinstance] ( identifier[internal_cmd] , identifier[dylib_command] ):
identifier[lc] = identifier[deepcopy] ( identifier[command] )
identifier[cmd] = identifier[deepcopy] ( identifier[internal_cmd] )
keyword[break]
keyword[if] keyword[not] identifier[lc] keyword[or] keyword[not] identifier[cmd] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[return] keyword[None]
identifier[cmd] . identifier[timestamp] = literal[int]
identifier[cmd] . identifier[current_version] = identifier[cmd] . identifier[compatibility_version] = literal[int]
identifier[base] = identifier[sizeof] ( identifier[load_command] )+ identifier[sizeof] ( identifier[dylib_command] )
identifier[cmd] . identifier[name] = identifier[base]
identifier[align] = literal[int] keyword[if] identifier[header] . identifier[header] . identifier[magic] == identifier[MH_MAGIC] keyword[else] literal[int]
identifier[aligned_name] = identifier[libary_install_name] +( literal[string] *( identifier[align] -( identifier[len] ( identifier[libary_install_name] )% identifier[align] )))
identifier[lc] . identifier[cmdsize] = identifier[base] + identifier[len] ( identifier[aligned_name] )
keyword[return] ( identifier[lc] , identifier[cmd] , identifier[aligned_name] )
|
def generate_dylib_load_command(header, libary_install_name):
""" Generates a LC_LOAD_DYLIB command for the given header and a library install path.
Note: the header must already contain at least one LC_LOAD_DYLIB command (see code comments).
Returns a ready-for-use load_command in terms of macholib.
""" # One can not simply create instances of `dylib_command` and `load_command` classes,
# because that's just not the way macholib works. If we try then all we'll get is a bunch
# of endian (big/little) issues when these objects are serialized into a file.
# BUT THAT'S PROGRAMMING RIGHT?
# So instead I'll iterate *existing* load commands, find a dyld_command, copy it
# and modify this copy. This existing command is said to be fully initialized.
lc = None
cmd = None
for (command, internal_cmd, data) in header.commands:
if command.cmd == LC_LOAD_DYLIB and isinstance(internal_cmd, dylib_command):
lc = deepcopy(command)
cmd = deepcopy(internal_cmd)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if not lc or not cmd:
raise Exception('Invalid Mach-O file. I mean, there must be at least one LC_LOAD_DYLIB load command.')
return None # depends on [control=['if'], data=[]] # Well, now we just replace everything with our own stuff
cmd.timestamp = 0
cmd.current_version = cmd.compatibility_version = 4096 # Since we store the library's path just after the load command itself, we need to find out it's offset.
base = sizeof(load_command) + sizeof(dylib_command) # `name` is rather bad name for this property: actually it means a path string offset
cmd.name = base # Also the whole thing must be aligned by 4 bytes on 32-bit arches and by 8 bytes on 64-bit arches
align = 4 if header.header.magic == MH_MAGIC else 8
aligned_name = libary_install_name + b'\x00' * (align - len(libary_install_name) % align) # So now we finally can say what size this load_command is
lc.cmdsize = base + len(aligned_name)
return (lc, cmd, aligned_name)
|
def _try_parse_formula(self, compound_id, s):
"""Try to parse the given compound formula string.
Logs a warning if the formula could not be parsed.
"""
s = s.strip()
if s == '':
return None
try:
# Do not return the parsed formula. For now it is better to keep
# the original formula string unchanged in all cases.
formula.Formula.parse(s)
except formula.ParseError:
logger.warning('Unable to parse compound formula {}: {}'.format(
compound_id, s))
return s
|
def function[_try_parse_formula, parameter[self, compound_id, s]]:
constant[Try to parse the given compound formula string.
Logs a warning if the formula could not be parsed.
]
variable[s] assign[=] call[name[s].strip, parameter[]]
if compare[name[s] equal[==] constant[]] begin[:]
return[constant[None]]
<ast.Try object at 0x7da20c76fa90>
return[name[s]]
|
keyword[def] identifier[_try_parse_formula] ( identifier[self] , identifier[compound_id] , identifier[s] ):
literal[string]
identifier[s] = identifier[s] . identifier[strip] ()
keyword[if] identifier[s] == literal[string] :
keyword[return] keyword[None]
keyword[try] :
identifier[formula] . identifier[Formula] . identifier[parse] ( identifier[s] )
keyword[except] identifier[formula] . identifier[ParseError] :
identifier[logger] . identifier[warning] ( literal[string] . identifier[format] (
identifier[compound_id] , identifier[s] ))
keyword[return] identifier[s]
|
def _try_parse_formula(self, compound_id, s):
"""Try to parse the given compound formula string.
Logs a warning if the formula could not be parsed.
"""
s = s.strip()
if s == '':
return None # depends on [control=['if'], data=[]]
try:
# Do not return the parsed formula. For now it is better to keep
# the original formula string unchanged in all cases.
formula.Formula.parse(s) # depends on [control=['try'], data=[]]
except formula.ParseError:
logger.warning('Unable to parse compound formula {}: {}'.format(compound_id, s)) # depends on [control=['except'], data=[]]
return s
|
def xml_to_metrics(xmlstr, object_type):
    '''Converts xml response to service bus metrics objects
    The xml format for MetricProperties
    <entry>
    <id>https://sbgm.windows.net/Metrics(\'listeners.active\')</id>
    <title/>
    <updated>2014-10-09T11:56:50Z</updated>
    <author>
    <name/>
    </author>
    <content type="application/xml">
    <m:properties>
    <d:Name>listeners.active</d:Name>
    <d:PrimaryAggregation>Average</d:PrimaryAggregation>
    <d:Unit>Count</d:Unit>
    <d:DisplayName>Active listeners</d:DisplayName>
    </m:properties>
    </content>
    </entry>
    The xml format for MetricValues
    <entry>
    <id>https://sbgm.windows.net/MetricValues(datetime\'2014-10-02T00:00:00Z\')</id>
    <title/>
    <updated>2014-10-09T18:38:28Z</updated>
    <author>
    <name/>
    </author>
    <content type="application/xml">
    <m:properties>
    <d:Timestamp m:type="Edm.DateTime">2014-10-02T00:00:00Z</d:Timestamp>
    <d:Min m:type="Edm.Int64">-118</d:Min>
    <d:Max m:type="Edm.Int64">15</d:Max>
    <d:Average m:type="Edm.Single">-78.44444</d:Average>
    <d:Total m:type="Edm.Int64">0</d:Total>
    </m:properties>
    </content>
    </entry>

    :param xmlstr: raw XML response body from the service bus metrics API
    :param object_type: class of the metrics object to build; instantiated
        once and populated from the XML properties
    :return: a populated instance of object_type
    '''
    xmldoc = minidom.parseString(xmlstr)
    return_obj = object_type()
    # Snapshot the instance's attributes: these names decide which XML
    # property elements we look for below.
    members = dict(vars(return_obj))
    # Only one entry here
    for xml_entry in _MinidomXmlToObject.get_children_from_path(xmldoc,
                                                                'entry'):
        for node in _MinidomXmlToObject.get_children_from_path(xml_entry,
                                                               'content',
                                                               'properties'):
            for name in members:
                xml_name = _get_serialization_name(name)
                children = _MinidomXmlToObject.get_child_nodes(node, xml_name)
                if not children:
                    # Property not present in this response; keep the default.
                    continue
                child = children[0]
                # The m:type attribute (OData metadata namespace) tells us how
                # to convert the raw text value (e.g. Edm.Int64, Edm.DateTime).
                node_type = child.getAttributeNS("http://schemas.microsoft.com/ado/2007/08/dataservices/metadata", 'type')
                node_value = _ServiceBusManagementXmlSerializer.odata_converter(child.firstChild.nodeValue, node_type)
                setattr(return_obj, name, node_value)
        # Copy the generic Atom-entry properties (id, updated, ...) too,
        # but never clobber an attribute already filled from m:properties.
        for name, value in _MinidomXmlToObject.get_entry_properties_from_node(
                xml_entry,
                include_id=True,
                use_title_as_id=False).items():
            if name in members:
                continue  # Do not override if already members
            setattr(return_obj, name, value)
    return return_obj
|
def function[xml_to_metrics, parameter[xmlstr, object_type]]:
constant[Converts xml response to service bus metrics objects
The xml format for MetricProperties
<entry>
<id>https://sbgm.windows.net/Metrics('listeners.active')</id>
<title/>
<updated>2014-10-09T11:56:50Z</updated>
<author>
<name/>
</author>
<content type="application/xml">
<m:properties>
<d:Name>listeners.active</d:Name>
<d:PrimaryAggregation>Average</d:PrimaryAggregation>
<d:Unit>Count</d:Unit>
<d:DisplayName>Active listeners</d:DisplayName>
</m:properties>
</content>
</entry>
The xml format for MetricValues
<entry>
<id>https://sbgm.windows.net/MetricValues(datetime'2014-10-02T00:00:00Z')</id>
<title/>
<updated>2014-10-09T18:38:28Z</updated>
<author>
<name/>
</author>
<content type="application/xml">
<m:properties>
<d:Timestamp m:type="Edm.DateTime">2014-10-02T00:00:00Z</d:Timestamp>
<d:Min m:type="Edm.Int64">-118</d:Min>
<d:Max m:type="Edm.Int64">15</d:Max>
<d:Average m:type="Edm.Single">-78.44444</d:Average>
<d:Total m:type="Edm.Int64">0</d:Total>
</m:properties>
</content>
</entry>
]
variable[xmldoc] assign[=] call[name[minidom].parseString, parameter[name[xmlstr]]]
variable[return_obj] assign[=] call[name[object_type], parameter[]]
variable[members] assign[=] call[name[dict], parameter[call[name[vars], parameter[name[return_obj]]]]]
for taget[name[xml_entry]] in starred[call[name[_MinidomXmlToObject].get_children_from_path, parameter[name[xmldoc], constant[entry]]]] begin[:]
for taget[name[node]] in starred[call[name[_MinidomXmlToObject].get_children_from_path, parameter[name[xml_entry], constant[content], constant[properties]]]] begin[:]
for taget[name[name]] in starred[name[members]] begin[:]
variable[xml_name] assign[=] call[name[_get_serialization_name], parameter[name[name]]]
variable[children] assign[=] call[name[_MinidomXmlToObject].get_child_nodes, parameter[name[node], name[xml_name]]]
if <ast.UnaryOp object at 0x7da2041d8f10> begin[:]
continue
variable[child] assign[=] call[name[children]][constant[0]]
variable[node_type] assign[=] call[name[child].getAttributeNS, parameter[constant[http://schemas.microsoft.com/ado/2007/08/dataservices/metadata], constant[type]]]
variable[node_value] assign[=] call[name[_ServiceBusManagementXmlSerializer].odata_converter, parameter[name[child].firstChild.nodeValue, name[node_type]]]
call[name[setattr], parameter[name[return_obj], name[name], name[node_value]]]
for taget[tuple[[<ast.Name object at 0x7da20cabc0a0>, <ast.Name object at 0x7da20cabc6d0>]]] in starred[call[call[name[_MinidomXmlToObject].get_entry_properties_from_node, parameter[name[xml_entry]]].items, parameter[]]] begin[:]
if compare[name[name] in name[members]] begin[:]
continue
call[name[setattr], parameter[name[return_obj], name[name], name[value]]]
return[name[return_obj]]
|
keyword[def] identifier[xml_to_metrics] ( identifier[xmlstr] , identifier[object_type] ):
literal[string]
identifier[xmldoc] = identifier[minidom] . identifier[parseString] ( identifier[xmlstr] )
identifier[return_obj] = identifier[object_type] ()
identifier[members] = identifier[dict] ( identifier[vars] ( identifier[return_obj] ))
keyword[for] identifier[xml_entry] keyword[in] identifier[_MinidomXmlToObject] . identifier[get_children_from_path] ( identifier[xmldoc] ,
literal[string] ):
keyword[for] identifier[node] keyword[in] identifier[_MinidomXmlToObject] . identifier[get_children_from_path] ( identifier[xml_entry] ,
literal[string] ,
literal[string] ):
keyword[for] identifier[name] keyword[in] identifier[members] :
identifier[xml_name] = identifier[_get_serialization_name] ( identifier[name] )
identifier[children] = identifier[_MinidomXmlToObject] . identifier[get_child_nodes] ( identifier[node] , identifier[xml_name] )
keyword[if] keyword[not] identifier[children] :
keyword[continue]
identifier[child] = identifier[children] [ literal[int] ]
identifier[node_type] = identifier[child] . identifier[getAttributeNS] ( literal[string] , literal[string] )
identifier[node_value] = identifier[_ServiceBusManagementXmlSerializer] . identifier[odata_converter] ( identifier[child] . identifier[firstChild] . identifier[nodeValue] , identifier[node_type] )
identifier[setattr] ( identifier[return_obj] , identifier[name] , identifier[node_value] )
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[_MinidomXmlToObject] . identifier[get_entry_properties_from_node] (
identifier[xml_entry] ,
identifier[include_id] = keyword[True] ,
identifier[use_title_as_id] = keyword[False] ). identifier[items] ():
keyword[if] identifier[name] keyword[in] identifier[members] :
keyword[continue]
identifier[setattr] ( identifier[return_obj] , identifier[name] , identifier[value] )
keyword[return] identifier[return_obj]
|
def xml_to_metrics(xmlstr, object_type):
"""Converts xml response to service bus metrics objects
The xml format for MetricProperties
<entry>
<id>https://sbgm.windows.net/Metrics('listeners.active')</id>
<title/>
<updated>2014-10-09T11:56:50Z</updated>
<author>
<name/>
</author>
<content type="application/xml">
<m:properties>
<d:Name>listeners.active</d:Name>
<d:PrimaryAggregation>Average</d:PrimaryAggregation>
<d:Unit>Count</d:Unit>
<d:DisplayName>Active listeners</d:DisplayName>
</m:properties>
</content>
</entry>
The xml format for MetricValues
<entry>
<id>https://sbgm.windows.net/MetricValues(datetime'2014-10-02T00:00:00Z')</id>
<title/>
<updated>2014-10-09T18:38:28Z</updated>
<author>
<name/>
</author>
<content type="application/xml">
<m:properties>
<d:Timestamp m:type="Edm.DateTime">2014-10-02T00:00:00Z</d:Timestamp>
<d:Min m:type="Edm.Int64">-118</d:Min>
<d:Max m:type="Edm.Int64">15</d:Max>
<d:Average m:type="Edm.Single">-78.44444</d:Average>
<d:Total m:type="Edm.Int64">0</d:Total>
</m:properties>
</content>
</entry>
"""
xmldoc = minidom.parseString(xmlstr)
return_obj = object_type()
members = dict(vars(return_obj))
# Only one entry here
for xml_entry in _MinidomXmlToObject.get_children_from_path(xmldoc, 'entry'):
for node in _MinidomXmlToObject.get_children_from_path(xml_entry, 'content', 'properties'):
for name in members:
xml_name = _get_serialization_name(name)
children = _MinidomXmlToObject.get_child_nodes(node, xml_name)
if not children:
continue # depends on [control=['if'], data=[]]
child = children[0]
node_type = child.getAttributeNS('http://schemas.microsoft.com/ado/2007/08/dataservices/metadata', 'type')
node_value = _ServiceBusManagementXmlSerializer.odata_converter(child.firstChild.nodeValue, node_type)
setattr(return_obj, name, node_value) # depends on [control=['for'], data=['name']] # depends on [control=['for'], data=['node']]
for (name, value) in _MinidomXmlToObject.get_entry_properties_from_node(xml_entry, include_id=True, use_title_as_id=False).items():
if name in members:
continue # Do not override if already members # depends on [control=['if'], data=[]]
setattr(return_obj, name, value) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['xml_entry']]
return return_obj
|
def run(self, sqlTail = '', raw = False) :
    """Compile filters and run the query and returns the entire result. You can use sqlTail to add things such as order by. If raw, returns the raw tuple data (not wrapped into a raba object)"""
    # Build the SQL and its bound parameters from the accumulated filters.
    sql, sqlValues = self.getSQLQuery()
    cur = self.con.execute('%s %s'% (sql, sqlTail), sqlValues)
    res = []
    for v in cur :
        if not raw :
            # Wrap each row's first column (the object id) in a lazy proxy.
            res.append(RabaPupa(self.rabaClass, v[0]))
        else :
            # NOTE(review): with raw=True this returns only the FIRST row's
            # tuple (and an empty list when there are no rows), despite the
            # docstring promising the entire result. Confirm intent before
            # relying on raw=True for multi-row queries.
            return v
    return res
|
def function[run, parameter[self, sqlTail, raw]]:
constant[Compile filters and run the query and returns the entire result. You can use sqlTail to add things such as order by. If raw, returns the raw tuple data (not wrapped into a raba object)]
<ast.Tuple object at 0x7da1b0a851e0> assign[=] call[name[self].getSQLQuery, parameter[]]
variable[cur] assign[=] call[name[self].con.execute, parameter[binary_operation[constant[%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0bd3ac0>, <ast.Name object at 0x7da1b0bd25c0>]]], name[sqlValues]]]
variable[res] assign[=] list[[]]
for taget[name[v]] in starred[name[cur]] begin[:]
if <ast.UnaryOp object at 0x7da1b0b62710> begin[:]
call[name[res].append, parameter[call[name[RabaPupa], parameter[name[self].rabaClass, call[name[v]][constant[0]]]]]]
return[name[res]]
|
keyword[def] identifier[run] ( identifier[self] , identifier[sqlTail] = literal[string] , identifier[raw] = keyword[False] ):
literal[string]
identifier[sql] , identifier[sqlValues] = identifier[self] . identifier[getSQLQuery] ()
identifier[cur] = identifier[self] . identifier[con] . identifier[execute] ( literal[string] %( identifier[sql] , identifier[sqlTail] ), identifier[sqlValues] )
identifier[res] =[]
keyword[for] identifier[v] keyword[in] identifier[cur] :
keyword[if] keyword[not] identifier[raw] :
identifier[res] . identifier[append] ( identifier[RabaPupa] ( identifier[self] . identifier[rabaClass] , identifier[v] [ literal[int] ]))
keyword[else] :
keyword[return] identifier[v]
keyword[return] identifier[res]
|
def run(self, sqlTail='', raw=False):
"""Compile filters and run the query and returns the entire result. You can use sqlTail to add things such as order by. If raw, returns the raw tuple data (not wrapped into a raba object)"""
(sql, sqlValues) = self.getSQLQuery()
cur = self.con.execute('%s %s' % (sql, sqlTail), sqlValues)
res = []
for v in cur:
if not raw:
res.append(RabaPupa(self.rabaClass, v[0])) # depends on [control=['if'], data=[]]
else:
return v # depends on [control=['for'], data=['v']]
return res
|
def setWhitespaceChars( self, chars ):
    """
    Replace the set of characters this element treats as whitespace
    (instead of the class-level default) and enable whitespace skipping.
    Returns self to allow call chaining.
    """
    self.copyDefaultWhiteChars = False
    self.whiteChars = chars
    self.skipWhitespace = True
    return self
|
def function[setWhitespaceChars, parameter[self, chars]]:
constant[
Overrides the default whitespace chars
]
name[self].skipWhitespace assign[=] constant[True]
name[self].whiteChars assign[=] name[chars]
name[self].copyDefaultWhiteChars assign[=] constant[False]
return[name[self]]
|
keyword[def] identifier[setWhitespaceChars] ( identifier[self] , identifier[chars] ):
literal[string]
identifier[self] . identifier[skipWhitespace] = keyword[True]
identifier[self] . identifier[whiteChars] = identifier[chars]
identifier[self] . identifier[copyDefaultWhiteChars] = keyword[False]
keyword[return] identifier[self]
|
def setWhitespaceChars(self, chars):
"""
Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
|
def _get_all_services(crypto=None, just_exchange=False):
    """
    Go through the crypto_data structure and return a sorted list of all
    unique installed service classes. Optionally filter by crypto-currency.

    :param crypto: optional currency symbol; when given, only that
        currency's services are considered.
    :param just_exchange: when True, only the price ("current_price")
        services are returned.
    :return: list of service classes, sorted by class name.
    """
    from moneywagon.crypto_data import crypto_data
    if not crypto:
        # no currency specified, get all services
        to_iterate = crypto_data.items()
    else:
        # limit to one currency
        to_iterate = [(crypto, crypto_data[crypto])]
    services = []
    for currency, data in to_iterate:
        if 'services' not in data:
            continue
        if currency == '':
            continue  # template entry, not a real currency
        # price services are defined as a dict of {fiat: [services]}; all
        # other service categories are defined as plain lists.
        # NOTE: unlike the previous implementation, the shared crypto_data
        # structure is never mutated here (the old del/restore dance was
        # not exception-safe).
        all_services = list(data['services']['current_price'].values())
        if not just_exchange:
            all_services += [
                svc_list for key, svc_list in data['services'].items()
                if key != 'current_price'
            ]
        services.append([
            item for sublist in all_services for item in sublist
        ])
    return sorted(
        set([item for sublist in services for item in sublist]),
        key=lambda x: x.__name__
    )
|
def function[_get_all_services, parameter[crypto, just_exchange]]:
constant[
Go through the crypto_data structure and return all list of all (unique)
installed services. Optionally filter by crypto-currency.
]
from relative_module[moneywagon.crypto_data] import module[crypto_data]
if <ast.UnaryOp object at 0x7da1b10184f0> begin[:]
variable[to_iterate] assign[=] call[name[crypto_data].items, parameter[]]
variable[services] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b1018850>, <ast.Name object at 0x7da1b1018820>]]] in starred[name[to_iterate]] begin[:]
if compare[constant[services] <ast.NotIn object at 0x7da2590d7190> name[data]] begin[:]
continue
if compare[name[currency] equal[==] constant[]] begin[:]
continue
variable[price_services] assign[=] call[call[name[data]][constant[services]]][constant[current_price]]
<ast.Delete object at 0x7da1b11fb940>
variable[all_services] assign[=] call[name[list], parameter[call[name[price_services].values, parameter[]]]]
if <ast.UnaryOp object at 0x7da1b11fbfa0> begin[:]
<ast.AugAssign object at 0x7da1b11fbd90>
call[call[name[data]][constant[services]]][constant[current_price]] assign[=] name[price_services]
call[name[services].append, parameter[<ast.ListComp object at 0x7da1b11f8d90>]]
return[call[name[sorted], parameter[call[name[set], parameter[<ast.ListComp object at 0x7da1b11f9f60>]]]]]
|
keyword[def] identifier[_get_all_services] ( identifier[crypto] = keyword[None] , identifier[just_exchange] = keyword[False] ):
literal[string]
keyword[from] identifier[moneywagon] . identifier[crypto_data] keyword[import] identifier[crypto_data]
keyword[if] keyword[not] identifier[crypto] :
identifier[to_iterate] = identifier[crypto_data] . identifier[items] ()
keyword[else] :
identifier[to_iterate] =[( identifier[crypto] , identifier[crypto_data] [ identifier[crypto] ])]
identifier[services] =[]
keyword[for] identifier[currency] , identifier[data] keyword[in] identifier[to_iterate] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[data] :
keyword[continue]
keyword[if] identifier[currency] == literal[string] :
keyword[continue]
identifier[price_services] = identifier[data] [ literal[string] ][ literal[string] ]
keyword[del] identifier[data] [ literal[string] ][ literal[string] ]
identifier[all_services] = identifier[list] ( identifier[price_services] . identifier[values] ())
keyword[if] keyword[not] identifier[just_exchange] :
identifier[all_services] += identifier[list] ( identifier[data] [ literal[string] ]. identifier[values] ())
identifier[data] [ literal[string] ][ literal[string] ]= identifier[price_services]
identifier[services] . identifier[append] ([
identifier[item] keyword[for] identifier[sublist] keyword[in] identifier[all_services] keyword[for] identifier[item] keyword[in] identifier[sublist]
])
keyword[return] identifier[sorted] (
identifier[set] ([ identifier[item] keyword[for] identifier[sublist] keyword[in] identifier[services] keyword[for] identifier[item] keyword[in] identifier[sublist] ]),
identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[__name__]
)
|
def _get_all_services(crypto=None, just_exchange=False):
"""
Go through the crypto_data structure and return all list of all (unique)
installed services. Optionally filter by crypto-currency.
"""
from moneywagon.crypto_data import crypto_data
if not crypto:
# no currency specified, get all services
to_iterate = crypto_data.items() # depends on [control=['if'], data=[]]
else:
# limit to one currency
to_iterate = [(crypto, crypto_data[crypto])]
services = []
for (currency, data) in to_iterate:
if 'services' not in data:
continue # depends on [control=['if'], data=[]]
if currency == '':
continue # template # depends on [control=['if'], data=[]]
# price services are defined as dictionaries, all other services
# are defined as a list.
price_services = data['services']['current_price']
del data['services']['current_price']
all_services = list(price_services.values())
if not just_exchange:
all_services += list(data['services'].values()) # depends on [control=['if'], data=[]]
# replace
data['services']['current_price'] = price_services
services.append([item for sublist in all_services for item in sublist]) # depends on [control=['for'], data=[]]
return sorted(set([item for sublist in services for item in sublist]), key=lambda x: x.__name__)
|
def _strip_commas(cls, kw):
"Strip out any leading/training commas from the token"
kw = kw[:-1] if kw[-1]==',' else kw
return kw[1:] if kw[0]==',' else kw
|
def function[_strip_commas, parameter[cls, kw]]:
constant[Strip out any leading/training commas from the token]
variable[kw] assign[=] <ast.IfExp object at 0x7da20e9612a0>
return[<ast.IfExp object at 0x7da20e9618a0>]
|
keyword[def] identifier[_strip_commas] ( identifier[cls] , identifier[kw] ):
literal[string]
identifier[kw] = identifier[kw] [:- literal[int] ] keyword[if] identifier[kw] [- literal[int] ]== literal[string] keyword[else] identifier[kw]
keyword[return] identifier[kw] [ literal[int] :] keyword[if] identifier[kw] [ literal[int] ]== literal[string] keyword[else] identifier[kw]
|
def _strip_commas(cls, kw):
"""Strip out any leading/training commas from the token"""
kw = kw[:-1] if kw[-1] == ',' else kw
return kw[1:] if kw[0] == ',' else kw
|
def list_nics(access_token, subscription_id):
    '''List the network interfaces in a subscription.
    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
    Returns:
        HTTP response. JSON body of NICs list with properties.
    '''
    # Fixed: the previous concatenation produced a doubled slash
    # ('.../Microsoft.Network//networkInterfaces...') in the request path.
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/providers/Microsoft.Network',
                        '/networkInterfaces?api-version=', NETWORK_API])
    return do_get(endpoint, access_token)
|
def function[list_nics, parameter[access_token, subscription_id]]:
constant[List the network interfaces in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of NICs list with properties.
]
variable[endpoint] assign[=] call[constant[].join, parameter[list[[<ast.Call object at 0x7da1b04cb490>, <ast.Constant object at 0x7da1b04c8460>, <ast.Name object at 0x7da1b04c8910>, <ast.Constant object at 0x7da1b04c8130>, <ast.Constant object at 0x7da1b04ca110>, <ast.Name object at 0x7da1b04c9960>]]]]
return[call[name[do_get], parameter[name[endpoint], name[access_token]]]]
|
keyword[def] identifier[list_nics] ( identifier[access_token] , identifier[subscription_id] ):
literal[string]
identifier[endpoint] = literal[string] . identifier[join] ([ identifier[get_rm_endpoint] (),
literal[string] , identifier[subscription_id] ,
literal[string] ,
literal[string] , identifier[NETWORK_API] ])
keyword[return] identifier[do_get] ( identifier[endpoint] , identifier[access_token] )
|
def list_nics(access_token, subscription_id):
"""List the network interfaces in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of NICs list with properties.
"""
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Network/', '/networkInterfaces?api-version=', NETWORK_API])
return do_get(endpoint, access_token)
|
def construct(self, method, name, lowcut, highcut, samp_rate, filt_order,
              prepick, **kwargs):
    """
    Construct a template using a given method.
    :param method:
        Method to make the template,
        see :mod:`eqcorrscan.core.template_gen` for possible methods.
    :type method: str
    :type name: str
    :param name: Name for the template
    :type lowcut: float
    :param lowcut:
        Low cut (Hz), if set to None will not apply a lowcut
    :type highcut: float
    :param highcut:
        High cut (Hz), if set to None will not apply a highcut.
    :type samp_rate: float
    :param samp_rate:
        New sampling rate in Hz.
    :type filt_order: int
    :param filt_order:
        Filter level (number of corners).
    :type prepick: float
    :param prepick: Pre-pick time in seconds
    .. Note::
        methods `from_meta_file`, `from_seishub`, `from_client` and
        `multi_template_gen` are not accommodated in this function and must
        be called from Tribe.construct as these generate multiple
        templates.
    .. Note::
        Calls functions from `eqcorrscan.core.template_gen`, see these
        functions for details on what further arguments are required.
    .. rubric:: Example
    >>> # Get the path to the test data
    >>> import eqcorrscan
    >>> import os
    >>> TEST_PATH = (
    ...     os.path.dirname(eqcorrscan.__file__) + '/tests/test_data')
    >>> sac_files = glob.glob(TEST_PATH + '/SAC/2014p611252/*')
    >>> template = Template().construct(
    ...     method='from_sac', name='test', lowcut=2.0, highcut=8.0,
    ...     samp_rate=20.0, filt_order=4, prepick=0.1, swin='all',
    ...     length=2.0, sac_files=sac_files)
    >>> print(template) # doctest: +NORMALIZE_WHITESPACE
    Template test:
         12 channels;
         lowcut: 2.0 Hz;
         highcut: 8.0 Hz;
         sampling rate 20.0 Hz;
         filter order: 4;
         process length: 300.0 s
    This will raise an error if the method is unsupported:
    >>> template = Template().construct(
    ...     method='from_meta_file', name='test', lowcut=2.0, highcut=8.0,
    ...     samp_rate=20.0, filt_order=4, prepick=0.1, swin='all',
    ...     length=2.0) # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    NotImplementedError: Method is not supported, use \
    Tribe.construct instead.
    """
    if method in ['from_meta_file', 'from_seishub', 'from_client',
                  'multi_template_gen']:
        # These methods generate multiple templates; Tribe.construct
        # handles them.
        raise NotImplementedError('Method is not supported, '
                                  'use Tribe.construct instead.')
    streams, events, process_lengths = template_gen.template_gen(
        method=method, lowcut=lowcut, highcut=highcut,
        filt_order=filt_order, samp_rate=samp_rate, prepick=prepick,
        return_event=True, **kwargs)
    self.name = name
    # Single-template methods return one stream/event/length each.
    st = streams[0]
    event = events[0]
    process_length = process_lengths[0]
    # Fixed: iterate over a static snapshot of the traces.  Removing a
    # trace from ``st`` while iterating ``st`` directly skips the trace
    # that follows each removal, so zero-filled traces could slip through.
    for tr in list(st):
        if not np.any(tr.data.astype(np.float16)):
            # Trace underflows to all-zero at float16 precision, treat as
            # missing data and drop it from the template.
            warnings.warn('Data are zero in float16, missing data,'
                          ' will not use: %s' % tr.id)
            st.remove(tr)
    self.st = st
    self.lowcut = lowcut
    self.highcut = highcut
    self.filt_order = filt_order
    self.samp_rate = samp_rate
    self.process_length = process_length
    self.prepick = prepick
    self.event = event
    return self
|
def function[construct, parameter[self, method, name, lowcut, highcut, samp_rate, filt_order, prepick]]:
constant[
Construct a template using a given method.
:param method:
Method to make the template,
see :mod:`eqcorrscan.core.template_gen` for possible methods.
:type method: str
:type name: str
:param name: Name for the template
:type lowcut: float
:param lowcut:
Low cut (Hz), if set to None will not apply a lowcut
:type highcut: float
:param highcut:
High cut (Hz), if set to None will not apply a highcut.
:type samp_rate: float
:param samp_rate:
New sampling rate in Hz.
:type filt_order: int
:param filt_order:
Filter level (number of corners).
:type prepick: float
:param prepick: Pre-pick time in seconds
.. Note::
methods `from_meta_file`, `from_seishub`, `from_client` and
`multi_template_gen` are not accommodated in this function and must
be called from Tribe.construct as these generate multiple
templates.
.. Note::
Calls functions from `eqcorrscan.core.template_gen`, see these
functions for details on what further arguments are required.
.. rubric:: Example
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = (
... os.path.dirname(eqcorrscan.__file__) + '/tests/test_data')
>>> sac_files = glob.glob(TEST_PATH + '/SAC/2014p611252/*')
>>> template = Template().construct(
... method='from_sac', name='test', lowcut=2.0, highcut=8.0,
... samp_rate=20.0, filt_order=4, prepick=0.1, swin='all',
... length=2.0, sac_files=sac_files)
>>> print(template) # doctest: +NORMALIZE_WHITESPACE
Template test:
12 channels;
lowcut: 2.0 Hz;
highcut: 8.0 Hz;
sampling rate 20.0 Hz;
filter order: 4;
process length: 300.0 s
This will raise an error if the method is unsupported:
>>> template = Template().construct(
... method='from_meta_file', name='test', lowcut=2.0, highcut=8.0,
... samp_rate=20.0, filt_order=4, prepick=0.1, swin='all',
... length=2.0) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
NotImplementedError: Method is not supported, use Tribe.construct instead.
]
if compare[name[method] in list[[<ast.Constant object at 0x7da18fe93ca0>, <ast.Constant object at 0x7da18fe93490>, <ast.Constant object at 0x7da18fe91030>, <ast.Constant object at 0x7da18fe91c90>]]] begin[:]
<ast.Raise object at 0x7da18fe91450>
<ast.Tuple object at 0x7da18fe913c0> assign[=] call[name[template_gen].template_gen, parameter[]]
name[self].name assign[=] name[name]
variable[st] assign[=] call[name[streams]][constant[0]]
variable[event] assign[=] call[name[events]][constant[0]]
variable[process_length] assign[=] call[name[process_lengths]][constant[0]]
for taget[name[tr]] in starred[name[st]] begin[:]
if <ast.UnaryOp object at 0x7da18fe91900> begin[:]
call[name[warnings].warn, parameter[binary_operation[constant[Data are zero in float16, missing data, will not use: %s] <ast.Mod object at 0x7da2590d6920> name[tr].id]]]
call[name[st].remove, parameter[name[tr]]]
name[self].st assign[=] name[st]
name[self].lowcut assign[=] name[lowcut]
name[self].highcut assign[=] name[highcut]
name[self].filt_order assign[=] name[filt_order]
name[self].samp_rate assign[=] name[samp_rate]
name[self].process_length assign[=] name[process_length]
name[self].prepick assign[=] name[prepick]
name[self].event assign[=] name[event]
return[name[self]]
|
keyword[def] identifier[construct] ( identifier[self] , identifier[method] , identifier[name] , identifier[lowcut] , identifier[highcut] , identifier[samp_rate] , identifier[filt_order] ,
identifier[prepick] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[method] keyword[in] [ literal[string] , literal[string] , literal[string] ,
literal[string] ]:
keyword[raise] identifier[NotImplementedError] ( literal[string]
literal[string] )
identifier[streams] , identifier[events] , identifier[process_lengths] = identifier[template_gen] . identifier[template_gen] (
identifier[method] = identifier[method] , identifier[lowcut] = identifier[lowcut] , identifier[highcut] = identifier[highcut] ,
identifier[filt_order] = identifier[filt_order] , identifier[samp_rate] = identifier[samp_rate] , identifier[prepick] = identifier[prepick] ,
identifier[return_event] = keyword[True] ,** identifier[kwargs] )
identifier[self] . identifier[name] = identifier[name]
identifier[st] = identifier[streams] [ literal[int] ]
identifier[event] = identifier[events] [ literal[int] ]
identifier[process_length] = identifier[process_lengths] [ literal[int] ]
keyword[for] identifier[tr] keyword[in] identifier[st] :
keyword[if] keyword[not] identifier[np] . identifier[any] ( identifier[tr] . identifier[data] . identifier[astype] ( identifier[np] . identifier[float16] )):
identifier[warnings] . identifier[warn] ( literal[string]
literal[string] % identifier[tr] . identifier[id] )
identifier[st] . identifier[remove] ( identifier[tr] )
identifier[self] . identifier[st] = identifier[st]
identifier[self] . identifier[lowcut] = identifier[lowcut]
identifier[self] . identifier[highcut] = identifier[highcut]
identifier[self] . identifier[filt_order] = identifier[filt_order]
identifier[self] . identifier[samp_rate] = identifier[samp_rate]
identifier[self] . identifier[process_length] = identifier[process_length]
identifier[self] . identifier[prepick] = identifier[prepick]
identifier[self] . identifier[event] = identifier[event]
keyword[return] identifier[self]
|
def construct(self, method, name, lowcut, highcut, samp_rate, filt_order, prepick, **kwargs):
"""
Construct a template using a given method.
:param method:
Method to make the template,
see :mod:`eqcorrscan.core.template_gen` for possible methods.
:type method: str
:type name: str
:param name: Name for the template
:type lowcut: float
:param lowcut:
Low cut (Hz), if set to None will not apply a lowcut
:type highcut: float
:param highcut:
High cut (Hz), if set to None will not apply a highcut.
:type samp_rate: float
:param samp_rate:
New sampling rate in Hz.
:type filt_order: int
:param filt_order:
Filter level (number of corners).
:type prepick: float
:param prepick: Pre-pick time in seconds
.. Note::
methods `from_meta_file`, `from_seishub`, `from_client` and
`multi_template_gen` are not accommodated in this function and must
be called from Tribe.construct as these generate multiple
templates.
.. Note::
Calls functions from `eqcorrscan.core.template_gen`, see these
functions for details on what further arguments are required.
.. rubric:: Example
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = (
... os.path.dirname(eqcorrscan.__file__) + '/tests/test_data')
>>> sac_files = glob.glob(TEST_PATH + '/SAC/2014p611252/*')
>>> template = Template().construct(
... method='from_sac', name='test', lowcut=2.0, highcut=8.0,
... samp_rate=20.0, filt_order=4, prepick=0.1, swin='all',
... length=2.0, sac_files=sac_files)
>>> print(template) # doctest: +NORMALIZE_WHITESPACE
Template test:
12 channels;
lowcut: 2.0 Hz;
highcut: 8.0 Hz;
sampling rate 20.0 Hz;
filter order: 4;
process length: 300.0 s
This will raise an error if the method is unsupported:
>>> template = Template().construct(
... method='from_meta_file', name='test', lowcut=2.0, highcut=8.0,
... samp_rate=20.0, filt_order=4, prepick=0.1, swin='all',
... length=2.0) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
NotImplementedError: Method is not supported, use Tribe.construct instead.
"""
if method in ['from_meta_file', 'from_seishub', 'from_client', 'multi_template_gen']:
raise NotImplementedError('Method is not supported, use Tribe.construct instead.') # depends on [control=['if'], data=[]]
(streams, events, process_lengths) = template_gen.template_gen(method=method, lowcut=lowcut, highcut=highcut, filt_order=filt_order, samp_rate=samp_rate, prepick=prepick, return_event=True, **kwargs)
self.name = name
st = streams[0]
event = events[0]
process_length = process_lengths[0]
for tr in st:
if not np.any(tr.data.astype(np.float16)):
warnings.warn('Data are zero in float16, missing data, will not use: %s' % tr.id)
st.remove(tr) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tr']]
self.st = st
self.lowcut = lowcut
self.highcut = highcut
self.filt_order = filt_order
self.samp_rate = samp_rate
self.process_length = process_length
self.prepick = prepick
self.event = event
return self
|
def _merge_simple_selectors(a, b):
"""Merge two simple selectors, for the purposes of the LCS algorithm below.
In practice this returns the more specific selector if one is a subset of
the other, else it returns None.
"""
# TODO what about combinators
if a.is_superset_of(b):
return b
elif b.is_superset_of(a):
return a
else:
return None
|
def function[_merge_simple_selectors, parameter[a, b]]:
constant[Merge two simple selectors, for the purposes of the LCS algorithm below.
In practice this returns the more specific selector if one is a subset of
the other, else it returns None.
]
if call[name[a].is_superset_of, parameter[name[b]]] begin[:]
return[name[b]]
|
keyword[def] identifier[_merge_simple_selectors] ( identifier[a] , identifier[b] ):
literal[string]
keyword[if] identifier[a] . identifier[is_superset_of] ( identifier[b] ):
keyword[return] identifier[b]
keyword[elif] identifier[b] . identifier[is_superset_of] ( identifier[a] ):
keyword[return] identifier[a]
keyword[else] :
keyword[return] keyword[None]
|
def _merge_simple_selectors(a, b):
"""Merge two simple selectors, for the purposes of the LCS algorithm below.
In practice this returns the more specific selector if one is a subset of
the other, else it returns None.
"""
# TODO what about combinators
if a.is_superset_of(b):
return b # depends on [control=['if'], data=[]]
elif b.is_superset_of(a):
return a # depends on [control=['if'], data=[]]
else:
return None
|
def _xfs_info_get_kv(serialized):
'''
Parse one line of the XFS info output.
'''
# No need to know sub-elements here
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
# Preserve ordering
return [tuple(items.split("=")) for items in opt]
|
def function[_xfs_info_get_kv, parameter[serialized]]:
constant[
Parse one line of the XFS info output.
]
if call[name[serialized].startswith, parameter[constant[=]]] begin[:]
variable[serialized] assign[=] call[call[name[serialized]][<ast.Slice object at 0x7da207f007c0>].strip, parameter[]]
variable[serialized] assign[=] call[call[name[serialized].replace, parameter[constant[ = ], constant[=*** ]]].replace, parameter[constant[ =], constant[=]]]
variable[opt] assign[=] list[[]]
for taget[name[tkn]] in starred[call[name[serialized].split, parameter[constant[ ]]]] begin[:]
if <ast.BoolOp object at 0x7da207f03f70> begin[:]
call[name[opt].append, parameter[name[tkn]]]
return[<ast.ListComp object at 0x7da207f01a80>]
|
keyword[def] identifier[_xfs_info_get_kv] ( identifier[serialized] ):
literal[string]
keyword[if] identifier[serialized] . identifier[startswith] ( literal[string] ):
identifier[serialized] = identifier[serialized] [ literal[int] :]. identifier[strip] ()
identifier[serialized] = identifier[serialized] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )
identifier[opt] =[]
keyword[for] identifier[tkn] keyword[in] identifier[serialized] . identifier[split] ( literal[string] ):
keyword[if] keyword[not] identifier[opt] keyword[or] literal[string] keyword[in] identifier[tkn] :
identifier[opt] . identifier[append] ( identifier[tkn] )
keyword[else] :
identifier[opt] [ identifier[len] ( identifier[opt] )- literal[int] ]= identifier[opt] [ identifier[len] ( identifier[opt] )- literal[int] ]+ literal[string] + identifier[tkn]
keyword[return] [ identifier[tuple] ( identifier[items] . identifier[split] ( literal[string] )) keyword[for] identifier[items] keyword[in] identifier[opt] ]
|
def _xfs_info_get_kv(serialized):
"""
Parse one line of the XFS info output.
"""
# No need to know sub-elements here
if serialized.startswith('='):
serialized = serialized[1:].strip() # depends on [control=['if'], data=[]]
serialized = serialized.replace(' = ', '=*** ').replace(' =', '=')
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(' '):
if not opt or '=' in tkn:
opt.append(tkn) # depends on [control=['if'], data=[]]
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + ' ' + tkn # depends on [control=['for'], data=['tkn']]
# Preserve ordering
return [tuple(items.split('=')) for items in opt]
|
def _fire_ret_load_specific_fun(self, load, fun_index=0):
    '''
    Helper function for fire_ret_load

    Fires "job error" events for a single function of a (possibly
    multi-function) job return whose retcode indicates failure.

    load: return payload; reads the 'fun', 'retcode', 'jid', 'id' and
        'user' keys, plus 'return' when present.
    fun_index: position of the function within a multi-function job
        (ignored for single-function jobs).
    '''
    if isinstance(load['fun'], list):
        # Multi-function job
        fun = load['fun'][fun_index]
        # 'retcode' was already validated to exist and be non-zero
        # for the given function in the caller.
        if isinstance(load['retcode'], list):
            # Multi-function ordered: 'return' and 'retcode' are parallel
            # lists indexed by fun_index.
            ret = load.get('return')
            if isinstance(ret, list) and len(ret) > fun_index:
                ret = ret[fun_index]
            else:
                ret = {}
            retcode = load['retcode'][fun_index]
        else:
            # Multi-function keyed by function name.
            ret = load.get('return', {})
            ret = ret.get(fun, {})
            retcode = load['retcode'][fun]
    else:
        # Single-function job
        fun = load['fun']
        ret = load.get('return', {})
        retcode = load['retcode']
    try:
        # ret maps tags (joined with the '_|-' separator) to result dicts.
        for tag, data in six.iteritems(ret):
            data['retcode'] = retcode
            tags = tag.split('_|-')
            if data.get('result') is False:
                self.fire_event(
                    data,
                    '{0}.{1}'.format(tags[0], tags[-1])
                ) # old dup event
                # Augment the payload with job metadata before firing the
                # canonical 'job ... error' event below.
                data['jid'] = load['jid']
                data['id'] = load['id']
                data['success'] = False
                data['return'] = 'Error: {0}.{1}'.format(
                    tags[0], tags[-1])
                data['fun'] = fun
                data['user'] = load['user']
                self.fire_event(
                    data,
                    tagify([load['jid'],
                            'sub',
                            load['id'],
                            'error',
                            fun],
                           'job'))
    except Exception:
        # Best effort: event firing must never break return handling.
        pass
|
def function[_fire_ret_load_specific_fun, parameter[self, load, fun_index]]:
constant[
Helper function for fire_ret_load
]
if call[name[isinstance], parameter[call[name[load]][constant[fun]], name[list]]] begin[:]
variable[fun] assign[=] call[call[name[load]][constant[fun]]][name[fun_index]]
if call[name[isinstance], parameter[call[name[load]][constant[retcode]], name[list]]] begin[:]
variable[ret] assign[=] call[name[load].get, parameter[constant[return]]]
if <ast.BoolOp object at 0x7da20c76c100> begin[:]
variable[ret] assign[=] call[name[ret]][name[fun_index]]
variable[retcode] assign[=] call[call[name[load]][constant[retcode]]][name[fun_index]]
<ast.Try object at 0x7da20c76feb0>
|
keyword[def] identifier[_fire_ret_load_specific_fun] ( identifier[self] , identifier[load] , identifier[fun_index] = literal[int] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[load] [ literal[string] ], identifier[list] ):
identifier[fun] = identifier[load] [ literal[string] ][ identifier[fun_index] ]
keyword[if] identifier[isinstance] ( identifier[load] [ literal[string] ], identifier[list] ):
identifier[ret] = identifier[load] . identifier[get] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[ret] , identifier[list] ) keyword[and] identifier[len] ( identifier[ret] )> identifier[fun_index] :
identifier[ret] = identifier[ret] [ identifier[fun_index] ]
keyword[else] :
identifier[ret] ={}
identifier[retcode] = identifier[load] [ literal[string] ][ identifier[fun_index] ]
keyword[else] :
identifier[ret] = identifier[load] . identifier[get] ( literal[string] ,{})
identifier[ret] = identifier[ret] . identifier[get] ( identifier[fun] ,{})
identifier[retcode] = identifier[load] [ literal[string] ][ identifier[fun] ]
keyword[else] :
identifier[fun] = identifier[load] [ literal[string] ]
identifier[ret] = identifier[load] . identifier[get] ( literal[string] ,{})
identifier[retcode] = identifier[load] [ literal[string] ]
keyword[try] :
keyword[for] identifier[tag] , identifier[data] keyword[in] identifier[six] . identifier[iteritems] ( identifier[ret] ):
identifier[data] [ literal[string] ]= identifier[retcode]
identifier[tags] = identifier[tag] . identifier[split] ( literal[string] )
keyword[if] identifier[data] . identifier[get] ( literal[string] ) keyword[is] keyword[False] :
identifier[self] . identifier[fire_event] (
identifier[data] ,
literal[string] . identifier[format] ( identifier[tags] [ literal[int] ], identifier[tags] [- literal[int] ])
)
identifier[data] [ literal[string] ]= identifier[load] [ literal[string] ]
identifier[data] [ literal[string] ]= identifier[load] [ literal[string] ]
identifier[data] [ literal[string] ]= keyword[False]
identifier[data] [ literal[string] ]= literal[string] . identifier[format] (
identifier[tags] [ literal[int] ], identifier[tags] [- literal[int] ])
identifier[data] [ literal[string] ]= identifier[fun]
identifier[data] [ literal[string] ]= identifier[load] [ literal[string] ]
identifier[self] . identifier[fire_event] (
identifier[data] ,
identifier[tagify] ([ identifier[load] [ literal[string] ],
literal[string] ,
identifier[load] [ literal[string] ],
literal[string] ,
identifier[fun] ],
literal[string] ))
keyword[except] identifier[Exception] :
keyword[pass]
|
def _fire_ret_load_specific_fun(self, load, fun_index=0):
"""
Helper function for fire_ret_load
"""
if isinstance(load['fun'], list):
# Multi-function job
fun = load['fun'][fun_index]
# 'retcode' was already validated to exist and be non-zero
# for the given function in the caller.
if isinstance(load['retcode'], list):
# Multi-function ordered
ret = load.get('return')
if isinstance(ret, list) and len(ret) > fun_index:
ret = ret[fun_index] # depends on [control=['if'], data=[]]
else:
ret = {}
retcode = load['retcode'][fun_index] # depends on [control=['if'], data=[]]
else:
ret = load.get('return', {})
ret = ret.get(fun, {})
retcode = load['retcode'][fun] # depends on [control=['if'], data=[]]
else:
# Single-function job
fun = load['fun']
ret = load.get('return', {})
retcode = load['retcode']
try:
for (tag, data) in six.iteritems(ret):
data['retcode'] = retcode
tags = tag.split('_|-')
if data.get('result') is False:
self.fire_event(data, '{0}.{1}'.format(tags[0], tags[-1])) # old dup event
data['jid'] = load['jid']
data['id'] = load['id']
data['success'] = False
data['return'] = 'Error: {0}.{1}'.format(tags[0], tags[-1])
data['fun'] = fun
data['user'] = load['user']
self.fire_event(data, tagify([load['jid'], 'sub', load['id'], 'error', fun], 'job')) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]]
|
def get_context(self, name, value, attrs):
    """
    Some widgets require a modified rendering context, if they contain angular directives.
    """
    context = super(NgWidgetMixin, self).get_context(name, value, attrs)
    # Let the bound field adjust the context, when it knows how to.
    updater = getattr(self._field, 'update_widget_rendering_context', None)
    if callable(updater):
        updater(context)
    return context
|
def function[get_context, parameter[self, name, value, attrs]]:
constant[
Some widgets require a modified rendering context, if they contain angular directives.
]
variable[context] assign[=] call[call[name[super], parameter[name[NgWidgetMixin], name[self]]].get_context, parameter[name[name], name[value], name[attrs]]]
if call[name[callable], parameter[call[name[getattr], parameter[name[self]._field, constant[update_widget_rendering_context], constant[None]]]]] begin[:]
call[name[self]._field.update_widget_rendering_context, parameter[name[context]]]
return[name[context]]
|
keyword[def] identifier[get_context] ( identifier[self] , identifier[name] , identifier[value] , identifier[attrs] ):
literal[string]
identifier[context] = identifier[super] ( identifier[NgWidgetMixin] , identifier[self] ). identifier[get_context] ( identifier[name] , identifier[value] , identifier[attrs] )
keyword[if] identifier[callable] ( identifier[getattr] ( identifier[self] . identifier[_field] , literal[string] , keyword[None] )):
identifier[self] . identifier[_field] . identifier[update_widget_rendering_context] ( identifier[context] )
keyword[return] identifier[context]
|
def get_context(self, name, value, attrs):
"""
Some widgets require a modified rendering context, if they contain angular directives.
"""
context = super(NgWidgetMixin, self).get_context(name, value, attrs)
if callable(getattr(self._field, 'update_widget_rendering_context', None)):
self._field.update_widget_rendering_context(context) # depends on [control=['if'], data=[]]
return context
|
def get_event_data(event_abi, log_entry):
"""
Given an event ABI and a log entry for that event, return the decoded
event data
"""
if event_abi['anonymous']:
log_topics = log_entry['topics']
elif not log_entry['topics']:
raise MismatchedABI("Expected non-anonymous event to have 1 or more topics")
elif event_abi_to_log_topic(event_abi) != log_entry['topics'][0]:
raise MismatchedABI("The event signature did not match the provided ABI")
else:
log_topics = log_entry['topics'][1:]
log_topics_abi = get_indexed_event_inputs(event_abi)
log_topic_normalized_inputs = normalize_event_input_types(log_topics_abi)
log_topic_types = get_event_abi_types_for_decoding(log_topic_normalized_inputs)
log_topic_names = get_abi_input_names({'inputs': log_topics_abi})
if len(log_topics) != len(log_topic_types):
raise ValueError("Expected {0} log topics. Got {1}".format(
len(log_topic_types),
len(log_topics),
))
log_data = hexstr_if_str(to_bytes, log_entry['data'])
log_data_abi = exclude_indexed_event_inputs(event_abi)
log_data_normalized_inputs = normalize_event_input_types(log_data_abi)
log_data_types = get_event_abi_types_for_decoding(log_data_normalized_inputs)
log_data_names = get_abi_input_names({'inputs': log_data_abi})
# sanity check that there are not name intersections between the topic
# names and the data argument names.
duplicate_names = set(log_topic_names).intersection(log_data_names)
if duplicate_names:
raise ValueError(
"Invalid Event ABI: The following argument names are duplicated "
"between event inputs: '{0}'".format(', '.join(duplicate_names))
)
decoded_log_data = decode_abi(log_data_types, log_data)
normalized_log_data = map_abi_data(
BASE_RETURN_NORMALIZERS,
log_data_types,
decoded_log_data
)
decoded_topic_data = [
decode_single(topic_type, topic_data)
for topic_type, topic_data
in zip(log_topic_types, log_topics)
]
normalized_topic_data = map_abi_data(
BASE_RETURN_NORMALIZERS,
log_topic_types,
decoded_topic_data
)
event_args = dict(itertools.chain(
zip(log_topic_names, normalized_topic_data),
zip(log_data_names, normalized_log_data),
))
event_data = {
'args': event_args,
'event': event_abi['name'],
'logIndex': log_entry['logIndex'],
'transactionIndex': log_entry['transactionIndex'],
'transactionHash': log_entry['transactionHash'],
'address': log_entry['address'],
'blockHash': log_entry['blockHash'],
'blockNumber': log_entry['blockNumber'],
}
return AttributeDict.recursive(event_data)
|
def function[get_event_data, parameter[event_abi, log_entry]]:
constant[
Given an event ABI and a log entry for that event, return the decoded
event data
]
if call[name[event_abi]][constant[anonymous]] begin[:]
variable[log_topics] assign[=] call[name[log_entry]][constant[topics]]
variable[log_topics_abi] assign[=] call[name[get_indexed_event_inputs], parameter[name[event_abi]]]
variable[log_topic_normalized_inputs] assign[=] call[name[normalize_event_input_types], parameter[name[log_topics_abi]]]
variable[log_topic_types] assign[=] call[name[get_event_abi_types_for_decoding], parameter[name[log_topic_normalized_inputs]]]
variable[log_topic_names] assign[=] call[name[get_abi_input_names], parameter[dictionary[[<ast.Constant object at 0x7da18c4ce8c0>], [<ast.Name object at 0x7da18c4cee60>]]]]
if compare[call[name[len], parameter[name[log_topics]]] not_equal[!=] call[name[len], parameter[name[log_topic_types]]]] begin[:]
<ast.Raise object at 0x7da18c4cea70>
variable[log_data] assign[=] call[name[hexstr_if_str], parameter[name[to_bytes], call[name[log_entry]][constant[data]]]]
variable[log_data_abi] assign[=] call[name[exclude_indexed_event_inputs], parameter[name[event_abi]]]
variable[log_data_normalized_inputs] assign[=] call[name[normalize_event_input_types], parameter[name[log_data_abi]]]
variable[log_data_types] assign[=] call[name[get_event_abi_types_for_decoding], parameter[name[log_data_normalized_inputs]]]
variable[log_data_names] assign[=] call[name[get_abi_input_names], parameter[dictionary[[<ast.Constant object at 0x7da18c4cf1c0>], [<ast.Name object at 0x7da18c4cec20>]]]]
variable[duplicate_names] assign[=] call[call[name[set], parameter[name[log_topic_names]]].intersection, parameter[name[log_data_names]]]
if name[duplicate_names] begin[:]
<ast.Raise object at 0x7da18c4cd4b0>
variable[decoded_log_data] assign[=] call[name[decode_abi], parameter[name[log_data_types], name[log_data]]]
variable[normalized_log_data] assign[=] call[name[map_abi_data], parameter[name[BASE_RETURN_NORMALIZERS], name[log_data_types], name[decoded_log_data]]]
variable[decoded_topic_data] assign[=] <ast.ListComp object at 0x7da18c4ce4d0>
variable[normalized_topic_data] assign[=] call[name[map_abi_data], parameter[name[BASE_RETURN_NORMALIZERS], name[log_topic_types], name[decoded_topic_data]]]
variable[event_args] assign[=] call[name[dict], parameter[call[name[itertools].chain, parameter[call[name[zip], parameter[name[log_topic_names], name[normalized_topic_data]]], call[name[zip], parameter[name[log_data_names], name[normalized_log_data]]]]]]]
variable[event_data] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cdcc0>, <ast.Constant object at 0x7da18c4ccc70>, <ast.Constant object at 0x7da18c4cda50>, <ast.Constant object at 0x7da18c4cffd0>, <ast.Constant object at 0x7da18c4ce7d0>, <ast.Constant object at 0x7da18c4ced70>, <ast.Constant object at 0x7da18c4cf3d0>, <ast.Constant object at 0x7da18c4cd1b0>], [<ast.Name object at 0x7da18c4cccd0>, <ast.Subscript object at 0x7da18c4cd5d0>, <ast.Subscript object at 0x7da18c4cdf00>, <ast.Subscript object at 0x7da18c4ccf10>, <ast.Subscript object at 0x7da18c4cf220>, <ast.Subscript object at 0x7da18c4cceb0>, <ast.Subscript object at 0x7da18c4cec50>, <ast.Subscript object at 0x7da18c4cd720>]]
return[call[name[AttributeDict].recursive, parameter[name[event_data]]]]
|
keyword[def] identifier[get_event_data] ( identifier[event_abi] , identifier[log_entry] ):
literal[string]
keyword[if] identifier[event_abi] [ literal[string] ]:
identifier[log_topics] = identifier[log_entry] [ literal[string] ]
keyword[elif] keyword[not] identifier[log_entry] [ literal[string] ]:
keyword[raise] identifier[MismatchedABI] ( literal[string] )
keyword[elif] identifier[event_abi_to_log_topic] ( identifier[event_abi] )!= identifier[log_entry] [ literal[string] ][ literal[int] ]:
keyword[raise] identifier[MismatchedABI] ( literal[string] )
keyword[else] :
identifier[log_topics] = identifier[log_entry] [ literal[string] ][ literal[int] :]
identifier[log_topics_abi] = identifier[get_indexed_event_inputs] ( identifier[event_abi] )
identifier[log_topic_normalized_inputs] = identifier[normalize_event_input_types] ( identifier[log_topics_abi] )
identifier[log_topic_types] = identifier[get_event_abi_types_for_decoding] ( identifier[log_topic_normalized_inputs] )
identifier[log_topic_names] = identifier[get_abi_input_names] ({ literal[string] : identifier[log_topics_abi] })
keyword[if] identifier[len] ( identifier[log_topics] )!= identifier[len] ( identifier[log_topic_types] ):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] (
identifier[len] ( identifier[log_topic_types] ),
identifier[len] ( identifier[log_topics] ),
))
identifier[log_data] = identifier[hexstr_if_str] ( identifier[to_bytes] , identifier[log_entry] [ literal[string] ])
identifier[log_data_abi] = identifier[exclude_indexed_event_inputs] ( identifier[event_abi] )
identifier[log_data_normalized_inputs] = identifier[normalize_event_input_types] ( identifier[log_data_abi] )
identifier[log_data_types] = identifier[get_event_abi_types_for_decoding] ( identifier[log_data_normalized_inputs] )
identifier[log_data_names] = identifier[get_abi_input_names] ({ literal[string] : identifier[log_data_abi] })
identifier[duplicate_names] = identifier[set] ( identifier[log_topic_names] ). identifier[intersection] ( identifier[log_data_names] )
keyword[if] identifier[duplicate_names] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[duplicate_names] ))
)
identifier[decoded_log_data] = identifier[decode_abi] ( identifier[log_data_types] , identifier[log_data] )
identifier[normalized_log_data] = identifier[map_abi_data] (
identifier[BASE_RETURN_NORMALIZERS] ,
identifier[log_data_types] ,
identifier[decoded_log_data]
)
identifier[decoded_topic_data] =[
identifier[decode_single] ( identifier[topic_type] , identifier[topic_data] )
keyword[for] identifier[topic_type] , identifier[topic_data]
keyword[in] identifier[zip] ( identifier[log_topic_types] , identifier[log_topics] )
]
identifier[normalized_topic_data] = identifier[map_abi_data] (
identifier[BASE_RETURN_NORMALIZERS] ,
identifier[log_topic_types] ,
identifier[decoded_topic_data]
)
identifier[event_args] = identifier[dict] ( identifier[itertools] . identifier[chain] (
identifier[zip] ( identifier[log_topic_names] , identifier[normalized_topic_data] ),
identifier[zip] ( identifier[log_data_names] , identifier[normalized_log_data] ),
))
identifier[event_data] ={
literal[string] : identifier[event_args] ,
literal[string] : identifier[event_abi] [ literal[string] ],
literal[string] : identifier[log_entry] [ literal[string] ],
literal[string] : identifier[log_entry] [ literal[string] ],
literal[string] : identifier[log_entry] [ literal[string] ],
literal[string] : identifier[log_entry] [ literal[string] ],
literal[string] : identifier[log_entry] [ literal[string] ],
literal[string] : identifier[log_entry] [ literal[string] ],
}
keyword[return] identifier[AttributeDict] . identifier[recursive] ( identifier[event_data] )
|
def get_event_data(event_abi, log_entry):
"""
Given an event ABI and a log entry for that event, return the decoded
event data
"""
if event_abi['anonymous']:
log_topics = log_entry['topics'] # depends on [control=['if'], data=[]]
elif not log_entry['topics']:
raise MismatchedABI('Expected non-anonymous event to have 1 or more topics') # depends on [control=['if'], data=[]]
elif event_abi_to_log_topic(event_abi) != log_entry['topics'][0]:
raise MismatchedABI('The event signature did not match the provided ABI') # depends on [control=['if'], data=[]]
else:
log_topics = log_entry['topics'][1:]
log_topics_abi = get_indexed_event_inputs(event_abi)
log_topic_normalized_inputs = normalize_event_input_types(log_topics_abi)
log_topic_types = get_event_abi_types_for_decoding(log_topic_normalized_inputs)
log_topic_names = get_abi_input_names({'inputs': log_topics_abi})
if len(log_topics) != len(log_topic_types):
raise ValueError('Expected {0} log topics. Got {1}'.format(len(log_topic_types), len(log_topics))) # depends on [control=['if'], data=[]]
log_data = hexstr_if_str(to_bytes, log_entry['data'])
log_data_abi = exclude_indexed_event_inputs(event_abi)
log_data_normalized_inputs = normalize_event_input_types(log_data_abi)
log_data_types = get_event_abi_types_for_decoding(log_data_normalized_inputs)
log_data_names = get_abi_input_names({'inputs': log_data_abi})
# sanity check that there are not name intersections between the topic
# names and the data argument names.
duplicate_names = set(log_topic_names).intersection(log_data_names)
if duplicate_names:
raise ValueError("Invalid Event ABI: The following argument names are duplicated between event inputs: '{0}'".format(', '.join(duplicate_names))) # depends on [control=['if'], data=[]]
decoded_log_data = decode_abi(log_data_types, log_data)
normalized_log_data = map_abi_data(BASE_RETURN_NORMALIZERS, log_data_types, decoded_log_data)
decoded_topic_data = [decode_single(topic_type, topic_data) for (topic_type, topic_data) in zip(log_topic_types, log_topics)]
normalized_topic_data = map_abi_data(BASE_RETURN_NORMALIZERS, log_topic_types, decoded_topic_data)
event_args = dict(itertools.chain(zip(log_topic_names, normalized_topic_data), zip(log_data_names, normalized_log_data)))
event_data = {'args': event_args, 'event': event_abi['name'], 'logIndex': log_entry['logIndex'], 'transactionIndex': log_entry['transactionIndex'], 'transactionHash': log_entry['transactionHash'], 'address': log_entry['address'], 'blockHash': log_entry['blockHash'], 'blockNumber': log_entry['blockNumber']}
return AttributeDict.recursive(event_data)
|
def PartialDynamicSystem(self, ieq, variable):
"""
returns dynamical system blocks associated to output variable
"""
if ieq == 0:
# U1=0
if variable == self.physical_nodes[0].variable:
v = Step('Ground', 0)
return[Gain(v, variable, 1)]
|
def function[PartialDynamicSystem, parameter[self, ieq, variable]]:
constant[
returns dynamical system blocks associated to output variable
]
if compare[name[ieq] equal[==] constant[0]] begin[:]
if compare[name[variable] equal[==] call[name[self].physical_nodes][constant[0]].variable] begin[:]
variable[v] assign[=] call[name[Step], parameter[constant[Ground], constant[0]]]
return[list[[<ast.Call object at 0x7da1b0b65a20>]]]
|
keyword[def] identifier[PartialDynamicSystem] ( identifier[self] , identifier[ieq] , identifier[variable] ):
literal[string]
keyword[if] identifier[ieq] == literal[int] :
keyword[if] identifier[variable] == identifier[self] . identifier[physical_nodes] [ literal[int] ]. identifier[variable] :
identifier[v] = identifier[Step] ( literal[string] , literal[int] )
keyword[return] [ identifier[Gain] ( identifier[v] , identifier[variable] , literal[int] )]
|
def PartialDynamicSystem(self, ieq, variable):
"""
returns dynamical system blocks associated to output variable
"""
if ieq == 0:
# U1=0
if variable == self.physical_nodes[0].variable:
v = Step('Ground', 0)
return [Gain(v, variable, 1)] # depends on [control=['if'], data=['variable']] # depends on [control=['if'], data=[]]
|
def peek(self, size=-1):
"""
Return bytes from the stream without advancing the position.
Args:
size (int): Number of bytes to read. -1 to read the full
stream.
Returns:
bytes: bytes read
"""
if not self._readable:
raise UnsupportedOperation('read')
with self._seek_lock:
self._raw.seek(self._seek)
return self._raw._peek(size)
|
def function[peek, parameter[self, size]]:
constant[
Return bytes from the stream without advancing the position.
Args:
size (int): Number of bytes to read. -1 to read the full
stream.
Returns:
bytes: bytes read
]
if <ast.UnaryOp object at 0x7da1b1a1c2e0> begin[:]
<ast.Raise object at 0x7da1b191fd00>
with name[self]._seek_lock begin[:]
call[name[self]._raw.seek, parameter[name[self]._seek]]
return[call[name[self]._raw._peek, parameter[name[size]]]]
|
keyword[def] identifier[peek] ( identifier[self] , identifier[size] =- literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_readable] :
keyword[raise] identifier[UnsupportedOperation] ( literal[string] )
keyword[with] identifier[self] . identifier[_seek_lock] :
identifier[self] . identifier[_raw] . identifier[seek] ( identifier[self] . identifier[_seek] )
keyword[return] identifier[self] . identifier[_raw] . identifier[_peek] ( identifier[size] )
|
def peek(self, size=-1):
"""
Return bytes from the stream without advancing the position.
Args:
size (int): Number of bytes to read. -1 to read the full
stream.
Returns:
bytes: bytes read
"""
if not self._readable:
raise UnsupportedOperation('read') # depends on [control=['if'], data=[]]
with self._seek_lock:
self._raw.seek(self._seek)
return self._raw._peek(size) # depends on [control=['with'], data=[]]
|
def _create_delete_one_query(self, row_id, ctx):
"""
Delete row by id query creation.
:param int row_id: Identifier of the deleted row.
:param ResourceQueryContext ctx: The context of this delete query.
"""
assert isinstance(ctx, ResourceQueryContext)
return self._orm.query(self.model_cls).filter(self._model_pk == row_id)
|
def function[_create_delete_one_query, parameter[self, row_id, ctx]]:
constant[
Delete row by id query creation.
:param int row_id: Identifier of the deleted row.
:param ResourceQueryContext ctx: The context of this delete query.
]
assert[call[name[isinstance], parameter[name[ctx], name[ResourceQueryContext]]]]
return[call[call[name[self]._orm.query, parameter[name[self].model_cls]].filter, parameter[compare[name[self]._model_pk equal[==] name[row_id]]]]]
|
keyword[def] identifier[_create_delete_one_query] ( identifier[self] , identifier[row_id] , identifier[ctx] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[ctx] , identifier[ResourceQueryContext] )
keyword[return] identifier[self] . identifier[_orm] . identifier[query] ( identifier[self] . identifier[model_cls] ). identifier[filter] ( identifier[self] . identifier[_model_pk] == identifier[row_id] )
|
def _create_delete_one_query(self, row_id, ctx):
"""
Delete row by id query creation.
:param int row_id: Identifier of the deleted row.
:param ResourceQueryContext ctx: The context of this delete query.
"""
assert isinstance(ctx, ResourceQueryContext)
return self._orm.query(self.model_cls).filter(self._model_pk == row_id)
|
def size_as_drawn(lines, screen_width):
"""Get the bottom-right corner of some text as would be drawn by draw_lines"""
y = 0
x = 0
for line in lines:
wrapped = list(wc_wrap(line, screen_width))
if len(wrapped) > 0:
for wrapped_line in wrapped:
x = len(wrapped_line)
y += 1
else:
x = 0
y += 1
return y - 1, x - 1 if x != 0 else 0
|
def function[size_as_drawn, parameter[lines, screen_width]]:
constant[Get the bottom-right corner of some text as would be drawn by draw_lines]
variable[y] assign[=] constant[0]
variable[x] assign[=] constant[0]
for taget[name[line]] in starred[name[lines]] begin[:]
variable[wrapped] assign[=] call[name[list], parameter[call[name[wc_wrap], parameter[name[line], name[screen_width]]]]]
if compare[call[name[len], parameter[name[wrapped]]] greater[>] constant[0]] begin[:]
for taget[name[wrapped_line]] in starred[name[wrapped]] begin[:]
variable[x] assign[=] call[name[len], parameter[name[wrapped_line]]]
<ast.AugAssign object at 0x7da1b17de1d0>
return[tuple[[<ast.BinOp object at 0x7da1b17dc490>, <ast.IfExp object at 0x7da1b17de290>]]]
|
keyword[def] identifier[size_as_drawn] ( identifier[lines] , identifier[screen_width] ):
literal[string]
identifier[y] = literal[int]
identifier[x] = literal[int]
keyword[for] identifier[line] keyword[in] identifier[lines] :
identifier[wrapped] = identifier[list] ( identifier[wc_wrap] ( identifier[line] , identifier[screen_width] ))
keyword[if] identifier[len] ( identifier[wrapped] )> literal[int] :
keyword[for] identifier[wrapped_line] keyword[in] identifier[wrapped] :
identifier[x] = identifier[len] ( identifier[wrapped_line] )
identifier[y] += literal[int]
keyword[else] :
identifier[x] = literal[int]
identifier[y] += literal[int]
keyword[return] identifier[y] - literal[int] , identifier[x] - literal[int] keyword[if] identifier[x] != literal[int] keyword[else] literal[int]
|
def size_as_drawn(lines, screen_width):
"""Get the bottom-right corner of some text as would be drawn by draw_lines"""
y = 0
x = 0
for line in lines:
wrapped = list(wc_wrap(line, screen_width))
if len(wrapped) > 0:
for wrapped_line in wrapped:
x = len(wrapped_line)
y += 1 # depends on [control=['for'], data=['wrapped_line']] # depends on [control=['if'], data=[]]
else:
x = 0
y += 1 # depends on [control=['for'], data=['line']]
return (y - 1, x - 1 if x != 0 else 0)
|
def toxml(self):
"""
Exports this object into a LEMS XML object
"""
xmlstr = '<OnCondition test="{0}"'.format(self.test)
chxmlstr = ''
for action in self.actions:
chxmlstr += action.toxml()
if chxmlstr:
xmlstr += '>' + chxmlstr + '</OnCondition>'
else:
xmlstr += '/>'
return xmlstr
|
def function[toxml, parameter[self]]:
constant[
Exports this object into a LEMS XML object
]
variable[xmlstr] assign[=] call[constant[<OnCondition test="{0}"].format, parameter[name[self].test]]
variable[chxmlstr] assign[=] constant[]
for taget[name[action]] in starred[name[self].actions] begin[:]
<ast.AugAssign object at 0x7da1b2462890>
if name[chxmlstr] begin[:]
<ast.AugAssign object at 0x7da1b2460e50>
return[name[xmlstr]]
|
keyword[def] identifier[toxml] ( identifier[self] ):
literal[string]
identifier[xmlstr] = literal[string] . identifier[format] ( identifier[self] . identifier[test] )
identifier[chxmlstr] = literal[string]
keyword[for] identifier[action] keyword[in] identifier[self] . identifier[actions] :
identifier[chxmlstr] += identifier[action] . identifier[toxml] ()
keyword[if] identifier[chxmlstr] :
identifier[xmlstr] += literal[string] + identifier[chxmlstr] + literal[string]
keyword[else] :
identifier[xmlstr] += literal[string]
keyword[return] identifier[xmlstr]
|
def toxml(self):
"""
Exports this object into a LEMS XML object
"""
xmlstr = '<OnCondition test="{0}"'.format(self.test)
chxmlstr = ''
for action in self.actions:
chxmlstr += action.toxml() # depends on [control=['for'], data=['action']]
if chxmlstr:
xmlstr += '>' + chxmlstr + '</OnCondition>' # depends on [control=['if'], data=[]]
else:
xmlstr += '/>'
return xmlstr
|
def teardown_global_logging():
"""Disable global logging of stdio, warnings, and exceptions."""
global global_logging_started
if not global_logging_started:
return
stdout_logger = logging.getLogger(__name__ + '.stdout')
stderr_logger = logging.getLogger(__name__ + '.stderr')
if sys.stdout is stdout_logger:
sys.stdout = sys.stdout.stream
if sys.stderr is stderr_logger:
sys.stderr = sys.stderr.stream
# If we still have an unhandled exception go ahead and handle it with the
# replacement excepthook before deleting it
exc_type, exc_value, exc_traceback = sys.exc_info()
if exc_type is not None:
sys.excepthook(exc_type, exc_value, exc_traceback)
del exc_type
del exc_value
del exc_traceback
if not PY3K:
sys.exc_clear()
del sys.excepthook
logging.captureWarnings(False)
rawinput = 'input' if PY3K else 'raw_input'
if hasattr(builtins, '_original_raw_input'):
setattr(builtins, rawinput, builtins._original_raw_input)
del builtins._original_raw_input
global_logging_started = False
|
def function[teardown_global_logging, parameter[]]:
constant[Disable global logging of stdio, warnings, and exceptions.]
<ast.Global object at 0x7da1b0ff3070>
if <ast.UnaryOp object at 0x7da1b0ff2fe0> begin[:]
return[None]
variable[stdout_logger] assign[=] call[name[logging].getLogger, parameter[binary_operation[name[__name__] + constant[.stdout]]]]
variable[stderr_logger] assign[=] call[name[logging].getLogger, parameter[binary_operation[name[__name__] + constant[.stderr]]]]
if compare[name[sys].stdout is name[stdout_logger]] begin[:]
name[sys].stdout assign[=] name[sys].stdout.stream
if compare[name[sys].stderr is name[stderr_logger]] begin[:]
name[sys].stderr assign[=] name[sys].stderr.stream
<ast.Tuple object at 0x7da1b0ff2080> assign[=] call[name[sys].exc_info, parameter[]]
if compare[name[exc_type] is_not constant[None]] begin[:]
call[name[sys].excepthook, parameter[name[exc_type], name[exc_value], name[exc_traceback]]]
<ast.Delete object at 0x7da1b0ff1de0>
<ast.Delete object at 0x7da1b0ff1cf0>
<ast.Delete object at 0x7da1b0ff1810>
if <ast.UnaryOp object at 0x7da1b0ff1900> begin[:]
call[name[sys].exc_clear, parameter[]]
<ast.Delete object at 0x7da1b0ff14b0>
call[name[logging].captureWarnings, parameter[constant[False]]]
variable[rawinput] assign[=] <ast.IfExp object at 0x7da1b0ff23b0>
if call[name[hasattr], parameter[name[builtins], constant[_original_raw_input]]] begin[:]
call[name[setattr], parameter[name[builtins], name[rawinput], name[builtins]._original_raw_input]]
<ast.Delete object at 0x7da1b0f45450>
variable[global_logging_started] assign[=] constant[False]
|
keyword[def] identifier[teardown_global_logging] ():
literal[string]
keyword[global] identifier[global_logging_started]
keyword[if] keyword[not] identifier[global_logging_started] :
keyword[return]
identifier[stdout_logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] + literal[string] )
identifier[stderr_logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] + literal[string] )
keyword[if] identifier[sys] . identifier[stdout] keyword[is] identifier[stdout_logger] :
identifier[sys] . identifier[stdout] = identifier[sys] . identifier[stdout] . identifier[stream]
keyword[if] identifier[sys] . identifier[stderr] keyword[is] identifier[stderr_logger] :
identifier[sys] . identifier[stderr] = identifier[sys] . identifier[stderr] . identifier[stream]
identifier[exc_type] , identifier[exc_value] , identifier[exc_traceback] = identifier[sys] . identifier[exc_info] ()
keyword[if] identifier[exc_type] keyword[is] keyword[not] keyword[None] :
identifier[sys] . identifier[excepthook] ( identifier[exc_type] , identifier[exc_value] , identifier[exc_traceback] )
keyword[del] identifier[exc_type]
keyword[del] identifier[exc_value]
keyword[del] identifier[exc_traceback]
keyword[if] keyword[not] identifier[PY3K] :
identifier[sys] . identifier[exc_clear] ()
keyword[del] identifier[sys] . identifier[excepthook]
identifier[logging] . identifier[captureWarnings] ( keyword[False] )
identifier[rawinput] = literal[string] keyword[if] identifier[PY3K] keyword[else] literal[string]
keyword[if] identifier[hasattr] ( identifier[builtins] , literal[string] ):
identifier[setattr] ( identifier[builtins] , identifier[rawinput] , identifier[builtins] . identifier[_original_raw_input] )
keyword[del] identifier[builtins] . identifier[_original_raw_input]
identifier[global_logging_started] = keyword[False]
|
def teardown_global_logging():
"""Disable global logging of stdio, warnings, and exceptions."""
global global_logging_started
if not global_logging_started:
return # depends on [control=['if'], data=[]]
stdout_logger = logging.getLogger(__name__ + '.stdout')
stderr_logger = logging.getLogger(__name__ + '.stderr')
if sys.stdout is stdout_logger:
sys.stdout = sys.stdout.stream # depends on [control=['if'], data=[]]
if sys.stderr is stderr_logger:
sys.stderr = sys.stderr.stream # depends on [control=['if'], data=[]]
# If we still have an unhandled exception go ahead and handle it with the
# replacement excepthook before deleting it
(exc_type, exc_value, exc_traceback) = sys.exc_info()
if exc_type is not None:
sys.excepthook(exc_type, exc_value, exc_traceback) # depends on [control=['if'], data=['exc_type']]
del exc_type
del exc_value
del exc_traceback
if not PY3K:
sys.exc_clear() # depends on [control=['if'], data=[]]
del sys.excepthook
logging.captureWarnings(False)
rawinput = 'input' if PY3K else 'raw_input'
if hasattr(builtins, '_original_raw_input'):
setattr(builtins, rawinput, builtins._original_raw_input)
del builtins._original_raw_input # depends on [control=['if'], data=[]]
global_logging_started = False
|
def get_command(all_pkg, hook):
"""
Collect the command-line interface names by querying ``hook`` in ``all_pkg``
Parameters
----------
all_pkg: list
list of package files
hook: str
A variable where the command is stored. ``__cli__`` by default.
Returns
-------
list
"""
ret = []
for r in all_pkg:
module = importlib.import_module(__name__ + '.' + r.lower())
ret.append(getattr(module, hook))
return ret
|
def function[get_command, parameter[all_pkg, hook]]:
constant[
Collect the command-line interface names by querying ``hook`` in ``all_pkg``
Parameters
----------
all_pkg: list
list of package files
hook: str
A variable where the command is stored. ``__cli__`` by default.
Returns
-------
list
]
variable[ret] assign[=] list[[]]
for taget[name[r]] in starred[name[all_pkg]] begin[:]
variable[module] assign[=] call[name[importlib].import_module, parameter[binary_operation[binary_operation[name[__name__] + constant[.]] + call[name[r].lower, parameter[]]]]]
call[name[ret].append, parameter[call[name[getattr], parameter[name[module], name[hook]]]]]
return[name[ret]]
|
keyword[def] identifier[get_command] ( identifier[all_pkg] , identifier[hook] ):
literal[string]
identifier[ret] =[]
keyword[for] identifier[r] keyword[in] identifier[all_pkg] :
identifier[module] = identifier[importlib] . identifier[import_module] ( identifier[__name__] + literal[string] + identifier[r] . identifier[lower] ())
identifier[ret] . identifier[append] ( identifier[getattr] ( identifier[module] , identifier[hook] ))
keyword[return] identifier[ret]
|
def get_command(all_pkg, hook):
"""
Collect the command-line interface names by querying ``hook`` in ``all_pkg``
Parameters
----------
all_pkg: list
list of package files
hook: str
A variable where the command is stored. ``__cli__`` by default.
Returns
-------
list
"""
ret = []
for r in all_pkg:
module = importlib.import_module(__name__ + '.' + r.lower())
ret.append(getattr(module, hook)) # depends on [control=['for'], data=['r']]
return ret
|
def tokenize_documents(docs):
"""Convert the imported documents to :py:class:'~estnltk.text.Text' instances."""
sep = '\n\n'
texts = []
for doc in docs:
text = '\n\n'.join(['\n'.join(para[SENTENCES]) for para in doc[PARAGRAPHS]])
doc[TEXT] = text
del doc[PARAGRAPHS]
texts.append(Text(doc))
return texts
|
def function[tokenize_documents, parameter[docs]]:
constant[Convert the imported documents to :py:class:'~estnltk.text.Text' instances.]
variable[sep] assign[=] constant[
]
variable[texts] assign[=] list[[]]
for taget[name[doc]] in starred[name[docs]] begin[:]
variable[text] assign[=] call[constant[
].join, parameter[<ast.ListComp object at 0x7da1b031dcc0>]]
call[name[doc]][name[TEXT]] assign[=] name[text]
<ast.Delete object at 0x7da1b031d060>
call[name[texts].append, parameter[call[name[Text], parameter[name[doc]]]]]
return[name[texts]]
|
keyword[def] identifier[tokenize_documents] ( identifier[docs] ):
literal[string]
identifier[sep] = literal[string]
identifier[texts] =[]
keyword[for] identifier[doc] keyword[in] identifier[docs] :
identifier[text] = literal[string] . identifier[join] ([ literal[string] . identifier[join] ( identifier[para] [ identifier[SENTENCES] ]) keyword[for] identifier[para] keyword[in] identifier[doc] [ identifier[PARAGRAPHS] ]])
identifier[doc] [ identifier[TEXT] ]= identifier[text]
keyword[del] identifier[doc] [ identifier[PARAGRAPHS] ]
identifier[texts] . identifier[append] ( identifier[Text] ( identifier[doc] ))
keyword[return] identifier[texts]
|
def tokenize_documents(docs):
"""Convert the imported documents to :py:class:'~estnltk.text.Text' instances."""
sep = '\n\n'
texts = []
for doc in docs:
text = '\n\n'.join(['\n'.join(para[SENTENCES]) for para in doc[PARAGRAPHS]])
doc[TEXT] = text
del doc[PARAGRAPHS]
texts.append(Text(doc)) # depends on [control=['for'], data=['doc']]
return texts
|
def _fill_col_borders(self):
"""Add the first and last column to the data by extrapolation.
"""
first = True
last = True
if self.col_indices[0] == self.hcol_indices[0]:
first = False
if self.col_indices[-1] == self.hcol_indices[-1]:
last = False
for num, data in enumerate(self.tie_data):
self.tie_data[num] = self._extrapolate_cols(data, first, last)
if first and last:
self.col_indices = np.concatenate((np.array([self.hcol_indices[0]]),
self.col_indices,
np.array([self.hcol_indices[-1]])))
elif first:
self.col_indices = np.concatenate((np.array([self.hcol_indices[0]]),
self.col_indices))
elif last:
self.col_indices = np.concatenate((self.col_indices,
np.array([self.hcol_indices[-1]])))
|
def function[_fill_col_borders, parameter[self]]:
constant[Add the first and last column to the data by extrapolation.
]
variable[first] assign[=] constant[True]
variable[last] assign[=] constant[True]
if compare[call[name[self].col_indices][constant[0]] equal[==] call[name[self].hcol_indices][constant[0]]] begin[:]
variable[first] assign[=] constant[False]
if compare[call[name[self].col_indices][<ast.UnaryOp object at 0x7da1b1953df0>] equal[==] call[name[self].hcol_indices][<ast.UnaryOp object at 0x7da1b1951ea0>]] begin[:]
variable[last] assign[=] constant[False]
for taget[tuple[[<ast.Name object at 0x7da1b19524d0>, <ast.Name object at 0x7da1b1952500>]]] in starred[call[name[enumerate], parameter[name[self].tie_data]]] begin[:]
call[name[self].tie_data][name[num]] assign[=] call[name[self]._extrapolate_cols, parameter[name[data], name[first], name[last]]]
if <ast.BoolOp object at 0x7da1b1950cd0> begin[:]
name[self].col_indices assign[=] call[name[np].concatenate, parameter[tuple[[<ast.Call object at 0x7da1b1951630>, <ast.Attribute object at 0x7da1b1952440>, <ast.Call object at 0x7da1b1952380>]]]]
|
keyword[def] identifier[_fill_col_borders] ( identifier[self] ):
literal[string]
identifier[first] = keyword[True]
identifier[last] = keyword[True]
keyword[if] identifier[self] . identifier[col_indices] [ literal[int] ]== identifier[self] . identifier[hcol_indices] [ literal[int] ]:
identifier[first] = keyword[False]
keyword[if] identifier[self] . identifier[col_indices] [- literal[int] ]== identifier[self] . identifier[hcol_indices] [- literal[int] ]:
identifier[last] = keyword[False]
keyword[for] identifier[num] , identifier[data] keyword[in] identifier[enumerate] ( identifier[self] . identifier[tie_data] ):
identifier[self] . identifier[tie_data] [ identifier[num] ]= identifier[self] . identifier[_extrapolate_cols] ( identifier[data] , identifier[first] , identifier[last] )
keyword[if] identifier[first] keyword[and] identifier[last] :
identifier[self] . identifier[col_indices] = identifier[np] . identifier[concatenate] (( identifier[np] . identifier[array] ([ identifier[self] . identifier[hcol_indices] [ literal[int] ]]),
identifier[self] . identifier[col_indices] ,
identifier[np] . identifier[array] ([ identifier[self] . identifier[hcol_indices] [- literal[int] ]])))
keyword[elif] identifier[first] :
identifier[self] . identifier[col_indices] = identifier[np] . identifier[concatenate] (( identifier[np] . identifier[array] ([ identifier[self] . identifier[hcol_indices] [ literal[int] ]]),
identifier[self] . identifier[col_indices] ))
keyword[elif] identifier[last] :
identifier[self] . identifier[col_indices] = identifier[np] . identifier[concatenate] (( identifier[self] . identifier[col_indices] ,
identifier[np] . identifier[array] ([ identifier[self] . identifier[hcol_indices] [- literal[int] ]])))
|
def _fill_col_borders(self):
"""Add the first and last column to the data by extrapolation.
"""
first = True
last = True
if self.col_indices[0] == self.hcol_indices[0]:
first = False # depends on [control=['if'], data=[]]
if self.col_indices[-1] == self.hcol_indices[-1]:
last = False # depends on [control=['if'], data=[]]
for (num, data) in enumerate(self.tie_data):
self.tie_data[num] = self._extrapolate_cols(data, first, last) # depends on [control=['for'], data=[]]
if first and last:
self.col_indices = np.concatenate((np.array([self.hcol_indices[0]]), self.col_indices, np.array([self.hcol_indices[-1]]))) # depends on [control=['if'], data=[]]
elif first:
self.col_indices = np.concatenate((np.array([self.hcol_indices[0]]), self.col_indices)) # depends on [control=['if'], data=[]]
elif last:
self.col_indices = np.concatenate((self.col_indices, np.array([self.hcol_indices[-1]]))) # depends on [control=['if'], data=[]]
|
def pypirc_temp(index_url):
""" Create a temporary pypirc file for interaction with twine """
pypirc_file = tempfile.NamedTemporaryFile(suffix='.pypirc', delete=False)
print(pypirc_file.name)
with open(pypirc_file.name, 'w') as fh:
fh.write(PYPIRC_TEMPLATE.format(index_name=PYPIRC_TEMP_INDEX_NAME, index_url=index_url))
return pypirc_file.name
|
def function[pypirc_temp, parameter[index_url]]:
constant[ Create a temporary pypirc file for interaction with twine ]
variable[pypirc_file] assign[=] call[name[tempfile].NamedTemporaryFile, parameter[]]
call[name[print], parameter[name[pypirc_file].name]]
with call[name[open], parameter[name[pypirc_file].name, constant[w]]] begin[:]
call[name[fh].write, parameter[call[name[PYPIRC_TEMPLATE].format, parameter[]]]]
return[name[pypirc_file].name]
|
keyword[def] identifier[pypirc_temp] ( identifier[index_url] ):
literal[string]
identifier[pypirc_file] = identifier[tempfile] . identifier[NamedTemporaryFile] ( identifier[suffix] = literal[string] , identifier[delete] = keyword[False] )
identifier[print] ( identifier[pypirc_file] . identifier[name] )
keyword[with] identifier[open] ( identifier[pypirc_file] . identifier[name] , literal[string] ) keyword[as] identifier[fh] :
identifier[fh] . identifier[write] ( identifier[PYPIRC_TEMPLATE] . identifier[format] ( identifier[index_name] = identifier[PYPIRC_TEMP_INDEX_NAME] , identifier[index_url] = identifier[index_url] ))
keyword[return] identifier[pypirc_file] . identifier[name]
|
def pypirc_temp(index_url):
""" Create a temporary pypirc file for interaction with twine """
pypirc_file = tempfile.NamedTemporaryFile(suffix='.pypirc', delete=False)
print(pypirc_file.name)
with open(pypirc_file.name, 'w') as fh:
fh.write(PYPIRC_TEMPLATE.format(index_name=PYPIRC_TEMP_INDEX_NAME, index_url=index_url)) # depends on [control=['with'], data=['fh']]
return pypirc_file.name
|
def progress(status_code):
"""Translate PROGRESS status codes from GnuPG to messages."""
lookup = {
'pk_dsa': 'DSA key generation',
'pk_elg': 'Elgamal key generation',
'primegen': 'Prime generation',
'need_entropy': 'Waiting for new entropy in the RNG',
'tick': 'Generic tick without any special meaning - still working.',
'starting_agent': 'A gpg-agent was started.',
'learncard': 'gpg-agent or gpgsm is learning the smartcard data.',
'card_busy': 'A smartcard is still working.' }
for key, value in lookup.items():
if str(status_code) == key:
return value
|
def function[progress, parameter[status_code]]:
constant[Translate PROGRESS status codes from GnuPG to messages.]
variable[lookup] assign[=] dictionary[[<ast.Constant object at 0x7da20c6aae60>, <ast.Constant object at 0x7da20c6ab7c0>, <ast.Constant object at 0x7da20c6a8220>, <ast.Constant object at 0x7da20c6aa470>, <ast.Constant object at 0x7da20c6ab0a0>, <ast.Constant object at 0x7da20c6aae30>, <ast.Constant object at 0x7da20c6ab520>, <ast.Constant object at 0x7da20c6aa800>], [<ast.Constant object at 0x7da20c6a8f70>, <ast.Constant object at 0x7da20c6aa5f0>, <ast.Constant object at 0x7da20c6a8a90>, <ast.Constant object at 0x7da20c6a9660>, <ast.Constant object at 0x7da20c6a86d0>, <ast.Constant object at 0x7da20c6aa0e0>, <ast.Constant object at 0x7da20c6ab460>, <ast.Constant object at 0x7da20c6a8940>]]
for taget[tuple[[<ast.Name object at 0x7da20c6a8df0>, <ast.Name object at 0x7da20c6a9b40>]]] in starred[call[name[lookup].items, parameter[]]] begin[:]
if compare[call[name[str], parameter[name[status_code]]] equal[==] name[key]] begin[:]
return[name[value]]
|
keyword[def] identifier[progress] ( identifier[status_code] ):
literal[string]
identifier[lookup] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] }
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[lookup] . identifier[items] ():
keyword[if] identifier[str] ( identifier[status_code] )== identifier[key] :
keyword[return] identifier[value]
|
def progress(status_code):
"""Translate PROGRESS status codes from GnuPG to messages."""
lookup = {'pk_dsa': 'DSA key generation', 'pk_elg': 'Elgamal key generation', 'primegen': 'Prime generation', 'need_entropy': 'Waiting for new entropy in the RNG', 'tick': 'Generic tick without any special meaning - still working.', 'starting_agent': 'A gpg-agent was started.', 'learncard': 'gpg-agent or gpgsm is learning the smartcard data.', 'card_busy': 'A smartcard is still working.'}
for (key, value) in lookup.items():
if str(status_code) == key:
return value # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
|
def real(self, nested_scope=None):
"""Return the correspond floating point number."""
operation = self.children[0].operation()
expr = self.children[1].real(nested_scope)
return operation(expr)
|
def function[real, parameter[self, nested_scope]]:
constant[Return the correspond floating point number.]
variable[operation] assign[=] call[call[name[self].children][constant[0]].operation, parameter[]]
variable[expr] assign[=] call[call[name[self].children][constant[1]].real, parameter[name[nested_scope]]]
return[call[name[operation], parameter[name[expr]]]]
|
keyword[def] identifier[real] ( identifier[self] , identifier[nested_scope] = keyword[None] ):
literal[string]
identifier[operation] = identifier[self] . identifier[children] [ literal[int] ]. identifier[operation] ()
identifier[expr] = identifier[self] . identifier[children] [ literal[int] ]. identifier[real] ( identifier[nested_scope] )
keyword[return] identifier[operation] ( identifier[expr] )
|
def real(self, nested_scope=None):
"""Return the correspond floating point number."""
operation = self.children[0].operation()
expr = self.children[1].real(nested_scope)
return operation(expr)
|
def functions_to_table(mod, colwidth=[27, 48]):
r"""
Given a module of functions, returns a ReST formatted text string that
outputs a table when printed.
Parameters
----------
mod : module
The module containing the functions to be included in the table, such
as 'porespy.filters'.
colwidths : list of ints
The width of the first and second columns. Note that because of the
vertical lines separating columns and define the edges of the table,
the total table width will be 3 characters wider than the total sum
of the specified column widths.
"""
temp = mod.__dir__()
funcs = [i for i in temp if not i[0].startswith('_')]
funcs.sort()
row = '+' + '-'*colwidth[0] + '+' + '-'*colwidth[1] + '+'
fmt = '{0:1s} {1:' + str(colwidth[0]-2) + 's} {2:1s} {3:' \
+ str(colwidth[1]-2) + 's} {4:1s}'
lines = []
lines.append(row)
lines.append(fmt.format('|', 'Method', '|', 'Description', '|'))
lines.append(row.replace('-', '='))
for i, item in enumerate(funcs):
try:
s = getattr(mod, item).__doc__.strip()
end = s.find('\n')
if end > colwidth[1] - 2:
s = s[:colwidth[1] - 5] + '...'
lines.append(fmt.format('|', item, '|', s[:end], '|'))
lines.append(row)
except AttributeError:
pass
s = '\n'.join(lines)
return s
|
def function[functions_to_table, parameter[mod, colwidth]]:
constant[
Given a module of functions, returns a ReST formatted text string that
outputs a table when printed.
Parameters
----------
mod : module
The module containing the functions to be included in the table, such
as 'porespy.filters'.
colwidths : list of ints
The width of the first and second columns. Note that because of the
vertical lines separating columns and define the edges of the table,
the total table width will be 3 characters wider than the total sum
of the specified column widths.
]
variable[temp] assign[=] call[name[mod].__dir__, parameter[]]
variable[funcs] assign[=] <ast.ListComp object at 0x7da1b26af280>
call[name[funcs].sort, parameter[]]
variable[row] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[+] + binary_operation[constant[-] * call[name[colwidth]][constant[0]]]] + constant[+]] + binary_operation[constant[-] * call[name[colwidth]][constant[1]]]] + constant[+]]
variable[fmt] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[{0:1s} {1:] + call[name[str], parameter[binary_operation[call[name[colwidth]][constant[0]] - constant[2]]]]] + constant[s} {2:1s} {3:]] + call[name[str], parameter[binary_operation[call[name[colwidth]][constant[1]] - constant[2]]]]] + constant[s} {4:1s}]]
variable[lines] assign[=] list[[]]
call[name[lines].append, parameter[name[row]]]
call[name[lines].append, parameter[call[name[fmt].format, parameter[constant[|], constant[Method], constant[|], constant[Description], constant[|]]]]]
call[name[lines].append, parameter[call[name[row].replace, parameter[constant[-], constant[=]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b26af7f0>, <ast.Name object at 0x7da1b26ad7e0>]]] in starred[call[name[enumerate], parameter[name[funcs]]]] begin[:]
<ast.Try object at 0x7da1b26ae230>
variable[s] assign[=] call[constant[
].join, parameter[name[lines]]]
return[name[s]]
|
keyword[def] identifier[functions_to_table] ( identifier[mod] , identifier[colwidth] =[ literal[int] , literal[int] ]):
literal[string]
identifier[temp] = identifier[mod] . identifier[__dir__] ()
identifier[funcs] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[temp] keyword[if] keyword[not] identifier[i] [ literal[int] ]. identifier[startswith] ( literal[string] )]
identifier[funcs] . identifier[sort] ()
identifier[row] = literal[string] + literal[string] * identifier[colwidth] [ literal[int] ]+ literal[string] + literal[string] * identifier[colwidth] [ literal[int] ]+ literal[string]
identifier[fmt] = literal[string] + identifier[str] ( identifier[colwidth] [ literal[int] ]- literal[int] )+ literal[string] + identifier[str] ( identifier[colwidth] [ literal[int] ]- literal[int] )+ literal[string]
identifier[lines] =[]
identifier[lines] . identifier[append] ( identifier[row] )
identifier[lines] . identifier[append] ( identifier[fmt] . identifier[format] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ))
identifier[lines] . identifier[append] ( identifier[row] . identifier[replace] ( literal[string] , literal[string] ))
keyword[for] identifier[i] , identifier[item] keyword[in] identifier[enumerate] ( identifier[funcs] ):
keyword[try] :
identifier[s] = identifier[getattr] ( identifier[mod] , identifier[item] ). identifier[__doc__] . identifier[strip] ()
identifier[end] = identifier[s] . identifier[find] ( literal[string] )
keyword[if] identifier[end] > identifier[colwidth] [ literal[int] ]- literal[int] :
identifier[s] = identifier[s] [: identifier[colwidth] [ literal[int] ]- literal[int] ]+ literal[string]
identifier[lines] . identifier[append] ( identifier[fmt] . identifier[format] ( literal[string] , identifier[item] , literal[string] , identifier[s] [: identifier[end] ], literal[string] ))
identifier[lines] . identifier[append] ( identifier[row] )
keyword[except] identifier[AttributeError] :
keyword[pass]
identifier[s] = literal[string] . identifier[join] ( identifier[lines] )
keyword[return] identifier[s]
|
def functions_to_table(mod, colwidth=[27, 48]):
"""
Given a module of functions, returns a ReST formatted text string that
outputs a table when printed.
Parameters
----------
mod : module
The module containing the functions to be included in the table, such
as 'porespy.filters'.
colwidths : list of ints
The width of the first and second columns. Note that because of the
vertical lines separating columns and define the edges of the table,
the total table width will be 3 characters wider than the total sum
of the specified column widths.
"""
temp = mod.__dir__()
funcs = [i for i in temp if not i[0].startswith('_')]
funcs.sort()
row = '+' + '-' * colwidth[0] + '+' + '-' * colwidth[1] + '+'
fmt = '{0:1s} {1:' + str(colwidth[0] - 2) + 's} {2:1s} {3:' + str(colwidth[1] - 2) + 's} {4:1s}'
lines = []
lines.append(row)
lines.append(fmt.format('|', 'Method', '|', 'Description', '|'))
lines.append(row.replace('-', '='))
for (i, item) in enumerate(funcs):
try:
s = getattr(mod, item).__doc__.strip()
end = s.find('\n')
if end > colwidth[1] - 2:
s = s[:colwidth[1] - 5] + '...' # depends on [control=['if'], data=[]]
lines.append(fmt.format('|', item, '|', s[:end], '|'))
lines.append(row) # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
s = '\n'.join(lines)
return s
|
def resample(self,N):
"""Returns a bootstrap resampling of provided samples.
Parameters
----------
N : int
Number of samples.
"""
inds = rand.randint(len(self.samples),size=N)
return self.samples[inds]
|
def function[resample, parameter[self, N]]:
constant[Returns a bootstrap resampling of provided samples.
Parameters
----------
N : int
Number of samples.
]
variable[inds] assign[=] call[name[rand].randint, parameter[call[name[len], parameter[name[self].samples]]]]
return[call[name[self].samples][name[inds]]]
|
keyword[def] identifier[resample] ( identifier[self] , identifier[N] ):
literal[string]
identifier[inds] = identifier[rand] . identifier[randint] ( identifier[len] ( identifier[self] . identifier[samples] ), identifier[size] = identifier[N] )
keyword[return] identifier[self] . identifier[samples] [ identifier[inds] ]
|
def resample(self, N):
"""Returns a bootstrap resampling of provided samples.
Parameters
----------
N : int
Number of samples.
"""
inds = rand.randint(len(self.samples), size=N)
return self.samples[inds]
|
def get_embedded_object(self, signature_id):
''' Retrieves a embedded signing object
Retrieves an embedded object containing a signature url that can be opened in an iFrame.
Args:
signature_id (str): The id of the signature to get a signature url for
Returns:
An Embedded object
'''
request = self._get_request()
return request.get(self.EMBEDDED_OBJECT_GET_URL + signature_id)
|
def function[get_embedded_object, parameter[self, signature_id]]:
constant[ Retrieves a embedded signing object
Retrieves an embedded object containing a signature url that can be opened in an iFrame.
Args:
signature_id (str): The id of the signature to get a signature url for
Returns:
An Embedded object
]
variable[request] assign[=] call[name[self]._get_request, parameter[]]
return[call[name[request].get, parameter[binary_operation[name[self].EMBEDDED_OBJECT_GET_URL + name[signature_id]]]]]
|
keyword[def] identifier[get_embedded_object] ( identifier[self] , identifier[signature_id] ):
literal[string]
identifier[request] = identifier[self] . identifier[_get_request] ()
keyword[return] identifier[request] . identifier[get] ( identifier[self] . identifier[EMBEDDED_OBJECT_GET_URL] + identifier[signature_id] )
|
def get_embedded_object(self, signature_id):
""" Retrieves a embedded signing object
Retrieves an embedded object containing a signature url that can be opened in an iFrame.
Args:
signature_id (str): The id of the signature to get a signature url for
Returns:
An Embedded object
"""
request = self._get_request()
return request.get(self.EMBEDDED_OBJECT_GET_URL + signature_id)
|
def _get_rename_function(mapper):
"""
Returns a function that will map names/labels, dependent if mapper
is a dict, Series or just a function.
"""
if isinstance(mapper, (abc.Mapping, ABCSeries)):
def f(x):
if x in mapper:
return mapper[x]
else:
return x
else:
f = mapper
return f
|
def function[_get_rename_function, parameter[mapper]]:
constant[
Returns a function that will map names/labels, dependent if mapper
is a dict, Series or just a function.
]
if call[name[isinstance], parameter[name[mapper], tuple[[<ast.Attribute object at 0x7da18dc99390>, <ast.Name object at 0x7da18dc9b340>]]]] begin[:]
def function[f, parameter[x]]:
if compare[name[x] in name[mapper]] begin[:]
return[call[name[mapper]][name[x]]]
return[name[f]]
|
keyword[def] identifier[_get_rename_function] ( identifier[mapper] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[mapper] ,( identifier[abc] . identifier[Mapping] , identifier[ABCSeries] )):
keyword[def] identifier[f] ( identifier[x] ):
keyword[if] identifier[x] keyword[in] identifier[mapper] :
keyword[return] identifier[mapper] [ identifier[x] ]
keyword[else] :
keyword[return] identifier[x]
keyword[else] :
identifier[f] = identifier[mapper]
keyword[return] identifier[f]
|
def _get_rename_function(mapper):
"""
Returns a function that will map names/labels, dependent if mapper
is a dict, Series or just a function.
"""
if isinstance(mapper, (abc.Mapping, ABCSeries)):
def f(x):
if x in mapper:
return mapper[x] # depends on [control=['if'], data=['x', 'mapper']]
else:
return x # depends on [control=['if'], data=[]]
else:
f = mapper
return f
|
def _compute_acq_withGradients(self, x):
"""
Integrated Expected Improvement and its derivative
"""
means, stds, dmdxs, dsdxs = self.model.predict_withGradients(x)
fmins = self.model.get_fmin()
f_acqu = None
df_acqu = None
for m, s, fmin, dmdx, dsdx in zip(means, stds, fmins, dmdxs, dsdxs):
phi, Phi, u = get_quantiles(self.jitter, fmin, m, s)
f = Phi
df = -(phi/s)* (dmdx + dsdx * u)
if f_acqu is None:
f_acqu = f
df_acqu = df
else:
f_acqu += f
df_acqu += df
return f_acqu/(len(means)), df_acqu/(len(means))
|
def function[_compute_acq_withGradients, parameter[self, x]]:
constant[
Integrated Expected Improvement and its derivative
]
<ast.Tuple object at 0x7da1b2346830> assign[=] call[name[self].model.predict_withGradients, parameter[name[x]]]
variable[fmins] assign[=] call[name[self].model.get_fmin, parameter[]]
variable[f_acqu] assign[=] constant[None]
variable[df_acqu] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da1b2346fe0>, <ast.Name object at 0x7da1b2345510>, <ast.Name object at 0x7da1b23466e0>, <ast.Name object at 0x7da1b23475e0>, <ast.Name object at 0x7da1b2344ca0>]]] in starred[call[name[zip], parameter[name[means], name[stds], name[fmins], name[dmdxs], name[dsdxs]]]] begin[:]
<ast.Tuple object at 0x7da1b2346350> assign[=] call[name[get_quantiles], parameter[name[self].jitter, name[fmin], name[m], name[s]]]
variable[f] assign[=] name[Phi]
variable[df] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b2344370> * binary_operation[name[dmdx] + binary_operation[name[dsdx] * name[u]]]]
if compare[name[f_acqu] is constant[None]] begin[:]
variable[f_acqu] assign[=] name[f]
variable[df_acqu] assign[=] name[df]
return[tuple[[<ast.BinOp object at 0x7da1b23456f0>, <ast.BinOp object at 0x7da1b2346320>]]]
|
keyword[def] identifier[_compute_acq_withGradients] ( identifier[self] , identifier[x] ):
literal[string]
identifier[means] , identifier[stds] , identifier[dmdxs] , identifier[dsdxs] = identifier[self] . identifier[model] . identifier[predict_withGradients] ( identifier[x] )
identifier[fmins] = identifier[self] . identifier[model] . identifier[get_fmin] ()
identifier[f_acqu] = keyword[None]
identifier[df_acqu] = keyword[None]
keyword[for] identifier[m] , identifier[s] , identifier[fmin] , identifier[dmdx] , identifier[dsdx] keyword[in] identifier[zip] ( identifier[means] , identifier[stds] , identifier[fmins] , identifier[dmdxs] , identifier[dsdxs] ):
identifier[phi] , identifier[Phi] , identifier[u] = identifier[get_quantiles] ( identifier[self] . identifier[jitter] , identifier[fmin] , identifier[m] , identifier[s] )
identifier[f] = identifier[Phi]
identifier[df] =-( identifier[phi] / identifier[s] )*( identifier[dmdx] + identifier[dsdx] * identifier[u] )
keyword[if] identifier[f_acqu] keyword[is] keyword[None] :
identifier[f_acqu] = identifier[f]
identifier[df_acqu] = identifier[df]
keyword[else] :
identifier[f_acqu] += identifier[f]
identifier[df_acqu] += identifier[df]
keyword[return] identifier[f_acqu] /( identifier[len] ( identifier[means] )), identifier[df_acqu] /( identifier[len] ( identifier[means] ))
|
def _compute_acq_withGradients(self, x):
"""
Integrated Expected Improvement and its derivative
"""
(means, stds, dmdxs, dsdxs) = self.model.predict_withGradients(x)
fmins = self.model.get_fmin()
f_acqu = None
df_acqu = None
for (m, s, fmin, dmdx, dsdx) in zip(means, stds, fmins, dmdxs, dsdxs):
(phi, Phi, u) = get_quantiles(self.jitter, fmin, m, s)
f = Phi
df = -(phi / s) * (dmdx + dsdx * u)
if f_acqu is None:
f_acqu = f
df_acqu = df # depends on [control=['if'], data=['f_acqu']]
else:
f_acqu += f
df_acqu += df # depends on [control=['for'], data=[]]
return (f_acqu / len(means), df_acqu / len(means))
|
def make_gdf_graph(filename, stop=True):
"""Create a graph in simple GDF format, suitable for feeding into Gephi,
or some other graph manipulation and display tool. Setting stop to True
will stop the current trace.
"""
if stop:
stop_trace()
try:
f = open(filename, 'w')
f.write(get_gdf())
finally:
if f: f.close()
|
def function[make_gdf_graph, parameter[filename, stop]]:
constant[Create a graph in simple GDF format, suitable for feeding into Gephi,
or some other graph manipulation and display tool. Setting stop to True
will stop the current trace.
]
if name[stop] begin[:]
call[name[stop_trace], parameter[]]
<ast.Try object at 0x7da18bccaa10>
|
keyword[def] identifier[make_gdf_graph] ( identifier[filename] , identifier[stop] = keyword[True] ):
literal[string]
keyword[if] identifier[stop] :
identifier[stop_trace] ()
keyword[try] :
identifier[f] = identifier[open] ( identifier[filename] , literal[string] )
identifier[f] . identifier[write] ( identifier[get_gdf] ())
keyword[finally] :
keyword[if] identifier[f] : identifier[f] . identifier[close] ()
|
def make_gdf_graph(filename, stop=True):
"""Create a graph in simple GDF format, suitable for feeding into Gephi,
or some other graph manipulation and display tool. Setting stop to True
will stop the current trace.
"""
if stop:
stop_trace() # depends on [control=['if'], data=[]]
try:
f = open(filename, 'w')
f.write(get_gdf()) # depends on [control=['try'], data=[]]
finally:
if f:
f.close() # depends on [control=['if'], data=[]]
|
def update(self, default_activity_sid=values.unset,
event_callback_url=values.unset, events_filter=values.unset,
friendly_name=values.unset, multi_task_enabled=values.unset,
timeout_activity_sid=values.unset,
prioritize_queue_order=values.unset):
"""
Update the WorkspaceInstance
:param unicode default_activity_sid: The ID of the Activity that will be used when new Workers are created in this Workspace.
:param unicode event_callback_url: The Workspace will publish events to this URL.
:param unicode events_filter: Use this parameter to receive webhooks on EventCallbackUrl for specific events on a workspace.
:param unicode friendly_name: Human readable description of this workspace
:param bool multi_task_enabled: Enable or Disable Multitasking by passing either true or False with the POST request.
:param unicode timeout_activity_sid: The ID of the Activity that will be assigned to a Worker when a Task reservation times out without a response.
:param WorkspaceInstance.QueueOrder prioritize_queue_order: Use this parameter to configure whether to prioritize LIFO or FIFO when workers are receiving Tasks from combination of LIFO and FIFO TaskQueues.
:returns: Updated WorkspaceInstance
:rtype: twilio.rest.taskrouter.v1.workspace.WorkspaceInstance
"""
return self._proxy.update(
default_activity_sid=default_activity_sid,
event_callback_url=event_callback_url,
events_filter=events_filter,
friendly_name=friendly_name,
multi_task_enabled=multi_task_enabled,
timeout_activity_sid=timeout_activity_sid,
prioritize_queue_order=prioritize_queue_order,
)
|
def function[update, parameter[self, default_activity_sid, event_callback_url, events_filter, friendly_name, multi_task_enabled, timeout_activity_sid, prioritize_queue_order]]:
constant[
Update the WorkspaceInstance
:param unicode default_activity_sid: The ID of the Activity that will be used when new Workers are created in this Workspace.
:param unicode event_callback_url: The Workspace will publish events to this URL.
:param unicode events_filter: Use this parameter to receive webhooks on EventCallbackUrl for specific events on a workspace.
:param unicode friendly_name: Human readable description of this workspace
:param bool multi_task_enabled: Enable or Disable Multitasking by passing either true or False with the POST request.
:param unicode timeout_activity_sid: The ID of the Activity that will be assigned to a Worker when a Task reservation times out without a response.
:param WorkspaceInstance.QueueOrder prioritize_queue_order: Use this parameter to configure whether to prioritize LIFO or FIFO when workers are receiving Tasks from combination of LIFO and FIFO TaskQueues.
:returns: Updated WorkspaceInstance
:rtype: twilio.rest.taskrouter.v1.workspace.WorkspaceInstance
]
return[call[name[self]._proxy.update, parameter[]]]
|
keyword[def] identifier[update] ( identifier[self] , identifier[default_activity_sid] = identifier[values] . identifier[unset] ,
identifier[event_callback_url] = identifier[values] . identifier[unset] , identifier[events_filter] = identifier[values] . identifier[unset] ,
identifier[friendly_name] = identifier[values] . identifier[unset] , identifier[multi_task_enabled] = identifier[values] . identifier[unset] ,
identifier[timeout_activity_sid] = identifier[values] . identifier[unset] ,
identifier[prioritize_queue_order] = identifier[values] . identifier[unset] ):
literal[string]
keyword[return] identifier[self] . identifier[_proxy] . identifier[update] (
identifier[default_activity_sid] = identifier[default_activity_sid] ,
identifier[event_callback_url] = identifier[event_callback_url] ,
identifier[events_filter] = identifier[events_filter] ,
identifier[friendly_name] = identifier[friendly_name] ,
identifier[multi_task_enabled] = identifier[multi_task_enabled] ,
identifier[timeout_activity_sid] = identifier[timeout_activity_sid] ,
identifier[prioritize_queue_order] = identifier[prioritize_queue_order] ,
)
|
def update(self, default_activity_sid=values.unset, event_callback_url=values.unset, events_filter=values.unset, friendly_name=values.unset, multi_task_enabled=values.unset, timeout_activity_sid=values.unset, prioritize_queue_order=values.unset):
"""
Update the WorkspaceInstance
:param unicode default_activity_sid: The ID of the Activity that will be used when new Workers are created in this Workspace.
:param unicode event_callback_url: The Workspace will publish events to this URL.
:param unicode events_filter: Use this parameter to receive webhooks on EventCallbackUrl for specific events on a workspace.
:param unicode friendly_name: Human readable description of this workspace
:param bool multi_task_enabled: Enable or Disable Multitasking by passing either true or False with the POST request.
:param unicode timeout_activity_sid: The ID of the Activity that will be assigned to a Worker when a Task reservation times out without a response.
:param WorkspaceInstance.QueueOrder prioritize_queue_order: Use this parameter to configure whether to prioritize LIFO or FIFO when workers are receiving Tasks from combination of LIFO and FIFO TaskQueues.
:returns: Updated WorkspaceInstance
:rtype: twilio.rest.taskrouter.v1.workspace.WorkspaceInstance
"""
return self._proxy.update(default_activity_sid=default_activity_sid, event_callback_url=event_callback_url, events_filter=events_filter, friendly_name=friendly_name, multi_task_enabled=multi_task_enabled, timeout_activity_sid=timeout_activity_sid, prioritize_queue_order=prioritize_queue_order)
|
def cover_update(self, album, photo, **kwds):
    """
    Endpoint: /album/<album_id>/cover/<photo_id>/update.json
    Update the cover photo of an album.
    Returns the updated album object.
    """
    album_id = self._extract_id(album)
    photo_id = self._extract_id(photo)
    endpoint = "/album/%s/cover/%s/update.json" % (album_id, photo_id)
    result = self._client.post(endpoint, **kwds)["result"]
    # The API currently returns a bare boolean instead of the updated album
    # (frontend issue #1369), so fetch the album explicitly in that case.
    if isinstance(result, bool):  # pragma: no cover
        result = self._client.get("/album/%s/view.json" % album_id)["result"]
    return Album(self._client, result)
|
def function[cover_update, parameter[self, album, photo]]:
constant[
Endpoint: /album/<album_id>/cover/<photo_id>/update.json
Update the cover photo of an album.
Returns the updated album object.
]
variable[result] assign[=] call[call[name[self]._client.post, parameter[binary_operation[constant[/album/%s/cover/%s/update.json] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da207f02260>, <ast.Call object at 0x7da207f01de0>]]]]]][constant[result]]
if call[name[isinstance], parameter[name[result], name[bool]]] begin[:]
variable[result] assign[=] call[call[name[self]._client.get, parameter[binary_operation[constant[/album/%s/view.json] <ast.Mod object at 0x7da2590d6920> call[name[self]._extract_id, parameter[name[album]]]]]]][constant[result]]
return[call[name[Album], parameter[name[self]._client, name[result]]]]
|
keyword[def] identifier[cover_update] ( identifier[self] , identifier[album] , identifier[photo] ,** identifier[kwds] ):
literal[string]
identifier[result] = identifier[self] . identifier[_client] . identifier[post] ( literal[string] %
( identifier[self] . identifier[_extract_id] ( identifier[album] ),
identifier[self] . identifier[_extract_id] ( identifier[photo] )),
** identifier[kwds] )[ literal[string] ]
keyword[if] identifier[isinstance] ( identifier[result] , identifier[bool] ):
identifier[result] = identifier[self] . identifier[_client] . identifier[get] ( literal[string] %
identifier[self] . identifier[_extract_id] ( identifier[album] ))[ literal[string] ]
keyword[return] identifier[Album] ( identifier[self] . identifier[_client] , identifier[result] )
|
def cover_update(self, album, photo, **kwds):
"""
Endpoint: /album/<album_id>/cover/<photo_id>/update.json
Update the cover photo of an album.
Returns the updated album object.
"""
result = self._client.post('/album/%s/cover/%s/update.json' % (self._extract_id(album), self._extract_id(photo)), **kwds)['result']
# API currently doesn't return the updated album
# (frontend issue #1369)
if isinstance(result, bool): # pragma: no cover
result = self._client.get('/album/%s/view.json' % self._extract_id(album))['result'] # depends on [control=['if'], data=[]]
return Album(self._client, result)
|
def enable(self, synchronous=True, **kwargs):
    """Enables the RedHat Repository
    RedHat Repos needs to be enabled first, so that we can sync it.
    :param synchronous: What should happen if the server returns an HTTP
        202 (accepted) status code? Wait for the task to complete if
        ``True``. Immediately return the server's response otherwise.
    :param kwargs: Arguments to pass to requests.
    :returns: The server's response, with all JSON decoded.
    :raises: ``requests.exceptions.HTTPError`` If the server responds with
        an HTTP 4XX or 5XX message.
    """
    kwargs = kwargs.copy()  # shadow the passed-in kwargs
    # Copy the caller-supplied payload (if any) before mutating it: the
    # previous shallow copy still shared the nested ``data`` dict, so adding
    # ``product_id`` leaked into the caller's own dictionary.
    kwargs['data'] = dict(kwargs.get('data') or {})
    kwargs['data']['product_id'] = self.product.id
    kwargs.update(self._server_config.get_client_kwargs())
    response = client.put(self.path('enable'), **kwargs)
    return _handle_response(response, self._server_config, synchronous)
|
def function[enable, parameter[self, synchronous]]:
constant[Enables the RedHat Repository
RedHat Repos needs to be enabled first, so that we can sync it.
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message.
]
if compare[constant[data] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[data]] assign[=] call[name[dict], parameter[]]
call[call[name[kwargs]][constant[data]]][constant[product_id]] assign[=] name[self].product.id
variable[kwargs] assign[=] call[name[kwargs].copy, parameter[]]
call[name[kwargs].update, parameter[call[name[self]._server_config.get_client_kwargs, parameter[]]]]
variable[response] assign[=] call[name[client].put, parameter[call[name[self].path, parameter[constant[enable]]]]]
return[call[name[_handle_response], parameter[name[response], name[self]._server_config, name[synchronous]]]]
|
keyword[def] identifier[enable] ( identifier[self] , identifier[synchronous] = keyword[True] ,** identifier[kwargs] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= identifier[dict] ()
identifier[kwargs] [ literal[string] ][ literal[string] ]= identifier[self] . identifier[product] . identifier[id]
identifier[kwargs] = identifier[kwargs] . identifier[copy] ()
identifier[kwargs] . identifier[update] ( identifier[self] . identifier[_server_config] . identifier[get_client_kwargs] ())
identifier[response] = identifier[client] . identifier[put] ( identifier[self] . identifier[path] ( literal[string] ),** identifier[kwargs] )
keyword[return] identifier[_handle_response] ( identifier[response] , identifier[self] . identifier[_server_config] , identifier[synchronous] )
|
def enable(self, synchronous=True, **kwargs):
"""Enables the RedHat Repository
RedHat Repos needs to be enabled first, so that we can sync it.
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message.
"""
if 'data' not in kwargs:
kwargs['data'] = dict()
kwargs['data']['product_id'] = self.product.id # depends on [control=['if'], data=['kwargs']]
kwargs = kwargs.copy() # shadow the passed-in kwargs
kwargs.update(self._server_config.get_client_kwargs())
response = client.put(self.path('enable'), **kwargs)
return _handle_response(response, self._server_config, synchronous)
|
def to_idf(self, buffer_or_path=None, dump_external_files=True):
    """
    Parameters
    ----------
    buffer_or_path: buffer or path, default None
        output to write into. If None, will return a json string.
    dump_external_files: boolean, default True
        if True, external files will be dumped in external files directory
    Returns
    -------
    None, or an idf string (if buffer_or_path is None).
    """
    # Build the header: copyright banner plus the user comment (if any),
    # prefixed line by line with the idf comment marker.
    comment = get_multi_line_copyright_message()
    if self._comment != "":
        comment += textwrap.indent(self._comment, "! ", lambda line: True)
    comment += "\n\n"
    # Derive the model name and target directory when writing to a file path;
    # otherwise dump relative to the current directory with no model name.
    if isinstance(buffer_or_path, str):
        dir_path, file_name = os.path.split(buffer_or_path)
        model_name = os.path.splitext(file_name)[0]
    else:
        model_name = None
        dir_path = os.path.curdir
    # Optionally dump external files next to the output.
    if dump_external_files:
        external_dir = os.path.join(dir_path, get_external_files_dir_name(model_name=model_name))
        self.dump_external_files(target_dir_path=external_dir)
    # Body: every record of every table (self._tables is already sorted).
    formatted_records = [
        record.to_idf(model_name=model_name)
        for _table_ref, table in self._tables.items()
        for record in sorted(table)
    ]
    content = comment + "\n\n".join(formatted_records)
    # Write to the buffer/path, or return the string when no target is given.
    return multi_mode_write(
        lambda f: f.write(content),
        lambda: content,
        buffer_or_path
    )
|
def function[to_idf, parameter[self, buffer_or_path, dump_external_files]]:
constant[
Parameters
----------
buffer_or_path: buffer or path, default None
output to write into. If None, will return a json string.
dump_external_files: boolean, default True
if True, external files will be dumped in external files directory
Returns
-------
None, or an idf string (if buffer_or_path is None).
]
variable[comment] assign[=] call[name[get_multi_line_copyright_message], parameter[]]
if compare[name[self]._comment not_equal[!=] constant[]] begin[:]
<ast.AugAssign object at 0x7da20c795030>
<ast.AugAssign object at 0x7da20c794af0>
if call[name[isinstance], parameter[name[buffer_or_path], name[str]]] begin[:]
<ast.Tuple object at 0x7da20c795660> assign[=] call[name[os].path.split, parameter[name[buffer_or_path]]]
<ast.Tuple object at 0x7da20c794b50> assign[=] call[name[os].path.splitext, parameter[name[file_name]]]
if name[dump_external_files] begin[:]
call[name[self].dump_external_files, parameter[]]
variable[formatted_records] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b02a4940>, <ast.Name object at 0x7da1b02a4370>]]] in starred[call[name[self]._tables.items, parameter[]]] begin[:]
call[name[formatted_records].extend, parameter[<ast.ListComp object at 0x7da1b02a61a0>]]
variable[body] assign[=] call[constant[
].join, parameter[name[formatted_records]]]
variable[content] assign[=] binary_operation[name[comment] + name[body]]
return[call[name[multi_mode_write], parameter[<ast.Lambda object at 0x7da1b02a60e0>, <ast.Lambda object at 0x7da1b02a5c90>, name[buffer_or_path]]]]
|
keyword[def] identifier[to_idf] ( identifier[self] , identifier[buffer_or_path] = keyword[None] , identifier[dump_external_files] = keyword[True] ):
literal[string]
identifier[comment] = identifier[get_multi_line_copyright_message] ()
keyword[if] identifier[self] . identifier[_comment] != literal[string] :
identifier[comment] += identifier[textwrap] . identifier[indent] ( identifier[self] . identifier[_comment] , literal[string] , keyword[lambda] identifier[line] : keyword[True] )
identifier[comment] += literal[string]
keyword[if] identifier[isinstance] ( identifier[buffer_or_path] , identifier[str] ):
identifier[dir_path] , identifier[file_name] = identifier[os] . identifier[path] . identifier[split] ( identifier[buffer_or_path] )
identifier[model_name] , identifier[_] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[file_name] )
keyword[else] :
identifier[model_name] , identifier[dir_path] = keyword[None] , identifier[os] . identifier[path] . identifier[curdir]
keyword[if] identifier[dump_external_files] :
identifier[self] . identifier[dump_external_files] (
identifier[target_dir_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[dir_path] , identifier[get_external_files_dir_name] ( identifier[model_name] = identifier[model_name] ))
)
identifier[formatted_records] =[]
keyword[for] identifier[table_ref] , identifier[table] keyword[in] identifier[self] . identifier[_tables] . identifier[items] ():
identifier[formatted_records] . identifier[extend] ([ identifier[r] . identifier[to_idf] ( identifier[model_name] = identifier[model_name] ) keyword[for] identifier[r] keyword[in] identifier[sorted] ( identifier[table] )])
identifier[body] = literal[string] . identifier[join] ( identifier[formatted_records] )
identifier[content] = identifier[comment] + identifier[body]
keyword[return] identifier[multi_mode_write] (
keyword[lambda] identifier[f] : identifier[f] . identifier[write] ( identifier[content] ),
keyword[lambda] : identifier[content] ,
identifier[buffer_or_path]
)
|
def to_idf(self, buffer_or_path=None, dump_external_files=True):
"""
Parameters
----------
buffer_or_path: buffer or path, default None
output to write into. If None, will return a json string.
dump_external_files: boolean, default True
if True, external files will be dumped in external files directory
Returns
-------
None, or an idf string (if buffer_or_path is None).
"""
# prepare comment
comment = get_multi_line_copyright_message()
if self._comment != '':
comment += textwrap.indent(self._comment, '! ', lambda line: True) # depends on [control=['if'], data=[]]
comment += '\n\n'
# prepare external files dir path if file path
if isinstance(buffer_or_path, str):
(dir_path, file_name) = os.path.split(buffer_or_path)
(model_name, _) = os.path.splitext(file_name) # depends on [control=['if'], data=[]]
else:
(model_name, dir_path) = (None, os.path.curdir)
# dump files if asked
if dump_external_files:
self.dump_external_files(target_dir_path=os.path.join(dir_path, get_external_files_dir_name(model_name=model_name))) # depends on [control=['if'], data=[]]
# prepare body
formatted_records = []
for (table_ref, table) in self._tables.items(): # self._tables is already sorted
formatted_records.extend([r.to_idf(model_name=model_name) for r in sorted(table)]) # depends on [control=['for'], data=[]]
body = '\n\n'.join(formatted_records)
# return
content = comment + body
return multi_mode_write(lambda f: f.write(content), lambda : content, buffer_or_path)
|
def delete_idx_status(self, rdf_class):
    """
    Removes all of the index status triples from the datastore
    Args:
    -----
    rdf_class: The class of items to remove the status from
    """
    # Doubled braces escape str.format; the single {} receives the VALUES list.
    sparql_template = """
        DELETE
        {{
            ?s kds:esIndexTime ?esTime .
            ?s kds:esIndexError ?esError .
        }}
        WHERE
        {{
            VALUES ?rdftypes {{\n\t\t{} }} .
            ?s a ?rdftypes .
            OPTIONAL {{
                ?s kds:esIndexTime ?esTime
            }}
            OPTIONAL {{
                ?s kds:esIndexError ?esError
            }}
            FILTER(bound(?esTime)||bound(?esError))
        }}
        """
    # Clear the status for the class itself and every one of its subclasses.
    rdf_types = [rdf_class.uri]
    rdf_types.extend(item.uri for item in rdf_class.subclasses)
    query = sparql_template.format("\n\t\t".join(rdf_types))
    log.warn("Deleting index status for %s", rdf_class.uri)
    return self.tstore_conn.update_query(query)
|
def function[delete_idx_status, parameter[self, rdf_class]]:
constant[
Removes all of the index status triples from the datastore
Args:
-----
rdf_class: The class of items to remove the status from
]
variable[sparql_template] assign[=] constant[
DELETE
{{
?s kds:esIndexTime ?esTime .
?s kds:esIndexError ?esError .
}}
WHERE
{{
VALUES ?rdftypes {{
{} }} .
?s a ?rdftypes .
OPTIONAL {{
?s kds:esIndexTime ?esTime
}}
OPTIONAL {{
?s kds:esIndexError ?esError
}}
FILTER(bound(?esTime)||bound(?esError))
}}
]
variable[rdf_types] assign[=] binary_operation[list[[<ast.Attribute object at 0x7da1b1522e90>]] + <ast.ListComp object at 0x7da1b1522ef0>]
variable[sparql] assign[=] call[name[sparql_template].format, parameter[call[constant[
].join, parameter[name[rdf_types]]]]]
call[name[log].warn, parameter[constant[Deleting index status for %s], name[rdf_class].uri]]
return[call[name[self].tstore_conn.update_query, parameter[name[sparql]]]]
|
keyword[def] identifier[delete_idx_status] ( identifier[self] , identifier[rdf_class] ):
literal[string]
identifier[sparql_template] = literal[string]
identifier[rdf_types] =[ identifier[rdf_class] . identifier[uri] ]+[ identifier[item] . identifier[uri]
keyword[for] identifier[item] keyword[in] identifier[rdf_class] . identifier[subclasses] ]
identifier[sparql] = identifier[sparql_template] . identifier[format] ( literal[string] . identifier[join] ( identifier[rdf_types] ))
identifier[log] . identifier[warn] ( literal[string] , identifier[rdf_class] . identifier[uri] )
keyword[return] identifier[self] . identifier[tstore_conn] . identifier[update_query] ( identifier[sparql] )
|
def delete_idx_status(self, rdf_class):
"""
Removes all of the index status triples from the datastore
Args:
-----
rdf_class: The class of items to remove the status from
"""
sparql_template = '\n DELETE\n {{\n ?s kds:esIndexTime ?esTime .\n ?s kds:esIndexError ?esError .\n }}\n WHERE\n {{\n\n VALUES ?rdftypes {{\n\t\t{} }} .\n ?s a ?rdftypes .\n OPTIONAL {{\n ?s kds:esIndexTime ?esTime\n }}\n OPTIONAL {{\n ?s kds:esIndexError ?esError\n }}\n FILTER(bound(?esTime)||bound(?esError))\n }}\n '
rdf_types = [rdf_class.uri] + [item.uri for item in rdf_class.subclasses]
sparql = sparql_template.format('\n\t\t'.join(rdf_types))
log.warn('Deleting index status for %s', rdf_class.uri)
return self.tstore_conn.update_query(sparql)
|
def reboot(name, provision=False):
    '''
    Reboot a VM. (vagrant reload)
    CLI Example:
    .. code-block:: bash
        salt <host> vagrant.reboot <salt_id> provision=True
    :param name: The salt_id name you will use to control this VM
    :param provision: (False) also re-run the Vagrant provisioning scripts.
    '''
    vm_ = get_vm_info(name)
    if provision:
        prov = '--provision'
    else:
        prov = ''
    cmd = 'vagrant reload {} {}'.format(vm_['machine'], prov)
    # Run as the configured user from the Vagrantfile directory; a zero
    # return code means the reload succeeded.
    retcode = __salt__['cmd.retcode'](cmd,
                                      runas=vm_.get('runas'),
                                      cwd=vm_.get('cwd'))
    return retcode == 0
|
def function[reboot, parameter[name, provision]]:
constant[
Reboot a VM. (vagrant reload)
CLI Example:
.. code-block:: bash
salt <host> vagrant.reboot <salt_id> provision=True
:param name: The salt_id name you will use to control this VM
:param provision: (False) also re-run the Vagrant provisioning scripts.
]
variable[vm_] assign[=] call[name[get_vm_info], parameter[name[name]]]
variable[machine] assign[=] call[name[vm_]][constant[machine]]
variable[prov] assign[=] <ast.IfExp object at 0x7da1b2047940>
variable[cmd] assign[=] call[constant[vagrant reload {} {}].format, parameter[name[machine], name[prov]]]
variable[ret] assign[=] call[call[name[__salt__]][constant[cmd.retcode]], parameter[name[cmd]]]
return[compare[name[ret] equal[==] constant[0]]]
|
keyword[def] identifier[reboot] ( identifier[name] , identifier[provision] = keyword[False] ):
literal[string]
identifier[vm_] = identifier[get_vm_info] ( identifier[name] )
identifier[machine] = identifier[vm_] [ literal[string] ]
identifier[prov] = literal[string] keyword[if] identifier[provision] keyword[else] literal[string]
identifier[cmd] = literal[string] . identifier[format] ( identifier[machine] , identifier[prov] )
identifier[ret] = identifier[__salt__] [ literal[string] ]( identifier[cmd] ,
identifier[runas] = identifier[vm_] . identifier[get] ( literal[string] ),
identifier[cwd] = identifier[vm_] . identifier[get] ( literal[string] ))
keyword[return] identifier[ret] == literal[int]
|
def reboot(name, provision=False):
"""
Reboot a VM. (vagrant reload)
CLI Example:
.. code-block:: bash
salt <host> vagrant.reboot <salt_id> provision=True
:param name: The salt_id name you will use to control this VM
:param provision: (False) also re-run the Vagrant provisioning scripts.
"""
vm_ = get_vm_info(name)
machine = vm_['machine']
prov = '--provision' if provision else ''
cmd = 'vagrant reload {} {}'.format(machine, prov)
ret = __salt__['cmd.retcode'](cmd, runas=vm_.get('runas'), cwd=vm_.get('cwd'))
return ret == 0
|
def isoformat(self, sep='T'):
    """Return the time formatted according to ISO.
    This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if
    self.microsecond == 0.
    If self.tzinfo is not None, the UTC offset is also attached, giving
    'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'.
    Optional argument sep specifies the separator between date and
    time, default 'T'.
    """
    # Date portion, separator character, then the time portion.
    s = "%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep)
    s += _format_time(self._hour, self._minute, self._second,
                      self._microsecond)
    off = self.utcoffset()
    if off is None:
        # Naive datetime: no offset suffix.
        return s
    # Aware datetime: append the UTC offset as [+-]HH:MM.
    if off.days < 0:
        sign = "-"
        off = -off
    else:
        sign = "+"
    # divmod of two timedeltas yields (int quotient, timedelta remainder).
    hh, mm = divmod(off, timedelta(hours=1))
    assert not mm % timedelta(minutes=1), "whole minute"
    mm //= timedelta(minutes=1)
    return s + "%s%02d:%02d" % (sign, hh, mm)
|
def function[isoformat, parameter[self, sep]]:
constant[Return the time formatted according to ISO.
This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if
self.microsecond == 0.
If self.tzinfo is not None, the UTC offset is also attached, giving
'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'.
Optional argument sep specifies the separator between date and
time, default 'T'.
]
variable[s] assign[=] binary_operation[binary_operation[constant[%04d-%02d-%02d%c] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c7ca530>, <ast.Attribute object at 0x7da20c7c8d90>, <ast.Attribute object at 0x7da20c7c80a0>, <ast.Name object at 0x7da20c7c9720>]]] + call[name[_format_time], parameter[name[self]._hour, name[self]._minute, name[self]._second, name[self]._microsecond]]]
variable[off] assign[=] call[name[self].utcoffset, parameter[]]
if compare[name[off] is_not constant[None]] begin[:]
if compare[name[off].days less[<] constant[0]] begin[:]
variable[sign] assign[=] constant[-]
variable[off] assign[=] <ast.UnaryOp object at 0x7da18f58e260>
<ast.Tuple object at 0x7da18f58e020> assign[=] call[name[divmod], parameter[name[off], call[name[timedelta], parameter[]]]]
assert[<ast.UnaryOp object at 0x7da18f58c7f0>]
<ast.AugAssign object at 0x7da18f58c5e0>
<ast.AugAssign object at 0x7da20c7cbfd0>
return[name[s]]
|
keyword[def] identifier[isoformat] ( identifier[self] , identifier[sep] = literal[string] ):
literal[string]
identifier[s] =( literal[string] %( identifier[self] . identifier[_year] , identifier[self] . identifier[_month] , identifier[self] . identifier[_day] ,
identifier[sep] )+
identifier[_format_time] ( identifier[self] . identifier[_hour] , identifier[self] . identifier[_minute] , identifier[self] . identifier[_second] ,
identifier[self] . identifier[_microsecond] ))
identifier[off] = identifier[self] . identifier[utcoffset] ()
keyword[if] identifier[off] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[off] . identifier[days] < literal[int] :
identifier[sign] = literal[string]
identifier[off] =- identifier[off]
keyword[else] :
identifier[sign] = literal[string]
identifier[hh] , identifier[mm] = identifier[divmod] ( identifier[off] , identifier[timedelta] ( identifier[hours] = literal[int] ))
keyword[assert] keyword[not] identifier[mm] % identifier[timedelta] ( identifier[minutes] = literal[int] ), literal[string]
identifier[mm] //= identifier[timedelta] ( identifier[minutes] = literal[int] )
identifier[s] += literal[string] %( identifier[sign] , identifier[hh] , identifier[mm] )
keyword[return] identifier[s]
|
def isoformat(self, sep='T'):
"""Return the time formatted according to ISO.
This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if
self.microsecond == 0.
If self.tzinfo is not None, the UTC offset is also attached, giving
'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'.
Optional argument sep specifies the separator between date and
time, default 'T'.
"""
s = '%04d-%02d-%02d%c' % (self._year, self._month, self._day, sep) + _format_time(self._hour, self._minute, self._second, self._microsecond)
off = self.utcoffset()
if off is not None:
if off.days < 0:
sign = '-'
off = -off # depends on [control=['if'], data=[]]
else:
sign = '+'
(hh, mm) = divmod(off, timedelta(hours=1))
assert not mm % timedelta(minutes=1), 'whole minute'
mm //= timedelta(minutes=1)
s += '%s%02d:%02d' % (sign, hh, mm) # depends on [control=['if'], data=['off']]
return s
|
def array(self):
    """
    return the underlying numpy array
    """
    # Materialize the stored logspace parameters into a concrete ndarray.
    points = np.logspace(
        self.start,
        self.stop,
        num=self.num,
        endpoint=self.endpoint,
        base=self.base,
    )
    return points
|
def function[array, parameter[self]]:
constant[
return the underlying numpy array
]
return[call[name[np].logspace, parameter[name[self].start, name[self].stop, name[self].num, name[self].endpoint, name[self].base]]]
|
keyword[def] identifier[array] ( identifier[self] ):
literal[string]
keyword[return] identifier[np] . identifier[logspace] ( identifier[self] . identifier[start] , identifier[self] . identifier[stop] , identifier[self] . identifier[num] , identifier[self] . identifier[endpoint] , identifier[self] . identifier[base] )
|
def array(self):
"""
return the underlying numpy array
"""
return np.logspace(self.start, self.stop, self.num, self.endpoint, self.base)
|
def configure(self, mount_point, mfa_type='duo', force=False):
    """Configure MFA for a supported method.
    This endpoint allows you to turn on multi-factor authentication with a given backend.
    Currently only Duo is supported.
    Supported methods:
        POST: /auth/{mount_point}/mfa_config. Produces: 204 (empty body)
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :param mfa_type: Enables MFA with given backend (available: duo)
    :type mfa_type: str | unicode
    :param force: If True, make the "mfa_config" request regardless of circumstance. If False (the default), verify
        the provided mount_point is available and one of the types of methods supported by this feature.
    :type force: bool
    :return: The response of the configure MFA request.
    :rtype: requests.Response
    """
    # Reject unsupported backends unless the caller explicitly forces the
    # request. Only "duo" is supported today, but the force escape hatch is
    # kept in case that changes.
    if not force and mfa_type != 'duo':
        error_msg = 'Unsupported mfa_type argument provided "{arg}", supported types: "{mfa_types}"'
        raise exceptions.ParamValidationError(
            error_msg.format(
                mfa_types=','.join(SUPPORTED_MFA_TYPES),
                arg=mfa_type,
            )
        )
    api_path = '/v1/auth/{mount_point}/mfa_config'.format(
        mount_point=mount_point
    )
    return self._adapter.post(url=api_path, json={'type': mfa_type})
|
def function[configure, parameter[self, mount_point, mfa_type, force]]:
constant[Configure MFA for a supported method.
This endpoint allows you to turn on multi-factor authentication with a given backend.
Currently only Duo is supported.
Supported methods:
POST: /auth/{mount_point}/mfa_config. Produces: 204 (empty body)
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:param mfa_type: Enables MFA with given backend (available: duo)
:type mfa_type: str | unicode
:param force: If True, make the "mfa_config" request regardless of circumstance. If False (the default), verify
the provided mount_point is available and one of the types of methods supported by this feature.
:type force: bool
:return: The response of the configure MFA request.
:rtype: requests.Response
]
if <ast.BoolOp object at 0x7da20c992020> begin[:]
variable[error_msg] assign[=] constant[Unsupported mfa_type argument provided "{arg}", supported types: "{mfa_types}"]
<ast.Raise object at 0x7da18ede69e0>
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18f811540>], [<ast.Name object at 0x7da18f811c90>]]
variable[api_path] assign[=] call[constant[/v1/auth/{mount_point}/mfa_config].format, parameter[]]
return[call[name[self]._adapter.post, parameter[]]]
|
keyword[def] identifier[configure] ( identifier[self] , identifier[mount_point] , identifier[mfa_type] = literal[string] , identifier[force] = keyword[False] ):
literal[string]
keyword[if] identifier[mfa_type] != literal[string] keyword[and] keyword[not] identifier[force] :
identifier[error_msg] = literal[string]
keyword[raise] identifier[exceptions] . identifier[ParamValidationError] ( identifier[error_msg] . identifier[format] (
identifier[mfa_types] = literal[string] . identifier[join] ( identifier[SUPPORTED_MFA_TYPES] ),
identifier[arg] = identifier[mfa_type] ,
))
identifier[params] ={
literal[string] : identifier[mfa_type] ,
}
identifier[api_path] = literal[string] . identifier[format] (
identifier[mount_point] = identifier[mount_point]
)
keyword[return] identifier[self] . identifier[_adapter] . identifier[post] (
identifier[url] = identifier[api_path] ,
identifier[json] = identifier[params] ,
)
|
def configure(self, mount_point, mfa_type='duo', force=False):
"""Configure MFA for a supported method.
This endpoint allows you to turn on multi-factor authentication with a given backend.
Currently only Duo is supported.
Supported methods:
POST: /auth/{mount_point}/mfa_config. Produces: 204 (empty body)
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:param mfa_type: Enables MFA with given backend (available: duo)
:type mfa_type: str | unicode
:param force: If True, make the "mfa_config" request regardless of circumstance. If False (the default), verify
the provided mount_point is available and one of the types of methods supported by this feature.
:type force: bool
:return: The response of the configure MFA request.
:rtype: requests.Response
"""
if mfa_type != 'duo' and (not force):
# The situation described via this exception is not likely to change in the future.
# However we provided that flexibility here just in case.
error_msg = 'Unsupported mfa_type argument provided "{arg}", supported types: "{mfa_types}"'
raise exceptions.ParamValidationError(error_msg.format(mfa_types=','.join(SUPPORTED_MFA_TYPES), arg=mfa_type)) # depends on [control=['if'], data=[]]
params = {'type': mfa_type}
api_path = '/v1/auth/{mount_point}/mfa_config'.format(mount_point=mount_point)
return self._adapter.post(url=api_path, json=params)
|
def explain_template_loading_attempts(app, template, attempts):
    """
    This should help developers understand what failed. Mostly the same as
    :func:`flask.debughelpers.explain_template_loading_attempts`, except here we've
    extended it to support showing what :class:`UnchainedJinjaLoader` is doing.
    """
    from flask import Flask, Blueprint
    from flask.debughelpers import _dump_loader_info
    from flask.globals import _request_ctx_stack
    # parse_template splits an explicit "prior count" off the template name;
    # we are locating the (expected_priors + 1)-th match of this name.
    template, expected_priors = parse_template(template)
    info = [f'Locating {pretty_num(expected_priors + 1)} template "{template}":']
    total_found = 0
    blueprint = None
    # Remember the active blueprint (if any) for the hint appended at the end.
    reqctx = _request_ctx_stack.top
    if reqctx is not None and reqctx.request.blueprint is not None:
        blueprint = reqctx.request.blueprint
    for idx, (loader, srcobj, triple) in enumerate(attempts):
        # Describe where this loader came from: the app, a blueprint, or other.
        if isinstance(srcobj, Flask):
            src_info = 'application "%s"' % srcobj.import_name
        elif isinstance(srcobj, Blueprint):
            src_info = 'blueprint "%s" (%s)' % (srcobj.name,
                                                srcobj.import_name)
        else:
            src_info = repr(srcobj)
        info.append('% 5d: trying loader of %s' % (
            idx + 1, src_info))
        for line in _dump_loader_info(loader):
            info.append('       %s' % line)
        if triple is None:
            detail = 'no match'
        else:
            # Matches before the expected_priors-th are "skipping", the
            # expected_priors-th is the one actually "using", any later
            # match is "ignoring".
            if total_found < expected_priors:
                action = 'skipping'
            elif total_found == expected_priors:
                action = 'using'
            else:
                action = 'ignoring'
            detail = '%s (%r)' % (action, triple[1] or '<string>')
            total_found += 1
        info.append('       -> %s' % detail)
    seems_fishy = False
    # NOTE(review): this only reports an error when strictly fewer than
    # expected_priors matches were found; when exactly expected_priors were
    # found, the (expected_priors + 1)-th target template was also not found
    # yet no error is reported -- looks like an off-by-one, confirm intended.
    if total_found < expected_priors:
        info.append('Error: the template could not be found.')
        seems_fishy = True
    if blueprint is not None and seems_fishy:
        info.append('  The template was looked up from an endpoint that '
                    'belongs to the blueprint "%s".' % blueprint)
        info.append('  Maybe you did not place a template in the right folder?')
        info.append('  See http://flask.pocoo.org/docs/blueprints/#templates')
    app.logger.info('\n'.join(info))
|
def function[explain_template_loading_attempts, parameter[app, template, attempts]]:
constant[
This should help developers understand what failed. Mostly the same as
:func:`flask.debughelpers.explain_template_loading_attempts`, except here we've
extended it to support showing what :class:`UnchainedJinjaLoader` is doing.
]
from relative_module[flask] import module[Flask], module[Blueprint]
from relative_module[flask.debughelpers] import module[_dump_loader_info]
from relative_module[flask.globals] import module[_request_ctx_stack]
<ast.Tuple object at 0x7da20c6c7a60> assign[=] call[name[parse_template], parameter[name[template]]]
variable[info] assign[=] list[[<ast.JoinedStr object at 0x7da20c6c46a0>]]
variable[total_found] assign[=] constant[0]
variable[blueprint] assign[=] constant[None]
variable[reqctx] assign[=] name[_request_ctx_stack].top
if <ast.BoolOp object at 0x7da20c6c7310> begin[:]
variable[blueprint] assign[=] name[reqctx].request.blueprint
for taget[tuple[[<ast.Name object at 0x7da20c6c6cb0>, <ast.Tuple object at 0x7da20c6c48b0>]]] in starred[call[name[enumerate], parameter[name[attempts]]]] begin[:]
if call[name[isinstance], parameter[name[srcobj], name[Flask]]] begin[:]
variable[src_info] assign[=] binary_operation[constant[application "%s"] <ast.Mod object at 0x7da2590d6920> name[srcobj].import_name]
call[name[info].append, parameter[binary_operation[constant[% 5d: trying loader of %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da20c6c5480>, <ast.Name object at 0x7da20c6c6dd0>]]]]]
for taget[name[line]] in starred[call[name[_dump_loader_info], parameter[name[loader]]]] begin[:]
call[name[info].append, parameter[binary_operation[constant[ %s] <ast.Mod object at 0x7da2590d6920> name[line]]]]
if compare[name[triple] is constant[None]] begin[:]
variable[detail] assign[=] constant[no match]
call[name[info].append, parameter[binary_operation[constant[ -> %s] <ast.Mod object at 0x7da2590d6920> name[detail]]]]
variable[seems_fishy] assign[=] constant[False]
if compare[name[total_found] less[<] name[expected_priors]] begin[:]
call[name[info].append, parameter[constant[Error: the template could not be found.]]]
variable[seems_fishy] assign[=] constant[True]
if <ast.BoolOp object at 0x7da207f98730> begin[:]
call[name[info].append, parameter[binary_operation[constant[ The template was looked up from an endpoint that belongs to the blueprint "%s".] <ast.Mod object at 0x7da2590d6920> name[blueprint]]]]
call[name[info].append, parameter[constant[ Maybe you did not place a template in the right folder?]]]
call[name[info].append, parameter[constant[ See http://flask.pocoo.org/docs/blueprints/#templates]]]
call[name[app].logger.info, parameter[call[constant[
].join, parameter[name[info]]]]]
|
keyword[def] identifier[explain_template_loading_attempts] ( identifier[app] , identifier[template] , identifier[attempts] ):
literal[string]
keyword[from] identifier[flask] keyword[import] identifier[Flask] , identifier[Blueprint]
keyword[from] identifier[flask] . identifier[debughelpers] keyword[import] identifier[_dump_loader_info]
keyword[from] identifier[flask] . identifier[globals] keyword[import] identifier[_request_ctx_stack]
identifier[template] , identifier[expected_priors] = identifier[parse_template] ( identifier[template] )
identifier[info] =[ literal[string] ]
identifier[total_found] = literal[int]
identifier[blueprint] = keyword[None]
identifier[reqctx] = identifier[_request_ctx_stack] . identifier[top]
keyword[if] identifier[reqctx] keyword[is] keyword[not] keyword[None] keyword[and] identifier[reqctx] . identifier[request] . identifier[blueprint] keyword[is] keyword[not] keyword[None] :
identifier[blueprint] = identifier[reqctx] . identifier[request] . identifier[blueprint]
keyword[for] identifier[idx] ,( identifier[loader] , identifier[srcobj] , identifier[triple] ) keyword[in] identifier[enumerate] ( identifier[attempts] ):
keyword[if] identifier[isinstance] ( identifier[srcobj] , identifier[Flask] ):
identifier[src_info] = literal[string] % identifier[srcobj] . identifier[import_name]
keyword[elif] identifier[isinstance] ( identifier[srcobj] , identifier[Blueprint] ):
identifier[src_info] = literal[string] %( identifier[srcobj] . identifier[name] ,
identifier[srcobj] . identifier[import_name] )
keyword[else] :
identifier[src_info] = identifier[repr] ( identifier[srcobj] )
identifier[info] . identifier[append] ( literal[string] %(
identifier[idx] + literal[int] , identifier[src_info] ))
keyword[for] identifier[line] keyword[in] identifier[_dump_loader_info] ( identifier[loader] ):
identifier[info] . identifier[append] ( literal[string] % identifier[line] )
keyword[if] identifier[triple] keyword[is] keyword[None] :
identifier[detail] = literal[string]
keyword[else] :
keyword[if] identifier[total_found] < identifier[expected_priors] :
identifier[action] = literal[string]
keyword[elif] identifier[total_found] == identifier[expected_priors] :
identifier[action] = literal[string]
keyword[else] :
identifier[action] = literal[string]
identifier[detail] = literal[string] %( identifier[action] , identifier[triple] [ literal[int] ] keyword[or] literal[string] )
identifier[total_found] += literal[int]
identifier[info] . identifier[append] ( literal[string] % identifier[detail] )
identifier[seems_fishy] = keyword[False]
keyword[if] identifier[total_found] < identifier[expected_priors] :
identifier[info] . identifier[append] ( literal[string] )
identifier[seems_fishy] = keyword[True]
keyword[if] identifier[blueprint] keyword[is] keyword[not] keyword[None] keyword[and] identifier[seems_fishy] :
identifier[info] . identifier[append] ( literal[string]
literal[string] % identifier[blueprint] )
identifier[info] . identifier[append] ( literal[string] )
identifier[info] . identifier[append] ( literal[string] )
identifier[app] . identifier[logger] . identifier[info] ( literal[string] . identifier[join] ( identifier[info] ))
|
def explain_template_loading_attempts(app, template, attempts):
"""
This should help developers understand what failed. Mostly the same as
:func:`flask.debughelpers.explain_template_loading_attempts`, except here we've
extended it to support showing what :class:`UnchainedJinjaLoader` is doing.
"""
from flask import Flask, Blueprint
from flask.debughelpers import _dump_loader_info
from flask.globals import _request_ctx_stack
(template, expected_priors) = parse_template(template)
info = [f'Locating {pretty_num(expected_priors + 1)} template "{template}":']
total_found = 0
blueprint = None
reqctx = _request_ctx_stack.top
if reqctx is not None and reqctx.request.blueprint is not None:
blueprint = reqctx.request.blueprint # depends on [control=['if'], data=[]]
for (idx, (loader, srcobj, triple)) in enumerate(attempts):
if isinstance(srcobj, Flask):
src_info = 'application "%s"' % srcobj.import_name # depends on [control=['if'], data=[]]
elif isinstance(srcobj, Blueprint):
src_info = 'blueprint "%s" (%s)' % (srcobj.name, srcobj.import_name) # depends on [control=['if'], data=[]]
else:
src_info = repr(srcobj)
info.append('% 5d: trying loader of %s' % (idx + 1, src_info))
for line in _dump_loader_info(loader):
info.append(' %s' % line) # depends on [control=['for'], data=['line']]
if triple is None:
detail = 'no match' # depends on [control=['if'], data=[]]
else:
if total_found < expected_priors:
action = 'skipping' # depends on [control=['if'], data=[]]
elif total_found == expected_priors:
action = 'using' # depends on [control=['if'], data=[]]
else:
action = 'ignoring'
detail = '%s (%r)' % (action, triple[1] or '<string>')
total_found += 1
info.append(' -> %s' % detail) # depends on [control=['for'], data=[]]
seems_fishy = False
if total_found < expected_priors:
info.append('Error: the template could not be found.')
seems_fishy = True # depends on [control=['if'], data=[]]
if blueprint is not None and seems_fishy:
info.append(' The template was looked up from an endpoint that belongs to the blueprint "%s".' % blueprint)
info.append(' Maybe you did not place a template in the right folder?')
info.append(' See http://flask.pocoo.org/docs/blueprints/#templates') # depends on [control=['if'], data=[]]
app.logger.info('\n'.join(info))
|
def serialize(self, include_class=True, save_dynamic=False, **kwargs):
"""Serializes a **HasProperties** instance to dictionary
This uses the Property serializers to serialize all Property values
to a JSON-compatible dictionary. Properties that are undefined are
not included. If the **HasProperties** instance contains a reference
to itself, a :code:`properties.SelfReferenceError` will be raised.
**Parameters**:
* **include_class** - If True (the default), the name of the class
will also be saved to the serialized dictionary under key
:code:`'__class__'`
* **save_dynamic** - If True, dynamic properties are written to
the serialized dict (default: False).
* Any other keyword arguments will be passed through to the Property
serializers.
"""
if getattr(self, '_getting_serialized', False):
raise utils.SelfReferenceError('Object contains unserializable '
'self reference')
self._getting_serialized = True
try:
kwargs.update({
'include_class': include_class,
'save_dynamic': save_dynamic
})
if save_dynamic:
prop_source = self._props
else:
prop_source = self._backend
data = (
(key, self._props[key].serialize(getattr(self, key), **kwargs))
for key in prop_source
)
json_dict = {k: v for k, v in data if v is not None}
if include_class:
json_dict.update({'__class__': self.__class__.__name__})
return json_dict
finally:
self._getting_serialized = False
|
def function[serialize, parameter[self, include_class, save_dynamic]]:
constant[Serializes a **HasProperties** instance to dictionary
This uses the Property serializers to serialize all Property values
to a JSON-compatible dictionary. Properties that are undefined are
not included. If the **HasProperties** instance contains a reference
to itself, a :code:`properties.SelfReferenceError` will be raised.
**Parameters**:
* **include_class** - If True (the default), the name of the class
will also be saved to the serialized dictionary under key
:code:`'__class__'`
* **save_dynamic** - If True, dynamic properties are written to
the serialized dict (default: False).
* Any other keyword arguments will be passed through to the Property
serializers.
]
if call[name[getattr], parameter[name[self], constant[_getting_serialized], constant[False]]] begin[:]
<ast.Raise object at 0x7da1b0432aa0>
name[self]._getting_serialized assign[=] constant[True]
<ast.Try object at 0x7da1b0430430>
|
keyword[def] identifier[serialize] ( identifier[self] , identifier[include_class] = keyword[True] , identifier[save_dynamic] = keyword[False] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[getattr] ( identifier[self] , literal[string] , keyword[False] ):
keyword[raise] identifier[utils] . identifier[SelfReferenceError] ( literal[string]
literal[string] )
identifier[self] . identifier[_getting_serialized] = keyword[True]
keyword[try] :
identifier[kwargs] . identifier[update] ({
literal[string] : identifier[include_class] ,
literal[string] : identifier[save_dynamic]
})
keyword[if] identifier[save_dynamic] :
identifier[prop_source] = identifier[self] . identifier[_props]
keyword[else] :
identifier[prop_source] = identifier[self] . identifier[_backend]
identifier[data] =(
( identifier[key] , identifier[self] . identifier[_props] [ identifier[key] ]. identifier[serialize] ( identifier[getattr] ( identifier[self] , identifier[key] ),** identifier[kwargs] ))
keyword[for] identifier[key] keyword[in] identifier[prop_source]
)
identifier[json_dict] ={ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[data] keyword[if] identifier[v] keyword[is] keyword[not] keyword[None] }
keyword[if] identifier[include_class] :
identifier[json_dict] . identifier[update] ({ literal[string] : identifier[self] . identifier[__class__] . identifier[__name__] })
keyword[return] identifier[json_dict]
keyword[finally] :
identifier[self] . identifier[_getting_serialized] = keyword[False]
|
def serialize(self, include_class=True, save_dynamic=False, **kwargs):
"""Serializes a **HasProperties** instance to dictionary
This uses the Property serializers to serialize all Property values
to a JSON-compatible dictionary. Properties that are undefined are
not included. If the **HasProperties** instance contains a reference
to itself, a :code:`properties.SelfReferenceError` will be raised.
**Parameters**:
* **include_class** - If True (the default), the name of the class
will also be saved to the serialized dictionary under key
:code:`'__class__'`
* **save_dynamic** - If True, dynamic properties are written to
the serialized dict (default: False).
* Any other keyword arguments will be passed through to the Property
serializers.
"""
if getattr(self, '_getting_serialized', False):
raise utils.SelfReferenceError('Object contains unserializable self reference') # depends on [control=['if'], data=[]]
self._getting_serialized = True
try:
kwargs.update({'include_class': include_class, 'save_dynamic': save_dynamic})
if save_dynamic:
prop_source = self._props # depends on [control=['if'], data=[]]
else:
prop_source = self._backend
data = ((key, self._props[key].serialize(getattr(self, key), **kwargs)) for key in prop_source)
json_dict = {k: v for (k, v) in data if v is not None}
if include_class:
json_dict.update({'__class__': self.__class__.__name__}) # depends on [control=['if'], data=[]]
return json_dict # depends on [control=['try'], data=[]]
finally:
self._getting_serialized = False
|
def plot_trajectory_projection(self, x_axis, y_axis,
*args, **kwargs):
"""
Plots trajectory projection on the specified x and y axes
See :meth:`InferenceResult.plot_trajectory_projection()` for information on the arguments and keyword arguments
:param x_axis: variable to be plotted on the x axis of the projection
:param y_axis: variable to be plotted on the y axis of the projection
:param args: arguments to be passed to :meth:`InferenceResult.plot_trajectory_projection()`
:param kwargs: keyword arguments to be passed to :meth:`InferenceResult.plot_trajectory_projection()`
"""
# Just plot all of the trajectories
for result in self.results:
result.plot_trajectory_projection(x_axis, y_axis,
*args, **kwargs)
|
def function[plot_trajectory_projection, parameter[self, x_axis, y_axis]]:
constant[
Plots trajectory projection on the specified x and y axes
See :meth:`InferenceResult.plot_trajectory_projection()` for information on the arguments and keyword arguments
:param x_axis: variable to be plotted on the x axis of the projection
:param y_axis: variable to be plotted on the y axis of the projection
:param args: arguments to be passed to :meth:`InferenceResult.plot_trajectory_projection()`
:param kwargs: keyword arguments to be passed to :meth:`InferenceResult.plot_trajectory_projection()`
]
for taget[name[result]] in starred[name[self].results] begin[:]
call[name[result].plot_trajectory_projection, parameter[name[x_axis], name[y_axis], <ast.Starred object at 0x7da1b26afdc0>]]
|
keyword[def] identifier[plot_trajectory_projection] ( identifier[self] , identifier[x_axis] , identifier[y_axis] ,
* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[for] identifier[result] keyword[in] identifier[self] . identifier[results] :
identifier[result] . identifier[plot_trajectory_projection] ( identifier[x_axis] , identifier[y_axis] ,
* identifier[args] ,** identifier[kwargs] )
|
def plot_trajectory_projection(self, x_axis, y_axis, *args, **kwargs):
"""
Plots trajectory projection on the specified x and y axes
See :meth:`InferenceResult.plot_trajectory_projection()` for information on the arguments and keyword arguments
:param x_axis: variable to be plotted on the x axis of the projection
:param y_axis: variable to be plotted on the y axis of the projection
:param args: arguments to be passed to :meth:`InferenceResult.plot_trajectory_projection()`
:param kwargs: keyword arguments to be passed to :meth:`InferenceResult.plot_trajectory_projection()`
"""
# Just plot all of the trajectories
for result in self.results:
result.plot_trajectory_projection(x_axis, y_axis, *args, **kwargs) # depends on [control=['for'], data=['result']]
|
def curvature(self, name, **kwargs):
"""Test whether a source shows spectral curvature by comparing
the likelihood ratio of PowerLaw and LogParabola spectral
models.
Parameters
----------
name : str
Source name.
"""
name = self.roi.get_source_by_name(name).name
saved_state = LikelihoodState(self.like)
source = self.components[0].like.logLike.getSource(str(name))
old_spectrum = source.spectrum()
old_pars = copy.deepcopy(self.roi[name].spectral_pars)
old_type = self.roi[name]['SpectrumType']
if old_type != 'PowerLaw':
dnde = self.like[name].spectrum()(pyLike.dArg(1000.))
value, scale = utils.scale_parameter(dnde)
pars0 = {
'Prefactor':
{'value': value, 'scale': scale,
'min': 1E-5, 'max': 1000., 'free': True},
'Index':
{'value': 2.0, 'scale': -1.0, 'min': 0.0,
'max': 5.0, 'free': False},
'Scale':
{'value': 1E3, 'scale': 1.0, 'min': 1.,
'max': 1E6, 'free': False},
}
self.set_source_spectrum(str(name), 'PowerLaw',
spectrum_pars=pars0,
update_source=False)
self.free_source(name, loglevel=logging.DEBUG)
fit_pl = self._fit(loglevel=logging.DEBUG)
prefactor = self._get_param(name, 'Prefactor')
index = self._get_param(name, 'Index')
scale = self._get_param(name, 'Scale')
pars1 = {
'norm': copy.deepcopy(prefactor),
'alpha': copy.deepcopy(index),
'Eb': copy.deepcopy(scale),
}
pars1['alpha']['scale'] *= -1
pars1['alpha']['min'] = -5.0
pars1['alpha']['max'] = 5.0
self.set_source_spectrum(str(name), 'LogParabola',
spectrum_pars=pars1,
update_source=False)
self.free_source(name, loglevel=logging.DEBUG)
fit_lp = self._fit(loglevel=logging.DEBUG)
self.set_source_spectrum(str(name), old_type,
spectrum_pars=old_pars,
update_source=False)
pars2 = {
'Prefactor': copy.deepcopy(prefactor),
'Index1': copy.deepcopy(index),
'Cutoff': {'value': 1000.0, 'scale': 1E3,
'min': 10.0, 'max': 1E4, 'free': True},
'Index2': {'value': 1.0, 'scale': 1.0,
'min': 1.0, 'max': 1.0, 'free': False},
'Scale': copy.deepcopy(scale)
}
self.set_source_spectrum(str(name), 'PLSuperExpCutoff',
spectrum_pars=pars2,
update_source=False)
self.free_source(name, loglevel=logging.DEBUG)
fit_ple = self._fit(loglevel=logging.DEBUG)
# Revert to initial spectral model
self.set_source_spectrum(str(name), old_type,
spectrum_pars=old_pars,
update_source=False)
saved_state.restore()
lp_ts_curv = 2.0 * (fit_lp['loglike'] - fit_pl['loglike'])
ple_ts_curv = 2.0 * (fit_ple['loglike'] - fit_pl['loglike'])
o = MutableNamedTuple(ts_curv=lp_ts_curv,
lp_ts_curv=lp_ts_curv,
ple_ts_curv=ple_ts_curv,
loglike_pl=fit_pl['loglike'],
loglike_lp=fit_lp['loglike'],
loglike_ple=fit_ple['loglike'])
self.logger.info('LogLike_PL: %12.3f LogLike_LP: %12.3f LogLike_PLE: %12.3f',
o.loglike_pl, o.loglike_lp, o.loglike_ple)
self.logger.info('TS_curv: %.3f (LP)', o.lp_ts_curv)
self.logger.info('TS_curv: %.3f (PLE)', o.ple_ts_curv)
return o
|
def function[curvature, parameter[self, name]]:
constant[Test whether a source shows spectral curvature by comparing
the likelihood ratio of PowerLaw and LogParabola spectral
models.
Parameters
----------
name : str
Source name.
]
variable[name] assign[=] call[name[self].roi.get_source_by_name, parameter[name[name]]].name
variable[saved_state] assign[=] call[name[LikelihoodState], parameter[name[self].like]]
variable[source] assign[=] call[call[name[self].components][constant[0]].like.logLike.getSource, parameter[call[name[str], parameter[name[name]]]]]
variable[old_spectrum] assign[=] call[name[source].spectrum, parameter[]]
variable[old_pars] assign[=] call[name[copy].deepcopy, parameter[call[name[self].roi][name[name]].spectral_pars]]
variable[old_type] assign[=] call[call[name[self].roi][name[name]]][constant[SpectrumType]]
if compare[name[old_type] not_equal[!=] constant[PowerLaw]] begin[:]
variable[dnde] assign[=] call[call[call[name[self].like][name[name]].spectrum, parameter[]], parameter[call[name[pyLike].dArg, parameter[constant[1000.0]]]]]
<ast.Tuple object at 0x7da18c4cec80> assign[=] call[name[utils].scale_parameter, parameter[name[dnde]]]
variable[pars0] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cc9a0>, <ast.Constant object at 0x7da18c4cdf60>, <ast.Constant object at 0x7da18c4cece0>], [<ast.Dict object at 0x7da18c4cdcf0>, <ast.Dict object at 0x7da18c4cd930>, <ast.Dict object at 0x7da18c4cf700>]]
call[name[self].set_source_spectrum, parameter[call[name[str], parameter[name[name]]], constant[PowerLaw]]]
call[name[self].free_source, parameter[name[name]]]
variable[fit_pl] assign[=] call[name[self]._fit, parameter[]]
variable[prefactor] assign[=] call[name[self]._get_param, parameter[name[name], constant[Prefactor]]]
variable[index] assign[=] call[name[self]._get_param, parameter[name[name], constant[Index]]]
variable[scale] assign[=] call[name[self]._get_param, parameter[name[name], constant[Scale]]]
variable[pars1] assign[=] dictionary[[<ast.Constant object at 0x7da204963f70>, <ast.Constant object at 0x7da204960a30>, <ast.Constant object at 0x7da204963fa0>], [<ast.Call object at 0x7da204962650>, <ast.Call object at 0x7da204960100>, <ast.Call object at 0x7da204961d80>]]
<ast.AugAssign object at 0x7da18c4cf250>
call[call[name[pars1]][constant[alpha]]][constant[min]] assign[=] <ast.UnaryOp object at 0x7da18c4cfb20>
call[call[name[pars1]][constant[alpha]]][constant[max]] assign[=] constant[5.0]
call[name[self].set_source_spectrum, parameter[call[name[str], parameter[name[name]]], constant[LogParabola]]]
call[name[self].free_source, parameter[name[name]]]
variable[fit_lp] assign[=] call[name[self]._fit, parameter[]]
call[name[self].set_source_spectrum, parameter[call[name[str], parameter[name[name]]], name[old_type]]]
variable[pars2] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cdc60>, <ast.Constant object at 0x7da18c4ce6e0>, <ast.Constant object at 0x7da18c4cd9c0>, <ast.Constant object at 0x7da18c4ce2c0>, <ast.Constant object at 0x7da18c4cdf30>], [<ast.Call object at 0x7da18c4cea70>, <ast.Call object at 0x7da18c4cc190>, <ast.Dict object at 0x7da18c4cf880>, <ast.Dict object at 0x7da18c4ccbb0>, <ast.Call object at 0x7da18c4cfa90>]]
call[name[self].set_source_spectrum, parameter[call[name[str], parameter[name[name]]], constant[PLSuperExpCutoff]]]
call[name[self].free_source, parameter[name[name]]]
variable[fit_ple] assign[=] call[name[self]._fit, parameter[]]
call[name[self].set_source_spectrum, parameter[call[name[str], parameter[name[name]]], name[old_type]]]
call[name[saved_state].restore, parameter[]]
variable[lp_ts_curv] assign[=] binary_operation[constant[2.0] * binary_operation[call[name[fit_lp]][constant[loglike]] - call[name[fit_pl]][constant[loglike]]]]
variable[ple_ts_curv] assign[=] binary_operation[constant[2.0] * binary_operation[call[name[fit_ple]][constant[loglike]] - call[name[fit_pl]][constant[loglike]]]]
variable[o] assign[=] call[name[MutableNamedTuple], parameter[]]
call[name[self].logger.info, parameter[constant[LogLike_PL: %12.3f LogLike_LP: %12.3f LogLike_PLE: %12.3f], name[o].loglike_pl, name[o].loglike_lp, name[o].loglike_ple]]
call[name[self].logger.info, parameter[constant[TS_curv: %.3f (LP)], name[o].lp_ts_curv]]
call[name[self].logger.info, parameter[constant[TS_curv: %.3f (PLE)], name[o].ple_ts_curv]]
return[name[o]]
|
keyword[def] identifier[curvature] ( identifier[self] , identifier[name] ,** identifier[kwargs] ):
literal[string]
identifier[name] = identifier[self] . identifier[roi] . identifier[get_source_by_name] ( identifier[name] ). identifier[name]
identifier[saved_state] = identifier[LikelihoodState] ( identifier[self] . identifier[like] )
identifier[source] = identifier[self] . identifier[components] [ literal[int] ]. identifier[like] . identifier[logLike] . identifier[getSource] ( identifier[str] ( identifier[name] ))
identifier[old_spectrum] = identifier[source] . identifier[spectrum] ()
identifier[old_pars] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[roi] [ identifier[name] ]. identifier[spectral_pars] )
identifier[old_type] = identifier[self] . identifier[roi] [ identifier[name] ][ literal[string] ]
keyword[if] identifier[old_type] != literal[string] :
identifier[dnde] = identifier[self] . identifier[like] [ identifier[name] ]. identifier[spectrum] ()( identifier[pyLike] . identifier[dArg] ( literal[int] ))
identifier[value] , identifier[scale] = identifier[utils] . identifier[scale_parameter] ( identifier[dnde] )
identifier[pars0] ={
literal[string] :
{ literal[string] : identifier[value] , literal[string] : identifier[scale] ,
literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : keyword[True] },
literal[string] :
{ literal[string] : literal[int] , literal[string] :- literal[int] , literal[string] : literal[int] ,
literal[string] : literal[int] , literal[string] : keyword[False] },
literal[string] :
{ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] ,
literal[string] : literal[int] , literal[string] : keyword[False] },
}
identifier[self] . identifier[set_source_spectrum] ( identifier[str] ( identifier[name] ), literal[string] ,
identifier[spectrum_pars] = identifier[pars0] ,
identifier[update_source] = keyword[False] )
identifier[self] . identifier[free_source] ( identifier[name] , identifier[loglevel] = identifier[logging] . identifier[DEBUG] )
identifier[fit_pl] = identifier[self] . identifier[_fit] ( identifier[loglevel] = identifier[logging] . identifier[DEBUG] )
identifier[prefactor] = identifier[self] . identifier[_get_param] ( identifier[name] , literal[string] )
identifier[index] = identifier[self] . identifier[_get_param] ( identifier[name] , literal[string] )
identifier[scale] = identifier[self] . identifier[_get_param] ( identifier[name] , literal[string] )
identifier[pars1] ={
literal[string] : identifier[copy] . identifier[deepcopy] ( identifier[prefactor] ),
literal[string] : identifier[copy] . identifier[deepcopy] ( identifier[index] ),
literal[string] : identifier[copy] . identifier[deepcopy] ( identifier[scale] ),
}
identifier[pars1] [ literal[string] ][ literal[string] ]*=- literal[int]
identifier[pars1] [ literal[string] ][ literal[string] ]=- literal[int]
identifier[pars1] [ literal[string] ][ literal[string] ]= literal[int]
identifier[self] . identifier[set_source_spectrum] ( identifier[str] ( identifier[name] ), literal[string] ,
identifier[spectrum_pars] = identifier[pars1] ,
identifier[update_source] = keyword[False] )
identifier[self] . identifier[free_source] ( identifier[name] , identifier[loglevel] = identifier[logging] . identifier[DEBUG] )
identifier[fit_lp] = identifier[self] . identifier[_fit] ( identifier[loglevel] = identifier[logging] . identifier[DEBUG] )
identifier[self] . identifier[set_source_spectrum] ( identifier[str] ( identifier[name] ), identifier[old_type] ,
identifier[spectrum_pars] = identifier[old_pars] ,
identifier[update_source] = keyword[False] )
identifier[pars2] ={
literal[string] : identifier[copy] . identifier[deepcopy] ( identifier[prefactor] ),
literal[string] : identifier[copy] . identifier[deepcopy] ( identifier[index] ),
literal[string] :{ literal[string] : literal[int] , literal[string] : literal[int] ,
literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : keyword[True] },
literal[string] :{ literal[string] : literal[int] , literal[string] : literal[int] ,
literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : keyword[False] },
literal[string] : identifier[copy] . identifier[deepcopy] ( identifier[scale] )
}
identifier[self] . identifier[set_source_spectrum] ( identifier[str] ( identifier[name] ), literal[string] ,
identifier[spectrum_pars] = identifier[pars2] ,
identifier[update_source] = keyword[False] )
identifier[self] . identifier[free_source] ( identifier[name] , identifier[loglevel] = identifier[logging] . identifier[DEBUG] )
identifier[fit_ple] = identifier[self] . identifier[_fit] ( identifier[loglevel] = identifier[logging] . identifier[DEBUG] )
identifier[self] . identifier[set_source_spectrum] ( identifier[str] ( identifier[name] ), identifier[old_type] ,
identifier[spectrum_pars] = identifier[old_pars] ,
identifier[update_source] = keyword[False] )
identifier[saved_state] . identifier[restore] ()
identifier[lp_ts_curv] = literal[int] *( identifier[fit_lp] [ literal[string] ]- identifier[fit_pl] [ literal[string] ])
identifier[ple_ts_curv] = literal[int] *( identifier[fit_ple] [ literal[string] ]- identifier[fit_pl] [ literal[string] ])
identifier[o] = identifier[MutableNamedTuple] ( identifier[ts_curv] = identifier[lp_ts_curv] ,
identifier[lp_ts_curv] = identifier[lp_ts_curv] ,
identifier[ple_ts_curv] = identifier[ple_ts_curv] ,
identifier[loglike_pl] = identifier[fit_pl] [ literal[string] ],
identifier[loglike_lp] = identifier[fit_lp] [ literal[string] ],
identifier[loglike_ple] = identifier[fit_ple] [ literal[string] ])
identifier[self] . identifier[logger] . identifier[info] ( literal[string] ,
identifier[o] . identifier[loglike_pl] , identifier[o] . identifier[loglike_lp] , identifier[o] . identifier[loglike_ple] )
identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[o] . identifier[lp_ts_curv] )
identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[o] . identifier[ple_ts_curv] )
keyword[return] identifier[o]
|
def curvature(self, name, **kwargs):
"""Test whether a source shows spectral curvature by comparing
the likelihood ratio of PowerLaw and LogParabola spectral
models.
Parameters
----------
name : str
Source name.
"""
name = self.roi.get_source_by_name(name).name
saved_state = LikelihoodState(self.like)
source = self.components[0].like.logLike.getSource(str(name))
old_spectrum = source.spectrum()
old_pars = copy.deepcopy(self.roi[name].spectral_pars)
old_type = self.roi[name]['SpectrumType']
if old_type != 'PowerLaw':
dnde = self.like[name].spectrum()(pyLike.dArg(1000.0))
(value, scale) = utils.scale_parameter(dnde)
pars0 = {'Prefactor': {'value': value, 'scale': scale, 'min': 1e-05, 'max': 1000.0, 'free': True}, 'Index': {'value': 2.0, 'scale': -1.0, 'min': 0.0, 'max': 5.0, 'free': False}, 'Scale': {'value': 1000.0, 'scale': 1.0, 'min': 1.0, 'max': 1000000.0, 'free': False}}
self.set_source_spectrum(str(name), 'PowerLaw', spectrum_pars=pars0, update_source=False) # depends on [control=['if'], data=[]]
self.free_source(name, loglevel=logging.DEBUG)
fit_pl = self._fit(loglevel=logging.DEBUG)
prefactor = self._get_param(name, 'Prefactor')
index = self._get_param(name, 'Index')
scale = self._get_param(name, 'Scale')
pars1 = {'norm': copy.deepcopy(prefactor), 'alpha': copy.deepcopy(index), 'Eb': copy.deepcopy(scale)}
pars1['alpha']['scale'] *= -1
pars1['alpha']['min'] = -5.0
pars1['alpha']['max'] = 5.0
self.set_source_spectrum(str(name), 'LogParabola', spectrum_pars=pars1, update_source=False)
self.free_source(name, loglevel=logging.DEBUG)
fit_lp = self._fit(loglevel=logging.DEBUG)
self.set_source_spectrum(str(name), old_type, spectrum_pars=old_pars, update_source=False)
pars2 = {'Prefactor': copy.deepcopy(prefactor), 'Index1': copy.deepcopy(index), 'Cutoff': {'value': 1000.0, 'scale': 1000.0, 'min': 10.0, 'max': 10000.0, 'free': True}, 'Index2': {'value': 1.0, 'scale': 1.0, 'min': 1.0, 'max': 1.0, 'free': False}, 'Scale': copy.deepcopy(scale)}
self.set_source_spectrum(str(name), 'PLSuperExpCutoff', spectrum_pars=pars2, update_source=False)
self.free_source(name, loglevel=logging.DEBUG)
fit_ple = self._fit(loglevel=logging.DEBUG)
# Revert to initial spectral model
self.set_source_spectrum(str(name), old_type, spectrum_pars=old_pars, update_source=False)
saved_state.restore()
lp_ts_curv = 2.0 * (fit_lp['loglike'] - fit_pl['loglike'])
ple_ts_curv = 2.0 * (fit_ple['loglike'] - fit_pl['loglike'])
o = MutableNamedTuple(ts_curv=lp_ts_curv, lp_ts_curv=lp_ts_curv, ple_ts_curv=ple_ts_curv, loglike_pl=fit_pl['loglike'], loglike_lp=fit_lp['loglike'], loglike_ple=fit_ple['loglike'])
self.logger.info('LogLike_PL: %12.3f LogLike_LP: %12.3f LogLike_PLE: %12.3f', o.loglike_pl, o.loglike_lp, o.loglike_ple)
self.logger.info('TS_curv: %.3f (LP)', o.lp_ts_curv)
self.logger.info('TS_curv: %.3f (PLE)', o.ple_ts_curv)
return o
|
def _Henry(T, gas, liquid="H2O"):
"""Equation for the calculation of Henry's constant
Parameters
----------
T : float
Temperature, [K]
gas : string
Name of gas to calculate solubility
liquid : string
Name of liquid solvent, can be H20 (default) or D2O
Returns
-------
kw : float
Henry's constant, [MPa]
Notes
-----
The gas availables for H2O solvent are He, Ne, Ar, Kr, Xe, H2, N2, O2, CO,
CO2, H2S, CH4, C2H6, SF6
For D2O as solvent He, Ne, Ar, Kr, Xe, D2, CH4
Raise :class:`NotImplementedError` if input gas or liquid are unsupported
Examples
--------
>>> _Henry(500, "He")
1.1973
>>> _Henry(300, "D2", "D2O")
1.6594
References
----------
IAPWS, Guideline on the Henry's Constant and Vapor-Liquid Distribution
Constant for Gases in H2O and D2O at High Temperatures,
http://www.iapws.org/relguide/HenGuide.html
"""
if liquid == "D2O":
gas += "(D2O)"
limit = {
"He": (273.21, 553.18),
"Ne": (273.20, 543.36),
"Ar": (273.19, 568.36),
"Kr": (273.19, 525.56),
"Xe": (273.22, 574.85),
"H2": (273.15, 636.09),
"N2": (278.12, 636.46),
"O2": (274.15, 616.52),
"CO": (278.15, 588.67),
"CO2": (274.19, 642.66),
"H2S": (273.15, 533.09),
"CH4": (275.46, 633.11),
"C2H6": (275.44, 473.46),
"SF6": (283.14, 505.55),
"He(D2O)": (288.15, 553.18),
"Ne(D2O)": (288.18, 549.96),
"Ar(D2O)": (288.30, 583.76),
"Kr(D2O)": (288.19, 523.06),
"Xe(D2O)": (295.39, 574.85),
"D2(D2O)": (288.17, 581.00),
"CH4(D2O)": (288.16, 517.46)}
# Check input parameters
if liquid != "D2O" and liquid != "H2O":
raise NotImplementedError("Solvent liquid unsupported")
if gas not in limit:
raise NotImplementedError("Gas unsupported")
Tmin, Tmax = limit[gas]
if T < Tmin or T > Tmax:
warnings.warn("Temperature out of data of correlation")
if liquid == "D2O":
Tc = 643.847
Pc = 21.671
else:
Tc = 647.096
Pc = 22.064
Tr = T/Tc
tau = 1-Tr
# Eq 4
if liquid == "H2O":
ai = [-7.85951783, 1.84408259, -11.7866497, 22.6807411, -15.9618719,
1.80122502]
bi = [1, 1.5, 3, 3.5, 4, 7.5]
else:
ai = [-7.896657, 24.73308, -27.81128, 9.355913, -9.220083]
bi = [1, 1.89, 2, 3, 3.6]
ps = Pc*exp(1/Tr*sum([a*tau**b for a, b in zip(ai, bi)]))
# Select values from Table 2
par = {
"He": (-3.52839, 7.12983, 4.47770),
"Ne": (-3.18301, 5.31448, 5.43774),
"Ar": (-8.40954, 4.29587, 10.52779),
"Kr": (-8.97358, 3.61508, 11.29963),
"Xe": (-14.21635, 4.00041, 15.60999),
"H2": (-4.73284, 6.08954, 6.06066),
"N2": (-9.67578, 4.72162, 11.70585),
"O2": (-9.44833, 4.43822, 11.42005),
"CO": (-10.52862, 5.13259, 12.01421),
"CO2": (-8.55445, 4.01195, 9.52345),
"H2S": (-4.51499, 5.23538, 4.42126),
"CH4": (-10.44708, 4.66491, 12.12986),
"C2H6": (-19.67563, 4.51222, 20.62567),
"SF6": (-16.56118, 2.15289, 20.35440),
"He(D2O)": (-0.72643, 7.02134, 2.04433),
"Ne(D2O)": (-0.91999, 5.65327, 3.17247),
"Ar(D2O)": (-7.17725, 4.48177, 9.31509),
"Kr(D2O)": (-8.47059, 3.91580, 10.69433),
"Xe(D2O)": (-14.46485, 4.42330, 15.60919),
"D2(D2O)": (-5.33843, 6.15723, 6.53046),
"CH4(D2O)": (-10.01915, 4.73368, 11.75711)}
A, B, C = par[gas]
# Eq 3
kh = ps*exp(A/Tr+B*tau**0.355/Tr+C*Tr**-0.41*exp(tau))
return kh
|
def function[_Henry, parameter[T, gas, liquid]]:
constant[Equation for the calculation of Henry's constant
Parameters
----------
T : float
Temperature, [K]
gas : string
Name of gas to calculate solubility
liquid : string
Name of liquid solvent, can be H20 (default) or D2O
Returns
-------
kw : float
Henry's constant, [MPa]
Notes
-----
The gas availables for H2O solvent are He, Ne, Ar, Kr, Xe, H2, N2, O2, CO,
CO2, H2S, CH4, C2H6, SF6
For D2O as solvent He, Ne, Ar, Kr, Xe, D2, CH4
Raise :class:`NotImplementedError` if input gas or liquid are unsupported
Examples
--------
>>> _Henry(500, "He")
1.1973
>>> _Henry(300, "D2", "D2O")
1.6594
References
----------
IAPWS, Guideline on the Henry's Constant and Vapor-Liquid Distribution
Constant for Gases in H2O and D2O at High Temperatures,
http://www.iapws.org/relguide/HenGuide.html
]
if compare[name[liquid] equal[==] constant[D2O]] begin[:]
<ast.AugAssign object at 0x7da1b06c7d30>
variable[limit] assign[=] dictionary[[<ast.Constant object at 0x7da1b06c7c10>, <ast.Constant object at 0x7da1b06c7be0>, <ast.Constant object at 0x7da1b06c7bb0>, <ast.Constant object at 0x7da1b06c7b80>, <ast.Constant object at 0x7da1b06c7b50>, <ast.Constant object at 0x7da1b06c7b20>, <ast.Constant object at 0x7da1b06c7af0>, <ast.Constant object at 0x7da1b06c7ac0>, <ast.Constant object at 0x7da1b06c7a90>, <ast.Constant object at 0x7da1b06c7a60>, <ast.Constant object at 0x7da1b06c7a30>, <ast.Constant object at 0x7da1b06c7a00>, <ast.Constant object at 0x7da1b06c79d0>, <ast.Constant object at 0x7da1b06c79a0>, <ast.Constant object at 0x7da1b06c7970>, <ast.Constant object at 0x7da1b06c7940>, <ast.Constant object at 0x7da1b06c7910>, <ast.Constant object at 0x7da1b06c78e0>, <ast.Constant object at 0x7da1b06c78b0>, <ast.Constant object at 0x7da1b06c7880>, <ast.Constant object at 0x7da1b06c7850>], [<ast.Tuple object at 0x7da1b06c7820>, <ast.Tuple object at 0x7da1b06c7790>, <ast.Tuple object at 0x7da1b06c7700>, <ast.Tuple object at 0x7da1b06c7670>, <ast.Tuple object at 0x7da1b06c75e0>, <ast.Tuple object at 0x7da1b06c7550>, <ast.Tuple object at 0x7da1b06c74c0>, <ast.Tuple object at 0x7da1b06c7430>, <ast.Tuple object at 0x7da1b06c73a0>, <ast.Tuple object at 0x7da1b06c7310>, <ast.Tuple object at 0x7da1b06c7280>, <ast.Tuple object at 0x7da1b06c71f0>, <ast.Tuple object at 0x7da1b06c7160>, <ast.Tuple object at 0x7da1b06c70d0>, <ast.Tuple object at 0x7da1b06c7040>, <ast.Tuple object at 0x7da1b06c6fb0>, <ast.Tuple object at 0x7da1b06c6f20>, <ast.Tuple object at 0x7da1b06c5db0>, <ast.Tuple object at 0x7da1b06c5d20>, <ast.Tuple object at 0x7da1b06c5c90>, <ast.Tuple object at 0x7da1b06c5c00>]]
if <ast.BoolOp object at 0x7da1b06c5b40> begin[:]
<ast.Raise object at 0x7da1b06c59f0>
if compare[name[gas] <ast.NotIn object at 0x7da2590d7190> name[limit]] begin[:]
<ast.Raise object at 0x7da1b06c5870>
<ast.Tuple object at 0x7da1b06c5780> assign[=] call[name[limit]][name[gas]]
if <ast.BoolOp object at 0x7da1b06c5630> begin[:]
call[name[warnings].warn, parameter[constant[Temperature out of data of correlation]]]
if compare[name[liquid] equal[==] constant[D2O]] begin[:]
variable[Tc] assign[=] constant[643.847]
variable[Pc] assign[=] constant[21.671]
variable[Tr] assign[=] binary_operation[name[T] / name[Tc]]
variable[tau] assign[=] binary_operation[constant[1] - name[Tr]]
if compare[name[liquid] equal[==] constant[H2O]] begin[:]
variable[ai] assign[=] list[[<ast.UnaryOp object at 0x7da1b06c4d60>, <ast.Constant object at 0x7da1b06c4d00>, <ast.UnaryOp object at 0x7da1b06c4cd0>, <ast.Constant object at 0x7da1b06c4c70>, <ast.UnaryOp object at 0x7da1b06c4c40>, <ast.Constant object at 0x7da1b06c4be0>]]
variable[bi] assign[=] list[[<ast.Constant object at 0x7da1b06c4af0>, <ast.Constant object at 0x7da1b06c4ac0>, <ast.Constant object at 0x7da1b06c4a90>, <ast.Constant object at 0x7da1b06c4a60>, <ast.Constant object at 0x7da1b06c4a30>, <ast.Constant object at 0x7da1b06c4a00>]]
variable[ps] assign[=] binary_operation[name[Pc] * call[name[exp], parameter[binary_operation[binary_operation[constant[1] / name[Tr]] * call[name[sum], parameter[<ast.ListComp object at 0x7da1b06c43a0>]]]]]]
variable[par] assign[=] dictionary[[<ast.Constant object at 0x7da1b06c4040>, <ast.Constant object at 0x7da1b06d7fd0>, <ast.Constant object at 0x7da1b06d7fa0>, <ast.Constant object at 0x7da1b06d7f70>, <ast.Constant object at 0x7da1b06d7f40>, <ast.Constant object at 0x7da1b06d7f10>, <ast.Constant object at 0x7da1b06d7ee0>, <ast.Constant object at 0x7da1b06d7eb0>, <ast.Constant object at 0x7da1b06d7e80>, <ast.Constant object at 0x7da1b06d7e50>, <ast.Constant object at 0x7da1b06d7e20>, <ast.Constant object at 0x7da1b06d7df0>, <ast.Constant object at 0x7da1b06d7dc0>, <ast.Constant object at 0x7da1b06d7d90>, <ast.Constant object at 0x7da1b06d7d60>, <ast.Constant object at 0x7da1b06d7d30>, <ast.Constant object at 0x7da1b06d7d00>, <ast.Constant object at 0x7da1b06d7cd0>, <ast.Constant object at 0x7da1b06d7ca0>, <ast.Constant object at 0x7da1b06d4e80>, <ast.Constant object at 0x7da1b06d4eb0>], [<ast.Tuple object at 0x7da1b06d4ee0>, <ast.Tuple object at 0x7da1b06d4fd0>, <ast.Tuple object at 0x7da1b06d46d0>, <ast.Tuple object at 0x7da1b06d45e0>, <ast.Tuple object at 0x7da1b06d44f0>, <ast.Tuple object at 0x7da1b06d4400>, <ast.Tuple object at 0x7da1b06d4310>, <ast.Tuple object at 0x7da1b06d4220>, <ast.Tuple object at 0x7da1b06d4130>, <ast.Tuple object at 0x7da1b06d4040>, <ast.Tuple object at 0x7da1b06d52d0>, <ast.Tuple object at 0x7da1b06d53c0>, <ast.Tuple object at 0x7da18bcca530>, <ast.Tuple object at 0x7da18bcc9f00>, <ast.Tuple object at 0x7da18bcc9fc0>, <ast.Tuple object at 0x7da2054a47c0>, <ast.Tuple object at 0x7da2054a63b0>, <ast.Tuple object at 0x7da2054a4580>, <ast.Tuple object at 0x7da2054a4850>, <ast.Tuple object at 0x7da2054a6ce0>, <ast.Tuple object at 0x7da2054a68f0>]]
<ast.Tuple object at 0x7da2054a4790> assign[=] call[name[par]][name[gas]]
variable[kh] assign[=] binary_operation[name[ps] * call[name[exp], parameter[binary_operation[binary_operation[binary_operation[name[A] / name[Tr]] + binary_operation[binary_operation[name[B] * binary_operation[name[tau] ** constant[0.355]]] / name[Tr]]] + binary_operation[binary_operation[name[C] * binary_operation[name[Tr] ** <ast.UnaryOp object at 0x7da2054a4a00>]] * call[name[exp], parameter[name[tau]]]]]]]]
return[name[kh]]
|
keyword[def] identifier[_Henry] ( identifier[T] , identifier[gas] , identifier[liquid] = literal[string] ):
literal[string]
keyword[if] identifier[liquid] == literal[string] :
identifier[gas] += literal[string]
identifier[limit] ={
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] )}
keyword[if] identifier[liquid] != literal[string] keyword[and] identifier[liquid] != literal[string] :
keyword[raise] identifier[NotImplementedError] ( literal[string] )
keyword[if] identifier[gas] keyword[not] keyword[in] identifier[limit] :
keyword[raise] identifier[NotImplementedError] ( literal[string] )
identifier[Tmin] , identifier[Tmax] = identifier[limit] [ identifier[gas] ]
keyword[if] identifier[T] < identifier[Tmin] keyword[or] identifier[T] > identifier[Tmax] :
identifier[warnings] . identifier[warn] ( literal[string] )
keyword[if] identifier[liquid] == literal[string] :
identifier[Tc] = literal[int]
identifier[Pc] = literal[int]
keyword[else] :
identifier[Tc] = literal[int]
identifier[Pc] = literal[int]
identifier[Tr] = identifier[T] / identifier[Tc]
identifier[tau] = literal[int] - identifier[Tr]
keyword[if] identifier[liquid] == literal[string] :
identifier[ai] =[- literal[int] , literal[int] ,- literal[int] , literal[int] ,- literal[int] ,
literal[int] ]
identifier[bi] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
keyword[else] :
identifier[ai] =[- literal[int] , literal[int] ,- literal[int] , literal[int] ,- literal[int] ]
identifier[bi] =[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]
identifier[ps] = identifier[Pc] * identifier[exp] ( literal[int] / identifier[Tr] * identifier[sum] ([ identifier[a] * identifier[tau] ** identifier[b] keyword[for] identifier[a] , identifier[b] keyword[in] identifier[zip] ( identifier[ai] , identifier[bi] )]))
identifier[par] ={
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] ),
literal[string] :(- literal[int] , literal[int] , literal[int] )}
identifier[A] , identifier[B] , identifier[C] = identifier[par] [ identifier[gas] ]
identifier[kh] = identifier[ps] * identifier[exp] ( identifier[A] / identifier[Tr] + identifier[B] * identifier[tau] ** literal[int] / identifier[Tr] + identifier[C] * identifier[Tr] **- literal[int] * identifier[exp] ( identifier[tau] ))
keyword[return] identifier[kh]
|
def _Henry(T, gas, liquid='H2O'):
    """Equation for the calculation of Henry's constant
    Parameters
    ----------
    T : float
        Temperature, [K]
    gas : string
        Name of gas to calculate solubility
    liquid : string
        Name of liquid solvent, can be H2O (default) or D2O
    Returns
    -------
    kh : float
        Henry's constant, [MPa]
    Notes
    -----
    The gases available for H2O solvent are He, Ne, Ar, Kr, Xe, H2, N2, O2,
    CO, CO2, H2S, CH4, C2H6, SF6
    For D2O as solvent He, Ne, Ar, Kr, Xe, D2, CH4
    Raise :class:`NotImplementedError` if input gas or liquid are unsupported
    Examples
    --------
    NOTE(review): the IAPWS verification values quoted below appear to be
    ln(kH/1 GPa) while this function returns kH in MPa — confirm before
    relying on these as doctests:
    >>> _Henry(500, "He")
    1.1973
    >>> _Henry(300, "D2", "D2O")
    1.6594
    References
    ----------
    IAPWS, Guideline on the Henry's Constant and Vapor-Liquid Distribution
    Constant for Gases in H2O and D2O at High Temperatures,
    http://www.iapws.org/relguide/HenGuide.html
    """
    # D2O data is stored under "<gas>(D2O)" keys in both tables below
    if liquid == 'D2O':
        gas += '(D2O)'
    # Temperature range of validity of the correlation, [K], per gas/solvent
    limit = {'He': (273.21, 553.18), 'Ne': (273.2, 543.36), 'Ar': (273.19, 568.36), 'Kr': (273.19, 525.56), 'Xe': (273.22, 574.85), 'H2': (273.15, 636.09), 'N2': (278.12, 636.46), 'O2': (274.15, 616.52), 'CO': (278.15, 588.67), 'CO2': (274.19, 642.66), 'H2S': (273.15, 533.09), 'CH4': (275.46, 633.11), 'C2H6': (275.44, 473.46), 'SF6': (283.14, 505.55), 'He(D2O)': (288.15, 553.18), 'Ne(D2O)': (288.18, 549.96), 'Ar(D2O)': (288.3, 583.76), 'Kr(D2O)': (288.19, 523.06), 'Xe(D2O)': (295.39, 574.85), 'D2(D2O)': (288.17, 581.0), 'CH4(D2O)': (288.16, 517.46)}
    # Check input parameters
    if liquid != 'D2O' and liquid != 'H2O':
        raise NotImplementedError('Solvent liquid unsupported')
    if gas not in limit:
        raise NotImplementedError('Gas unsupported')
    (Tmin, Tmax) = limit[gas]
    if T < Tmin or T > Tmax:
        # Out-of-range input is allowed but flagged; result is extrapolated
        warnings.warn('Temperature out of data of correlation')
    # Critical-point constants of the solvent
    if liquid == 'D2O':
        Tc = 643.847
        Pc = 21.671
    else:
        Tc = 647.096
        Pc = 22.064
    Tr = T / Tc
    tau = 1 - Tr
    # Eq 4: vapor pressure of the pure solvent (Wagner-type correlation)
    if liquid == 'H2O':
        ai = [-7.85951783, 1.84408259, -11.7866497, 22.6807411, -15.9618719, 1.80122502]
        bi = [1, 1.5, 3, 3.5, 4, 7.5]
    else:
        ai = [-7.896657, 24.73308, -27.81128, 9.355913, -9.220083]
        bi = [1, 1.89, 2, 3, 3.6]
    ps = Pc * exp(1 / Tr * sum([a * tau ** b for (a, b) in zip(ai, bi)]))
    # Select values from Table 2: fitting parameters (A, B, C) per gas
    par = {'He': (-3.52839, 7.12983, 4.4777), 'Ne': (-3.18301, 5.31448, 5.43774), 'Ar': (-8.40954, 4.29587, 10.52779), 'Kr': (-8.97358, 3.61508, 11.29963), 'Xe': (-14.21635, 4.00041, 15.60999), 'H2': (-4.73284, 6.08954, 6.06066), 'N2': (-9.67578, 4.72162, 11.70585), 'O2': (-9.44833, 4.43822, 11.42005), 'CO': (-10.52862, 5.13259, 12.01421), 'CO2': (-8.55445, 4.01195, 9.52345), 'H2S': (-4.51499, 5.23538, 4.42126), 'CH4': (-10.44708, 4.66491, 12.12986), 'C2H6': (-19.67563, 4.51222, 20.62567), 'SF6': (-16.56118, 2.15289, 20.3544), 'He(D2O)': (-0.72643, 7.02134, 2.04433), 'Ne(D2O)': (-0.91999, 5.65327, 3.17247), 'Ar(D2O)': (-7.17725, 4.48177, 9.31509), 'Kr(D2O)': (-8.47059, 3.9158, 10.69433), 'Xe(D2O)': (-14.46485, 4.4233, 15.60919), 'D2(D2O)': (-5.33843, 6.15723, 6.53046), 'CH4(D2O)': (-10.01915, 4.73368, 11.75711)}
    (A, B, C) = par[gas]
    # Eq 3: Henry's constant from the solvent vapor pressure
    kh = ps * exp(A / Tr + B * tau ** 0.355 / Tr + C * Tr ** (-0.41) * exp(tau))
    return kh
|
def retrieve_client_credentials(self):
    """Return the client credentials.
    :returns: tuple(client_id, client_secret)
    """
    # Both values come from self.params; .get yields None for missing keys.
    params = self.params
    return (params.get('client_id'), params.get('client_secret'))
|
def function[retrieve_client_credentials, parameter[self]]:
constant[Return the client credentials.
:returns: tuple(client_id, client_secret)
]
variable[client_id] assign[=] call[name[self].params.get, parameter[constant[client_id]]]
variable[client_secret] assign[=] call[name[self].params.get, parameter[constant[client_secret]]]
return[tuple[[<ast.Name object at 0x7da1b0f99540>, <ast.Name object at 0x7da1b0f99b40>]]]
|
keyword[def] identifier[retrieve_client_credentials] ( identifier[self] ):
literal[string]
identifier[client_id] = identifier[self] . identifier[params] . identifier[get] ( literal[string] )
identifier[client_secret] = identifier[self] . identifier[params] . identifier[get] ( literal[string] )
keyword[return] ( identifier[client_id] , identifier[client_secret] )
|
def retrieve_client_credentials(self):
    """Return the client credentials.

    Reads ``client_id`` and ``client_secret`` from ``self.params`` (assumed
    dict-like); a key that is absent yields ``None`` for that element.

    :returns: tuple(client_id, client_secret)
    """
    client_id = self.params.get('client_id')
    client_secret = self.params.get('client_secret')
    return (client_id, client_secret)
|
def emails_with_users_and_watches(
        subject, template_path, vars, users_and_watches,
        from_email=settings.TIDINGS_FROM_ADDRESS, **extra_kwargs):
    """Return iterable of EmailMessages with user and watch values substituted.

    A convenience generator: renders the Django template at ``template_path``
    once per ``(user, watches)`` pair in ``users_and_watches``, with ``vars``
    plus ``user``/``watch``/``watches`` keys in the context, and yields one
    :class:`EmailMessage` addressed to that user.

    :arg template_path: path to template file
    :arg vars: a map which becomes the Context passed in to the template
    :arg extra_kwargs: additional kwargs to pass into EmailMessage constructor
    """
    tmpl = loader.get_template(template_path)
    ctx = Context(vars)
    for user, watches in users_and_watches:
        ctx['user'] = user
        # Arbitrary single watch for compatibility with 0.1
        # TODO: remove.
        ctx['watch'] = watches[0]
        ctx['watches'] = watches
        yield EmailMessage(
            subject,
            tmpl.render(ctx),
            from_email,
            [user.email],
            **extra_kwargs)
|
def function[emails_with_users_and_watches, parameter[subject, template_path, vars, users_and_watches, from_email]]:
constant[Return iterable of EmailMessages with user and watch values substituted.
A convenience function for generating emails by repeatedly rendering a
Django template with the given ``vars`` plus a ``user`` and ``watches`` key
for each pair in ``users_and_watches``
:arg template_path: path to template file
:arg vars: a map which becomes the Context passed in to the template
:arg extra_kwargs: additional kwargs to pass into EmailMessage constructor
]
variable[template] assign[=] call[name[loader].get_template, parameter[name[template_path]]]
variable[context] assign[=] call[name[Context], parameter[name[vars]]]
for taget[tuple[[<ast.Name object at 0x7da18f7205e0>, <ast.Name object at 0x7da18f721780>]]] in starred[name[users_and_watches]] begin[:]
call[name[context]][constant[user]] assign[=] name[u]
call[name[context]][constant[watch]] assign[=] call[name[w]][constant[0]]
call[name[context]][constant[watches]] assign[=] name[w]
<ast.Yield object at 0x7da1b054a680>
|
keyword[def] identifier[emails_with_users_and_watches] (
identifier[subject] , identifier[template_path] , identifier[vars] , identifier[users_and_watches] ,
identifier[from_email] = identifier[settings] . identifier[TIDINGS_FROM_ADDRESS] ,** identifier[extra_kwargs] ):
literal[string]
identifier[template] = identifier[loader] . identifier[get_template] ( identifier[template_path] )
identifier[context] = identifier[Context] ( identifier[vars] )
keyword[for] identifier[u] , identifier[w] keyword[in] identifier[users_and_watches] :
identifier[context] [ literal[string] ]= identifier[u]
identifier[context] [ literal[string] ]= identifier[w] [ literal[int] ]
identifier[context] [ literal[string] ]= identifier[w]
keyword[yield] identifier[EmailMessage] ( identifier[subject] ,
identifier[template] . identifier[render] ( identifier[context] ),
identifier[from_email] ,
[ identifier[u] . identifier[email] ],
** identifier[extra_kwargs] )
|
def emails_with_users_and_watches(subject, template_path, vars, users_and_watches, from_email=settings.TIDINGS_FROM_ADDRESS, **extra_kwargs):
    """Return iterable of EmailMessages with user and watch values substituted.

    A convenience function for generating emails by repeatedly rendering a
    Django template with the given ``vars`` plus a ``user`` and ``watches`` key
    for each pair in ``users_and_watches``. This is a generator: one
    ``EmailMessage`` is yielded per pair, addressed to ``u.email``.

    :arg template_path: path to template file
    :arg vars: a map which becomes the Context passed in to the template
    :arg extra_kwargs: additional kwargs to pass into EmailMessage constructor
    """
    template = loader.get_template(template_path)
    # One Context is reused across iterations; only the per-user keys change.
    context = Context(vars)
    for (u, w) in users_and_watches:
        context['user'] = u
        # Arbitrary single watch for compatibility with 0.1
        # TODO: remove.
        context['watch'] = w[0]
        context['watches'] = w
        yield EmailMessage(subject, template.render(context), from_email, [u.email], **extra_kwargs)
|
def ekgd(selidx, row, element):
    """
    Return an element of an entry in a column of double precision type in a
    specified row.
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekgd_c.html
    :param selidx: Index of parent column in SELECT clause.
    :type selidx: int
    :param row: Row to fetch from.
    :type row: int
    :param element: Index of element, within column entry, to fetch.
    :type element: int
    :return:
        Double precision element of column entry,
        null indicator for the entry,
        bool flag telling whether the element was found.
    :rtype: tuple
    """
    # Marshal the Python ints into C ints for the CSPICE call.
    c_selidx = ctypes.c_int(selidx)
    c_row = ctypes.c_int(row)
    c_element = ctypes.c_int(element)
    # Output parameters, filled in by ekgd_c via the byref pointers.
    ddata = ctypes.c_double()
    isnull = ctypes.c_int()
    found = ctypes.c_int()
    libspice.ekgd_c(c_selidx, c_row, c_element, ctypes.byref(ddata),
                    ctypes.byref(isnull), ctypes.byref(found))
    return ddata.value, isnull.value, bool(found.value)
|
def function[ekgd, parameter[selidx, row, element]]:
constant[
Return an element of an entry in a column of double precision type in a
specified row.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekgd_c.html
:param selidx: Index of parent column in SELECT clause.
:type selidx: int
:param row: Row to fetch from.
:type row: int
:param element: Index of element, within column entry, to fetch.
:type element: int
:return:
Double precision element of column entry,
Flag indicating whether column entry was null.
:rtype: tuple
]
variable[selidx] assign[=] call[name[ctypes].c_int, parameter[name[selidx]]]
variable[row] assign[=] call[name[ctypes].c_int, parameter[name[row]]]
variable[element] assign[=] call[name[ctypes].c_int, parameter[name[element]]]
variable[ddata] assign[=] call[name[ctypes].c_double, parameter[]]
variable[null] assign[=] call[name[ctypes].c_int, parameter[]]
variable[found] assign[=] call[name[ctypes].c_int, parameter[]]
call[name[libspice].ekgd_c, parameter[name[selidx], name[row], name[element], call[name[ctypes].byref, parameter[name[ddata]]], call[name[ctypes].byref, parameter[name[null]]], call[name[ctypes].byref, parameter[name[found]]]]]
return[tuple[[<ast.Attribute object at 0x7da2054a5fc0>, <ast.Attribute object at 0x7da2054a6950>, <ast.Call object at 0x7da2054a4190>]]]
|
keyword[def] identifier[ekgd] ( identifier[selidx] , identifier[row] , identifier[element] ):
literal[string]
identifier[selidx] = identifier[ctypes] . identifier[c_int] ( identifier[selidx] )
identifier[row] = identifier[ctypes] . identifier[c_int] ( identifier[row] )
identifier[element] = identifier[ctypes] . identifier[c_int] ( identifier[element] )
identifier[ddata] = identifier[ctypes] . identifier[c_double] ()
identifier[null] = identifier[ctypes] . identifier[c_int] ()
identifier[found] = identifier[ctypes] . identifier[c_int] ()
identifier[libspice] . identifier[ekgd_c] ( identifier[selidx] , identifier[row] , identifier[element] , identifier[ctypes] . identifier[byref] ( identifier[ddata] ),
identifier[ctypes] . identifier[byref] ( identifier[null] ), identifier[ctypes] . identifier[byref] ( identifier[found] ))
keyword[return] identifier[ddata] . identifier[value] , identifier[null] . identifier[value] , identifier[bool] ( identifier[found] . identifier[value] )
|
def ekgd(selidx, row, element):
    """
    Return an element of an entry in a column of double precision type in a
    specified row.
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekgd_c.html
    :param selidx: Index of parent column in SELECT clause.
    :type selidx: int
    :param row: Row to fetch from.
    :type row: int
    :param element: Index of element, within column entry, to fetch.
    :type element: int
    :return: 3-tuple of
        the double precision element of the column entry,
        the null indicator for the entry,
        and a bool telling whether the element was found.
    :rtype: tuple
    """
    # Marshal the Python ints into C ints for the CSPICE call.
    selidx = ctypes.c_int(selidx)
    row = ctypes.c_int(row)
    element = ctypes.c_int(element)
    # Output parameters, filled in by ekgd_c via the byref pointers.
    ddata = ctypes.c_double()
    null = ctypes.c_int()
    found = ctypes.c_int()
    libspice.ekgd_c(selidx, row, element, ctypes.byref(ddata), ctypes.byref(null), ctypes.byref(found))
    return (ddata.value, null.value, bool(found.value))
|
def get_blast2(pdb_id, chain_id='A', output_form='HTML'):
    '''Look up BLAST results for a given PDB ID.

    Convenience wrapper that fetches the raw BLAST page with
    ``get_raw_blast`` and parses it with ``parse_blast``.

    Parameters
    ----------
    pdb_id : string
        A 4 character string giving a pdb entry of interest
    chain_id : string
        A single character designating the chain ID of interest
    output_form : string
        TXT, HTML, or XML formatting of the BLAST page

    Returns
    -------
    out : 2-tuple
        A tuple consisting of a list of PDB matches, and a list
        of their alignment text files (unformatted)
    '''
    raw = get_raw_blast(pdb_id, chain_id=chain_id, output_form=output_form)
    return parse_blast(raw)
|
def function[get_blast2, parameter[pdb_id, chain_id, output_form]]:
constant[Alternative way to look up BLAST for a given PDB ID. This function is a wrapper
for get_raw_blast and parse_blast
Parameters
----------
pdb_id : string
A 4 character string giving a pdb entry of interest
chain_id : string
A single character designating the chain ID of interest
output_form : string
TXT, HTML, or XML formatting of the BLAST page
Returns
-------
out : 2-tuple
A tuple consisting of a list of PDB matches, and a list
of their alignment text files (unformatted)
Examples
--------
>>> blast_results = get_blast2('2F5N', chain_id='A', output_form='HTML')
>>> print('Total Results: ' + str(len(blast_results[0])) +'
')
>>> print(blast_results[1][0])
Total Results: 84
<pre>
><a name="45354"></a>2F5P:3:A|pdbid|entity|chain(s)|sequence
Length = 274
Score = 545 bits (1404), Expect = e-155, Method: Composition-based stats.
Identities = 274/274 (100%), Positives = 274/274 (100%)
Query: 1 MPELPEVETIRRTLLPLIVGKTIEDVRIFWPNIIRHPRDSEAFAARMIGQTVRGLERRGK 60
MPELPEVETIRRTLLPLIVGKTIEDVRIFWPNIIRHPRDSEAFAARMIGQTVRGLERRGK
Sbjct: 1 MPELPEVETIRRTLLPLIVGKTIEDVRIFWPNIIRHPRDSEAFAARMIGQTVRGLERRGK 60
...
]
variable[raw_results] assign[=] call[name[get_raw_blast], parameter[name[pdb_id]]]
variable[out] assign[=] call[name[parse_blast], parameter[name[raw_results]]]
return[name[out]]
|
keyword[def] identifier[get_blast2] ( identifier[pdb_id] , identifier[chain_id] = literal[string] , identifier[output_form] = literal[string] ):
literal[string]
identifier[raw_results] = identifier[get_raw_blast] ( identifier[pdb_id] , identifier[chain_id] = identifier[chain_id] , identifier[output_form] = identifier[output_form] )
identifier[out] = identifier[parse_blast] ( identifier[raw_results] )
keyword[return] identifier[out]
|
def get_blast2(pdb_id, chain_id='A', output_form='HTML'):
    """Alternative way to look up BLAST for a given PDB ID. This function is a wrapper
    for get_raw_blast and parse_blast

    Parameters
    ----------
    pdb_id : string
        A 4 character string giving a pdb entry of interest
    chain_id : string
        A single character designating the chain ID of interest
    output_form : string
        TXT, HTML, or XML formatting of the BLAST page

    Returns
    -------
    out : 2-tuple
        A tuple consisting of a list of PDB matches, and a list
        of their alignment text files (unformatted)

    Examples
    --------
    >>> blast_results = get_blast2('2F5N', chain_id='A', output_form='HTML')
    >>> print('Total Results: ' + str(len(blast_results[0])) + '\\n')
    >>> print(blast_results[1][0])
    Total Results: 84
    <pre>
    ><a name="45354"></a>2F5P:3:A|pdbid|entity|chain(s)|sequence
    Length = 274
    Score = 545 bits (1404), Expect = e-155, Method: Composition-based stats.
    Identities = 274/274 (100%), Positives = 274/274 (100%)
    Query: 1 MPELPEVETIRRTLLPLIVGKTIEDVRIFWPNIIRHPRDSEAFAARMIGQTVRGLERRGK 60
    MPELPEVETIRRTLLPLIVGKTIEDVRIFWPNIIRHPRDSEAFAARMIGQTVRGLERRGK
    Sbjct: 1 MPELPEVETIRRTLLPLIVGKTIEDVRIFWPNIIRHPRDSEAFAARMIGQTVRGLERRGK 60
    ...
    """
    # Fetch the raw BLAST page, then parse it into (matches, alignments).
    raw_results = get_raw_blast(pdb_id, chain_id=chain_id, output_form=output_form)
    out = parse_blast(raw_results)
    return out
|
def _get_labels_left(self, validate=None):
    """Get all labels of the left dataframe."""
    # Collect the left-hand labels declared by every compare feature.
    labels = []
    for feature in self.features:
        labels.extend(listify(feature.labels_left))
    # check requested labels (for better error messages)
    if not is_label_dataframe(labels, validate):
        raise KeyError("label is not found in the dataframe")
    return unique(labels)
|
def function[_get_labels_left, parameter[self, validate]]:
constant[Get all labels of the left dataframe.]
variable[labels] assign[=] list[[]]
for taget[name[compare_func]] in starred[name[self].features] begin[:]
variable[labels] assign[=] binary_operation[name[labels] + call[name[listify], parameter[name[compare_func].labels_left]]]
if <ast.UnaryOp object at 0x7da204620bb0> begin[:]
variable[error_msg] assign[=] constant[label is not found in the dataframe]
<ast.Raise object at 0x7da204620070>
return[call[name[unique], parameter[name[labels]]]]
|
keyword[def] identifier[_get_labels_left] ( identifier[self] , identifier[validate] = keyword[None] ):
literal[string]
identifier[labels] =[]
keyword[for] identifier[compare_func] keyword[in] identifier[self] . identifier[features] :
identifier[labels] = identifier[labels] + identifier[listify] ( identifier[compare_func] . identifier[labels_left] )
keyword[if] keyword[not] identifier[is_label_dataframe] ( identifier[labels] , identifier[validate] ):
identifier[error_msg] = literal[string]
keyword[raise] identifier[KeyError] ( identifier[error_msg] )
keyword[return] identifier[unique] ( identifier[labels] )
|
def _get_labels_left(self, validate=None):
"""Get all labels of the left dataframe."""
labels = []
for compare_func in self.features:
labels = labels + listify(compare_func.labels_left) # depends on [control=['for'], data=['compare_func']]
# check requested labels (for better error messages)
if not is_label_dataframe(labels, validate):
error_msg = 'label is not found in the dataframe'
raise KeyError(error_msg) # depends on [control=['if'], data=[]]
return unique(labels)
|
def get(self, key):
    """
    Fetch a single crash object from the container by its key.
    @type  key: L{Crash} unique key.
    @param key: Key of the crash to get.
    @rtype:  L{Crash} object.
    @return: Crash matching the given key.
    @see: L{iterkeys}
    @warning: A B{copy} of each object is returned,
        so any changes made to them will be lost.
        To preserve changes do the following:
            1. Keep a reference to the object.
            2. Delete the object from the set.
            3. Modify the object and add it again.
    """
    # Marshall the key into its stored representation, fetch the raw
    # payload, then unmarshall it back into a Crash object.
    return self.unmarshall_value(self.__db[self.marshall_key(key)])
|
def function[get, parameter[self, key]]:
constant[
Retrieves a crash from the container.
@type key: L{Crash} unique key.
@param key: Key of the crash to get.
@rtype: L{Crash} object.
@return: Crash matching the given key.
@see: L{iterkeys}
@warning: A B{copy} of each object is returned,
so any changes made to them will be lost.
To preserve changes do the following:
1. Keep a reference to the object.
2. Delete the object from the set.
3. Modify the object and add it again.
]
variable[skey] assign[=] call[name[self].marshall_key, parameter[name[key]]]
variable[data] assign[=] call[name[self].__db][name[skey]]
variable[crash] assign[=] call[name[self].unmarshall_value, parameter[name[data]]]
return[name[crash]]
|
keyword[def] identifier[get] ( identifier[self] , identifier[key] ):
literal[string]
identifier[skey] = identifier[self] . identifier[marshall_key] ( identifier[key] )
identifier[data] = identifier[self] . identifier[__db] [ identifier[skey] ]
identifier[crash] = identifier[self] . identifier[unmarshall_value] ( identifier[data] )
keyword[return] identifier[crash]
|
def get(self, key):
"""
Retrieves a crash from the container.
@type key: L{Crash} unique key.
@param key: Key of the crash to get.
@rtype: L{Crash} object.
@return: Crash matching the given key.
@see: L{iterkeys}
@warning: A B{copy} of each object is returned,
so any changes made to them will be lost.
To preserve changes do the following:
1. Keep a reference to the object.
2. Delete the object from the set.
3. Modify the object and add it again.
"""
skey = self.marshall_key(key)
data = self.__db[skey]
crash = self.unmarshall_value(data)
return crash
|
def find_label(self, label: Label):
    """
    Scan the program for a JumpTarget whose label matches ``label``.
    :param label: Label object to search for in program
    :return: Program index where ``label`` is found
    :raises RuntimeError: when no JumpTarget carries the requested label
    """
    for position, instruction in enumerate(self.program):
        # Only JumpTarget actions carry labels worth comparing.
        if isinstance(instruction, JumpTarget) and label == instruction.label:
            return position
    raise RuntimeError("Improper program - Jump Target not found in the "
                       "input program!")
|
def function[find_label, parameter[self, label]]:
constant[
Helper function that iterates over the program and looks for a JumpTarget that has a
Label matching the input label.
:param label: Label object to search for in program
:return: Program index where ``label`` is found
]
for taget[tuple[[<ast.Name object at 0x7da1b1bf8ac0>, <ast.Name object at 0x7da1b1bfba90>]]] in starred[call[name[enumerate], parameter[name[self].program]]] begin[:]
if call[name[isinstance], parameter[name[action], name[JumpTarget]]] begin[:]
if compare[name[label] equal[==] name[action].label] begin[:]
return[name[index]]
<ast.Raise object at 0x7da1b1bbba00>
|
keyword[def] identifier[find_label] ( identifier[self] , identifier[label] : identifier[Label] ):
literal[string]
keyword[for] identifier[index] , identifier[action] keyword[in] identifier[enumerate] ( identifier[self] . identifier[program] ):
keyword[if] identifier[isinstance] ( identifier[action] , identifier[JumpTarget] ):
keyword[if] identifier[label] == identifier[action] . identifier[label] :
keyword[return] identifier[index]
keyword[raise] identifier[RuntimeError] ( literal[string]
literal[string] )
|
def find_label(self, label: Label):
"""
Helper function that iterates over the program and looks for a JumpTarget that has a
Label matching the input label.
:param label: Label object to search for in program
:return: Program index where ``label`` is found
"""
for (index, action) in enumerate(self.program):
if isinstance(action, JumpTarget):
if label == action.label:
return index # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
raise RuntimeError('Improper program - Jump Target not found in the input program!')
|
def check_extras(dist, attr, value):
    """Verify that an ``extras_require`` mapping is valid.

    Each value of the mapping must be a string (or list of strings) that
    parses as a project/version requirement specifier.

    :param dist: the Distribution being configured (unused; required by
        the setuptools keyword-validation hook signature).
    :param attr: the attribute name being validated (``'extras_require'``).
    :param value: mapping of extra name -> requirement specifier(s).
    :raises DistutilsSetupError: if ``value`` is not a valid mapping.
    """
    try:
        # Only the requirement specifiers need validating; the extra
        # names (keys) are not used, so iterate values directly.
        for reqs in value.values():
            list(pkg_resources.parse_requirements(reqs))
    except (TypeError, ValueError, AttributeError):
        raise DistutilsSetupError(
            "'extras_require' must be a dictionary whose values are "
            "strings or lists of strings containing valid project/version "
            "requirement specifiers."
        )
|
def function[check_extras, parameter[dist, attr, value]]:
constant[Verify that extras_require mapping is valid]
<ast.Try object at 0x7da1b26ac8b0>
|
keyword[def] identifier[check_extras] ( identifier[dist] , identifier[attr] , identifier[value] ):
literal[string]
keyword[try] :
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[value] . identifier[items] ():
identifier[list] ( identifier[pkg_resources] . identifier[parse_requirements] ( identifier[v] ))
keyword[except] ( identifier[TypeError] , identifier[ValueError] , identifier[AttributeError] ):
keyword[raise] identifier[DistutilsSetupError] (
literal[string]
literal[string]
literal[string]
)
|
def check_extras(dist, attr, value):
"""Verify that extras_require mapping is valid"""
try:
for (k, v) in value.items():
list(pkg_resources.parse_requirements(v)) # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]]
except (TypeError, ValueError, AttributeError):
raise DistutilsSetupError("'extras_require' must be a dictionary whose values are strings or lists of strings containing valid project/version requirement specifiers.") # depends on [control=['except'], data=[]]
|
def user(self, username=None):
    """Return a User object for the given login name.

    Input:
       username - name of the login for a given user on a site.
                  When None, the currently logged-in user is used.
    """
    login = username if username is not None else self.__getUsername()
    return User(url="%s/%s" % (self.root, login),
                securityHandler=self._securityHandler,
                proxy_url=self._proxy_url,
                proxy_port=self._proxy_port,
                initalize=True)
|
def function[user, parameter[self, username]]:
constant[gets the user's content. If None is passed, the current user is
used.
Input:
username - name of the login for a given user on a site.
]
if compare[name[username] is constant[None]] begin[:]
variable[username] assign[=] call[name[self].__getUsername, parameter[]]
variable[url] assign[=] binary_operation[constant[%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1256110>, <ast.Name object at 0x7da1b1254f70>]]]
return[call[name[User], parameter[]]]
|
keyword[def] identifier[user] ( identifier[self] , identifier[username] = keyword[None] ):
literal[string]
keyword[if] identifier[username] keyword[is] keyword[None] :
identifier[username] = identifier[self] . identifier[__getUsername] ()
identifier[url] = literal[string] %( identifier[self] . identifier[root] , identifier[username] )
keyword[return] identifier[User] ( identifier[url] = identifier[url] ,
identifier[securityHandler] = identifier[self] . identifier[_securityHandler] ,
identifier[proxy_url] = identifier[self] . identifier[_proxy_url] ,
identifier[proxy_port] = identifier[self] . identifier[_proxy_port] ,
identifier[initalize] = keyword[True] )
|
def user(self, username=None):
"""gets the user's content. If None is passed, the current user is
used.
Input:
username - name of the login for a given user on a site.
"""
if username is None:
username = self.__getUsername() # depends on [control=['if'], data=['username']]
url = '%s/%s' % (self.root, username)
return User(url=url, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port, initalize=True)
|
def get_default(self, node):
    """
    Unless specified otherwise, intr fields are implicitly stickybit.

    Returns True only for interrupt fields that did not opt into the
    mutually-exclusive 'sticky' property instead; all other fields
    default to False.
    """
    props = node.inst.properties
    # Non-interrupt fields never default to stickybit.
    if not props.get("intr", False):
        return False
    # intr implies stickybit unless the 'sticky' property was set instead.
    return not props.get("sticky", False)
|
def function[get_default, parameter[self, node]]:
constant[
Unless specified otherwise, intr fields are implicitly stickybit
]
if call[name[node].inst.properties.get, parameter[constant[intr], constant[False]]] begin[:]
return[<ast.UnaryOp object at 0x7da1b0d61780>]
|
keyword[def] identifier[get_default] ( identifier[self] , identifier[node] ):
literal[string]
keyword[if] identifier[node] . identifier[inst] . identifier[properties] . identifier[get] ( literal[string] , keyword[False] ):
keyword[return] keyword[not] identifier[node] . identifier[inst] . identifier[properties] . identifier[get] ( literal[string] , keyword[False] )
keyword[else] :
keyword[return] keyword[False]
|
def get_default(self, node):
"""
Unless specified otherwise, intr fields are implicitly stickybit
"""
if node.inst.properties.get('intr', False):
# Interrupt is set!
# Default is implicitly stickybit, unless the mutually-exclusive
# sticky property was set instead
return not node.inst.properties.get('sticky', False) # depends on [control=['if'], data=[]]
else:
return False
|
def _download_wrapper(self, url, *args, **kwargs):
"""
Actual download call. Calls the underlying file downloader,
catches all exceptions and returns the result.
"""
try:
return url, self._file_downloader.download(url, *args, **kwargs)
except Exception as e:
logging.error("AbstractDownloader: %s", traceback.format_exc())
return url, e
|
def function[_download_wrapper, parameter[self, url]]:
constant[
Actual download call. Calls the underlying file downloader,
catches all exceptions and returns the result.
]
<ast.Try object at 0x7da18f812800>
|
keyword[def] identifier[_download_wrapper] ( identifier[self] , identifier[url] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[return] identifier[url] , identifier[self] . identifier[_file_downloader] . identifier[download] ( identifier[url] ,* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logging] . identifier[error] ( literal[string] , identifier[traceback] . identifier[format_exc] ())
keyword[return] identifier[url] , identifier[e]
|
def _download_wrapper(self, url, *args, **kwargs):
"""
Actual download call. Calls the underlying file downloader,
catches all exceptions and returns the result.
"""
try:
return (url, self._file_downloader.download(url, *args, **kwargs)) # depends on [control=['try'], data=[]]
except Exception as e:
logging.error('AbstractDownloader: %s', traceback.format_exc())
return (url, e) # depends on [control=['except'], data=['e']]
|
def remove(self,
           package,
           echo=None,
           options=None,
           timeout=shutit_global.shutit_global_object.default_timeout,
           note=None):
    """Distro-independent remove function.
    Takes a package name and runs the relevant remove function.

    @param package: Package to remove, which is run through package_map.
    @param options: Dict of options to pass to the remove command,
                    mapped by install_type.
    @param timeout: See send(). Default: 3600
    @param note:    See send()

    @return: True if all ok (i.e. the package was successfully removed),
             False otherwise.
    @rtype: boolean
    """
    shutit = self.shutit
    if note is not None:
        shutit.handle_note('Removing package: ' + package + '\n' + note)
    if options is None:
        options = {}
    install_type = self.current_environment.install_type
    cmd = ''
    # Default option string. Bug fix: 'opts' was previously assigned only
    # inside some branches, so e.g. 'pacman'/'emerge'/'docker' without a
    # matching options entry raised a NameError at the send below, and
    # 'brew' without options crashed with UnboundLocalError on
    # "opts += ' --force'".
    opts = ''
    if install_type == 'src':
        # If this is a src build, we assume it's already installed.
        return True
    elif install_type == 'none':
        # If this is a none build, removing is invalid.
        shutit.fail('ShutiIt has no install type for the identified OS, so cannot use install method. Inform maintainers if believed to be a bug.') # pragma: no cover
    if install_type == 'apt':
        cmd += 'apt-get purge'
        opts = options['apt'] if 'apt' in options else '-qq -y'
    elif install_type == 'yum':
        cmd += 'yum erase'
        opts = options['yum'] if 'yum' in options else '-y'
    elif install_type == 'pacman':
        cmd += 'pacman -R'
        if 'pacman' in options:
            opts = options['pacman']
    elif install_type == 'apk':
        cmd += 'apk del'
        # NOTE(review): this looks up options['apt'], not options['apk'] —
        # looks like a copy-paste slip, but preserved as-is; confirm intent.
        opts = options['apt'] if 'apt' in options else '-q'
    elif install_type == 'emerge':
        cmd += 'emerge -cav'
        if 'emerge' in options:
            opts = options['emerge']
    elif install_type == 'docker':
        cmd += 'docker rmi'
        if 'docker' in options:
            opts = options['docker']
    elif install_type == 'brew':
        cmd += 'brew uninstall'
        if 'brew' in options:
            opts = options['brew']
        else:
            opts += ' --force'
    else:
        # Unrecognised package manager - cannot remove.
        return False
    # Get mapped package.
    package = package_map.map_packages(self, package, self.current_environment.install_type)
    pw = self.get_sudo_pass_if_needed(shutit, ignore_brew=True)
    if pw != '':
        cmd = 'sudo ' + cmd
        res = self.multisend(ShutItSendSpec(self,
                                            send='%s %s %s' % (cmd, opts, package),
                                            send_dict={'assword:': [pw, True]},
                                            timeout=timeout,
                                            exit_values=['0', '100'],
                                            echo=echo,
                                            secret=True))
        if res == -1:
            # Should not happen
            assert False, shutit_util.print_debug()
    else:
        self.send(ShutItSendSpec(self,
                                 send='%s %s %s' % (cmd, opts, package),
                                 timeout=timeout,
                                 exit_values=['0', '100'],
                                 echo=echo,
                                 ignore_background=False,
                                 run_in_background=False,
                                 block_other_commands=True))
    shutit.handle_note_after(note=note)
    return True
|
def function[remove, parameter[self, package, echo, options, timeout, note]]:
constant[Distro-independent remove function.
Takes a package name and runs relevant remove function.
@param package: Package to remove, which is run through package_map.
@param options: Dict of options to pass to the remove command,
mapped by install_type.
@param timeout: See send(). Default: 3600
@param note: See send()
@return: True if all ok (i.e. the package was successfully removed),
False otherwise.
@rtype: boolean
]
variable[shutit] assign[=] name[self].shutit
if compare[name[note] not_equal[!=] constant[None]] begin[:]
call[name[shutit].handle_note, parameter[binary_operation[binary_operation[binary_operation[constant[Removing package: ] + name[package]] + constant[
]] + name[note]]]]
if compare[name[options] is constant[None]] begin[:]
variable[options] assign[=] dictionary[[], []]
variable[install_type] assign[=] name[self].current_environment.install_type
variable[cmd] assign[=] constant[]
if compare[name[install_type] equal[==] constant[src]] begin[:]
return[constant[True]]
if compare[name[install_type] equal[==] constant[apt]] begin[:]
<ast.AugAssign object at 0x7da18bc70340>
variable[opts] assign[=] <ast.IfExp object at 0x7da18bc709a0>
variable[package] assign[=] call[name[package_map].map_packages, parameter[name[self], name[package], name[self].current_environment.install_type]]
variable[pw] assign[=] call[name[self].get_sudo_pass_if_needed, parameter[name[shutit]]]
if compare[name[pw] not_equal[!=] constant[]] begin[:]
variable[cmd] assign[=] binary_operation[constant[sudo ] + name[cmd]]
variable[res] assign[=] call[name[self].multisend, parameter[call[name[ShutItSendSpec], parameter[name[self]]]]]
if compare[name[res] equal[==] <ast.UnaryOp object at 0x7da1b11f7eb0>] begin[:]
assert[constant[False]]
call[name[shutit].handle_note_after, parameter[]]
return[constant[True]]
|
keyword[def] identifier[remove] ( identifier[self] ,
identifier[package] ,
identifier[echo] = keyword[None] ,
identifier[options] = keyword[None] ,
identifier[timeout] = identifier[shutit_global] . identifier[shutit_global_object] . identifier[default_timeout] ,
identifier[note] = keyword[None] ):
literal[string]
identifier[shutit] = identifier[self] . identifier[shutit]
keyword[if] identifier[note] != keyword[None] :
identifier[shutit] . identifier[handle_note] ( literal[string] + identifier[package] + literal[string] + identifier[note] )
keyword[if] identifier[options] keyword[is] keyword[None] : identifier[options] ={}
identifier[install_type] = identifier[self] . identifier[current_environment] . identifier[install_type]
identifier[cmd] = literal[string]
keyword[if] identifier[install_type] == literal[string] :
keyword[return] keyword[True]
keyword[elif] identifier[install_type] == literal[string] :
identifier[shutit] . identifier[fail] ( literal[string] )
keyword[if] identifier[install_type] == literal[string] :
identifier[cmd] += literal[string]
identifier[opts] = identifier[options] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[options] keyword[else] literal[string]
keyword[elif] identifier[install_type] == literal[string] :
identifier[cmd] += literal[string]
identifier[opts] = identifier[options] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[options] keyword[else] literal[string]
keyword[elif] identifier[install_type] == literal[string] :
identifier[cmd] += literal[string]
keyword[if] literal[string] keyword[in] identifier[options] :
identifier[opts] = identifier[options] [ literal[string] ]
keyword[elif] identifier[install_type] == literal[string] :
identifier[cmd] += literal[string]
identifier[opts] = identifier[options] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[options] keyword[else] literal[string]
keyword[elif] identifier[install_type] == literal[string] :
identifier[cmd] += literal[string]
keyword[if] literal[string] keyword[in] identifier[options] :
identifier[opts] = identifier[options] [ literal[string] ]
keyword[elif] identifier[install_type] == literal[string] :
identifier[cmd] += literal[string]
keyword[if] literal[string] keyword[in] identifier[options] :
identifier[opts] = identifier[options] [ literal[string] ]
keyword[elif] identifier[install_type] == literal[string] :
identifier[cmd] += literal[string]
keyword[if] literal[string] keyword[in] identifier[options] :
identifier[opts] = identifier[options] [ literal[string] ]
keyword[else] :
identifier[opts] += literal[string]
keyword[else] :
keyword[return] keyword[False]
identifier[package] = identifier[package_map] . identifier[map_packages] ( identifier[self] , identifier[package] , identifier[self] . identifier[current_environment] . identifier[install_type] )
identifier[pw] = identifier[self] . identifier[get_sudo_pass_if_needed] ( identifier[shutit] , identifier[ignore_brew] = keyword[True] )
keyword[if] identifier[pw] != literal[string] :
identifier[cmd] = literal[string] + identifier[cmd]
identifier[res] = identifier[self] . identifier[multisend] ( identifier[ShutItSendSpec] ( identifier[self] ,
identifier[send] = literal[string] %( identifier[cmd] , identifier[opts] , identifier[package] ),
identifier[send_dict] ={ literal[string] :[ identifier[pw] , keyword[True] ]},
identifier[timeout] = identifier[timeout] ,
identifier[exit_values] =[ literal[string] , literal[string] ],
identifier[echo] = identifier[echo] ,
identifier[secret] = keyword[True] ))
keyword[if] identifier[res] ==- literal[int] :
keyword[assert] keyword[False] , identifier[shutit_util] . identifier[print_debug] ()
keyword[else] :
identifier[self] . identifier[send] ( identifier[ShutItSendSpec] ( identifier[self] ,
identifier[send] = literal[string] %( identifier[cmd] , identifier[opts] , identifier[package] ),
identifier[timeout] = identifier[timeout] ,
identifier[exit_values] =[ literal[string] , literal[string] ],
identifier[echo] = identifier[echo] ,
identifier[ignore_background] = keyword[False] ,
identifier[run_in_background] = keyword[False] ,
identifier[block_other_commands] = keyword[True] ))
identifier[shutit] . identifier[handle_note_after] ( identifier[note] = identifier[note] )
keyword[return] keyword[True]
|
def remove(self, package, echo=None, options=None, timeout=shutit_global.shutit_global_object.default_timeout, note=None):
"""Distro-independent remove function.
Takes a package name and runs relevant remove function.
@param package: Package to remove, which is run through package_map.
@param options: Dict of options to pass to the remove command,
mapped by install_type.
@param timeout: See send(). Default: 3600
@param note: See send()
@return: True if all ok (i.e. the package was successfully removed),
False otherwise.
@rtype: boolean
""" # If separated by spaces, remove separately
shutit = self.shutit
if note != None:
shutit.handle_note('Removing package: ' + package + '\n' + note) # depends on [control=['if'], data=['note']]
if options is None:
options = {} # depends on [control=['if'], data=['options']]
install_type = self.current_environment.install_type
cmd = ''
if install_type == 'src': # If this is a src build, we assume it's already installed.
return True # depends on [control=['if'], data=[]]
elif install_type == 'none': # If this is a none build, installing is invalid.
shutit.fail('ShutiIt has no install type for the identified OS, so cannot use install method. Inform maintainers if believed to be a bug.') # pragma: no cover # depends on [control=['if'], data=[]]
if install_type == 'apt':
cmd += 'apt-get purge'
opts = options['apt'] if 'apt' in options else '-qq -y' # depends on [control=['if'], data=[]]
elif install_type == 'yum':
cmd += 'yum erase'
opts = options['yum'] if 'yum' in options else '-y' # depends on [control=['if'], data=[]]
elif install_type == 'pacman':
cmd += 'pacman -R'
if 'pacman' in options:
opts = options['pacman'] # depends on [control=['if'], data=['options']] # depends on [control=['if'], data=[]]
elif install_type == 'apk':
cmd += 'apk del'
opts = options['apt'] if 'apt' in options else '-q' # depends on [control=['if'], data=[]]
elif install_type == 'emerge':
cmd += 'emerge -cav'
if 'emerge' in options:
opts = options['emerge'] # depends on [control=['if'], data=['options']] # depends on [control=['if'], data=[]]
elif install_type == 'docker':
cmd += 'docker rmi'
if 'docker' in options:
opts = options['docker'] # depends on [control=['if'], data=['options']] # depends on [control=['if'], data=[]]
elif install_type == 'brew':
cmd += 'brew uninstall'
if 'brew' in options:
opts = options['brew'] # depends on [control=['if'], data=['options']]
else:
opts += ' --force' # depends on [control=['if'], data=[]]
else: # Not handled
return False # Get mapped package.
package = package_map.map_packages(self, package, self.current_environment.install_type)
pw = self.get_sudo_pass_if_needed(shutit, ignore_brew=True)
if pw != '':
cmd = 'sudo ' + cmd
res = self.multisend(ShutItSendSpec(self, send='%s %s %s' % (cmd, opts, package), send_dict={'assword:': [pw, True]}, timeout=timeout, exit_values=['0', '100'], echo=echo, secret=True))
if res == -1: # Should not happen
assert False, shutit_util.print_debug() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['pw']]
else:
self.send(ShutItSendSpec(self, send='%s %s %s' % (cmd, opts, package), timeout=timeout, exit_values=['0', '100'], echo=echo, ignore_background=False, run_in_background=False, block_other_commands=True))
shutit.handle_note_after(note=note)
return True
|
def parse(cls, fptr, offset, length):
    """Parse a data reference box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    DataReferenceBox
        Instance of the current data reference box.
    """
    remaining = offset + length - fptr.tell()
    payload = fptr.read(remaining)

    # The first two bytes hold the number of embedded data-entry URL boxes.
    num_refs, = struct.unpack_from('>H', payload, offset=0)

    url_boxes = []
    cursor = 2  # byte position of the next URL box within the payload
    for _ in range(num_refs):
        # Parse each URL box from an in-memory stream over its bytes.
        stream = io.BytesIO(payload[cursor:])
        header = stream.read(8)
        box_length, box_id = struct.unpack_from('>I4s', header, offset=0)

        url_box = DataEntryURLBox.parse(stream, 0, box_length)
        # Rebase the box start onto its position in the "real" file.
        url_box.offset = offset + 8 + cursor
        url_boxes.append(url_box)

        # Advance to the next embedded URL box.
        cursor += box_length

    return cls(url_boxes, length=length, offset=offset)
|
def function[parse, parameter[cls, fptr, offset, length]]:
constant[Parse data reference box.
Parameters
----------
fptr : file
Open file object.
offset : int
Start position of box in bytes.
length : int
Length of the box in bytes.
Returns
-------
DataReferenceBox
Instance of the current data reference box.
]
variable[num_bytes] assign[=] binary_operation[binary_operation[name[offset] + name[length]] - call[name[fptr].tell, parameter[]]]
variable[read_buffer] assign[=] call[name[fptr].read, parameter[name[num_bytes]]]
<ast.Tuple object at 0x7da20c6e51e0> assign[=] call[name[struct].unpack_from, parameter[constant[>H], name[read_buffer]]]
variable[box_offset] assign[=] constant[2]
variable[data_entry_url_box_list] assign[=] list[[]]
for taget[name[j]] in starred[call[name[range], parameter[name[ndr]]]] begin[:]
variable[box_fptr] assign[=] call[name[io].BytesIO, parameter[call[name[read_buffer]][<ast.Slice object at 0x7da20c6e7b20>]]]
variable[box_buffer] assign[=] call[name[box_fptr].read, parameter[constant[8]]]
<ast.Tuple object at 0x7da20c6e4430> assign[=] call[name[struct].unpack_from, parameter[constant[>I4s], name[box_buffer]]]
variable[box] assign[=] call[name[DataEntryURLBox].parse, parameter[name[box_fptr], constant[0], name[box_length]]]
name[box].offset assign[=] binary_operation[binary_operation[name[offset] + constant[8]] + name[box_offset]]
call[name[data_entry_url_box_list].append, parameter[name[box]]]
<ast.AugAssign object at 0x7da20c6e6320>
return[call[name[cls], parameter[name[data_entry_url_box_list]]]]
|
keyword[def] identifier[parse] ( identifier[cls] , identifier[fptr] , identifier[offset] , identifier[length] ):
literal[string]
identifier[num_bytes] = identifier[offset] + identifier[length] - identifier[fptr] . identifier[tell] ()
identifier[read_buffer] = identifier[fptr] . identifier[read] ( identifier[num_bytes] )
identifier[ndr] ,= identifier[struct] . identifier[unpack_from] ( literal[string] , identifier[read_buffer] , identifier[offset] = literal[int] )
identifier[box_offset] = literal[int]
identifier[data_entry_url_box_list] =[]
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[ndr] ):
identifier[box_fptr] = identifier[io] . identifier[BytesIO] ( identifier[read_buffer] [ identifier[box_offset] :])
identifier[box_buffer] = identifier[box_fptr] . identifier[read] ( literal[int] )
( identifier[box_length] , identifier[box_id] )= identifier[struct] . identifier[unpack_from] ( literal[string] , identifier[box_buffer] ,
identifier[offset] = literal[int] )
identifier[box] = identifier[DataEntryURLBox] . identifier[parse] ( identifier[box_fptr] , literal[int] , identifier[box_length] )
identifier[box] . identifier[offset] = identifier[offset] + literal[int] + identifier[box_offset]
identifier[data_entry_url_box_list] . identifier[append] ( identifier[box] )
identifier[box_offset] += identifier[box_length]
keyword[return] identifier[cls] ( identifier[data_entry_url_box_list] , identifier[length] = identifier[length] , identifier[offset] = identifier[offset] )
|
def parse(cls, fptr, offset, length):
"""Parse data reference box.
Parameters
----------
fptr : file
Open file object.
offset : int
Start position of box in bytes.
length : int
Length of the box in bytes.
Returns
-------
DataReferenceBox
Instance of the current data reference box.
"""
num_bytes = offset + length - fptr.tell()
read_buffer = fptr.read(num_bytes)
# Read the number of data references
(ndr,) = struct.unpack_from('>H', read_buffer, offset=0)
# Need to keep track of where the next url box starts.
box_offset = 2
data_entry_url_box_list = []
for j in range(ndr):
# Create an in-memory binary stream for each URL box.
box_fptr = io.BytesIO(read_buffer[box_offset:])
box_buffer = box_fptr.read(8)
(box_length, box_id) = struct.unpack_from('>I4s', box_buffer, offset=0)
box = DataEntryURLBox.parse(box_fptr, 0, box_length)
# Need to adjust the box start to that of the "real" file.
box.offset = offset + 8 + box_offset
data_entry_url_box_list.append(box)
# Point to the next embedded URL box.
box_offset += box_length # depends on [control=['for'], data=[]]
return cls(data_entry_url_box_list, length=length, offset=offset)
|
def get_base_naive(cls, name, ignore=''):
    """Detect a possible base name (no prefix, no suffix) within ``name``.

    A full regex parse is required to rule out all the other possible
    parse queries before accepting a basename match.

    :param name: str, string that represents a possible name of an object
    :param ignore: str, pattern fragments to exclude from matching
    :return: str, the detected basename
    """
    basename_match = cls._get_regex_search(
        name, cls.REGEX_BASENAME, match_index=0, ignore=ignore
    )
    return basename_match
|
def function[get_base_naive, parameter[cls, name, ignore]]:
constant[ Checks a string for a possible base name of an object (no prefix, no suffix).
We need to do a full parse to make sure we've ruled out all the possible other parse queries
:param name: str, string that represents a possible name of an object
:return: str, the detected basename
]
return[call[name[cls]._get_regex_search, parameter[name[name], name[cls].REGEX_BASENAME]]]
|
keyword[def] identifier[get_base_naive] ( identifier[cls] , identifier[name] , identifier[ignore] = literal[string] ):
literal[string]
keyword[return] identifier[cls] . identifier[_get_regex_search] ( identifier[name] , identifier[cls] . identifier[REGEX_BASENAME] , identifier[match_index] = literal[int] , identifier[ignore] = identifier[ignore] )
|
def get_base_naive(cls, name, ignore=''):
""" Checks a string for a possible base name of an object (no prefix, no suffix).
We need to do a full parse to make sure we've ruled out all the possible other parse queries
:param name: str, string that represents a possible name of an object
:return: str, the detected basename
"""
return cls._get_regex_search(name, cls.REGEX_BASENAME, match_index=0, ignore=ignore)
|
def getUmis(self, n):
    """Return ``n`` umis taken from the ``random_umis`` attribute.

    When fewer than ``n`` pre-generated umis remain, the random pool is
    regenerated rather than consuming its tail (and enlarged first when
    ``n`` would not fit at all).
    """
    remaining = self.random_fill_size - self.random_ix
    if n >= remaining:
        # Not enough umis left in the current pool; rebuild it.
        if n > self.random_fill_size:
            # Ensure the regenerated pool is long enough for this request.
            self.random_fill_size = n * 2
        self.refill_random()
    barcodes = self.random_umis[self.random_ix:self.random_ix + n]
    self.random_ix += n
    return barcodes
|
def function[getUmis, parameter[self, n]]:
constant[ return n umis from the random_umis atr.]
if compare[name[n] less[<] binary_operation[name[self].random_fill_size - name[self].random_ix]] begin[:]
variable[barcodes] assign[=] call[name[self].random_umis][<ast.Slice object at 0x7da207f02620>]
<ast.AugAssign object at 0x7da207f019f0>
return[name[barcodes]]
|
keyword[def] identifier[getUmis] ( identifier[self] , identifier[n] ):
literal[string]
keyword[if] identifier[n] <( identifier[self] . identifier[random_fill_size] - identifier[self] . identifier[random_ix] ):
identifier[barcodes] = identifier[self] . identifier[random_umis] [ identifier[self] . identifier[random_ix] : identifier[self] . identifier[random_ix] + identifier[n] ]
keyword[else] :
keyword[if] identifier[n] > identifier[self] . identifier[random_fill_size] :
identifier[self] . identifier[random_fill_size] = identifier[n] * literal[int]
identifier[self] . identifier[refill_random] ()
identifier[barcodes] = identifier[self] . identifier[random_umis] [ identifier[self] . identifier[random_ix] : identifier[self] . identifier[random_ix] + identifier[n] ]
identifier[self] . identifier[random_ix] += identifier[n]
keyword[return] identifier[barcodes]
|
def getUmis(self, n):
""" return n umis from the random_umis atr."""
if n < self.random_fill_size - self.random_ix:
barcodes = self.random_umis[self.random_ix:self.random_ix + n] # depends on [control=['if'], data=['n']]
else:
# could use the end of the random_umis but
# let's just make a new random_umis
if n > self.random_fill_size: # ensure random_umis is long enough
self.random_fill_size = n * 2 # depends on [control=['if'], data=['n']]
self.refill_random()
barcodes = self.random_umis[self.random_ix:self.random_ix + n]
self.random_ix += n
return barcodes
|
def _analyze_variable_attributes(self, attributes):
    """
    Analyze event variable attributes.

    :param attributes: The event variable attributes to parse.
    :return: None
    """
    # EAFP: pick up the 'indexed' flag when the attribute dict carries one.
    try:
        self._indexed = attributes['indexed']
    except KeyError:
        pass
    # Delegate the remaining attribute handling to the base class.
    super(EventVariableSolc, self)._analyze_variable_attributes(attributes)
|
def function[_analyze_variable_attributes, parameter[self, attributes]]:
constant[
Analyze event variable attributes
:param attributes: The event variable attributes to parse.
:return: None
]
if compare[constant[indexed] in name[attributes]] begin[:]
name[self]._indexed assign[=] call[name[attributes]][constant[indexed]]
call[call[name[super], parameter[name[EventVariableSolc], name[self]]]._analyze_variable_attributes, parameter[name[attributes]]]
|
keyword[def] identifier[_analyze_variable_attributes] ( identifier[self] , identifier[attributes] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[attributes] :
identifier[self] . identifier[_indexed] = identifier[attributes] [ literal[string] ]
identifier[super] ( identifier[EventVariableSolc] , identifier[self] ). identifier[_analyze_variable_attributes] ( identifier[attributes] )
|
def _analyze_variable_attributes(self, attributes):
"""
Analyze event variable attributes
:param attributes: The event variable attributes to parse.
:return: None
"""
# Check for the indexed attribute
if 'indexed' in attributes:
self._indexed = attributes['indexed'] # depends on [control=['if'], data=['attributes']]
super(EventVariableSolc, self)._analyze_variable_attributes(attributes)
|
def get_source_models(oqparam, gsim_lt, source_model_lt, monitor,
                      in_memory=True, srcfilter=None):
    """
    Build all the source models generated by the logic tree.

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param gsim_lt:
        a :class:`openquake.commonlib.logictree.GsimLogicTree` instance
    :param source_model_lt:
        a :class:`openquake.commonlib.logictree.SourceModelLogicTree` instance
    :param monitor:
        a `openquake.baselib.performance.Monitor` instance
    :param in_memory:
        if True, keep in memory the sources, else just collect the TRTs
    :param srcfilter:
        a SourceFilter instance with an .filename pointing to the cache file
    :returns:
        an iterator over :class:`openquake.commonlib.logictree.LtSourceModel`
        tuples
    :raises ValueError:
        if there are TWO16 or more source groups, if applyToSources
        references a source ID absent from the source model, or if a
        source group has a TRT unknown to the gsim logic tree
    """
    # make_sm tracks per-file hit counts (fname_hits) and the number of
    # applied changes (changes); both are reported after the main loop.
    make_sm = SourceModelFactory()
    # pointsource_distance == {'default': 0.0} means the nodal-plane and
    # hypocenter distributions are removed (see the log message below),
    # which is signalled to the SourceConverter via `not spinning_off`.
    spinning_off = oqparam.pointsource_distance == {'default': 0.0}
    if spinning_off:
        logging.info('Removing nodal plane and hypocenter distributions')
    # honor OQ_DISTRIBUTE=no; otherwise read the source files in a
    # process pool
    dist = 'no' if os.environ.get('OQ_DISTRIBUTE') == 'no' else 'processpool'
    smlt_dir = os.path.dirname(source_model_lt.filename)
    converter = sourceconverter.SourceConverter(
        oqparam.investigation_time,
        oqparam.rupture_mesh_spacing,
        oqparam.complex_fault_mesh_spacing,
        oqparam.width_of_mfd_bin,
        oqparam.area_source_discretization,
        oqparam.minimum_magnitude,
        not spinning_off,
        oqparam.source_id)
    if oqparam.calculation_mode.startswith('ucerf'):
        # UCERF: exactly one source group is expected in the input file
        [grp] = nrml.to_python(oqparam.inputs["source_model"], converter)
    elif in_memory:
        # parse every source model file in parallel and collect the
        # results keyed by file name for the main loop below
        logging.info('Reading the source model(s) in parallel')
        smap = parallel.Starmap(
            nrml.read_source_models, monitor=monitor, distribute=dist)
        for sm in source_model_lt.gen_source_models(gsim_lt):
            for name in sm.names.split():
                fname = os.path.abspath(os.path.join(smlt_dir, name))
                smap.submit([fname], converter)
        dic = {sm.fname: sm for sm in smap}
    # consider only the effective realizations
    nr = 0       # total number of ruptures (for the final log message)
    idx = 0      # global source index, unique across all groups
    grp_id = 0   # global source group index, unique across all models
    if monitor.hdf5:
        sources = hdf5.create(monitor.hdf5, 'source_info', source_info_dt)
        hdf5.create(monitor.hdf5, 'source_geom', point3d)
        # the cache filename is only used when prefiltering is disabled
        filename = (getattr(srcfilter, 'filename', None)
                    if oqparam.prefilter_sources == 'no' else None)
    source_ids = set()  # IDs seen so far, for the applyToSources check
    for sm in source_model_lt.gen_source_models(gsim_lt):
        apply_unc = functools.partial(
            source_model_lt.apply_uncertainties, sm.path)
        src_groups = []
        for name in sm.names.split():
            fname = os.path.abspath(os.path.join(smlt_dir, name))
            if oqparam.calculation_mode.startswith('ucerf'):
                # build a fresh group around a single UCERF source derived
                # from the shared `grp` parsed above
                sg = copy.copy(grp)
                sg.id = grp_id
                src = sg[0].new(sm.ordinal, sm.names)  # one source
                source_ids.add(src.source_id)
                src.src_group_id = grp_id
                src.id = idx
                if oqparam.number_of_logic_tree_samples:
                    src.samples = sm.samples
                sg.sources = [src]
                src_groups.append(sg)
                idx += 1
                grp_id += 1
                data = [((sg.id, src.source_id, src.code, 0, 0,
                          src.num_ruptures, 0, 0, 0))]
                hdf5.extend(sources, numpy.array(data, source_info_dt))
            elif in_memory:
                # apply the logic-tree uncertainties to the parsed model
                newsm = make_sm(fname, dic[fname], apply_unc,
                                oqparam.investigation_time)
                for sg in newsm:
                    nr += sum(src.num_ruptures for src in sg)
                    # sample a source for each group
                    if os.environ.get('OQ_SAMPLE_SOURCES'):
                        sg.sources = random_filtered_sources(
                            sg.sources, srcfilter, sg.id + oqparam.random_seed)
                    for src in sg:
                        source_ids.add(src.source_id)
                        src.src_group_id = grp_id
                        src.id = idx
                        idx += 1
                    sg.id = grp_id
                    grp_id += 1
                    src_groups.append(sg)
                if monitor.hdf5:
                    store_sm(newsm, filename, monitor)
            else:  # just collect the TRT models
                groups = logictree.read_source_groups(fname)
                for group in groups:
                    source_ids.update(src['id'] for src in group)
                src_groups.extend(groups)
        if grp_id >= TWO16:
            # the limit is really needed only for event based calculations
            raise ValueError('There is a limit of %d src groups!' % TWO16)
        # sanity check: every source referenced by an applyToSources
        # branch must exist in the source model read so far
        for brid, srcids in source_model_lt.info.applytosources.items():
            for srcid in srcids:
                if srcid not in source_ids:
                    raise ValueError(
                        'The source %s is not in the source model, please fix '
                        'applyToSources in %s or the source model' %
                        (srcid, source_model_lt.filename))
        num_sources = sum(len(sg.sources) for sg in src_groups)
        sm.src_groups = src_groups
        trts = [mod.trt for mod in src_groups]
        source_model_lt.tectonic_region_types.update(trts)
        logging.info(
            'Processed source model %d with %d gsim path(s) and %d '
            'sources', sm.ordinal + 1, sm.num_gsim_paths, num_sources)
        gsim_file = oqparam.inputs.get('gsim_logic_tree')
        if gsim_file:  # check TRTs
            for src_group in src_groups:
                if src_group.trt not in gsim_lt.values:
                    raise ValueError(
                        "Found in %r a tectonic region type %r inconsistent "
                        "with the ones in %r" % (sm, src_group.trt, gsim_file))
        yield sm
    logging.info('The composite source model has {:,d} ruptures'.format(nr))
    # log if some source file is being used more than once
    dupl = 0
    for fname, hits in make_sm.fname_hits.items():
        if hits > 1:
            logging.info('%s has been considered %d times', fname, hits)
            if not make_sm.changes:
                dupl += hits
    if (dupl and not oqparam.optimize_same_id_sources and
            not oqparam.is_event_based()):
        logging.warning(
            'You are doing redundant calculations: please make sure '
            'that different sources have different IDs and set '
            'optimize_same_id_sources=true in your .ini file')
    if make_sm.changes:
        logging.info('Applied %d changes to the composite source model',
                     make_sm.changes)
|
def function[get_source_models, parameter[oqparam, gsim_lt, source_model_lt, monitor, in_memory, srcfilter]]:
constant[
Build all the source models generated by the logic tree.
:param oqparam:
an :class:`openquake.commonlib.oqvalidation.OqParam` instance
:param gsim_lt:
a :class:`openquake.commonlib.logictree.GsimLogicTree` instance
:param source_model_lt:
a :class:`openquake.commonlib.logictree.SourceModelLogicTree` instance
:param monitor:
a `openquake.baselib.performance.Monitor` instance
:param in_memory:
if True, keep in memory the sources, else just collect the TRTs
:param srcfilter:
a SourceFilter instance with an .filename pointing to the cache file
:returns:
an iterator over :class:`openquake.commonlib.logictree.LtSourceModel`
tuples
]
variable[make_sm] assign[=] call[name[SourceModelFactory], parameter[]]
variable[spinning_off] assign[=] compare[name[oqparam].pointsource_distance equal[==] dictionary[[<ast.Constant object at 0x7da1b138c2b0>], [<ast.Constant object at 0x7da1b138e200>]]]
if name[spinning_off] begin[:]
call[name[logging].info, parameter[constant[Removing nodal plane and hypocenter distributions]]]
variable[dist] assign[=] <ast.IfExp object at 0x7da1b138c280>
variable[smlt_dir] assign[=] call[name[os].path.dirname, parameter[name[source_model_lt].filename]]
variable[converter] assign[=] call[name[sourceconverter].SourceConverter, parameter[name[oqparam].investigation_time, name[oqparam].rupture_mesh_spacing, name[oqparam].complex_fault_mesh_spacing, name[oqparam].width_of_mfd_bin, name[oqparam].area_source_discretization, name[oqparam].minimum_magnitude, <ast.UnaryOp object at 0x7da1b138e950>, name[oqparam].source_id]]
if call[name[oqparam].calculation_mode.startswith, parameter[constant[ucerf]]] begin[:]
<ast.List object at 0x7da1b138da50> assign[=] call[name[nrml].to_python, parameter[call[name[oqparam].inputs][constant[source_model]], name[converter]]]
variable[nr] assign[=] constant[0]
variable[idx] assign[=] constant[0]
variable[grp_id] assign[=] constant[0]
if name[monitor].hdf5 begin[:]
variable[sources] assign[=] call[name[hdf5].create, parameter[name[monitor].hdf5, constant[source_info], name[source_info_dt]]]
call[name[hdf5].create, parameter[name[monitor].hdf5, constant[source_geom], name[point3d]]]
variable[filename] assign[=] <ast.IfExp object at 0x7da1b138f820>
variable[source_ids] assign[=] call[name[set], parameter[]]
for taget[name[sm]] in starred[call[name[source_model_lt].gen_source_models, parameter[name[gsim_lt]]]] begin[:]
variable[apply_unc] assign[=] call[name[functools].partial, parameter[name[source_model_lt].apply_uncertainties, name[sm].path]]
variable[src_groups] assign[=] list[[]]
for taget[name[name]] in starred[call[name[sm].names.split, parameter[]]] begin[:]
variable[fname] assign[=] call[name[os].path.abspath, parameter[call[name[os].path.join, parameter[name[smlt_dir], name[name]]]]]
if call[name[oqparam].calculation_mode.startswith, parameter[constant[ucerf]]] begin[:]
variable[sg] assign[=] call[name[copy].copy, parameter[name[grp]]]
name[sg].id assign[=] name[grp_id]
variable[src] assign[=] call[call[name[sg]][constant[0]].new, parameter[name[sm].ordinal, name[sm].names]]
call[name[source_ids].add, parameter[name[src].source_id]]
name[src].src_group_id assign[=] name[grp_id]
name[src].id assign[=] name[idx]
if name[oqparam].number_of_logic_tree_samples begin[:]
name[src].samples assign[=] name[sm].samples
name[sg].sources assign[=] list[[<ast.Name object at 0x7da207f02950>]]
call[name[src_groups].append, parameter[name[sg]]]
<ast.AugAssign object at 0x7da207f01d50>
<ast.AugAssign object at 0x7da207f01360>
variable[data] assign[=] list[[<ast.Tuple object at 0x7da207f037c0>]]
call[name[hdf5].extend, parameter[name[sources], call[name[numpy].array, parameter[name[data], name[source_info_dt]]]]]
if compare[name[grp_id] greater_or_equal[>=] name[TWO16]] begin[:]
<ast.Raise object at 0x7da1b1305270>
for taget[tuple[[<ast.Name object at 0x7da1b14c7370>, <ast.Name object at 0x7da1b14c4e80>]]] in starred[call[name[source_model_lt].info.applytosources.items, parameter[]]] begin[:]
for taget[name[srcid]] in starred[name[srcids]] begin[:]
if compare[name[srcid] <ast.NotIn object at 0x7da2590d7190> name[source_ids]] begin[:]
<ast.Raise object at 0x7da1b14c7520>
variable[num_sources] assign[=] call[name[sum], parameter[<ast.GeneratorExp object at 0x7da1b14c7d90>]]
name[sm].src_groups assign[=] name[src_groups]
variable[trts] assign[=] <ast.ListComp object at 0x7da1b133c3a0>
call[name[source_model_lt].tectonic_region_types.update, parameter[name[trts]]]
call[name[logging].info, parameter[constant[Processed source model %d with %d gsim path(s) and %d sources], binary_operation[name[sm].ordinal + constant[1]], name[sm].num_gsim_paths, name[num_sources]]]
variable[gsim_file] assign[=] call[name[oqparam].inputs.get, parameter[constant[gsim_logic_tree]]]
if name[gsim_file] begin[:]
for taget[name[src_group]] in starred[name[src_groups]] begin[:]
if compare[name[src_group].trt <ast.NotIn object at 0x7da2590d7190> name[gsim_lt].values] begin[:]
<ast.Raise object at 0x7da1b133d3f0>
<ast.Yield object at 0x7da1b133f100>
call[name[logging].info, parameter[call[constant[The composite source model has {:,d} ruptures].format, parameter[name[nr]]]]]
variable[dupl] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b133ecb0>, <ast.Name object at 0x7da1b15f3760>]]] in starred[call[name[make_sm].fname_hits.items, parameter[]]] begin[:]
if compare[name[hits] greater[>] constant[1]] begin[:]
call[name[logging].info, parameter[constant[%s has been considered %d times], name[fname], name[hits]]]
if <ast.UnaryOp object at 0x7da1b15f27a0> begin[:]
<ast.AugAssign object at 0x7da1b15f2dd0>
if <ast.BoolOp object at 0x7da1b15f3580> begin[:]
call[name[logging].warning, parameter[constant[You are doing redundant calculations: please make sure that different sources have different IDs and set optimize_same_id_sources=true in your .ini file]]]
if name[make_sm].changes begin[:]
call[name[logging].info, parameter[constant[Applied %d changes to the composite source model], name[make_sm].changes]]
|
keyword[def] identifier[get_source_models] ( identifier[oqparam] , identifier[gsim_lt] , identifier[source_model_lt] , identifier[monitor] ,
identifier[in_memory] = keyword[True] , identifier[srcfilter] = keyword[None] ):
literal[string]
identifier[make_sm] = identifier[SourceModelFactory] ()
identifier[spinning_off] = identifier[oqparam] . identifier[pointsource_distance] =={ literal[string] : literal[int] }
keyword[if] identifier[spinning_off] :
identifier[logging] . identifier[info] ( literal[string] )
identifier[dist] = literal[string] keyword[if] identifier[os] . identifier[environ] . identifier[get] ( literal[string] )== literal[string] keyword[else] literal[string]
identifier[smlt_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[source_model_lt] . identifier[filename] )
identifier[converter] = identifier[sourceconverter] . identifier[SourceConverter] (
identifier[oqparam] . identifier[investigation_time] ,
identifier[oqparam] . identifier[rupture_mesh_spacing] ,
identifier[oqparam] . identifier[complex_fault_mesh_spacing] ,
identifier[oqparam] . identifier[width_of_mfd_bin] ,
identifier[oqparam] . identifier[area_source_discretization] ,
identifier[oqparam] . identifier[minimum_magnitude] ,
keyword[not] identifier[spinning_off] ,
identifier[oqparam] . identifier[source_id] )
keyword[if] identifier[oqparam] . identifier[calculation_mode] . identifier[startswith] ( literal[string] ):
[ identifier[grp] ]= identifier[nrml] . identifier[to_python] ( identifier[oqparam] . identifier[inputs] [ literal[string] ], identifier[converter] )
keyword[elif] identifier[in_memory] :
identifier[logging] . identifier[info] ( literal[string] )
identifier[smap] = identifier[parallel] . identifier[Starmap] (
identifier[nrml] . identifier[read_source_models] , identifier[monitor] = identifier[monitor] , identifier[distribute] = identifier[dist] )
keyword[for] identifier[sm] keyword[in] identifier[source_model_lt] . identifier[gen_source_models] ( identifier[gsim_lt] ):
keyword[for] identifier[name] keyword[in] identifier[sm] . identifier[names] . identifier[split] ():
identifier[fname] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[smlt_dir] , identifier[name] ))
identifier[smap] . identifier[submit] ([ identifier[fname] ], identifier[converter] )
identifier[dic] ={ identifier[sm] . identifier[fname] : identifier[sm] keyword[for] identifier[sm] keyword[in] identifier[smap] }
identifier[nr] = literal[int]
identifier[idx] = literal[int]
identifier[grp_id] = literal[int]
keyword[if] identifier[monitor] . identifier[hdf5] :
identifier[sources] = identifier[hdf5] . identifier[create] ( identifier[monitor] . identifier[hdf5] , literal[string] , identifier[source_info_dt] )
identifier[hdf5] . identifier[create] ( identifier[monitor] . identifier[hdf5] , literal[string] , identifier[point3d] )
identifier[filename] =( identifier[getattr] ( identifier[srcfilter] , literal[string] , keyword[None] )
keyword[if] identifier[oqparam] . identifier[prefilter_sources] == literal[string] keyword[else] keyword[None] )
identifier[source_ids] = identifier[set] ()
keyword[for] identifier[sm] keyword[in] identifier[source_model_lt] . identifier[gen_source_models] ( identifier[gsim_lt] ):
identifier[apply_unc] = identifier[functools] . identifier[partial] (
identifier[source_model_lt] . identifier[apply_uncertainties] , identifier[sm] . identifier[path] )
identifier[src_groups] =[]
keyword[for] identifier[name] keyword[in] identifier[sm] . identifier[names] . identifier[split] ():
identifier[fname] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[smlt_dir] , identifier[name] ))
keyword[if] identifier[oqparam] . identifier[calculation_mode] . identifier[startswith] ( literal[string] ):
identifier[sg] = identifier[copy] . identifier[copy] ( identifier[grp] )
identifier[sg] . identifier[id] = identifier[grp_id]
identifier[src] = identifier[sg] [ literal[int] ]. identifier[new] ( identifier[sm] . identifier[ordinal] , identifier[sm] . identifier[names] )
identifier[source_ids] . identifier[add] ( identifier[src] . identifier[source_id] )
identifier[src] . identifier[src_group_id] = identifier[grp_id]
identifier[src] . identifier[id] = identifier[idx]
keyword[if] identifier[oqparam] . identifier[number_of_logic_tree_samples] :
identifier[src] . identifier[samples] = identifier[sm] . identifier[samples]
identifier[sg] . identifier[sources] =[ identifier[src] ]
identifier[src_groups] . identifier[append] ( identifier[sg] )
identifier[idx] += literal[int]
identifier[grp_id] += literal[int]
identifier[data] =[(( identifier[sg] . identifier[id] , identifier[src] . identifier[source_id] , identifier[src] . identifier[code] , literal[int] , literal[int] ,
identifier[src] . identifier[num_ruptures] , literal[int] , literal[int] , literal[int] ))]
identifier[hdf5] . identifier[extend] ( identifier[sources] , identifier[numpy] . identifier[array] ( identifier[data] , identifier[source_info_dt] ))
keyword[elif] identifier[in_memory] :
identifier[newsm] = identifier[make_sm] ( identifier[fname] , identifier[dic] [ identifier[fname] ], identifier[apply_unc] ,
identifier[oqparam] . identifier[investigation_time] )
keyword[for] identifier[sg] keyword[in] identifier[newsm] :
identifier[nr] += identifier[sum] ( identifier[src] . identifier[num_ruptures] keyword[for] identifier[src] keyword[in] identifier[sg] )
keyword[if] identifier[os] . identifier[environ] . identifier[get] ( literal[string] ):
identifier[sg] . identifier[sources] = identifier[random_filtered_sources] (
identifier[sg] . identifier[sources] , identifier[srcfilter] , identifier[sg] . identifier[id] + identifier[oqparam] . identifier[random_seed] )
keyword[for] identifier[src] keyword[in] identifier[sg] :
identifier[source_ids] . identifier[add] ( identifier[src] . identifier[source_id] )
identifier[src] . identifier[src_group_id] = identifier[grp_id]
identifier[src] . identifier[id] = identifier[idx]
identifier[idx] += literal[int]
identifier[sg] . identifier[id] = identifier[grp_id]
identifier[grp_id] += literal[int]
identifier[src_groups] . identifier[append] ( identifier[sg] )
keyword[if] identifier[monitor] . identifier[hdf5] :
identifier[store_sm] ( identifier[newsm] , identifier[filename] , identifier[monitor] )
keyword[else] :
identifier[groups] = identifier[logictree] . identifier[read_source_groups] ( identifier[fname] )
keyword[for] identifier[group] keyword[in] identifier[groups] :
identifier[source_ids] . identifier[update] ( identifier[src] [ literal[string] ] keyword[for] identifier[src] keyword[in] identifier[group] )
identifier[src_groups] . identifier[extend] ( identifier[groups] )
keyword[if] identifier[grp_id] >= identifier[TWO16] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[TWO16] )
keyword[for] identifier[brid] , identifier[srcids] keyword[in] identifier[source_model_lt] . identifier[info] . identifier[applytosources] . identifier[items] ():
keyword[for] identifier[srcid] keyword[in] identifier[srcids] :
keyword[if] identifier[srcid] keyword[not] keyword[in] identifier[source_ids] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] %
( identifier[srcid] , identifier[source_model_lt] . identifier[filename] ))
identifier[num_sources] = identifier[sum] ( identifier[len] ( identifier[sg] . identifier[sources] ) keyword[for] identifier[sg] keyword[in] identifier[src_groups] )
identifier[sm] . identifier[src_groups] = identifier[src_groups]
identifier[trts] =[ identifier[mod] . identifier[trt] keyword[for] identifier[mod] keyword[in] identifier[src_groups] ]
identifier[source_model_lt] . identifier[tectonic_region_types] . identifier[update] ( identifier[trts] )
identifier[logging] . identifier[info] (
literal[string]
literal[string] , identifier[sm] . identifier[ordinal] + literal[int] , identifier[sm] . identifier[num_gsim_paths] , identifier[num_sources] )
identifier[gsim_file] = identifier[oqparam] . identifier[inputs] . identifier[get] ( literal[string] )
keyword[if] identifier[gsim_file] :
keyword[for] identifier[src_group] keyword[in] identifier[src_groups] :
keyword[if] identifier[src_group] . identifier[trt] keyword[not] keyword[in] identifier[gsim_lt] . identifier[values] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] %( identifier[sm] , identifier[src_group] . identifier[trt] , identifier[gsim_file] ))
keyword[yield] identifier[sm]
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[nr] ))
identifier[dupl] = literal[int]
keyword[for] identifier[fname] , identifier[hits] keyword[in] identifier[make_sm] . identifier[fname_hits] . identifier[items] ():
keyword[if] identifier[hits] > literal[int] :
identifier[logging] . identifier[info] ( literal[string] , identifier[fname] , identifier[hits] )
keyword[if] keyword[not] identifier[make_sm] . identifier[changes] :
identifier[dupl] += identifier[hits]
keyword[if] ( identifier[dupl] keyword[and] keyword[not] identifier[oqparam] . identifier[optimize_same_id_sources] keyword[and]
keyword[not] identifier[oqparam] . identifier[is_event_based] ()):
identifier[logging] . identifier[warning] (
literal[string]
literal[string]
literal[string] )
keyword[if] identifier[make_sm] . identifier[changes] :
identifier[logging] . identifier[info] ( literal[string] ,
identifier[make_sm] . identifier[changes] )
|
def get_source_models(oqparam, gsim_lt, source_model_lt, monitor, in_memory=True, srcfilter=None):
"""
Build all the source models generated by the logic tree.
:param oqparam:
an :class:`openquake.commonlib.oqvalidation.OqParam` instance
:param gsim_lt:
a :class:`openquake.commonlib.logictree.GsimLogicTree` instance
:param source_model_lt:
a :class:`openquake.commonlib.logictree.SourceModelLogicTree` instance
:param monitor:
a `openquake.baselib.performance.Monitor` instance
:param in_memory:
if True, keep in memory the sources, else just collect the TRTs
:param srcfilter:
a SourceFilter instance with an .filename pointing to the cache file
:returns:
an iterator over :class:`openquake.commonlib.logictree.LtSourceModel`
tuples
"""
make_sm = SourceModelFactory()
spinning_off = oqparam.pointsource_distance == {'default': 0.0}
if spinning_off:
logging.info('Removing nodal plane and hypocenter distributions') # depends on [control=['if'], data=[]]
dist = 'no' if os.environ.get('OQ_DISTRIBUTE') == 'no' else 'processpool'
smlt_dir = os.path.dirname(source_model_lt.filename)
converter = sourceconverter.SourceConverter(oqparam.investigation_time, oqparam.rupture_mesh_spacing, oqparam.complex_fault_mesh_spacing, oqparam.width_of_mfd_bin, oqparam.area_source_discretization, oqparam.minimum_magnitude, not spinning_off, oqparam.source_id)
if oqparam.calculation_mode.startswith('ucerf'):
[grp] = nrml.to_python(oqparam.inputs['source_model'], converter) # depends on [control=['if'], data=[]]
elif in_memory:
logging.info('Reading the source model(s) in parallel')
smap = parallel.Starmap(nrml.read_source_models, monitor=monitor, distribute=dist)
for sm in source_model_lt.gen_source_models(gsim_lt):
for name in sm.names.split():
fname = os.path.abspath(os.path.join(smlt_dir, name))
smap.submit([fname], converter) # depends on [control=['for'], data=['name']] # depends on [control=['for'], data=['sm']]
dic = {sm.fname: sm for sm in smap} # depends on [control=['if'], data=[]]
# consider only the effective realizations
nr = 0
idx = 0
grp_id = 0
if monitor.hdf5:
sources = hdf5.create(monitor.hdf5, 'source_info', source_info_dt)
hdf5.create(monitor.hdf5, 'source_geom', point3d)
filename = getattr(srcfilter, 'filename', None) if oqparam.prefilter_sources == 'no' else None # depends on [control=['if'], data=[]]
source_ids = set()
for sm in source_model_lt.gen_source_models(gsim_lt):
apply_unc = functools.partial(source_model_lt.apply_uncertainties, sm.path)
src_groups = []
for name in sm.names.split():
fname = os.path.abspath(os.path.join(smlt_dir, name))
if oqparam.calculation_mode.startswith('ucerf'):
sg = copy.copy(grp)
sg.id = grp_id
src = sg[0].new(sm.ordinal, sm.names) # one source
source_ids.add(src.source_id)
src.src_group_id = grp_id
src.id = idx
if oqparam.number_of_logic_tree_samples:
src.samples = sm.samples # depends on [control=['if'], data=[]]
sg.sources = [src]
src_groups.append(sg)
idx += 1
grp_id += 1
data = [(sg.id, src.source_id, src.code, 0, 0, src.num_ruptures, 0, 0, 0)]
hdf5.extend(sources, numpy.array(data, source_info_dt)) # depends on [control=['if'], data=[]]
elif in_memory:
newsm = make_sm(fname, dic[fname], apply_unc, oqparam.investigation_time)
for sg in newsm:
nr += sum((src.num_ruptures for src in sg))
# sample a source for each group
if os.environ.get('OQ_SAMPLE_SOURCES'):
sg.sources = random_filtered_sources(sg.sources, srcfilter, sg.id + oqparam.random_seed) # depends on [control=['if'], data=[]]
for src in sg:
source_ids.add(src.source_id)
src.src_group_id = grp_id
src.id = idx
idx += 1 # depends on [control=['for'], data=['src']]
sg.id = grp_id
grp_id += 1
src_groups.append(sg) # depends on [control=['for'], data=['sg']]
if monitor.hdf5:
store_sm(newsm, filename, monitor) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else: # just collect the TRT models
groups = logictree.read_source_groups(fname)
for group in groups:
source_ids.update((src['id'] for src in group)) # depends on [control=['for'], data=['group']]
src_groups.extend(groups) # depends on [control=['for'], data=['name']]
if grp_id >= TWO16:
# the limit is really needed only for event based calculations
raise ValueError('There is a limit of %d src groups!' % TWO16) # depends on [control=['if'], data=['TWO16']]
for (brid, srcids) in source_model_lt.info.applytosources.items():
for srcid in srcids:
if srcid not in source_ids:
raise ValueError('The source %s is not in the source model, please fix applyToSources in %s or the source model' % (srcid, source_model_lt.filename)) # depends on [control=['if'], data=['srcid']] # depends on [control=['for'], data=['srcid']] # depends on [control=['for'], data=[]]
num_sources = sum((len(sg.sources) for sg in src_groups))
sm.src_groups = src_groups
trts = [mod.trt for mod in src_groups]
source_model_lt.tectonic_region_types.update(trts)
logging.info('Processed source model %d with %d gsim path(s) and %d sources', sm.ordinal + 1, sm.num_gsim_paths, num_sources)
gsim_file = oqparam.inputs.get('gsim_logic_tree')
if gsim_file: # check TRTs
for src_group in src_groups:
if src_group.trt not in gsim_lt.values:
raise ValueError('Found in %r a tectonic region type %r inconsistent with the ones in %r' % (sm, src_group.trt, gsim_file)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['src_group']] # depends on [control=['if'], data=[]]
yield sm # depends on [control=['for'], data=['sm']]
logging.info('The composite source model has {:,d} ruptures'.format(nr))
# log if some source file is being used more than once
dupl = 0
for (fname, hits) in make_sm.fname_hits.items():
if hits > 1:
logging.info('%s has been considered %d times', fname, hits)
if not make_sm.changes:
dupl += hits # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['hits']] # depends on [control=['for'], data=[]]
if dupl and (not oqparam.optimize_same_id_sources) and (not oqparam.is_event_based()):
logging.warning('You are doing redundant calculations: please make sure that different sources have different IDs and set optimize_same_id_sources=true in your .ini file') # depends on [control=['if'], data=[]]
if make_sm.changes:
logging.info('Applied %d changes to the composite source model', make_sm.changes) # depends on [control=['if'], data=[]]
|
def show(target, *args, **kwargs):
    """Display *target* with the visualizer appropriate for its type.

    Parameters
    ----------
    target : NumberObserver, TrajectoryObserver, World, Model or str
        A NumberObserver is shown with viz.plot_number_observer,
        a TrajectoryObserver with viz.plot_trajectory, a World with
        viz.plot_world, a Model is dumped as text, and a string is
        treated as the filename of a saved World (HDF5).

    Raises
    ------
    ValueError
        If *target* is of an unsupported type, or is a filename that
        cannot be loaded as a World.
    """
    def _unsupported():
        # single place for the common rejection message
        raise ValueError(
            "The given target [{}] is not supported.".format(repr(target)))

    if isinstance(target, (ecell4_base.core.FixedIntervalNumberObserver,
                           ecell4_base.core.NumberObserver,
                           ecell4_base.core.TimingNumberObserver)):
        plot_number_observer(target, *args, **kwargs)
    elif isinstance(target, (ecell4_base.core.FixedIntervalTrajectoryObserver,
                             ecell4_base.core.FixedIntervalTrackingObserver)):
        plot_trajectory(target, *args, **kwargs)
    elif isinstance(target, (ecell4_base.ode.ODEWorld,
                             ecell4_base.gillespie.GillespieWorld,
                             ecell4_base.spatiocyte.SpatiocyteWorld,
                             ecell4_base.meso.MesoscopicWorld,
                             ecell4_base.bd.BDWorld,
                             ecell4_base.egfrd.EGFRDWorld)):
        plot_world(target, *args, **kwargs)
    elif isinstance(target, (ecell4_base.core.Model,
                             ecell4_base.core.NetworkModel,
                             ecell4_base.core.NetfreeModel)):
        dump_model(target)
    elif isinstance(target, str):
        try:
            loaded = simulation.load_world(target)
        except RuntimeError:
            _unsupported()
        else:
            show(loaded, *args, **kwargs)
    else:
        _unsupported()
|
def function[show, parameter[target]]:
constant[
An utility function to display the given target object in the proper way.
Paramters
---------
target : NumberObserver, TrajectoryObserver, World, str
When a NumberObserver object is given, show it with viz.plot_number_observer.
When a TrajectoryObserver object is given, show it with viz.plot_trajectory_observer.
When a World or a filename suggesting HDF5 is given, show it with viz.plot_world.
]
if call[name[isinstance], parameter[name[target], tuple[[<ast.Attribute object at 0x7da1b0f0e350>, <ast.Attribute object at 0x7da1b0f0e830>, <ast.Attribute object at 0x7da1b0f0ef80>]]]] begin[:]
call[name[plot_number_observer], parameter[name[target], <ast.Starred object at 0x7da1b0f0ef20>]]
|
keyword[def] identifier[show] ( identifier[target] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[target] ,( identifier[ecell4_base] . identifier[core] . identifier[FixedIntervalNumberObserver] , identifier[ecell4_base] . identifier[core] . identifier[NumberObserver] , identifier[ecell4_base] . identifier[core] . identifier[TimingNumberObserver] ,)):
identifier[plot_number_observer] ( identifier[target] ,* identifier[args] ,** identifier[kwargs] )
keyword[elif] identifier[isinstance] ( identifier[target] ,( identifier[ecell4_base] . identifier[core] . identifier[FixedIntervalTrajectoryObserver] , identifier[ecell4_base] . identifier[core] . identifier[FixedIntervalTrackingObserver] )):
identifier[plot_trajectory] ( identifier[target] ,* identifier[args] ,** identifier[kwargs] )
keyword[elif] identifier[isinstance] ( identifier[target] ,( identifier[ecell4_base] . identifier[ode] . identifier[ODEWorld] , identifier[ecell4_base] . identifier[gillespie] . identifier[GillespieWorld] , identifier[ecell4_base] . identifier[spatiocyte] . identifier[SpatiocyteWorld] , identifier[ecell4_base] . identifier[meso] . identifier[MesoscopicWorld] , identifier[ecell4_base] . identifier[bd] . identifier[BDWorld] , identifier[ecell4_base] . identifier[egfrd] . identifier[EGFRDWorld] )):
identifier[plot_world] ( identifier[target] ,* identifier[args] ,** identifier[kwargs] )
keyword[elif] identifier[isinstance] ( identifier[target] ,( identifier[ecell4_base] . identifier[core] . identifier[Model] , identifier[ecell4_base] . identifier[core] . identifier[NetworkModel] , identifier[ecell4_base] . identifier[core] . identifier[NetfreeModel] )):
identifier[dump_model] ( identifier[target] )
keyword[elif] identifier[isinstance] ( identifier[target] , identifier[str] ):
keyword[try] :
identifier[w] = identifier[simulation] . identifier[load_world] ( identifier[target] )
keyword[except] identifier[RuntimeError] keyword[as] identifier[e] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[repr] ( identifier[target] )))
keyword[else] :
identifier[show] ( identifier[w] ,* identifier[args] ,** identifier[kwargs] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[repr] ( identifier[target] )))
|
def show(target, *args, **kwargs):
    """Display the given target object in the proper way.

    Parameters
    ----------
    target : NumberObserver, TrajectoryObserver, World, Model or str
        When a NumberObserver object is given, show it with viz.plot_number_observer.
        When a TrajectoryObserver object is given, show it with viz.plot_trajectory.
        When a World or a filename suggesting HDF5 is given, show it with viz.plot_world.
        When a Model is given, dump it with dump_model.

    Raises
    ------
    ValueError
        If the target type is not supported, or a given filename cannot be
        loaded as a World.
    """
    if isinstance(target, (ecell4_base.core.FixedIntervalNumberObserver,
                           ecell4_base.core.NumberObserver,
                           ecell4_base.core.TimingNumberObserver)):
        plot_number_observer(target, *args, **kwargs)
    elif isinstance(target, (ecell4_base.core.FixedIntervalTrajectoryObserver,
                             ecell4_base.core.FixedIntervalTrackingObserver)):
        plot_trajectory(target, *args, **kwargs)
    elif isinstance(target, (ecell4_base.ode.ODEWorld,
                             ecell4_base.gillespie.GillespieWorld,
                             ecell4_base.spatiocyte.SpatiocyteWorld,
                             ecell4_base.meso.MesoscopicWorld,
                             ecell4_base.bd.BDWorld,
                             ecell4_base.egfrd.EGFRDWorld)):
        plot_world(target, *args, **kwargs)
    elif isinstance(target, (ecell4_base.core.Model,
                             ecell4_base.core.NetworkModel,
                             ecell4_base.core.NetfreeModel)):
        dump_model(target)
    elif isinstance(target, str):
        try:
            w = simulation.load_world(target)
        except RuntimeError as e:
            # Chain the original error so the underlying load failure
            # is not masked from the caller.
            raise ValueError('The given target [{}] is not supported.'.format(repr(target))) from e
        else:
            show(w, *args, **kwargs)
    else:
        raise ValueError('The given target [{}] is not supported.'.format(repr(target)))
|
def read_electrostatic_potential(self):
    """
    Parse the electrostatic potential for the last ionic step.

    Sets the following attributes on ``self``:
      - ``ngf``: the [NGXF, NGYF, NGZF] FFT grid dimensions as ints.
      - ``sampling_radii``: the test-charge radii as floats.
      - ``electrostatic_potential``: the potential values parsed from the
        per-site table, as floats.
    """
    # FFT grid dimensions of the fine mesh.
    pattern = {"ngf": r"\s+dimension x,y,z NGXF=\s+([\.\-\d]+)\sNGYF=\s+([\.\-\d]+)\sNGZF=\s+([\.\-\d]+)"}
    self.read_pattern(pattern, postprocess=int)
    self.ngf = self.data.get("ngf", [[]])[0]
    # reverse=True + terminate_on_match=True keeps only the last occurrence,
    # i.e. the values belonging to the final ionic step.
    pattern = {"radii": r"the test charge radii are((?:\s+[\.\-\d]+)+)"}
    self.read_pattern(pattern, reverse=True, terminate_on_match=True, postprocess=str)
    self.sampling_radii = [float(f) for f in self.data["radii"][0][0].split()]
    # The potential table sits between this header and the "E-fermi :" line.
    header_pattern = r"\(the norm of the test charge is\s+[\.\-\d]+\)"
    table_pattern = r"((?:\s+\d+\s*[\.\-\d]+)+)"
    footer_pattern = r"\s+E-fermi :"
    pots = self.read_table_pattern(header_pattern, table_pattern, footer_pattern)
    pots = "".join(itertools.chain.from_iterable(pots))
    # Rows are "<site index> <potential>"; keep only the potential values.
    pots = re.findall(r"\s+\d+\s?([\.\-\d]+)+", pots)
    pots = [float(f) for f in pots]
    self.electrostatic_potential = pots
|
def function[read_electrostatic_potential, parameter[self]]:
constant[
Parses the eletrostatic potential for the last ionic step
]
variable[pattern] assign[=] dictionary[[<ast.Constant object at 0x7da2047eace0>], [<ast.Constant object at 0x7da2047ea920>]]
call[name[self].read_pattern, parameter[name[pattern]]]
name[self].ngf assign[=] call[call[name[self].data.get, parameter[constant[ngf], list[[<ast.List object at 0x7da2047e8430>]]]]][constant[0]]
variable[pattern] assign[=] dictionary[[<ast.Constant object at 0x7da2047e8be0>], [<ast.Constant object at 0x7da2047e8e80>]]
call[name[self].read_pattern, parameter[name[pattern]]]
name[self].sampling_radii assign[=] <ast.ListComp object at 0x7da2047e8d90>
variable[header_pattern] assign[=] constant[\(the norm of the test charge is\s+[\.\-\d]+\)]
variable[table_pattern] assign[=] constant[((?:\s+\d+\s*[\.\-\d]+)+)]
variable[footer_pattern] assign[=] constant[\s+E-fermi :]
variable[pots] assign[=] call[name[self].read_table_pattern, parameter[name[header_pattern], name[table_pattern], name[footer_pattern]]]
variable[pots] assign[=] call[constant[].join, parameter[call[name[itertools].chain.from_iterable, parameter[name[pots]]]]]
variable[pots] assign[=] call[name[re].findall, parameter[constant[\s+\d+\s?([\.\-\d]+)+], name[pots]]]
variable[pots] assign[=] <ast.ListComp object at 0x7da2047e9450>
name[self].electrostatic_potential assign[=] name[pots]
|
keyword[def] identifier[read_electrostatic_potential] ( identifier[self] ):
literal[string]
identifier[pattern] ={ literal[string] : literal[string] }
identifier[self] . identifier[read_pattern] ( identifier[pattern] , identifier[postprocess] = identifier[int] )
identifier[self] . identifier[ngf] = identifier[self] . identifier[data] . identifier[get] ( literal[string] ,[[]])[ literal[int] ]
identifier[pattern] ={ literal[string] : literal[string] }
identifier[self] . identifier[read_pattern] ( identifier[pattern] , identifier[reverse] = keyword[True] , identifier[terminate_on_match] = keyword[True] , identifier[postprocess] = identifier[str] )
identifier[self] . identifier[sampling_radii] =[ identifier[float] ( identifier[f] ) keyword[for] identifier[f] keyword[in] identifier[self] . identifier[data] [ literal[string] ][ literal[int] ][ literal[int] ]. identifier[split] ()]
identifier[header_pattern] = literal[string]
identifier[table_pattern] = literal[string]
identifier[footer_pattern] = literal[string]
identifier[pots] = identifier[self] . identifier[read_table_pattern] ( identifier[header_pattern] , identifier[table_pattern] , identifier[footer_pattern] )
identifier[pots] = literal[string] . identifier[join] ( identifier[itertools] . identifier[chain] . identifier[from_iterable] ( identifier[pots] ))
identifier[pots] = identifier[re] . identifier[findall] ( literal[string] , identifier[pots] )
identifier[pots] =[ identifier[float] ( identifier[f] ) keyword[for] identifier[f] keyword[in] identifier[pots] ]
identifier[self] . identifier[electrostatic_potential] = identifier[pots]
|
def read_electrostatic_potential(self):
    """
    Parse the electrostatic potential for the last ionic step.

    Sets the following attributes on ``self``:
      - ``ngf``: the [NGXF, NGYF, NGZF] FFT grid dimensions as ints.
      - ``sampling_radii``: the test-charge radii as floats.
      - ``electrostatic_potential``: the potential values parsed from the
        per-site table, as floats.
    """
    # FFT grid dimensions of the fine mesh.
    pattern = {'ngf': '\\s+dimension x,y,z NGXF=\\s+([\\.\\-\\d]+)\\sNGYF=\\s+([\\.\\-\\d]+)\\sNGZF=\\s+([\\.\\-\\d]+)'}
    self.read_pattern(pattern, postprocess=int)
    self.ngf = self.data.get('ngf', [[]])[0]
    # reverse=True + terminate_on_match=True keeps only the last occurrence,
    # i.e. the values belonging to the final ionic step.
    pattern = {'radii': 'the test charge radii are((?:\\s+[\\.\\-\\d]+)+)'}
    self.read_pattern(pattern, reverse=True, terminate_on_match=True, postprocess=str)
    self.sampling_radii = [float(f) for f in self.data['radii'][0][0].split()]
    # The potential table sits between this header and the "E-fermi :" line.
    header_pattern = '\\(the norm of the test charge is\\s+[\\.\\-\\d]+\\)'
    table_pattern = '((?:\\s+\\d+\\s*[\\.\\-\\d]+)+)'
    footer_pattern = '\\s+E-fermi :'
    pots = self.read_table_pattern(header_pattern, table_pattern, footer_pattern)
    pots = ''.join(itertools.chain.from_iterable(pots))
    # Rows are "<site index> <potential>"; keep only the potential values.
    pots = re.findall('\\s+\\d+\\s?([\\.\\-\\d]+)+', pots)
    pots = [float(f) for f in pots]
    self.electrostatic_potential = pots
|
def make_measurement(name, channels,
                     lumi=1.0, lumi_rel_error=0.1,
                     output_prefix='./histfactory',
                     POI=None, const_params=None,
                     verbose=False):
    """
    Create a Measurement from a list of Channels
    """
    if verbose:
        llog = log['make_measurement']
        llog.info("creating measurement {0}".format(name))
    # Accept a single channel without requiring the caller to wrap it.
    if not isinstance(channels, (list, tuple)):
        channels = [channels]
    measurement = Measurement('measurement_{0}'.format(name), '')
    measurement.SetOutputFilePrefix(output_prefix)
    if POI is not None:
        if isinstance(POI, string_types):
            # A single parameter of interest.
            if verbose:
                llog.info("setting POI {0}".format(POI))
            measurement.SetPOI(POI)
        else:
            # Several parameters of interest.
            if verbose:
                llog.info("adding POIs {0}".format(', '.join(POI)))
            for poi_name in POI:
                measurement.AddPOI(poi_name)
    if verbose:
        llog.info("setting lumi={0:f} +/- {1:f}".format(lumi, lumi_rel_error))
    measurement.lumi = lumi
    measurement.lumi_rel_error = lumi_rel_error
    for chan in channels:
        if verbose:
            llog.info("adding channel {0}".format(chan.GetName()))
        measurement.AddChannel(chan)
    if const_params is not None:
        if verbose:
            llog.info("adding constant parameters {0}".format(
                ', '.join(const_params)))
        for par in const_params:
            measurement.AddConstantParam(par)
    return measurement
|
def function[make_measurement, parameter[name, channels, lumi, lumi_rel_error, output_prefix, POI, const_params, verbose]]:
constant[
Create a Measurement from a list of Channels
]
if name[verbose] begin[:]
variable[llog] assign[=] call[name[log]][constant[make_measurement]]
call[name[llog].info, parameter[call[constant[creating measurement {0}].format, parameter[name[name]]]]]
if <ast.UnaryOp object at 0x7da1b11bf100> begin[:]
variable[channels] assign[=] list[[<ast.Name object at 0x7da1b11bf400>]]
variable[meas] assign[=] call[name[Measurement], parameter[call[constant[measurement_{0}].format, parameter[name[name]]], constant[]]]
call[name[meas].SetOutputFilePrefix, parameter[name[output_prefix]]]
if compare[name[POI] is_not constant[None]] begin[:]
if call[name[isinstance], parameter[name[POI], name[string_types]]] begin[:]
if name[verbose] begin[:]
call[name[llog].info, parameter[call[constant[setting POI {0}].format, parameter[name[POI]]]]]
call[name[meas].SetPOI, parameter[name[POI]]]
if name[verbose] begin[:]
call[name[llog].info, parameter[call[constant[setting lumi={0:f} +/- {1:f}].format, parameter[name[lumi], name[lumi_rel_error]]]]]
name[meas].lumi assign[=] name[lumi]
name[meas].lumi_rel_error assign[=] name[lumi_rel_error]
for taget[name[channel]] in starred[name[channels]] begin[:]
if name[verbose] begin[:]
call[name[llog].info, parameter[call[constant[adding channel {0}].format, parameter[call[name[channel].GetName, parameter[]]]]]]
call[name[meas].AddChannel, parameter[name[channel]]]
if compare[name[const_params] is_not constant[None]] begin[:]
if name[verbose] begin[:]
call[name[llog].info, parameter[call[constant[adding constant parameters {0}].format, parameter[call[constant[, ].join, parameter[name[const_params]]]]]]]
for taget[name[param]] in starred[name[const_params]] begin[:]
call[name[meas].AddConstantParam, parameter[name[param]]]
return[name[meas]]
|
keyword[def] identifier[make_measurement] ( identifier[name] ,
identifier[channels] ,
identifier[lumi] = literal[int] , identifier[lumi_rel_error] = literal[int] ,
identifier[output_prefix] = literal[string] ,
identifier[POI] = keyword[None] ,
identifier[const_params] = keyword[None] ,
identifier[verbose] = keyword[False] ):
literal[string]
keyword[if] identifier[verbose] :
identifier[llog] = identifier[log] [ literal[string] ]
identifier[llog] . identifier[info] ( literal[string] . identifier[format] ( identifier[name] ))
keyword[if] keyword[not] identifier[isinstance] ( identifier[channels] ,( identifier[list] , identifier[tuple] )):
identifier[channels] =[ identifier[channels] ]
identifier[meas] = identifier[Measurement] ( literal[string] . identifier[format] ( identifier[name] ), literal[string] )
identifier[meas] . identifier[SetOutputFilePrefix] ( identifier[output_prefix] )
keyword[if] identifier[POI] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[POI] , identifier[string_types] ):
keyword[if] identifier[verbose] :
identifier[llog] . identifier[info] ( literal[string] . identifier[format] ( identifier[POI] ))
identifier[meas] . identifier[SetPOI] ( identifier[POI] )
keyword[else] :
keyword[if] identifier[verbose] :
identifier[llog] . identifier[info] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[POI] )))
keyword[for] identifier[p] keyword[in] identifier[POI] :
identifier[meas] . identifier[AddPOI] ( identifier[p] )
keyword[if] identifier[verbose] :
identifier[llog] . identifier[info] ( literal[string] . identifier[format] ( identifier[lumi] , identifier[lumi_rel_error] ))
identifier[meas] . identifier[lumi] = identifier[lumi]
identifier[meas] . identifier[lumi_rel_error] = identifier[lumi_rel_error]
keyword[for] identifier[channel] keyword[in] identifier[channels] :
keyword[if] identifier[verbose] :
identifier[llog] . identifier[info] ( literal[string] . identifier[format] ( identifier[channel] . identifier[GetName] ()))
identifier[meas] . identifier[AddChannel] ( identifier[channel] )
keyword[if] identifier[const_params] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[verbose] :
identifier[llog] . identifier[info] ( literal[string] . identifier[format] (
literal[string] . identifier[join] ( identifier[const_params] )))
keyword[for] identifier[param] keyword[in] identifier[const_params] :
identifier[meas] . identifier[AddConstantParam] ( identifier[param] )
keyword[return] identifier[meas]
|
def make_measurement(name, channels, lumi=1.0, lumi_rel_error=0.1, output_prefix='./histfactory', POI=None, const_params=None, verbose=False):
    """
    Create a Measurement from a list of Channels.

    Parameters
    ----------
    name : str
        Suffix used to build the measurement name 'measurement_<name>'.
    channels : Channel or list of Channel
        Channels to add; a single channel is wrapped in a list automatically.
    lumi : float
        Integrated luminosity.
    lumi_rel_error : float
        Relative uncertainty on the luminosity.
    output_prefix : str
        Prefix for the output files.
    POI : str or iterable of str, optional
        Parameter(s) of interest.
    const_params : iterable of str, optional
        Parameters to hold constant.
    verbose : bool
        If True, log progress messages.
    """
    if verbose:
        llog = log['make_measurement']
        llog.info('creating measurement {0}'.format(name)) # depends on [control=['if'], data=[]]
    # Accept a single channel without requiring the caller to wrap it.
    if not isinstance(channels, (list, tuple)):
        channels = [channels] # depends on [control=['if'], data=[]]
    # Create the measurement
    meas = Measurement('measurement_{0}'.format(name), '')
    meas.SetOutputFilePrefix(output_prefix)
    if POI is not None:
        if isinstance(POI, string_types):
            if verbose:
                llog.info('setting POI {0}'.format(POI)) # depends on [control=['if'], data=[]]
            meas.SetPOI(POI) # depends on [control=['if'], data=[]]
        else:
            if verbose:
                llog.info('adding POIs {0}'.format(', '.join(POI))) # depends on [control=['if'], data=[]]
            for p in POI:
                meas.AddPOI(p) # depends on [control=['for'], data=['p']] # depends on [control=['if'], data=['POI']]
    if verbose:
        llog.info('setting lumi={0:f} +/- {1:f}'.format(lumi, lumi_rel_error)) # depends on [control=['if'], data=[]]
    meas.lumi = lumi
    meas.lumi_rel_error = lumi_rel_error
    for channel in channels:
        if verbose:
            llog.info('adding channel {0}'.format(channel.GetName())) # depends on [control=['if'], data=[]]
        meas.AddChannel(channel) # depends on [control=['for'], data=['channel']]
    if const_params is not None:
        if verbose:
            llog.info('adding constant parameters {0}'.format(', '.join(const_params))) # depends on [control=['if'], data=[]]
        for param in const_params:
            meas.AddConstantParam(param) # depends on [control=['for'], data=['param']] # depends on [control=['if'], data=['const_params']]
    return meas
|
def periodic(self, period_seconds, callback):
    """Invoke *callback* repeatedly at fixed intervals of *period_seconds*.

    Deadlines advance by exact multiples of the period, so the schedule
    exhibits zero phase drift.
    :returns: EventHandle object. Call .remove() on it to cancel the event.
    """
    priority = 0
    pending = [None]

    def fire(deadline):
        # Re-register BEFORE running the callback so the next occurrence
        # can be cancelled from within the callback itself.
        deadline += period_seconds
        pending[0] = self._scheduler.enterabs(deadline, priority, fire, (deadline,))
        callback()

    start = self._scheduler.timefunc() + period_seconds
    pending[0] = self._scheduler.enterabs(start, priority, fire, (start,))
    # Resolve the holder lazily so cancellation targets the latest event.
    return self._make_sched_handle(lambda: pending[0])
|
def function[periodic, parameter[self, period_seconds, callback]]:
constant[This method allows to invoke the callback periodically, with specified time intervals.
Note that the scheduler features zero phase drift.
:returns: EventHandle object. Call .remove() on it to cancel the event.
]
variable[priority] assign[=] constant[0]
def function[caller, parameter[scheduled_deadline]]:
<ast.AugAssign object at 0x7da18f812cb0>
call[name[event_holder]][constant[0]] assign[=] call[name[self]._scheduler.enterabs, parameter[name[scheduled_deadline], name[priority], name[caller], tuple[[<ast.Name object at 0x7da18f813e20>]]]]
call[name[callback], parameter[]]
variable[first_deadline] assign[=] binary_operation[call[name[self]._scheduler.timefunc, parameter[]] + name[period_seconds]]
variable[event_holder] assign[=] list[[<ast.Call object at 0x7da18f813be0>]]
return[call[name[self]._make_sched_handle, parameter[<ast.Lambda object at 0x7da18bc73460>]]]
|
keyword[def] identifier[periodic] ( identifier[self] , identifier[period_seconds] , identifier[callback] ):
literal[string]
identifier[priority] = literal[int]
keyword[def] identifier[caller] ( identifier[scheduled_deadline] ):
identifier[scheduled_deadline] += identifier[period_seconds]
identifier[event_holder] [ literal[int] ]= identifier[self] . identifier[_scheduler] . identifier[enterabs] ( identifier[scheduled_deadline] , identifier[priority] , identifier[caller] ,( identifier[scheduled_deadline] ,))
identifier[callback] ()
identifier[first_deadline] = identifier[self] . identifier[_scheduler] . identifier[timefunc] ()+ identifier[period_seconds]
identifier[event_holder] =[ identifier[self] . identifier[_scheduler] . identifier[enterabs] ( identifier[first_deadline] , identifier[priority] , identifier[caller] ,( identifier[first_deadline] ,))]
keyword[return] identifier[self] . identifier[_make_sched_handle] ( keyword[lambda] : identifier[event_holder] [ literal[int] ])
|
def periodic(self, period_seconds, callback):
    """This method allows to invoke the callback periodically, with specified time intervals.
    Note that the scheduler features zero phase drift: deadlines advance by
    exact multiples of period_seconds rather than "now + period".
    :returns: EventHandle object. Call .remove() on it to cancel the event.
    """
    priority = 0
    def caller(scheduled_deadline):
        # Event MUST be re-registered first in order to ensure that it can be cancelled from the callback
        scheduled_deadline += period_seconds
        event_holder[0] = self._scheduler.enterabs(scheduled_deadline, priority, caller, (scheduled_deadline,))
        callback()
    first_deadline = self._scheduler.timefunc() + period_seconds
    event_holder = [self._scheduler.enterabs(first_deadline, priority, caller, (first_deadline,))]
    # The handle reads event_holder lazily so cancellation always targets
    # the most recently scheduled event.
    return self._make_sched_handle(lambda : event_holder[0])
|
def EmitSignal(self, interface, name, signature, args):
    '''Emit a signal from the object.
    interface: D-Bus interface to send the signal from. For convenience you
               can specify '' here to add the method to the object's main
               interface (as specified on construction).
    name: Name of the signal
    signature: Signature of input arguments; for example "ias" for a signal
               that takes an int32 and a string array as arguments; see
               http://dbus.freedesktop.org/doc/dbus-specification.html#message-protocol-signatures
    args: variant array with signal arguments; must match order and type in
          "signature"
    '''
    if not interface:
        interface = self.interface
    # Convert types of arguments according to signature, using
    # MethodCallMessage.append(); this will also provide type/length
    # checks, except for the case of an empty signature.
    if signature == '' and len(args) > 0:
        raise TypeError('Fewer items found in D-Bus signature than in Python arguments')
    m = dbus.connection.MethodCallMessage('a.b', '/a', 'a.b', 'a')
    m.append(signature=signature, *args)
    args = m.get_args_list()

    # Use a proper def instead of assigning a lambda (PEP 8, E731);
    # __name__ must still be overridden so D-Bus sees the signal's name.
    def fn(self, *args):
        self.log('emit %s.%s%s' % (interface, name, self.format_args(args)))
    fn.__name__ = str(name)
    dbus_fn = dbus.service.signal(interface)(fn)
    dbus_fn._dbus_signature = signature
    dbus_fn._dbus_args = ['arg%i' % i for i in range(1, len(args) + 1)]
    dbus_fn(self, *args)
|
def function[EmitSignal, parameter[self, interface, name, signature, args]]:
constant[Emit a signal from the object.
interface: D-Bus interface to send the signal from. For convenience you
can specify '' here to add the method to the object's main
interface (as specified on construction).
name: Name of the signal
signature: Signature of input arguments; for example "ias" for a signal
that takes an int32 and a string array as arguments; see
http://dbus.freedesktop.org/doc/dbus-specification.html#message-protocol-signatures
args: variant array with signal arguments; must match order and type in
"signature"
]
if <ast.UnaryOp object at 0x7da18f09ded0> begin[:]
variable[interface] assign[=] name[self].interface
if <ast.BoolOp object at 0x7da18f09dc60> begin[:]
<ast.Raise object at 0x7da18f09f2e0>
variable[m] assign[=] call[name[dbus].connection.MethodCallMessage, parameter[constant[a.b], constant[/a], constant[a.b], constant[a]]]
call[name[m].append, parameter[<ast.Starred object at 0x7da18f09dfc0>]]
variable[args] assign[=] call[name[m].get_args_list, parameter[]]
variable[fn] assign[=] <ast.Lambda object at 0x7da18f09ea40>
name[fn].__name__ assign[=] call[name[str], parameter[name[name]]]
variable[dbus_fn] assign[=] call[call[name[dbus].service.signal, parameter[name[interface]]], parameter[name[fn]]]
name[dbus_fn]._dbus_signature assign[=] name[signature]
name[dbus_fn]._dbus_args assign[=] <ast.ListComp object at 0x7da20c76e5f0>
call[name[dbus_fn], parameter[name[self], <ast.Starred object at 0x7da20c76d660>]]
|
keyword[def] identifier[EmitSignal] ( identifier[self] , identifier[interface] , identifier[name] , identifier[signature] , identifier[args] ):
literal[string]
keyword[if] keyword[not] identifier[interface] :
identifier[interface] = identifier[self] . identifier[interface]
keyword[if] identifier[signature] == literal[string] keyword[and] identifier[len] ( identifier[args] )> literal[int] :
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[m] = identifier[dbus] . identifier[connection] . identifier[MethodCallMessage] ( literal[string] , literal[string] , literal[string] , literal[string] )
identifier[m] . identifier[append] ( identifier[signature] = identifier[signature] ,* identifier[args] )
identifier[args] = identifier[m] . identifier[get_args_list] ()
identifier[fn] = keyword[lambda] identifier[self] ,* identifier[args] : identifier[self] . identifier[log] ( literal[string] %( identifier[interface] , identifier[name] , identifier[self] . identifier[format_args] ( identifier[args] )))
identifier[fn] . identifier[__name__] = identifier[str] ( identifier[name] )
identifier[dbus_fn] = identifier[dbus] . identifier[service] . identifier[signal] ( identifier[interface] )( identifier[fn] )
identifier[dbus_fn] . identifier[_dbus_signature] = identifier[signature]
identifier[dbus_fn] . identifier[_dbus_args] =[ literal[string] % identifier[i] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[args] )+ literal[int] )]
identifier[dbus_fn] ( identifier[self] ,* identifier[args] )
|
def EmitSignal(self, interface, name, signature, args):
    """Emit a signal from the object.
    interface: D-Bus interface to send the signal from. For convenience you
               can specify '' here to add the method to the object's main
               interface (as specified on construction).
    name: Name of the signal
    signature: Signature of input arguments; for example "ias" for a signal
               that takes an int32 and a string array as arguments; see
               http://dbus.freedesktop.org/doc/dbus-specification.html#message-protocol-signatures
    args: variant array with signal arguments; must match order and type in
          "signature"
    """
    if not interface:
        interface = self.interface # depends on [control=['if'], data=[]]
    # convert types of arguments according to signature, using
    # MethodCallMessage.append(); this will also provide type/length
    # checks, except for the case of an empty signature
    if signature == '' and len(args) > 0:
        raise TypeError('Fewer items found in D-Bus signature than in Python arguments') # depends on [control=['if'], data=[]]
    m = dbus.connection.MethodCallMessage('a.b', '/a', 'a.b', 'a')
    m.append(*args, signature=signature)
    args = m.get_args_list()
    # The lambda only logs the emission; __name__ is overridden below so
    # D-Bus registers the decorated function under the signal's name.
    fn = lambda self, *args: self.log('emit %s.%s%s' % (interface, name, self.format_args(args)))
    fn.__name__ = str(name)
    dbus_fn = dbus.service.signal(interface)(fn)
    dbus_fn._dbus_signature = signature
    dbus_fn._dbus_args = ['arg%i' % i for i in range(1, len(args) + 1)]
    dbus_fn(self, *args)
|
def check(text):
    """Check the text.

    Scan ``text`` for clichéd or illogical constructions and return a list
    of ``(start, end, error_code, message, replacement)`` tuples, one per
    match; ``replacement`` is always ``None``.
    """
    err = "misc.tense_present"
    msg = u"'{}'."
    # Raw strings (and literal dash characters) so \d, \W, \s are regex
    # escapes, not deprecated string escape sequences.
    illogics = [
        r"up to \d{1,3}% ?[-—–]{0,3} ?(?:or|and) more\W?",
        "between you and I",
        "on accident",
        "somewhat of a",
        "all it's own",
        "reason is because",
        "audible to the ear",
        "in regards to",
        "would of",
        # "and so",
        "i ?(?:feel|am feeling|am|'m|'m feeling) nauseous",
    ]
    errors = []
    for phrase in illogics:
        # Surrounding \s anchors ensure only whole phrases are matched.
        for m in re.finditer(r"\s{}\s".format(phrase), text, flags=re.U | re.I):
            txt = m.group(0).strip()
            errors.append((
                m.start() + 1,
                m.end(),
                err,
                msg.format(txt),
                None))
    return errors
|
def function[check, parameter[text]]:
constant[Check the text.]
variable[err] assign[=] constant[misc.tense_present]
variable[msg] assign[=] constant['{}'.]
variable[illogics] assign[=] list[[<ast.Constant object at 0x7da1b08f9900>, <ast.Constant object at 0x7da1b08f9d50>, <ast.Constant object at 0x7da1b08f9660>, <ast.Constant object at 0x7da1b08fbc40>, <ast.Constant object at 0x7da1b08f8f10>, <ast.Constant object at 0x7da1b08f9600>, <ast.Constant object at 0x7da1b08fa3b0>, <ast.Constant object at 0x7da1b08f9ba0>, <ast.Constant object at 0x7da1b08f8850>, <ast.Constant object at 0x7da1b08f9000>]]
variable[errors] assign[=] list[[]]
for taget[name[i]] in starred[name[illogics]] begin[:]
for taget[name[m]] in starred[call[name[re].finditer, parameter[call[constant[\s{}\s].format, parameter[name[i]]], name[text]]]] begin[:]
variable[txt] assign[=] call[call[name[m].group, parameter[constant[0]]].strip, parameter[]]
call[name[errors].append, parameter[tuple[[<ast.BinOp object at 0x7da1b0639840>, <ast.Call object at 0x7da1b0639870>, <ast.Name object at 0x7da1b063af50>, <ast.Call object at 0x7da1b063b640>, <ast.Constant object at 0x7da1b063a260>]]]]
return[name[errors]]
|
keyword[def] identifier[check] ( identifier[text] ):
literal[string]
identifier[err] = literal[string]
identifier[msg] = literal[string]
identifier[illogics] =[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
]
identifier[errors] =[]
keyword[for] identifier[i] keyword[in] identifier[illogics] :
keyword[for] identifier[m] keyword[in] identifier[re] . identifier[finditer] ( literal[string] . identifier[format] ( identifier[i] ), identifier[text] , identifier[flags] = identifier[re] . identifier[U] | identifier[re] . identifier[I] ):
identifier[txt] = identifier[m] . identifier[group] ( literal[int] ). identifier[strip] ()
identifier[errors] . identifier[append] ((
identifier[m] . identifier[start] ()+ literal[int] ,
identifier[m] . identifier[end] (),
identifier[err] ,
identifier[msg] . identifier[format] ( identifier[txt] ),
keyword[None] ))
keyword[return] identifier[errors]
|
def check(text):
    """Check the text.

    Scan ``text`` for clichéd or illogical constructions and return a list
    of ``(start, end, error_code, message, replacement)`` tuples, one per
    match; ``replacement`` is always ``None``.
    """
    err = 'misc.tense_present'
    msg = u"'{}'."
    # "and so",
    illogics = [u'up to \\d{1,3}% ?[-—–]{0,3} ?(?:or|and) more\\W?', 'between you and I', 'on accident', 'somewhat of a', "all it's own", 'reason is because', 'audible to the ear', 'in regards to', 'would of', "i ?(?:feel|am feeling|am|'m|'m feeling) nauseous"]
    errors = []
    for i in illogics:
        # Surrounding \s anchors ensure only whole phrases are matched.
        for m in re.finditer(u'\\s{}\\s'.format(i), text, flags=re.U | re.I):
            txt = m.group(0).strip()
            errors.append((m.start() + 1, m.end(), err, msg.format(txt), None)) # depends on [control=['for'], data=['m']] # depends on [control=['for'], data=['i']]
    return errors
|
def step(self):
    """
    Returns the step value for this ruler. If the cached value is None,
    a default value is derived from the ruler type.
    :return <variant>
    """
    if self._step is None:
        if self.rulerType() == XChartRuler.Type.Number:
            # Roughly ten ticks across the numeric range.
            span = self.maximum() - self.minimum()
            self._step = int(span / 10.0)
        elif self.rulerType() == XChartRuler.Type.Date:
            self._step = int(self.minimum().daysTo(self.maximum()) / 10.0) - 1
        elif self.rulerType() & (XChartRuler.Type.Time |
                                 XChartRuler.Type.Datetime):
            self._step = int(self.minimum().secsTo(self.maximum()) / 10.0) - 1
    return self._step
|
def function[step, parameter[self]]:
constant[
Returns the step value for this ruler. If the cached value is None,
then a default value will be specified based on the ruler type.
:return <variant>
]
if compare[name[self]._step is_not constant[None]] begin[:]
return[name[self]._step]
return[name[self]._step]
|
keyword[def] identifier[step] ( identifier[self] ):
literal[string]
keyword[if] ( identifier[self] . identifier[_step] keyword[is] keyword[not] keyword[None] ):
keyword[return] identifier[self] . identifier[_step]
keyword[elif] ( identifier[self] . identifier[rulerType] ()== identifier[XChartRuler] . identifier[Type] . identifier[Number] ):
identifier[self] . identifier[_step] = identifier[int] (( identifier[self] . identifier[maximum] ()- identifier[self] . identifier[minimum] ())/ literal[int] )
keyword[elif] ( identifier[self] . identifier[rulerType] ()== identifier[XChartRuler] . identifier[Type] . identifier[Date] ):
identifier[self] . identifier[_step] = identifier[int] ( identifier[self] . identifier[minimum] (). identifier[daysTo] ( identifier[self] . identifier[maximum] ())/ literal[int] )- literal[int]
keyword[elif] ( identifier[self] . identifier[rulerType] ()&( identifier[XChartRuler] . identifier[Type] . identifier[Time] | identifier[XChartRuler] . identifier[Type] . identifier[Datetime] )):
identifier[self] . identifier[_step] = identifier[int] ( identifier[self] . identifier[minimum] (). identifier[secsTo] ( identifier[self] . identifier[maximum] ())/ literal[int] )- literal[int]
keyword[return] identifier[self] . identifier[_step]
|
def step(self):
    """
    Returns the step value for this ruler. If the cached value is None,
    then a default value will be specified based on the ruler type.
    :return <variant>
    """
    # Return the explicitly cached value when one has been set.
    if self._step is not None:
        return self._step # depends on [control=['if'], data=[]]
    elif self.rulerType() == XChartRuler.Type.Number:
        # Roughly ten ticks across the numeric range.
        self._step = int((self.maximum() - self.minimum()) / 10.0) # depends on [control=['if'], data=[]]
    elif self.rulerType() == XChartRuler.Type.Date:
        self._step = int(self.minimum().daysTo(self.maximum()) / 10.0) - 1 # depends on [control=['if'], data=[]]
    elif self.rulerType() & (XChartRuler.Type.Time | XChartRuler.Type.Datetime):
        self._step = int(self.minimum().secsTo(self.maximum()) / 10.0) - 1 # depends on [control=['if'], data=[]]
    return self._step
|
def ks_unif_durbin_recurrence_rational(samples, statistic):
    """
    Calculates the probability that the statistic is less than the given value,
    using Durbin's recurrence and employing the standard fractions module.
    This is a (hopefully) exact reference implementation, likely too slow for
    practical usage. The statistic should be given as a Fraction instance and
    the result is also a Fraction. See: doi:10.18637/jss.v026.i02.
    """
    nt = statistic * samples
    # Python 3: int()s can be skipped.
    k_low = int(floor(nt)) + 1
    k_neg = int(floor(-nt)) + 1
    k_high = int(floor(2 * nt)) + 1
    # Initial terms below the first breakpoint.
    q = [Fraction(k ** k, factorial(k)) for k in range(k_low)]
    # Middle terms: closed-form correction subtracted from k^k / k!.
    for k in range(k_low, k_high):
        correction = sum(
            (nt + j) ** (j - 1) / factorial(j) *
            (k - nt - j) ** (k - j) / factorial(k - j)
            for j in range(k + k_neg))
        q.append(Fraction(k ** k, factorial(k)) - 2 * nt * correction)
    # Remaining terms follow Durbin's recurrence over earlier q values.
    for k in range(k_high, samples + 1):
        q.append(-sum(
            (-1) ** j * (2 * nt - j) ** j / factorial(j) * q[k - j]
            for j in range(1, k_high)))
    return q[samples] * factorial(samples) / samples ** samples
|
def function[ks_unif_durbin_recurrence_rational, parameter[samples, statistic]]:
constant[
Calculates the probability that the statistic is less than the given value,
using Durbin's recurrence and employing the standard fractions module.
This is a (hopefully) exact reference implementation, likely too slow for
practical usage. The statistic should be given as a Fraction instance and
the result is also a Fraction. See: doi:10.18637/jss.v026.i02.
]
variable[t] assign[=] binary_operation[name[statistic] * name[samples]]
variable[ft1] assign[=] binary_operation[call[name[int], parameter[call[name[floor], parameter[name[t]]]]] + constant[1]]
variable[fmt1] assign[=] binary_operation[call[name[int], parameter[call[name[floor], parameter[<ast.UnaryOp object at 0x7da1b1878550>]]]] + constant[1]]
variable[fdt1] assign[=] binary_operation[call[name[int], parameter[call[name[floor], parameter[binary_operation[constant[2] * name[t]]]]]] + constant[1]]
variable[qs] assign[=] <ast.ListComp object at 0x7da1b187beb0>
call[name[qs].extend, parameter[<ast.GeneratorExp object at 0x7da1b18790c0>]]
call[name[qs].extend, parameter[<ast.GeneratorExp object at 0x7da1b1878130>]]
return[binary_operation[binary_operation[call[name[qs]][name[samples]] * call[name[factorial], parameter[name[samples]]]] / binary_operation[name[samples] ** name[samples]]]]
|
keyword[def] identifier[ks_unif_durbin_recurrence_rational] ( identifier[samples] , identifier[statistic] ):
literal[string]
identifier[t] = identifier[statistic] * identifier[samples]
identifier[ft1] = identifier[int] ( identifier[floor] ( identifier[t] ))+ literal[int]
identifier[fmt1] = identifier[int] ( identifier[floor] (- identifier[t] ))+ literal[int]
identifier[fdt1] = identifier[int] ( identifier[floor] ( literal[int] * identifier[t] ))+ literal[int]
identifier[qs] =[ identifier[Fraction] ( identifier[i] ** identifier[i] , identifier[factorial] ( identifier[i] )) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[ft1] )]
identifier[qs] . identifier[extend] ( identifier[Fraction] ( identifier[i] ** identifier[i] , identifier[factorial] ( identifier[i] ))- literal[int] * identifier[t] *
identifier[sum] (( identifier[t] + identifier[j] )**( identifier[j] - literal[int] )/ identifier[factorial] ( identifier[j] )*
( identifier[i] - identifier[t] - identifier[j] )**( identifier[i] - identifier[j] )/ identifier[factorial] ( identifier[i] - identifier[j] )
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[i] + identifier[fmt1] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[ft1] , identifier[fdt1] ))
identifier[qs] . identifier[extend] (- identifier[sum] ((- literal[int] )** identifier[j] *( literal[int] * identifier[t] - identifier[j] )** identifier[j] / identifier[factorial] ( identifier[j] )* identifier[qs] [ identifier[i] - identifier[j] ]
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[fdt1] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[fdt1] , identifier[samples] + literal[int] ))
keyword[return] identifier[qs] [ identifier[samples] ]* identifier[factorial] ( identifier[samples] )/ identifier[samples] ** identifier[samples]
|
def ks_unif_durbin_recurrence_rational(samples, statistic):
"""
Calculates the probability that the statistic is less than the given value,
using Durbin's recurrence and employing the standard fractions module.
This is a (hopefully) exact reference implementation, likely too slow for
practical usage. The statistic should be given as a Fraction instance and
the result is also a Fraction. See: doi:10.18637/jss.v026.i02.
"""
t = statistic * samples
# Python 3: int()s can be skipped.
ft1 = int(floor(t)) + 1
fmt1 = int(floor(-t)) + 1
fdt1 = int(floor(2 * t)) + 1
qs = [Fraction(i ** i, factorial(i)) for i in range(ft1)]
qs.extend((Fraction(i ** i, factorial(i)) - 2 * t * sum(((t + j) ** (j - 1) / factorial(j) * (i - t - j) ** (i - j) / factorial(i - j) for j in range(i + fmt1))) for i in range(ft1, fdt1)))
qs.extend((-sum(((-1) ** j * (2 * t - j) ** j / factorial(j) * qs[i - j] for j in range(1, fdt1))) for i in range(fdt1, samples + 1)))
return qs[samples] * factorial(samples) / samples ** samples
|
def create_relationship(self, relationship_form):
    """Creates a new ``Relationship``.
    arg: relationship_form (osid.relationship.RelationshipForm):
        the form for this ``Relationship``
    return: (osid.relationship.Relationship) - the new
        ``Relationship``
    raise: IllegalState - ``relationship_form`` already used in a
        create transaction
    raise: InvalidArgument - one or more of the form elements is
        invalid
    raise: NullArgument - ``relationship_form`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    raise: Unsupported - ``relationship_form`` did not originate
        from ``get_relationship_form_for_create()``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceAdminSession.create_resource_template
    collection = JSONClientValidated('relationship',
                                     collection='Relationship',
                                     runtime=self._runtime)
    # Guard: reject anything that is not a RelationshipForm at all.
    if not isinstance(relationship_form, ABCRelationshipForm):
        raise errors.InvalidArgument('argument type is not an RelationshipForm')
    # Update-forms come from the update workflow; only create-forms are
    # accepted here.
    if relationship_form.is_for_update():
        raise errors.InvalidArgument('the RelationshipForm is for update only, not create')
    try:
        # self._forms tracks every form issued by this session; a CREATED
        # marker means the form was already consumed by a prior create.
        if self._forms[relationship_form.get_id().get_identifier()] == CREATED:
            raise errors.IllegalState('relationship_form already used in a create transaction')
    except KeyError:
        # Unknown form id: this session never issued it.
        raise errors.Unsupported('relationship_form did not originate from this session')
    if not relationship_form.is_valid():
        raise errors.InvalidArgument('one or more of the form elements is invalid')
    insert_result = collection.insert_one(relationship_form._my_map)
    # Mark the form as consumed so it cannot be reused in another create.
    self._forms[relationship_form.get_id().get_identifier()] = CREATED
    # Re-read the stored document so the returned object reflects exactly
    # what was persisted (including the generated _id).
    result = objects.Relationship(
        osid_object_map=collection.find_one({'_id': insert_result.inserted_id}),
        runtime=self._runtime,
        proxy=self._proxy)
    return result
|
def function[create_relationship, parameter[self, relationship_form]]:
constant[Creates a new ``Relationship``.
arg: relationship_form (osid.relationship.RelationshipForm):
the form for this ``Relationship``
return: (osid.relationship.Relationship) - the new
``Relationship``
raise: IllegalState - ``relationship_form`` already used in a
create transaction
raise: InvalidArgument - one or more of the form elements is
invalid
raise: NullArgument - ``relationship_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``relationship_form`` did not originate
from ``get_relationship_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
]
variable[collection] assign[=] call[name[JSONClientValidated], parameter[constant[relationship]]]
if <ast.UnaryOp object at 0x7da18f8115d0> begin[:]
<ast.Raise object at 0x7da18f8135e0>
if call[name[relationship_form].is_for_update, parameter[]] begin[:]
<ast.Raise object at 0x7da18f811900>
<ast.Try object at 0x7da18f810ac0>
if <ast.UnaryOp object at 0x7da18f813c10> begin[:]
<ast.Raise object at 0x7da18f8104c0>
variable[insert_result] assign[=] call[name[collection].insert_one, parameter[name[relationship_form]._my_map]]
call[name[self]._forms][call[call[name[relationship_form].get_id, parameter[]].get_identifier, parameter[]]] assign[=] name[CREATED]
variable[result] assign[=] call[name[objects].Relationship, parameter[]]
return[name[result]]
|
keyword[def] identifier[create_relationship] ( identifier[self] , identifier[relationship_form] ):
literal[string]
identifier[collection] = identifier[JSONClientValidated] ( literal[string] ,
identifier[collection] = literal[string] ,
identifier[runtime] = identifier[self] . identifier[_runtime] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[relationship_form] , identifier[ABCRelationshipForm] ):
keyword[raise] identifier[errors] . identifier[InvalidArgument] ( literal[string] )
keyword[if] identifier[relationship_form] . identifier[is_for_update] ():
keyword[raise] identifier[errors] . identifier[InvalidArgument] ( literal[string] )
keyword[try] :
keyword[if] identifier[self] . identifier[_forms] [ identifier[relationship_form] . identifier[get_id] (). identifier[get_identifier] ()]== identifier[CREATED] :
keyword[raise] identifier[errors] . identifier[IllegalState] ( literal[string] )
keyword[except] identifier[KeyError] :
keyword[raise] identifier[errors] . identifier[Unsupported] ( literal[string] )
keyword[if] keyword[not] identifier[relationship_form] . identifier[is_valid] ():
keyword[raise] identifier[errors] . identifier[InvalidArgument] ( literal[string] )
identifier[insert_result] = identifier[collection] . identifier[insert_one] ( identifier[relationship_form] . identifier[_my_map] )
identifier[self] . identifier[_forms] [ identifier[relationship_form] . identifier[get_id] (). identifier[get_identifier] ()]= identifier[CREATED]
identifier[result] = identifier[objects] . identifier[Relationship] (
identifier[osid_object_map] = identifier[collection] . identifier[find_one] ({ literal[string] : identifier[insert_result] . identifier[inserted_id] }),
identifier[runtime] = identifier[self] . identifier[_runtime] ,
identifier[proxy] = identifier[self] . identifier[_proxy] )
keyword[return] identifier[result]
|
def create_relationship(self, relationship_form):
"""Creates a new ``Relationship``.
arg: relationship_form (osid.relationship.RelationshipForm):
the form for this ``Relationship``
return: (osid.relationship.Relationship) - the new
``Relationship``
raise: IllegalState - ``relationship_form`` already used in a
create transaction
raise: InvalidArgument - one or more of the form elements is
invalid
raise: NullArgument - ``relationship_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``relationship_form`` did not originate
from ``get_relationship_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.create_resource_template
collection = JSONClientValidated('relationship', collection='Relationship', runtime=self._runtime)
if not isinstance(relationship_form, ABCRelationshipForm):
raise errors.InvalidArgument('argument type is not an RelationshipForm') # depends on [control=['if'], data=[]]
if relationship_form.is_for_update():
raise errors.InvalidArgument('the RelationshipForm is for update only, not create') # depends on [control=['if'], data=[]]
try:
if self._forms[relationship_form.get_id().get_identifier()] == CREATED:
raise errors.IllegalState('relationship_form already used in a create transaction') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except KeyError:
raise errors.Unsupported('relationship_form did not originate from this session') # depends on [control=['except'], data=[]]
if not relationship_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid') # depends on [control=['if'], data=[]]
insert_result = collection.insert_one(relationship_form._my_map)
self._forms[relationship_form.get_id().get_identifier()] = CREATED
result = objects.Relationship(osid_object_map=collection.find_one({'_id': insert_result.inserted_id}), runtime=self._runtime, proxy=self._proxy)
return result
|
async def get_entry(config, url):
    """ Given an entry URL, return the entry
    Arguments:
    config -- the configuration
    url -- the URL of the entry
    Returns: 3-tuple of (current, previous, updated) """
    # Previously cached copy of the entry, if a cache is configured and the
    # stored schema version matches.
    previous = config.cache.get(
        'entry', url,
        schema_version=SCHEMA_VERSION) if config.cache else None
    # Reuse the cached response's conditional-request headers so the server
    # can answer with "not modified" instead of a full body.
    headers = previous.caching if previous else None
    request = await utils.retry_get(config, url, headers=headers)
    if not request or not request.success:
        # Fetch failed entirely; report the cached copy (if any) as current
        # state with no update.
        LOGGER.error("Could not get entry %s: %d", url,
                     request.status if request else -1)
        return None, previous, False
    # cache hit
    if request.cached:
        return previous, previous, False
    current = Entry(request)
    # Content updated
    if config.cache:
        config.cache.set('entry', url, current)
    # "updated" is True when there was no previous copy, or the content
    # digest or HTTP status changed since last fetch.
    return current, previous, (not previous
                               or previous.digest != current.digest
                               or previous.status != current.status)
|
<ast.AsyncFunctionDef object at 0x7da1b2344250>
|
keyword[async] keyword[def] identifier[get_entry] ( identifier[config] , identifier[url] ):
literal[string]
identifier[previous] = identifier[config] . identifier[cache] . identifier[get] (
literal[string] , identifier[url] ,
identifier[schema_version] = identifier[SCHEMA_VERSION] ) keyword[if] identifier[config] . identifier[cache] keyword[else] keyword[None]
identifier[headers] = identifier[previous] . identifier[caching] keyword[if] identifier[previous] keyword[else] keyword[None]
identifier[request] = keyword[await] identifier[utils] . identifier[retry_get] ( identifier[config] , identifier[url] , identifier[headers] = identifier[headers] )
keyword[if] keyword[not] identifier[request] keyword[or] keyword[not] identifier[request] . identifier[success] :
identifier[LOGGER] . identifier[error] ( literal[string] , identifier[url] ,
identifier[request] . identifier[status] keyword[if] identifier[request] keyword[else] - literal[int] )
keyword[return] keyword[None] , identifier[previous] , keyword[False]
keyword[if] identifier[request] . identifier[cached] :
keyword[return] identifier[previous] , identifier[previous] , keyword[False]
identifier[current] = identifier[Entry] ( identifier[request] )
keyword[if] identifier[config] . identifier[cache] :
identifier[config] . identifier[cache] . identifier[set] ( literal[string] , identifier[url] , identifier[current] )
keyword[return] identifier[current] , identifier[previous] ,( keyword[not] identifier[previous]
keyword[or] identifier[previous] . identifier[digest] != identifier[current] . identifier[digest]
keyword[or] identifier[previous] . identifier[status] != identifier[current] . identifier[status] )
|
async def get_entry(config, url):
""" Given an entry URL, return the entry
Arguments:
config -- the configuration
url -- the URL of the entry
Returns: 3-tuple of (current, previous, updated) """
previous = config.cache.get('entry', url, schema_version=SCHEMA_VERSION) if config.cache else None
headers = previous.caching if previous else None
request = await utils.retry_get(config, url, headers=headers)
if not request or not request.success:
LOGGER.error('Could not get entry %s: %d', url, request.status if request else -1)
return (None, previous, False) # depends on [control=['if'], data=[]]
# cache hit
if request.cached:
return (previous, previous, False) # depends on [control=['if'], data=[]]
current = Entry(request)
# Content updated
if config.cache:
config.cache.set('entry', url, current) # depends on [control=['if'], data=[]]
return (current, previous, not previous or previous.digest != current.digest or previous.status != current.status)
|
def clear(self):
    """Reset the device memory to a blank image.

    A fresh image is created in the device's native mode and size, so the
    display is filled with that mode's default (empty) pixel value.
    """
    blank = Image.new(self.mode, self.size)
    self.display(blank)
|
def function[clear, parameter[self]]:
constant[
Initializes the device memory with an empty (blank) image.
]
call[name[self].display, parameter[call[name[Image].new, parameter[name[self].mode, name[self].size]]]]
|
keyword[def] identifier[clear] ( identifier[self] ):
literal[string]
identifier[self] . identifier[display] ( identifier[Image] . identifier[new] ( identifier[self] . identifier[mode] , identifier[self] . identifier[size] ))
|
def clear(self):
"""
Initializes the device memory with an empty (blank) image.
"""
self.display(Image.new(self.mode, self.size))
|
def n_list_comp_pypy27(self, node):
    """List comprehensions in PYPY.

    Walks the parse-tree ``node`` for a list comprehension as produced by
    the PyPy 2.7 grammar, writes the reconstructed ``[ ... ]`` source, and
    prunes the subtree so the generic walker does not revisit it.
    """
    # Save and raise the operator precedence while emitting the
    # comprehension; restored just before prune().
    p = self.prec
    self.prec = 27
    if node[-1].kind == 'list_iter':
        n = node[-1]
    elif self.is_pypy and node[-1] == 'JUMP_BACK':
        # On PyPy a trailing JUMP_BACK shifts the list_iter one slot earlier.
        n = node[-2]
    # NOTE(review): if neither branch above matches, ``n`` is unbound and the
    # asserts below raise NameError instead of AssertionError -- presumably
    # the grammar guarantees one of the two shapes; confirm.
    list_expr = node[1]
    if len(node) >= 3:
        store = node[3]
    elif self.is_pypy and n[0] == 'list_for':
        store = n[0][2]
    assert n == 'list_iter'
    assert store == 'store'
    # Find the list comprehension body. It is the inner-most
    # node.
    # FIXME: DRY with other use
    while n == 'list_iter':
        n = n[0] # iterate one nesting deeper
        if n == 'list_for': n = n[3]
        elif n == 'list_if': n = n[2]
        elif n == 'list_if_not': n = n[2]
    assert n == 'lc_body'
    self.write( '[ ')
    expr = n[0]
    if self.is_pypy and node[-1] == 'JUMP_BACK':
        list_iter = node[-2]
    else:
        list_iter = node[-1]
    assert expr == 'expr'
    assert list_iter == 'list_iter'
    # FIXME: use source line numbers for directing line breaks
    self.preorder(expr)
    self.preorder(list_expr)
    self.write( ' ]')
    self.prec = p
    self.prune()
|
def function[n_list_comp_pypy27, parameter[self, node]]:
constant[List comprehensions in PYPY.]
variable[p] assign[=] name[self].prec
name[self].prec assign[=] constant[27]
if compare[call[name[node]][<ast.UnaryOp object at 0x7da2044c12a0>].kind equal[==] constant[list_iter]] begin[:]
variable[n] assign[=] call[name[node]][<ast.UnaryOp object at 0x7da2044c1cf0>]
variable[list_expr] assign[=] call[name[node]][constant[1]]
if compare[call[name[len], parameter[name[node]]] greater_or_equal[>=] constant[3]] begin[:]
variable[store] assign[=] call[name[node]][constant[3]]
assert[compare[name[n] equal[==] constant[list_iter]]]
assert[compare[name[store] equal[==] constant[store]]]
while compare[name[n] equal[==] constant[list_iter]] begin[:]
variable[n] assign[=] call[name[n]][constant[0]]
if compare[name[n] equal[==] constant[list_for]] begin[:]
variable[n] assign[=] call[name[n]][constant[3]]
assert[compare[name[n] equal[==] constant[lc_body]]]
call[name[self].write, parameter[constant[[ ]]]
variable[expr] assign[=] call[name[n]][constant[0]]
if <ast.BoolOp object at 0x7da20cabeef0> begin[:]
variable[list_iter] assign[=] call[name[node]][<ast.UnaryOp object at 0x7da20cabcbe0>]
assert[compare[name[expr] equal[==] constant[expr]]]
assert[compare[name[list_iter] equal[==] constant[list_iter]]]
call[name[self].preorder, parameter[name[expr]]]
call[name[self].preorder, parameter[name[list_expr]]]
call[name[self].write, parameter[constant[ ]]]]
name[self].prec assign[=] name[p]
call[name[self].prune, parameter[]]
|
keyword[def] identifier[n_list_comp_pypy27] ( identifier[self] , identifier[node] ):
literal[string]
identifier[p] = identifier[self] . identifier[prec]
identifier[self] . identifier[prec] = literal[int]
keyword[if] identifier[node] [- literal[int] ]. identifier[kind] == literal[string] :
identifier[n] = identifier[node] [- literal[int] ]
keyword[elif] identifier[self] . identifier[is_pypy] keyword[and] identifier[node] [- literal[int] ]== literal[string] :
identifier[n] = identifier[node] [- literal[int] ]
identifier[list_expr] = identifier[node] [ literal[int] ]
keyword[if] identifier[len] ( identifier[node] )>= literal[int] :
identifier[store] = identifier[node] [ literal[int] ]
keyword[elif] identifier[self] . identifier[is_pypy] keyword[and] identifier[n] [ literal[int] ]== literal[string] :
identifier[store] = identifier[n] [ literal[int] ][ literal[int] ]
keyword[assert] identifier[n] == literal[string]
keyword[assert] identifier[store] == literal[string]
keyword[while] identifier[n] == literal[string] :
identifier[n] = identifier[n] [ literal[int] ]
keyword[if] identifier[n] == literal[string] : identifier[n] = identifier[n] [ literal[int] ]
keyword[elif] identifier[n] == literal[string] : identifier[n] = identifier[n] [ literal[int] ]
keyword[elif] identifier[n] == literal[string] : identifier[n] = identifier[n] [ literal[int] ]
keyword[assert] identifier[n] == literal[string]
identifier[self] . identifier[write] ( literal[string] )
identifier[expr] = identifier[n] [ literal[int] ]
keyword[if] identifier[self] . identifier[is_pypy] keyword[and] identifier[node] [- literal[int] ]== literal[string] :
identifier[list_iter] = identifier[node] [- literal[int] ]
keyword[else] :
identifier[list_iter] = identifier[node] [- literal[int] ]
keyword[assert] identifier[expr] == literal[string]
keyword[assert] identifier[list_iter] == literal[string]
identifier[self] . identifier[preorder] ( identifier[expr] )
identifier[self] . identifier[preorder] ( identifier[list_expr] )
identifier[self] . identifier[write] ( literal[string] )
identifier[self] . identifier[prec] = identifier[p]
identifier[self] . identifier[prune] ()
|
def n_list_comp_pypy27(self, node):
"""List comprehensions in PYPY."""
p = self.prec
self.prec = 27
if node[-1].kind == 'list_iter':
n = node[-1] # depends on [control=['if'], data=[]]
elif self.is_pypy and node[-1] == 'JUMP_BACK':
n = node[-2] # depends on [control=['if'], data=[]]
list_expr = node[1]
if len(node) >= 3:
store = node[3] # depends on [control=['if'], data=[]]
elif self.is_pypy and n[0] == 'list_for':
store = n[0][2] # depends on [control=['if'], data=[]]
assert n == 'list_iter'
assert store == 'store'
# Find the list comprehension body. It is the inner-most
# node.
# FIXME: DRY with other use
while n == 'list_iter':
n = n[0] # iterate one nesting deeper
if n == 'list_for':
n = n[3] # depends on [control=['if'], data=['n']]
elif n == 'list_if':
n = n[2] # depends on [control=['if'], data=['n']]
elif n == 'list_if_not':
n = n[2] # depends on [control=['if'], data=['n']] # depends on [control=['while'], data=['n']]
assert n == 'lc_body'
self.write('[ ')
expr = n[0]
if self.is_pypy and node[-1] == 'JUMP_BACK':
list_iter = node[-2] # depends on [control=['if'], data=[]]
else:
list_iter = node[-1]
assert expr == 'expr'
assert list_iter == 'list_iter'
# FIXME: use source line numbers for directing line breaks
self.preorder(expr)
self.preorder(list_expr)
self.write(' ]')
self.prec = p
self.prune()
|
def load_table_from_file(
    self,
    file_obj,
    destination,
    rewind=False,
    size=None,
    num_retries=_DEFAULT_NUM_RETRIES,
    job_id=None,
    job_id_prefix=None,
    location=None,
    project=None,
    job_config=None,
):
    """Upload the contents of this table from a file-like object.
    Similar to :meth:`load_table_from_uri`, this method creates, starts and
    returns a :class:`~google.cloud.bigquery.job.LoadJob`.
    Arguments:
        file_obj (file): A file handle opened in binary mode for reading.
        destination (Union[ \
            :class:`~google.cloud.bigquery.table.Table`, \
            :class:`~google.cloud.bigquery.table.TableReference`, \
            str, \
        ]):
            Table into which data is to be loaded. If a string is passed
            in, this method attempts to create a table reference from a
            string using
            :func:`google.cloud.bigquery.table.TableReference.from_string`.
    Keyword Arguments:
        rewind (bool):
            If True, seek to the beginning of the file handle before
            reading the file.
        size (int):
            The number of bytes to read from the file handle. If size is
            ``None`` or large, resumable upload will be used. Otherwise,
            multipart upload will be used.
        num_retries (int): Number of upload retries. Defaults to 6.
        job_id (str): (Optional) Name of the job.
        job_id_prefix (str):
            (Optional) the user-provided prefix for a randomly generated
            job ID. This parameter will be ignored if a ``job_id`` is
            also given.
        location (str):
            Location where to run the job. Must match the location of the
            destination table.
        project (str):
            Project ID of the project of where to run the job. Defaults
            to the client's project.
        job_config (google.cloud.bigquery.job.LoadJobConfig):
            (Optional) Extra configuration options for the job.
    Returns:
        google.cloud.bigquery.job.LoadJob: A new load job.
    Raises:
        ValueError:
            If ``size`` is not passed in and can not be determined, or if
            the ``file_obj`` can be detected to be a file opened in text
            mode.
    """
    job_id = _make_job_id(job_id, job_id_prefix)
    # Fall back to the client's defaults when project/location are omitted.
    if project is None:
        project = self.project
    if location is None:
        location = self.location
    # Accept Table / TableReference / str uniformly.
    destination = _table_arg_to_table_ref(destination, default_project=self.project)
    job_ref = job._JobReference(job_id, project=project, location=location)
    load_job = job.LoadJob(job_ref, None, destination, self, job_config)
    job_resource = load_job.to_api_repr()
    if rewind:
        file_obj.seek(0, os.SEEK_SET)
    # Raises ValueError if the handle was opened in text mode.
    _check_mode(file_obj)
    try:
        # Small payloads of known size go through a single multipart
        # request; unknown or large sizes use the resumable protocol.
        if size is None or size >= _MAX_MULTIPART_SIZE:
            response = self._do_resumable_upload(
                file_obj, job_resource, num_retries
            )
        else:
            response = self._do_multipart_upload(
                file_obj, job_resource, size, num_retries
            )
    except resumable_media.InvalidResponse as exc:
        # Translate low-level upload failures into google.cloud exceptions.
        raise exceptions.from_http_response(exc.response)
    return self.job_from_resource(response.json())
|
def function[load_table_from_file, parameter[self, file_obj, destination, rewind, size, num_retries, job_id, job_id_prefix, location, project, job_config]]:
constant[Upload the contents of this table from a file-like object.
Similar to :meth:`load_table_from_uri`, this method creates, starts and
returns a :class:`~google.cloud.bigquery.job.LoadJob`.
Arguments:
file_obj (file): A file handle opened in binary mode for reading.
destination (Union[ :class:`~google.cloud.bigquery.table.Table`, :class:`~google.cloud.bigquery.table.TableReference`, str, ]):
Table into which data is to be loaded. If a string is passed
in, this method attempts to create a table reference from a
string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
Keyword Arguments:
rewind (bool):
If True, seek to the beginning of the file handle before
reading the file.
size (int):
The number of bytes to read from the file handle. If size is
``None`` or large, resumable upload will be used. Otherwise,
multipart upload will be used.
num_retries (int): Number of upload retries. Defaults to 6.
job_id (str): (Optional) Name of the job.
job_id_prefix (str):
(Optional) the user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of the
destination table.
project (str):
Project ID of the project of where to run the job. Defaults
to the client's project.
job_config (google.cloud.bigquery.job.LoadJobConfig):
(Optional) Extra configuration options for the job.
Returns:
google.cloud.bigquery.job.LoadJob: A new load job.
Raises:
ValueError:
If ``size`` is not passed in and can not be determined, or if
the ``file_obj`` can be detected to be a file opened in text
mode.
]
variable[job_id] assign[=] call[name[_make_job_id], parameter[name[job_id], name[job_id_prefix]]]
if compare[name[project] is constant[None]] begin[:]
variable[project] assign[=] name[self].project
if compare[name[location] is constant[None]] begin[:]
variable[location] assign[=] name[self].location
variable[destination] assign[=] call[name[_table_arg_to_table_ref], parameter[name[destination]]]
variable[job_ref] assign[=] call[name[job]._JobReference, parameter[name[job_id]]]
variable[load_job] assign[=] call[name[job].LoadJob, parameter[name[job_ref], constant[None], name[destination], name[self], name[job_config]]]
variable[job_resource] assign[=] call[name[load_job].to_api_repr, parameter[]]
if name[rewind] begin[:]
call[name[file_obj].seek, parameter[constant[0], name[os].SEEK_SET]]
call[name[_check_mode], parameter[name[file_obj]]]
<ast.Try object at 0x7da204344f10>
return[call[name[self].job_from_resource, parameter[call[name[response].json, parameter[]]]]]
|
keyword[def] identifier[load_table_from_file] (
identifier[self] ,
identifier[file_obj] ,
identifier[destination] ,
identifier[rewind] = keyword[False] ,
identifier[size] = keyword[None] ,
identifier[num_retries] = identifier[_DEFAULT_NUM_RETRIES] ,
identifier[job_id] = keyword[None] ,
identifier[job_id_prefix] = keyword[None] ,
identifier[location] = keyword[None] ,
identifier[project] = keyword[None] ,
identifier[job_config] = keyword[None] ,
):
literal[string]
identifier[job_id] = identifier[_make_job_id] ( identifier[job_id] , identifier[job_id_prefix] )
keyword[if] identifier[project] keyword[is] keyword[None] :
identifier[project] = identifier[self] . identifier[project]
keyword[if] identifier[location] keyword[is] keyword[None] :
identifier[location] = identifier[self] . identifier[location]
identifier[destination] = identifier[_table_arg_to_table_ref] ( identifier[destination] , identifier[default_project] = identifier[self] . identifier[project] )
identifier[job_ref] = identifier[job] . identifier[_JobReference] ( identifier[job_id] , identifier[project] = identifier[project] , identifier[location] = identifier[location] )
identifier[load_job] = identifier[job] . identifier[LoadJob] ( identifier[job_ref] , keyword[None] , identifier[destination] , identifier[self] , identifier[job_config] )
identifier[job_resource] = identifier[load_job] . identifier[to_api_repr] ()
keyword[if] identifier[rewind] :
identifier[file_obj] . identifier[seek] ( literal[int] , identifier[os] . identifier[SEEK_SET] )
identifier[_check_mode] ( identifier[file_obj] )
keyword[try] :
keyword[if] identifier[size] keyword[is] keyword[None] keyword[or] identifier[size] >= identifier[_MAX_MULTIPART_SIZE] :
identifier[response] = identifier[self] . identifier[_do_resumable_upload] (
identifier[file_obj] , identifier[job_resource] , identifier[num_retries]
)
keyword[else] :
identifier[response] = identifier[self] . identifier[_do_multipart_upload] (
identifier[file_obj] , identifier[job_resource] , identifier[size] , identifier[num_retries]
)
keyword[except] identifier[resumable_media] . identifier[InvalidResponse] keyword[as] identifier[exc] :
keyword[raise] identifier[exceptions] . identifier[from_http_response] ( identifier[exc] . identifier[response] )
keyword[return] identifier[self] . identifier[job_from_resource] ( identifier[response] . identifier[json] ())
|
def load_table_from_file(self, file_obj, destination, rewind=False, size=None, num_retries=_DEFAULT_NUM_RETRIES, job_id=None, job_id_prefix=None, location=None, project=None, job_config=None):
"""Upload the contents of this table from a file-like object.
Similar to :meth:`load_table_from_uri`, this method creates, starts and
returns a :class:`~google.cloud.bigquery.job.LoadJob`.
Arguments:
file_obj (file): A file handle opened in binary mode for reading.
destination (Union[ :class:`~google.cloud.bigquery.table.Table`, :class:`~google.cloud.bigquery.table.TableReference`, str, ]):
Table into which data is to be loaded. If a string is passed
in, this method attempts to create a table reference from a
string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
Keyword Arguments:
rewind (bool):
If True, seek to the beginning of the file handle before
reading the file.
size (int):
The number of bytes to read from the file handle. If size is
``None`` or large, resumable upload will be used. Otherwise,
multipart upload will be used.
num_retries (int): Number of upload retries. Defaults to 6.
job_id (str): (Optional) Name of the job.
job_id_prefix (str):
(Optional) the user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of the
destination table.
project (str):
Project ID of the project of where to run the job. Defaults
to the client's project.
job_config (google.cloud.bigquery.job.LoadJobConfig):
(Optional) Extra configuration options for the job.
Returns:
google.cloud.bigquery.job.LoadJob: A new load job.
Raises:
ValueError:
If ``size`` is not passed in and can not be determined, or if
the ``file_obj`` can be detected to be a file opened in text
mode.
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project # depends on [control=['if'], data=['project']]
if location is None:
location = self.location # depends on [control=['if'], data=['location']]
destination = _table_arg_to_table_ref(destination, default_project=self.project)
job_ref = job._JobReference(job_id, project=project, location=location)
load_job = job.LoadJob(job_ref, None, destination, self, job_config)
job_resource = load_job.to_api_repr()
if rewind:
file_obj.seek(0, os.SEEK_SET) # depends on [control=['if'], data=[]]
_check_mode(file_obj)
try:
if size is None or size >= _MAX_MULTIPART_SIZE:
response = self._do_resumable_upload(file_obj, job_resource, num_retries) # depends on [control=['if'], data=[]]
else:
response = self._do_multipart_upload(file_obj, job_resource, size, num_retries) # depends on [control=['try'], data=[]]
except resumable_media.InvalidResponse as exc:
raise exceptions.from_http_response(exc.response) # depends on [control=['except'], data=['exc']]
return self.job_from_resource(response.json())
|
def format_request_email_title(increq, **ctx):
    """Format the email message title for inclusion request notification.

    :param increq: Inclusion request object for which the request is made.
    :type increq: `invenio_communities.models.InclusionRequest`
    :param ctx: Optional extra context parameters passed to formatter.
    :type ctx: dict.
    :returns: Email message title.
    :rtype: str
    """
    # NOTE(review): the trailing comma makes ``template`` a 1-tuple of the
    # configured template name, not a bare string. Confirm the downstream
    # formatter accepts a tuple of template names (e.g. Jinja's
    # select_template does); otherwise this comma is a bug.
    template = current_app.config["COMMUNITIES_REQUEST_EMAIL_TITLE_TEMPLATE"],
    return format_request_email_templ(increq, template, **ctx)
|
def function[format_request_email_title, parameter[increq]]:
constant[Format the email message title for inclusion request notification.
:param increq: Inclusion request object for which the request is made.
:type increq: `invenio_communities.models.InclusionRequest`
:param ctx: Optional extra context parameters passed to formatter.
:type ctx: dict.
:returns: Email message title.
:rtype: str
]
variable[template] assign[=] tuple[[<ast.Subscript object at 0x7da2054a4580>]]
return[call[name[format_request_email_templ], parameter[name[increq], name[template]]]]
|
keyword[def] identifier[format_request_email_title] ( identifier[increq] ,** identifier[ctx] ):
literal[string]
identifier[template] = identifier[current_app] . identifier[config] [ literal[string] ],
keyword[return] identifier[format_request_email_templ] ( identifier[increq] , identifier[template] ,** identifier[ctx] )
|
def format_request_email_title(increq, **ctx):
    """Format the email message title for inclusion request notification.

    :param increq: Inclusion request object for which the request is made.
    :type increq: `invenio_communities.models.InclusionRequest`
    :param ctx: Optional extra context parameters passed to formatter.
    :type ctx: dict.
    :returns: Email message title.
    :rtype: str
    """
    # Deliberately a 1-tuple of template names, matching original behavior.
    configured = current_app.config['COMMUNITIES_REQUEST_EMAIL_TITLE_TEMPLATE']
    return format_request_email_templ(increq, (configured,), **ctx)
|
def reverse_char(self, hints):
    """Return QuerySet of objects from SQLAlchemy of results.

    Parameters
    ----------
    hints : list of str
        Substrings to look up across all Unihan columns.

    Returns
    -------
    :class:`sqlalchemy.orm.query.Query` :
        reverse matches
    """
    # A bare string is treated as a one-element list of hints.
    if isinstance(hints, string_types):
        hints = [hints]
    mapped = self.sql.base.classes.Unihan
    # A row matches when any column contains any of the hints.
    predicates = [
        col.contains(hint)
        for col in mapped.__table__.columns
        for hint in hints
    ]
    return self.sql.session.query(mapped).filter(or_(*predicates))
|
def function[reverse_char, parameter[self, hints]]:
constant[Return QuerySet of objects from SQLAlchemy of results.
Parameters
----------
hints: list of str
strings to lookup
Returns
-------
:class:`sqlalchemy.orm.query.Query` :
reverse matches
]
if call[name[isinstance], parameter[name[hints], name[string_types]]] begin[:]
variable[hints] assign[=] list[[<ast.Name object at 0x7da1b1909090>]]
variable[Unihan] assign[=] name[self].sql.base.classes.Unihan
variable[columns] assign[=] name[Unihan].__table__.columns
return[call[call[name[self].sql.session.query, parameter[name[Unihan]]].filter, parameter[call[name[or_], parameter[<ast.Starred object at 0x7da1b195cd60>]]]]]
|
keyword[def] identifier[reverse_char] ( identifier[self] , identifier[hints] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[hints] , identifier[string_types] ):
identifier[hints] =[ identifier[hints] ]
identifier[Unihan] = identifier[self] . identifier[sql] . identifier[base] . identifier[classes] . identifier[Unihan]
identifier[columns] = identifier[Unihan] . identifier[__table__] . identifier[columns]
keyword[return] identifier[self] . identifier[sql] . identifier[session] . identifier[query] ( identifier[Unihan] ). identifier[filter] (
identifier[or_] (*[ identifier[column] . identifier[contains] ( identifier[hint] ) keyword[for] identifier[column] keyword[in] identifier[columns] keyword[for] identifier[hint] keyword[in] identifier[hints] ])
)
|
def reverse_char(self, hints):
    """Return QuerySet of objects from SQLAlchemy of results.

    Parameters
    ----------
    hints : list of str
        strings to lookup

    Returns
    -------
    :class:`sqlalchemy.orm.query.Query` :
        reverse matches
    """
    if isinstance(hints, string_types):
        hints = [hints]
    table = self.sql.base.classes.Unihan
    # Build one containment predicate per (column, hint) pair and OR them.
    conditions = []
    for column in table.__table__.columns:
        for hint in hints:
            conditions.append(column.contains(hint))
    return self.sql.session.query(table).filter(or_(*conditions))
|
def request_tokens(self, amount, account):
    """
    Request an amount of tokens for a particular address.

    This transaction has gas cost.

    :param amount: Amount of tokens, int
    :param account: Account instance; its address receives the tokens and
        its password is used as the transaction passphrase
    :raise OceanInvalidTransaction: if the underlying transaction call
        raises ``ValueError``
    :return: bool -- True when the transaction was mined successfully and
        no rate-limiting event was emitted; False otherwise
    """
    address = account.address
    try:
        tx_hash = self.send_transaction(
            'requestTokens',
            (amount,),
            transact={'from': address,
                      'passphrase': account.password}
        )
        logging.debug(f'{address} requests {amount} tokens, returning receipt')
        # Wait up to 20 seconds for the transaction to be mined; a timeout
        # is treated the same as a missing receipt (failure).
        try:
            receipt = Web3Provider.get_web3().eth.waitForTransactionReceipt(
                tx_hash, timeout=20)
            logging.debug(f'requestTokens receipt: {receipt}')
        except Timeout:
            receipt = None
        if not receipt:
            return False
        # Status 0 means the transaction was mined but reverted.
        if receipt.status == 0:
            logging.warning(f'request tokens failed: Tx-receipt={receipt}')
            logging.warning(f'request tokens failed: account {address}')
            return False
        # Check for emitted rate-limiting events; either event for this
        # requester means the request was rejected.
        rfe = EventFilter(
            'RequestFrequencyExceeded',
            self.events.RequestFrequencyExceeded,
            argument_filters={'requester': Web3Provider.get_web3().toBytes(hexstr=address)},
            from_block='latest',
            to_block='latest',
        )
        logs = rfe.get_all_entries(max_tries=5)
        if logs:
            logging.warning(f'request tokens failed RequestFrequencyExceeded')
            logging.info(f'RequestFrequencyExceeded event logs: {logs}')
            return False
        rle = EventFilter(
            'RequestLimitExceeded',
            self.events.RequestLimitExceeded,
            argument_filters={'requester': Web3Provider.get_web3().toBytes(hexstr=address)},
            from_block='latest',
            to_block='latest',
        )
        logs = rle.get_all_entries(max_tries=5)
        if logs:
            logging.warning(f'request tokens failed RequestLimitExceeded')
            logging.info(f'RequestLimitExceeded event logs: {logs}')
            return False
        return True
    except ValueError as err:
        # Surface a rejected/invalid transaction as the domain error.
        raise OceanInvalidTransaction(
            f'Requesting {amount} tokens'
            f' to {address} failed with error: {err}'
        )
|
def function[request_tokens, parameter[self, amount, account]]:
constant[
Request an amount of tokens for a particular address.
This transaction has gas cost
:param amount: Amount of tokens, int
:param account: Account instance
:raise OceanInvalidTransaction: Transaction failed
:return: bool
]
variable[address] assign[=] name[account].address
<ast.Try object at 0x7da2043444c0>
|
keyword[def] identifier[request_tokens] ( identifier[self] , identifier[amount] , identifier[account] ):
literal[string]
identifier[address] = identifier[account] . identifier[address]
keyword[try] :
identifier[tx_hash] = identifier[self] . identifier[send_transaction] (
literal[string] ,
( identifier[amount] ,),
identifier[transact] ={ literal[string] : identifier[address] ,
literal[string] : identifier[account] . identifier[password] }
)
identifier[logging] . identifier[debug] ( literal[string] )
keyword[try] :
identifier[receipt] = identifier[Web3Provider] . identifier[get_web3] (). identifier[eth] . identifier[waitForTransactionReceipt] (
identifier[tx_hash] , identifier[timeout] = literal[int] )
identifier[logging] . identifier[debug] ( literal[string] )
keyword[except] identifier[Timeout] :
identifier[receipt] = keyword[None]
keyword[if] keyword[not] identifier[receipt] :
keyword[return] keyword[False]
keyword[if] identifier[receipt] . identifier[status] == literal[int] :
identifier[logging] . identifier[warning] ( literal[string] )
identifier[logging] . identifier[warning] ( literal[string] )
keyword[return] keyword[False]
identifier[rfe] = identifier[EventFilter] (
literal[string] ,
identifier[self] . identifier[events] . identifier[RequestFrequencyExceeded] ,
identifier[argument_filters] ={ literal[string] : identifier[Web3Provider] . identifier[get_web3] (). identifier[toBytes] ( identifier[hexstr] = identifier[address] )},
identifier[from_block] = literal[string] ,
identifier[to_block] = literal[string] ,
)
identifier[logs] = identifier[rfe] . identifier[get_all_entries] ( identifier[max_tries] = literal[int] )
keyword[if] identifier[logs] :
identifier[logging] . identifier[warning] ( literal[string] )
identifier[logging] . identifier[info] ( literal[string] )
keyword[return] keyword[False]
identifier[rle] = identifier[EventFilter] (
literal[string] ,
identifier[self] . identifier[events] . identifier[RequestLimitExceeded] ,
identifier[argument_filters] ={ literal[string] : identifier[Web3Provider] . identifier[get_web3] (). identifier[toBytes] ( identifier[hexstr] = identifier[address] )},
identifier[from_block] = literal[string] ,
identifier[to_block] = literal[string] ,
)
identifier[logs] = identifier[rle] . identifier[get_all_entries] ( identifier[max_tries] = literal[int] )
keyword[if] identifier[logs] :
identifier[logging] . identifier[warning] ( literal[string] )
identifier[logging] . identifier[info] ( literal[string] )
keyword[return] keyword[False]
keyword[return] keyword[True]
keyword[except] identifier[ValueError] keyword[as] identifier[err] :
keyword[raise] identifier[OceanInvalidTransaction] (
literal[string]
literal[string]
)
|
def request_tokens(self, amount, account):
    """
    Request an amount of tokens for a particular address.

    This transaction has gas cost.

    :param amount: Amount of tokens, int
    :param account: Account instance; its address receives the tokens and
        its password is used as the transaction passphrase
    :raise OceanInvalidTransaction: if the underlying transaction call
        raises ``ValueError``
    :return: bool -- True when the transaction was mined successfully and
        no rate-limiting event was emitted; False otherwise
    """
    address = account.address
    try:
        tx_hash = self.send_transaction('requestTokens', (amount,), transact={'from': address, 'passphrase': account.password})
        logging.debug(f'{address} requests {amount} tokens, returning receipt')
        # Wait up to 20 seconds for mining; a timeout counts as failure.
        try:
            receipt = Web3Provider.get_web3().eth.waitForTransactionReceipt(tx_hash, timeout=20)
            logging.debug(f'requestTokens receipt: {receipt}') # depends on [control=['try'], data=[]]
        except Timeout:
            receipt = None # depends on [control=['except'], data=[]]
        if not receipt:
            return False # depends on [control=['if'], data=[]]
        # Status 0 means the transaction was mined but reverted.
        if receipt.status == 0:
            logging.warning(f'request tokens failed: Tx-receipt={receipt}')
            logging.warning(f'request tokens failed: account {address}')
            return False # depends on [control=['if'], data=[]]
        # check for emitted events: either rate-limiting event for this
        # requester means the request was rejected.
        rfe = EventFilter('RequestFrequencyExceeded', self.events.RequestFrequencyExceeded, argument_filters={'requester': Web3Provider.get_web3().toBytes(hexstr=address)}, from_block='latest', to_block='latest')
        logs = rfe.get_all_entries(max_tries=5)
        if logs:
            logging.warning(f'request tokens failed RequestFrequencyExceeded')
            logging.info(f'RequestFrequencyExceeded event logs: {logs}')
            return False # depends on [control=['if'], data=[]]
        rle = EventFilter('RequestLimitExceeded', self.events.RequestLimitExceeded, argument_filters={'requester': Web3Provider.get_web3().toBytes(hexstr=address)}, from_block='latest', to_block='latest')
        logs = rle.get_all_entries(max_tries=5)
        if logs:
            logging.warning(f'request tokens failed RequestLimitExceeded')
            logging.info(f'RequestLimitExceeded event logs: {logs}')
            return False # depends on [control=['if'], data=[]]
        return True # depends on [control=['try'], data=[]]
    except ValueError as err:
        # Surface a rejected/invalid transaction as the domain error.
        raise OceanInvalidTransaction(f'Requesting {amount} tokens to {address} failed with error: {err}') # depends on [control=['except'], data=['err']]
|
def Match(self, registry_key):
    """Determines if a Windows Registry key matches the filter.

    Args:
      registry_key (dfwinreg.WinRegistryKey): Windows Registry key.

    Returns:
      bool: True if the keys match.
    """
    upper_path = registry_key.path.upper()
    prefix = self._key_path_prefix
    suffix = self._key_path_suffix
    if prefix and suffix:
        if upper_path.startswith(prefix) and upper_path.endswith(suffix):
            # The segment between the affixes may name a control set,
            # e.g. "CONTROLSET001".
            middle = upper_path[len(prefix):-len(suffix)]
            if middle.startswith('CONTROLSET'):
                try:
                    number = int(middle[10:], 10)
                except ValueError:
                    number = None
                # TODO: check if the control set number is in bounds.
                return number is not None
    return upper_path in (self._key_path_upper, self._wow64_key_path_upper)
|
def function[Match, parameter[self, registry_key]]:
constant[Determines if a Windows Registry key matches the filter.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Returns:
bool: True if the keys match.
]
variable[key_path] assign[=] call[name[registry_key].path.upper, parameter[]]
if <ast.BoolOp object at 0x7da20c7cb100> begin[:]
if <ast.BoolOp object at 0x7da20c7c9960> begin[:]
variable[key_path_segment] assign[=] call[name[key_path]][<ast.Slice object at 0x7da20c7caaa0>]
if call[name[key_path_segment].startswith, parameter[call[constant[ControlSet].upper, parameter[]]]] begin[:]
<ast.Try object at 0x7da20c7c8c40>
return[compare[name[control_set] is_not constant[None]]]
return[compare[name[key_path] in tuple[[<ast.Attribute object at 0x7da20c7c8100>, <ast.Attribute object at 0x7da20c7c8880>]]]]
|
keyword[def] identifier[Match] ( identifier[self] , identifier[registry_key] ):
literal[string]
identifier[key_path] = identifier[registry_key] . identifier[path] . identifier[upper] ()
keyword[if] identifier[self] . identifier[_key_path_prefix] keyword[and] identifier[self] . identifier[_key_path_suffix] :
keyword[if] ( identifier[key_path] . identifier[startswith] ( identifier[self] . identifier[_key_path_prefix] ) keyword[and]
identifier[key_path] . identifier[endswith] ( identifier[self] . identifier[_key_path_suffix] )):
identifier[key_path_segment] = identifier[key_path] [
identifier[len] ( identifier[self] . identifier[_key_path_prefix] ):- identifier[len] ( identifier[self] . identifier[_key_path_suffix] )]
keyword[if] identifier[key_path_segment] . identifier[startswith] ( literal[string] . identifier[upper] ()):
keyword[try] :
identifier[control_set] = identifier[int] ( identifier[key_path_segment] [ literal[int] :], literal[int] )
keyword[except] identifier[ValueError] :
identifier[control_set] = keyword[None]
keyword[return] identifier[control_set] keyword[is] keyword[not] keyword[None]
keyword[return] identifier[key_path] keyword[in] ( identifier[self] . identifier[_key_path_upper] , identifier[self] . identifier[_wow64_key_path_upper] )
|
def Match(self, registry_key):
    """Determines if a Windows Registry key matches the filter.

    Args:
      registry_key (dfwinreg.WinRegistryKey): Windows Registry key.

    Returns:
      bool: True if the keys match.
    """
    key_path = registry_key.path.upper()
    has_affixes = bool(self._key_path_prefix) and bool(self._key_path_suffix)
    if (has_affixes and key_path.startswith(self._key_path_prefix)
            and key_path.endswith(self._key_path_suffix)):
        segment = key_path[len(self._key_path_prefix):-len(self._key_path_suffix)]
        if segment.startswith('ControlSet'.upper()):
            # A valid control-set segment ends in a base-10 number.
            try:
                control_set = int(segment[10:], 10)
            except ValueError:
                control_set = None
            # TODO: check if control_set is in bounds.
            return control_set is not None
    return key_path in (self._key_path_upper, self._wow64_key_path_upper)
|
def get(cls, **kwargs):
    """Get cart.

    Retrieve the shopping cart of the current session. The request is
    synchronous by default; pass ``async=True`` (via keyword dict, since
    ``async`` is a reserved word) to receive the request thread instead.

    :param async bool
    :return: ShoppingCart, or the request thread when called asynchronously.
    """
    # Always ask the helper for the payload only, not the full HTTP tuple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._get_with_http_info(**kwargs)
    data = cls._get_with_http_info(**kwargs)
    return data
|
def function[get, parameter[cls]]:
constant[Get cart.
Retrieve the shopping cart of the current session.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get(async=True)
>>> result = thread.get()
:param async bool
:return: ShoppingCart
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async]]] begin[:]
return[call[name[cls]._get_with_http_info, parameter[]]]
|
keyword[def] identifier[get] ( identifier[cls] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[cls] . identifier[_get_with_http_info] (** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[cls] . identifier[_get_with_http_info] (** identifier[kwargs] )
keyword[return] identifier[data]
|
def get(cls, **kwargs):
    """Get cart.

    Retrieve the shopping cart of the current session. Synchronous by
    default; when ``async=True`` is supplied the helper returns the
    request thread instead of the cart.

    :param async bool
    :return: ShoppingCart, or the request thread if called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the same helper, which
    # interprets the ``async`` flag itself.
    return cls._get_with_http_info(**kwargs)
|
def best_buy_2(self):
    """Volume shrinks while the price does not fall (量縮價不跌)."""
    caps = self.data.capacity
    prices = self.data.price
    shrinking_volume = caps[-1] < caps[-2]
    price_holding = prices[-1] > prices[-2]
    return shrinking_volume and price_holding
|
def function[best_buy_2, parameter[self]]:
constant[量縮價不跌
]
variable[result] assign[=] <ast.BoolOp object at 0x7da1b19cf610>
return[name[result]]
|
keyword[def] identifier[best_buy_2] ( identifier[self] ):
literal[string]
identifier[result] = identifier[self] . identifier[data] . identifier[capacity] [- literal[int] ]< identifier[self] . identifier[data] . identifier[capacity] [- literal[int] ] keyword[and] identifier[self] . identifier[data] . identifier[price] [- literal[int] ]> identifier[self] . identifier[data] . identifier[price] [- literal[int] ]
keyword[return] identifier[result]
|
def best_buy_2(self):
    """Volume shrinks while the price holds up (量縮價不跌)."""
    # Compare the latest bar against the one before it.
    capacity = self.data.capacity
    price = self.data.price
    return capacity[-1] < capacity[-2] and price[-1] > price[-2]
|
def get_config_h_filename():
    """Return the path of pyconfig.h."""
    # In a source build the header lives in the project tree (PC/ on
    # Windows); in an installed Python it comes from the platinclude path.
    if not _PYTHON_BUILD:
        inc_dir = get_path('platinclude')
    elif os.name == 'nt':
        inc_dir = os.path.join(_PROJECT_BASE, 'PC')
    else:
        inc_dir = _PROJECT_BASE
    return os.path.join(inc_dir, 'pyconfig.h')
|
def function[get_config_h_filename, parameter[]]:
constant[Return the path of pyconfig.h.]
if name[_PYTHON_BUILD] begin[:]
if compare[name[os].name equal[==] constant[nt]] begin[:]
variable[inc_dir] assign[=] call[name[os].path.join, parameter[name[_PROJECT_BASE], constant[PC]]]
return[call[name[os].path.join, parameter[name[inc_dir], constant[pyconfig.h]]]]
|
keyword[def] identifier[get_config_h_filename] ():
literal[string]
keyword[if] identifier[_PYTHON_BUILD] :
keyword[if] identifier[os] . identifier[name] == literal[string] :
identifier[inc_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[_PROJECT_BASE] , literal[string] )
keyword[else] :
identifier[inc_dir] = identifier[_PROJECT_BASE]
keyword[else] :
identifier[inc_dir] = identifier[get_path] ( literal[string] )
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[inc_dir] , literal[string] )
|
def get_config_h_filename():
    """Return the path of pyconfig.h."""
    if _PYTHON_BUILD:
        # Source builds keep the header in PC/ on Windows, else at the root.
        inc_dir = os.path.join(_PROJECT_BASE, 'PC') if os.name == 'nt' else _PROJECT_BASE
    else:
        inc_dir = get_path('platinclude')
    return os.path.join(inc_dir, 'pyconfig.h')
|
def segment(self):
    """Associated (full) :class:`stravalib.model.Segment` object, fetched lazily."""
    # Return the memoized segment when it has already been resolved.
    if self._segment is not None:
        return self._segment
    self.assert_bind_client()
    if self.id is not None:
        self._segment = self.bind_client.get_segment(self.id)
    return self._segment
|
def function[segment, parameter[self]]:
constant[ Associated (full) :class:`stravalib.model.Segment` object. ]
if compare[name[self]._segment is constant[None]] begin[:]
call[name[self].assert_bind_client, parameter[]]
if compare[name[self].id is_not constant[None]] begin[:]
name[self]._segment assign[=] call[name[self].bind_client.get_segment, parameter[name[self].id]]
return[name[self]._segment]
|
keyword[def] identifier[segment] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_segment] keyword[is] keyword[None] :
identifier[self] . identifier[assert_bind_client] ()
keyword[if] identifier[self] . identifier[id] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_segment] = identifier[self] . identifier[bind_client] . identifier[get_segment] ( identifier[self] . identifier[id] )
keyword[return] identifier[self] . identifier[_segment]
|
def segment(self):
    """Associated (full) :class:`stravalib.model.Segment` object (memoized)."""
    if self._segment is None:
        self.assert_bind_client()
        # Only fetch when an id is available; otherwise stay unset.
        if self.id is not None:
            fetched = self.bind_client.get_segment(self.id)
            self._segment = fetched
    return self._segment
|
def insert(self, streamname, value):
    """Insert the datapoint into the logger for the given stream name.

    The logger caches the datapoint and eventually synchronizes it with
    ConnectorDB.
    """
    if streamname not in self.streams:
        raise Exception("The stream '%s' was not found" % (streamname,))
    # The datapoint must conform to the stream's schema before caching.
    validate(value, self.streams[streamname])
    payload = json.dumps(value)
    logging.debug("Logger: %s <= %s" % (streamname, payload))
    cursor = self.database.cursor()
    cursor.execute("INSERT INTO cache VALUES (?,?,?);",
                   (streamname, time.time(), payload))
|
def function[insert, parameter[self, streamname, value]]:
constant[Insert the datapoint into the logger for the given stream name. The logger caches the datapoint
and eventually synchronizes it with ConnectorDB]
if compare[name[streamname] <ast.NotIn object at 0x7da2590d7190> name[self].streams] begin[:]
<ast.Raise object at 0x7da18f810700>
call[name[validate], parameter[name[value], call[name[self].streams][name[streamname]]]]
variable[value] assign[=] call[name[json].dumps, parameter[name[value]]]
call[name[logging].debug, parameter[binary_operation[constant[Logger: %s <= %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f8123e0>, <ast.Name object at 0x7da18f8112d0>]]]]]
variable[c] assign[=] call[name[self].database.cursor, parameter[]]
call[name[c].execute, parameter[constant[INSERT INTO cache VALUES (?,?,?);], tuple[[<ast.Name object at 0x7da18f813760>, <ast.Call object at 0x7da18f813580>, <ast.Name object at 0x7da18f812d70>]]]]
|
keyword[def] identifier[insert] ( identifier[self] , identifier[streamname] , identifier[value] ):
literal[string]
keyword[if] identifier[streamname] keyword[not] keyword[in] identifier[self] . identifier[streams] :
keyword[raise] identifier[Exception] ( literal[string] %( identifier[streamname] ,))
identifier[validate] ( identifier[value] , identifier[self] . identifier[streams] [ identifier[streamname] ])
identifier[value] = identifier[json] . identifier[dumps] ( identifier[value] )
identifier[logging] . identifier[debug] ( literal[string] %( identifier[streamname] , identifier[value] ))
identifier[c] = identifier[self] . identifier[database] . identifier[cursor] ()
identifier[c] . identifier[execute] ( literal[string] ,
( identifier[streamname] , identifier[time] . identifier[time] (), identifier[value] ))
|
def insert(self, streamname, value):
    """Cache a datapoint for ``streamname``.

    The logger caches the datapoint and eventually synchronizes it with
    ConnectorDB.
    """
    if streamname not in self.streams:
        raise Exception("The stream '%s' was not found" % (streamname,))
    # Validate against the stream's schema, then serialize for storage.
    validate(value, self.streams[streamname])
    serialized = json.dumps(value)
    logging.debug("Logger: %s <= %s" % (streamname, serialized))
    self.database.cursor().execute(
        "INSERT INTO cache VALUES (?,?,?);",
        (streamname, time.time(), serialized))
|
def read_version():
    """Read version from curdling/version.py without loading any files"""
    # Parse the module's AST instead of importing it, avoiding side effects.
    tree = ast.parse(local_file('curdling', 'version.py'))
    finder = VersionFinder()
    finder.visit(tree)
    return finder.version
|
def function[read_version, parameter[]]:
constant[Read version from curdling/version.py without loading any files]
variable[finder] assign[=] call[name[VersionFinder], parameter[]]
call[name[finder].visit, parameter[call[name[ast].parse, parameter[call[name[local_file], parameter[constant[curdling], constant[version.py]]]]]]]
return[name[finder].version]
|
keyword[def] identifier[read_version] ():
literal[string]
identifier[finder] = identifier[VersionFinder] ()
identifier[finder] . identifier[visit] ( identifier[ast] . identifier[parse] ( identifier[local_file] ( literal[string] , literal[string] )))
keyword[return] identifier[finder] . identifier[version]
|
def read_version():
    """Read version from curdling/version.py without loading any files"""
    source = local_file('curdling', 'version.py')
    finder = VersionFinder()
    # Walk the parsed source; the finder records the version assignment.
    finder.visit(ast.parse(source))
    return finder.version
|
def append_cell_value(self, column_family_id, column, value):
    """Appends a value to an existing cell.

    .. note::

        This method only accumulates a read-modify rule locally; no API
        request is made until :meth:`commit` is called.

    For example:

    .. literalinclude:: snippets_table.py
        :start-after: [START bigtable_row_append_cell_value]
        :end-before: [END bigtable_row_append_cell_value]

    :type column_family_id: str
    :param column_family_id: The column family that contains the column.
                             Must be of the form
                             ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.

    :type column: bytes
    :param column: The column within the column family where the cell
                   is located.

    :type value: bytes
    :param value: The value to append to the existing value in the cell.
                  If the targeted cell is unset, it is treated as
                  containing the empty string.
    """
    # Normalize both inputs to bytes before building the protobuf rule.
    rule = data_v2_pb2.ReadModifyWriteRule(
        family_name=column_family_id,
        column_qualifier=_to_bytes(column),
        append_value=_to_bytes(value),
    )
    self._rule_pb_list.append(rule)
|
def function[append_cell_value, parameter[self, column_family_id, column, value]]:
constant[Appends a value to an existing cell.
.. note::
This method adds a read-modify rule protobuf to the accumulated
read-modify rules on this row, but does not make an API
request. To actually send an API request (with the rules) to the
Google Cloud Bigtable API, call :meth:`commit`.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_append_cell_value]
:end-before: [END bigtable_row_append_cell_value]
:type column_family_id: str
:param column_family_id: The column family that contains the column.
Must be of the form
``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type column: bytes
:param column: The column within the column family where the cell
is located.
:type value: bytes
:param value: The value to append to the existing value in the cell. If
the targeted cell is unset, it will be treated as
containing the empty string.
]
variable[column] assign[=] call[name[_to_bytes], parameter[name[column]]]
variable[value] assign[=] call[name[_to_bytes], parameter[name[value]]]
variable[rule_pb] assign[=] call[name[data_v2_pb2].ReadModifyWriteRule, parameter[]]
call[name[self]._rule_pb_list.append, parameter[name[rule_pb]]]
|
keyword[def] identifier[append_cell_value] ( identifier[self] , identifier[column_family_id] , identifier[column] , identifier[value] ):
literal[string]
identifier[column] = identifier[_to_bytes] ( identifier[column] )
identifier[value] = identifier[_to_bytes] ( identifier[value] )
identifier[rule_pb] = identifier[data_v2_pb2] . identifier[ReadModifyWriteRule] (
identifier[family_name] = identifier[column_family_id] , identifier[column_qualifier] = identifier[column] , identifier[append_value] = identifier[value]
)
identifier[self] . identifier[_rule_pb_list] . identifier[append] ( identifier[rule_pb] )
|
def append_cell_value(self, column_family_id, column, value):
    """Queue an append to an existing cell's value.

    .. note::

        This only adds a read-modify rule protobuf to the rules
        accumulated on this row; call :meth:`commit` to actually send
        the rules to the Google Cloud Bigtable API.

    :type column_family_id: str
    :param column_family_id: The column family that contains the column.
                             Must be of the form
                             ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.

    :type column: bytes
    :param column: The column within the column family where the cell
                   is located.

    :type value: bytes
    :param value: The value to append to the existing value in the cell.
                  An unset target cell is treated as containing the
                  empty string.
    """
    qualifier = _to_bytes(column)
    payload = _to_bytes(value)
    rule_pb = data_v2_pb2.ReadModifyWriteRule(
        family_name=column_family_id,
        column_qualifier=qualifier,
        append_value=payload,
    )
    self._rule_pb_list.append(rule_pb)
|
def show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_src_port(self, **kwargs):
    """Auto Generated Code
    """
    # Build the nested XML hierarchy one level at a time.
    config = ET.Element("show_fabric_trunk_info")
    node = ET.SubElement(config, "output")
    node = ET.SubElement(node, "show-trunk-list")
    node = ET.SubElement(node, "trunk-list-groups")
    node = ET.SubElement(node, "trunk-list-member")
    leaf = ET.SubElement(node, "trunk-list-src-port")
    leaf.text = kwargs.pop('trunk_list_src_port')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def function[show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_src_port, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[show_fabric_trunk_info] assign[=] call[name[ET].Element, parameter[constant[show_fabric_trunk_info]]]
variable[config] assign[=] name[show_fabric_trunk_info]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[show_fabric_trunk_info], constant[output]]]
variable[show_trunk_list] assign[=] call[name[ET].SubElement, parameter[name[output], constant[show-trunk-list]]]
variable[trunk_list_groups] assign[=] call[name[ET].SubElement, parameter[name[show_trunk_list], constant[trunk-list-groups]]]
variable[trunk_list_member] assign[=] call[name[ET].SubElement, parameter[name[trunk_list_groups], constant[trunk-list-member]]]
variable[trunk_list_src_port] assign[=] call[name[ET].SubElement, parameter[name[trunk_list_member], constant[trunk-list-src-port]]]
name[trunk_list_src_port].text assign[=] call[name[kwargs].pop, parameter[constant[trunk_list_src_port]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]]
|
keyword[def] identifier[show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_src_port] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[show_fabric_trunk_info] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[show_fabric_trunk_info]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[show_fabric_trunk_info] , literal[string] )
identifier[show_trunk_list] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[trunk_list_groups] = identifier[ET] . identifier[SubElement] ( identifier[show_trunk_list] , literal[string] )
identifier[trunk_list_member] = identifier[ET] . identifier[SubElement] ( identifier[trunk_list_groups] , literal[string] )
identifier[trunk_list_src_port] = identifier[ET] . identifier[SubElement] ( identifier[trunk_list_member] , literal[string] )
identifier[trunk_list_src_port] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] )
|
def show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_src_port(self, **kwargs):
    """Build and dispatch a show-fabric-trunk-info RPC filter document.

    Assembles the element tree
    ``show_fabric_trunk_info/output/show-trunk-list/trunk-list-groups/
    trunk-list-member/trunk-list-src-port`` and hands it to the callback.

    Keyword Args:
        trunk_list_src_port: text value for the ``trunk-list-src-port``
            leaf (required; popped from ``kwargs``, so a missing value
            raises ``KeyError``).
        callback: callable invoked with the assembled element tree;
            defaults to ``self._callback``.

    Returns:
        Whatever the callback returns for the assembled document.
    """
    # The RPC element itself is the document root; the original
    # auto-generated code created a throwaway ET.Element('config') that
    # was immediately overwritten — that dead assignment is removed.
    show_fabric_trunk_info = ET.Element('show_fabric_trunk_info')
    config = show_fabric_trunk_info
    output = ET.SubElement(show_fabric_trunk_info, 'output')
    show_trunk_list = ET.SubElement(output, 'show-trunk-list')
    trunk_list_groups = ET.SubElement(show_trunk_list, 'trunk-list-groups')
    trunk_list_member = ET.SubElement(trunk_list_groups, 'trunk-list-member')
    trunk_list_src_port = ET.SubElement(trunk_list_member, 'trunk-list-src-port')
    trunk_list_src_port.text = kwargs.pop('trunk_list_src_port')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def to_pattern_matrix(D):
    """Return a matrix shaped like `D` whose entries are 1 exactly where
    `D` is nonzero and 0 everywhere else.

    `D` must be a NumPy array; the result keeps `D`'s dtype.
    """
    pattern = np.zeros_like(D)
    # Boolean-mask assignment hits exactly the nonzero positions of D,
    # leaving the zeros supplied by zeros_like everywhere else.
    pattern[D != 0] = 1
    return pattern
|
def function[to_pattern_matrix, parameter[D]]:
constant[Returns the Boolean matrix in the same shape as `D` with ones exactly
where there are nonzero entries in `D`.
`D` must be a NumPy array.
]
variable[result] assign[=] call[name[np].zeros_like, parameter[name[D]]]
call[name[result]][call[name[D].nonzero, parameter[]]] assign[=] constant[1]
return[name[result]]
|
keyword[def] identifier[to_pattern_matrix] ( identifier[D] ):
literal[string]
identifier[result] = identifier[np] . identifier[zeros_like] ( identifier[D] )
identifier[result] [ identifier[D] . identifier[nonzero] ()]= literal[int]
keyword[return] identifier[result]
|
def to_pattern_matrix(D):
    """Returns the Boolean matrix in the same shape as `D` with ones exactly
    where there are nonzero entries in `D`.
    `D` must be a NumPy array.
    """
    out = np.zeros_like(D)
    # Broadcast the scalar 1 only into positions where D is nonzero; the
    # remaining entries keep the zeros produced by zeros_like.
    np.copyto(out, 1, where=(D != 0))
    return out
|
def add_to_team(self, **kw):
    """
    We override this method to add our additional participation
    policy groups, as detailed in available_groups above
    """
    payload = dict(kw)
    policy_group = self.context.participant_policy.title()
    # Ensure a "groups" set exists (creating an empty one if the caller
    # supplied none), then fold the participation-policy group into it.
    payload.setdefault("groups", set()).add(policy_group)
    super(PloneIntranetWorkspace, self).add_to_team(**payload)
|
def function[add_to_team, parameter[self]]:
constant[
We override this method to add our additional participation
policy groups, as detailed in available_groups above
]
variable[group] assign[=] call[name[self].context.participant_policy.title, parameter[]]
variable[data] assign[=] call[name[kw].copy, parameter[]]
if compare[constant[groups] in name[data]] begin[:]
call[call[name[data]][constant[groups]].add, parameter[name[group]]]
call[call[name[super], parameter[name[PloneIntranetWorkspace], name[self]]].add_to_team, parameter[]]
|
keyword[def] identifier[add_to_team] ( identifier[self] ,** identifier[kw] ):
literal[string]
identifier[group] = identifier[self] . identifier[context] . identifier[participant_policy] . identifier[title] ()
identifier[data] = identifier[kw] . identifier[copy] ()
keyword[if] literal[string] keyword[in] identifier[data] :
identifier[data] [ literal[string] ]. identifier[add] ( identifier[group] )
keyword[else] :
identifier[data] [ literal[string] ]= identifier[set] ([ identifier[group] ])
identifier[super] ( identifier[PloneIntranetWorkspace] , identifier[self] ). identifier[add_to_team] (** identifier[data] )
|
def add_to_team(self, **kw):
    """
    We override this method to add our additional participation
    policy groups, as detailed in available_groups above
    """
    group = self.context.participant_policy.title()
    data = kw.copy()
    # Guarantee a "groups" set exists before adding the policy group:
    # an existing set is extended in place, otherwise a fresh one is made.
    if 'groups' not in data:
        data['groups'] = set()
    data['groups'].add(group)
    super(PloneIntranetWorkspace, self).add_to_team(**data)
|
def set_xml(self):
    """Render, validate and cache the invoice XML document.

    Renders ``self.template`` against this instance, validates the
    result with ``self.validate(self.schema, ...)``, stores the
    (pretty-printed, when valid) document on ``self.document``, and
    leaves a seekable StringIO of it on ``self.document_path``.

    Side effects: sets ``self.ups`` when the template render raises
    ``UndefinedError``, and always sets ``self.document`` and
    ``self.document_path``.
    """
    cached = StringIO()
    document = u''
    try:
        document = self.template.render(inv=self)
    except UndefinedError as ups:
        # Render failure is swallowed deliberately: the error is kept on
        # self.ups and validation proceeds with the empty document.
        self.ups = ups
    # TODO: Here should be called the cleanup 'Just before the validation'.
    valid = self.validate(self.schema, document)
    self.document = document
    if valid:
        # Re-parse and re-serialize so the cached copy is pretty-printed
        # with an XML declaration.
        # NOTE(review): with encoding='utf-8' lxml's tostring returns
        # bytes; writing bytes into StringIO fails on Python 3 — confirm
        # this code path only runs under Python 2 / unicode-returning etree.
        document = etree.XML(document)
        self.document = etree.tostring(document,
                                       pretty_print=True,
                                       xml_declaration=True,
                                       encoding='utf-8')
    # TODO: When Document Generated, this this should not fail either.
    # Caching just when valid then.
    # Py2-era `x and y or z` idiom: writes the document when truthy,
    # otherwise an empty string (also when the document is None or '').
    cached.write(self.document is not None and self.document or u'')
    cached.seek(0)
    self.document_path = cached
|
def function[set_xml, parameter[self]]:
constant[Set document xml just rendered already
validated against xsd to be signed.
:params boolean debug_mode: Either if you want
the rendered template to be saved either it
is valid or not with the given schema.
:returns boolean: Either was valid or not the generated document.
]
variable[cached] assign[=] call[name[StringIO], parameter[]]
variable[document] assign[=] constant[]
<ast.Try object at 0x7da20c6ab610>
variable[valid] assign[=] call[name[self].validate, parameter[name[self].schema, name[document]]]
name[self].document assign[=] name[document]
if name[valid] begin[:]
variable[document] assign[=] call[name[etree].XML, parameter[name[document]]]
name[self].document assign[=] call[name[etree].tostring, parameter[name[document]]]
call[name[cached].write, parameter[<ast.BoolOp object at 0x7da18bcc8880>]]
call[name[cached].seek, parameter[constant[0]]]
name[self].document_path assign[=] name[cached]
|
keyword[def] identifier[set_xml] ( identifier[self] ):
literal[string]
identifier[cached] = identifier[StringIO] ()
identifier[document] = literal[string]
keyword[try] :
identifier[document] = identifier[self] . identifier[template] . identifier[render] ( identifier[inv] = identifier[self] )
keyword[except] identifier[UndefinedError] keyword[as] identifier[ups] :
identifier[self] . identifier[ups] = identifier[ups]
identifier[valid] = identifier[self] . identifier[validate] ( identifier[self] . identifier[schema] , identifier[document] )
identifier[self] . identifier[document] = identifier[document]
keyword[if] identifier[valid] :
identifier[document] = identifier[etree] . identifier[XML] ( identifier[document] )
identifier[self] . identifier[document] = identifier[etree] . identifier[tostring] ( identifier[document] ,
identifier[pretty_print] = keyword[True] ,
identifier[xml_declaration] = keyword[True] ,
identifier[encoding] = literal[string] )
identifier[cached] . identifier[write] ( identifier[self] . identifier[document] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[document] keyword[or] literal[string] )
identifier[cached] . identifier[seek] ( literal[int] )
identifier[self] . identifier[document_path] = identifier[cached]
|
def set_xml(self):
    """Render, validate and cache the invoice XML document.

    Renders ``self.template`` against this instance, validates the
    result with ``self.validate(self.schema, ...)``, and stores it on
    ``self.document``. When valid, the document is re-serialized
    pretty-printed and a seekable StringIO copy is left on
    ``self.document_path``; ``self.ups`` records any render failure.
    """
    cached = StringIO()
    document = u''
    try:
        document = self.template.render(inv=self) # depends on [control=['try'], data=[]]
    except UndefinedError as ups:
        # Render failure is swallowed deliberately: the error is kept on
        # self.ups and validation proceeds with the empty document.
        self.ups = ups # depends on [control=['except'], data=['ups']]
    # TODO: Here should be called the cleanup 'Just before the validation'.
    valid = self.validate(self.schema, document)
    self.document = document
    if valid:
        # Re-parse and re-serialize so the cached copy is pretty-printed.
        # NOTE(review): with encoding='utf-8' lxml's tostring returns
        # bytes; writing bytes into StringIO fails on Python 3 — confirm
        # this path only runs under Python 2 / unicode-returning etree.
        document = etree.XML(document)
        self.document = etree.tostring(document, pretty_print=True, xml_declaration=True, encoding='utf-8')
        # TODO: When Document Generated, this this should not fail either.
        # Caching just when valid then.
        # NOTE(review): unlike the sibling variant of this method, the
        # caching below happens only when the document is valid —
        # self.document_path is left unset otherwise.
        cached.write(self.document is not None and self.document or u'')
        cached.seek(0)
        self.document_path = cached # depends on [control=['if'], data=[]]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.