code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
def Gamma2VSWR( Gamma ):
    """
    Convert a reflection coefficient to Voltage Standing Wave Ratio (VSWR).
    Gamma is the reflection coefficient (scalar or array, may be complex).
    """
    magnitude = np.abs(Gamma)
    return np.divide(1 + magnitude, 1 - magnitude)
def pow2normdb( mag ):
    """
    Convert linear power to a peak-normalized logarithmic decibel scale.
    mag is a linear power-like array.
    Returns mag in dB, shifted so that its maximum component is 0 dB.
    """
    # Compute the dB vector once; previously it was computed three times
    # and the first result was discarded.
    vec = 10*np.log10( mag )
    return vec - np.max(vec)
def mag2db( mag ):
    """
    Convert a linear magnitude (voltage etc.) to the logarithmic decibel scale.
    """
    dB = 20 * np.log10(mag)
    return dB
def db2mag( dB ):
    """
    Convert logarithmic decibels to linear magnitude scale.
    """
    exponent = np.divide(dB, 20)
    return np.power(10, exponent)
def pow2db( power ):
    """
    Convert linear power (Watt etc.) to the logarithmic decibel scale.
    """
    dB = 10 * np.log10(power)
    return dB
def db2pow( dB ):
    """
    Convert logarithmic decibels to linear power scale.
    """
    exponent = np.divide(dB, 10)
    return np.power(10, exponent)
def powerdB( x ):
    """
    Return the average signal power of x in dBW.
    Works for real and complex signals; power is mean(|x|^2).
    """
    meanPower = np.mean(np.abs(x) ** 2)
    return pow2db(meanPower)
def energy( x ):
    """
    Return the total signal energy, i.e. the sum of |x[n]|^2.
    """
    return np.sum(np.abs(x) ** 2)
def wgndB( x, dB ):
    """
    Add white Gaussian noise of specified power in dBW to a signal.
    x is the input signal (real or complex array).
    dB is the total noise power in dBW.
    Returns x plus the generated noise (same shape as x).
    """
    # Convert the noise power once instead of in every branch.
    noisePower = db2pow(dB)
    if np.iscomplexobj(x):
        # Complex noise for complex signals: half the power per component.
        componentScale = np.sqrt(noisePower / 2)
        wRe = np.random.normal(scale=componentScale, size=np.shape(x))
        wIm = np.random.normal(scale=componentScale, size=np.shape(x))
        w = wRe + 1j*wIm
    else:
        w = np.random.normal(scale=np.sqrt(noisePower), size=np.shape(x))
    return np.add(x, w)
def wgnSnr( x, SNRdB ):
    """
    Add white Gaussian noise so that the result has a target SNR.
    x is the input array.
    SNRdB is the target signal-to-noise ratio in dB.
    """
    # Noise power is signal power reduced by the intended SNR.
    signalPowerdB = powerdB(x)
    return wgndB(x, signalPowerdB - SNRdB)
def periodogram(sig, Fs, nfft = 2048):
    """
    Plot the power spectral density (periodogram) of a signal.
    sig is the signal to be analyzed.
    Fs is the sampling frequency [Hz].
    nfft is the length of the FFT; it is raised to len(sig) if nfft < len(sig).
    Creates a matplotlib figure; nothing is returned.
    """
    if nfft < len(sig):
        nfft = len(sig)
    sig_f = np.fft.fft(sig,nfft)/nfft
    # Shift to a two-sided spectrum and convert magnitude to power
    sig_f = np.power(np.abs(np.fft.fftshift(sig_f)), 2)
    # Floor infinitesimally small components to avoid -inf in the dB conversion
    sig_f = pow2db(np.maximum(sig_f, 1e-16))
    # Generate frequency axis
    f = np.linspace(-Fs/2, Fs/2, len(sig_f))
    # Plot
    plt.figure(figsize=(10, 3))
    plt.plot(f, sig_f)
    plt.xlim([-Fs/2, Fs/2])
    plt.title("Power Spectral Density")
    plt.ylabel("Power density [dBW/Hz]")
    plt.xlabel("Frequency [Hz]")
    plt.tight_layout()
def welch(x, Fs, nfft = 2048):
    """
    Wrapper for scipy's Welch spectral power estimate, with plotting.
    x is the signal to be analyzed.
    Fs is the sampling frequency [Hz].
    nfft is the segment length (nperseg) of the estimate.
    Returns the frequency vector and the (linear-scale) PSD estimate.
    """
    f, Pxx_den = signal.welch(x, Fs, nperseg=nfft, return_onesided=False)
    # Floor infinitesimally small components to avoid -inf in the dB conversion
    sig_f = pow2db(np.maximum(Pxx_den, 1e-16))
    # Plot (in dB); the returned estimate stays linear
    plt.figure(figsize=(10, 3))
    plt.plot(f, sig_f)
    plt.xlim([-Fs/2, Fs/2])
    plt.title("Welch Power Spectral Density Estimate")
    plt.ylabel("Power density [dBW/Hz]")
    plt.xlabel("Frequency [Hz]")
    plt.tight_layout()
    return f, Pxx_den
def magnitudeSpectrum(sig, Fs, nfft = 2048, plot = False):
    """
    Normalized magnitude spectrum of a signal.
    sig is the signal to be analyzed.
    Fs is the sampling frequency [Hz].
    nfft is the length of the FFT; it is raised to len(sig) if nfft < len(sig).
    plot decides whether to plot the spectrum (in dB).
    Returns the frequency vector and the normalized magnitude spectrum.
    NOTE(review): when plot=True, the returned spectrum is the dB-converted
    vector rather than the linear one — confirm callers expect this.
    """
    if nfft < len(sig):
        nfft = len(sig)
    sig_f = np.fft.fft(sig,nfft)/nfft
    # Shift to a two-sided spectrum and normalize to unit peak
    sig_f = np.abs(np.fft.fftshift(sig_f / abs(sig_f).max()))
    # Generate frequency axis
    f = np.linspace(-Fs/2, Fs/2, len(sig_f))
    # Plot
    if plot == True:
        # Floor infinitesimally small components to avoid -inf in the dB conversion
        sig_f = mag2db(np.maximum(sig_f, 1e-10))
        plt.figure(figsize=(10, 3))
        plt.plot(f, sig_f)
        plt.xlim([-Fs/2, Fs/2])
        plt.title("Frequency response")
        plt.ylabel("Normalized magnitude [dB]")
        plt.xlabel("Frequency [Hz]")
    return f, sig_f
def indefIntegration( x_t, dt ):
    """
    Numerical indefinite (cumulative) integration of a sampled function.
    x_t is the vector of sampled function values.
    dt is the sample interval.
    Returns the cumulatively integrated vector.
    """
    return np.cumsum(x_t) * dt
from scipy.stats import norm
import numpy as np
import scipy.constants as const
import rftool.utility as util
def Q( x ):
    """
    The Gaussian Q-function: tail probability of the standard normal
    distribution (a readability wrapper around scipy's survival function).
    """
    return norm.sf(x)
def errorProbabilityBpsk( EbN0 ):
    """
    Probability of error in AWGN as a function of Eb/N0 (linear) for
    Binary Phase Shift Keying (BPSK).
    - T. S. Rappaport, Wireless Communications Principles and Practice, 2nd ed, Prentice Hall, 1995
    """
    argument = np.sqrt(2 * EbN0)
    return Q(argument)
def errorProbabilityQpsk( EbN0 ):
    """
    Probability of error in AWGN as a function of Eb/N0 for Quadrature Phase
    Shift Keying (QPSK); identical to the BPSK bit error probability.
    - T. S. Rappaport, Wireless Communications Principles and Practice, 2nd ed, Prentice Hall, 1995
    """
    return errorProbabilityBpsk(EbN0)
def errorProbabilityMPsk( EbN0, M ):
    """
    Probability of symbol error (upper bound) in AWGN as a function of Eb/N0
    for M-ary Phase Shift Keying (M-PSK).
    - T. S. Rappaport, Wireless Communications Principles and Practice, 2nd ed, Prentice Hall, 1995
    """
    # Es = Eb * log2(M): the energy-per-symbol scaling uses the base-2
    # logarithm (bits per symbol), not the natural logarithm as previously
    # implemented.
    return 2*Q( np.sqrt( 2*EbN0*np.log2(M) )*np.sin(np.divide(const.pi, M)) )  # Technically "less than or equal"
def errorProbabilityFsk( EbN0 ):
    """
    Probability of error in AWGN as a function of Eb/N0 (linear) for
    non-coherent binary Frequency Shift Keying (FSK).
    - T. S. Rappaport, Wireless Communications Principles and Practice, 2nd ed, Prentice Hall, 1995
    """
    # Non-coherent binary FSK: Pe = (1/2) * exp(-Eb/(2*N0)).
    # The exponent is -EbN0/2, not -2*EbN0 as previously implemented.
    return np.divide(1, 2)*np.exp(-EbN0/2)
def errorProbabilityCoherentFsk( EbN0 ):
    """
    Probability of error in AWGN as a function of Eb/N0 for coherent
    Frequency Shift Keying (FSK).
    - T. S. Rappaport, Wireless Communications Principles and Practice, 2nd ed, Prentice Hall, 1995
    """
    argument = np.sqrt(EbN0)
    return Q(argument)
def errorProbabilityCoherentMFsk( EbN0, M ):
    """
    Probability of symbol error (upper bound) in AWGN as a function of Eb/N0
    for coherent M-ary Frequency Shift Keying (M-FSK).
    - T. S. Rappaport, Wireless Communications Principles and Practice, 2nd ed, Prentice Hall, 1995
    """
    # Union bound Pe <= (M-1)*Q(sqrt(EbN0*log2(M))). The previous (1-M)
    # factor produced a negative probability, and the energy scaling uses
    # log2 (bits per symbol), not the natural logarithm.
    return (M-1)*Q(np.sqrt( EbN0*np.log2(M) ))  # Technically "less than or equal"
def errorProbabilityGMSK( EbN0 ):
    """
    Probability of error in AWGN as a function of Eb/N0 for Gaussian Minimum
    Shift Keying (GMSK) with a 3-dB bandwidth bit-duration product BT = 0.25.
    - T. S. Rappaport, Wireless Communications Principles and Practice, 2nd ed, Prentice Hall, 1995
    """
    # Degradation factor for BT = 0.25 (Rappaport).
    gamma = 0.68
    return Q(np.sqrt(2 * gamma * EbN0))
def errorProbabilityQam( EbN0 , M ):
    """
    Probability of error in AWGN as a function of Eb/N0 (with the minimum Eb)
    and order M for Quadrature Amplitude Modulation (QAM).
    - T. S. Rappaport, Wireless Communications Principles and Practice, 2nd ed, Prentice Hall, 1995
    """
    scaling = 4 * (1 - 1/np.sqrt(M))
    return scaling * Q(np.sqrt(2*EbN0))
def EbN0toSNRdB(EbN0, M, Fs, Fsymb):
    """
    Calculate the SNR in dB required to obtain a target Eb/N0.
    EbN0 is the intended ratio in dB (scalar or vector).
    M is the order of the modulation.
    Fs is the sample rate of the signal.
    Fsymb is the symbol rate of the signal (pulse rate).
    """
    bitsPerSymbol = np.log2(M)
    linearEbN0 = util.db2pow(EbN0)
    return util.pow2db(linearEbN0 * Fsymb * bitsPerSymbol / Fs)
import scipy.signal as signal
import scipy.optimize as optimize
import scipy.integrate as integrate
import scipy.special as special
import scipy.ndimage as ndimage
import numpy as np
import numpy.polynomial.polynomial as poly
from mpl_toolkits.mplot3d import axes3d # 3D plot
import matplotlib.pyplot as plt
from pyhht.visualization import plot_imfs # Hilbert-Huang TF analysis
from pyhht import EMD # Hilbert-Huang TF analysis
import rftool.utility as util
import rftool.estimation as estimate
def Albersheim( Pfa, Pd, N ):
    """
    Calculate required SNR for non-coherent integration over N pulses, by use of Albersheim's equation.
    Pd is the probability of detection (linear)
    Pfa is the probability of false alarm (linear)
    N is the number of non-coherently integrated pulses
    Returns SNR in dB
    Accurate within 0.2 dB for:
    10^-7 < Pfa < 10^-3
    0.1 < Pd < 0.9
    1 <= N < 8096
    - M. A. Richards and J. A. Scheer and W. A. Holm, Principles of Modern Radar, SciTech Publishing, 2010
    """
    A = np.log(np.divide(0.62, Pfa))
    B = np.log(np.divide(Pd, 1-Pd))
    # Albersheim's equation uses the term (A + 0.12*A*B + 1.7*B); the last
    # coefficient is 1.7, not 0.7 as previously implemented.
    SNRdB = -5*np.log10(N)+(6.2+np.divide(4.54, np.sqrt(N+0.44)))*np.log10(A+(0.12*A*B)+(1.7*B))
    return SNRdB
def Shnidman( Pfa, Pd, N, SW ):
    """
    Calculate required SNR for non-coherent integration over N pulses for the
    Swerling cases, by use of Shnidman's equation.
    Pd is the probability of detection (linear)
    Pfa is the probability of false alarm (linear)
    N is the number of non-coherently integrated pulses
    SW is the Swerling model: 0-4 (zero is the non-fluctuating case)
    Returns SNR in dB
    Accurate within 1 dB for:
    10^-7 < Pfa < 10^-3
    0.1 <= Pd <= 0.99
    1 <= N < 100
    - M. A. Richards and J. A. Scheer and W. A. Holm, Principles of Modern Radar, SciTech Publishing, 2010
    """
    C = 0
    # alpha = 0 for N < 40 and 1/4 for N >= 40. (Previously N == 40 matched
    # neither branch and silently left alpha = 0.)
    if N < 40:
        alpha = 0
    else:
        alpha = np.divide(1, 4)
    eta = np.sqrt( -0.8*np.log(4*Pfa*(1-Pfa)) ) + np.sign(Pd-0.5)*np.sqrt( -0.8*np.log(4*Pd*(1-Pd)) )
    X_inf = eta*( eta + 2*np.sqrt( np.divide(N,2)+(alpha-np.divide(1,4)) ) )
    if SW==0: # Non-fluctuating (non-Swerling) case
        C = 1
    else: # Swerling cases 1-4
        def K(SW):
            # Previously this helper assigned to a local without returning it
            # (and the SW==3 branch assigned nothing at all), so every
            # Swerling case failed at the division below.
            if SW==1:
                return 1
            elif SW==2:
                return N
            elif SW==3:
                return 2
            else:
                return 2*N
        C1 = np.divide( ( (17.7006*Pd-18.4496)*Pd+14.5339 )*Pd-3.525, K(SW) )
        C2 = np.divide(1,K(SW))*( np.exp( 27.31*Pd-25.14 ) + (Pd-0.8)*( 0.7*np.log( np.divide(10e-5, Pfa) ) + np.divide( 2*N-20, 80 ) ) )
        # C2 is only added for high detection probabilities. (Previously the
        # condition 'elif Pd > 0.99' left CdB = 0 for 0.8872 <= Pd <= 0.99.)
        if Pd < 0.8872:
            CdB = C1
        else:
            CdB = C1 + C2
        C = np.power(10, np.divide(CdB,10) )
    X1 = np.divide(C*X_inf, N)
    SNRdB = 10*np.log10(X1)
    return SNRdB
def upconvert( sig, f_c, Fs=1 ):
    """
    Upconvert a baseband waveform to an intermediate frequency (IF).
    sig is the baseband waveform.
    f_c is the IF center frequency.
    Fs is the sample frequency.
    """
    sampleIndex = np.arange(sig.shape[0])
    # Carrier phase at each sample instant: angular increment times index.
    phi_j = 2*np.pi*np.divide(f_c, Fs)*sampleIndex
    # Multiply by the complex IF carrier.
    return np.multiply(sig, np.exp(1j*phi_j))
def ACF(x, Fs, plot = True, *args, **kwargs):
    """
    Normalized Autocorrelation Function of input x.
    x is the signal being analyzed. If x is a matrix, the correlation is performed columnwise.
    Fs is the sample rate [Hz], used for the time axis of the plot.
    plot decides whether to plot the result.
    label is an optional plot label for each column, e.g. ['label1', 'label2'].
    The output is 2*len(x)-1 long. If the input is a matrix, the output is a matrix.
    """
    plotLabel = kwargs.get('label', None)
    # If x is a vector, ensure it is a column vector.
    if x.ndim < 2:
        x = np.expand_dims(x, axis=1)
    # Correlate each column with itself (FFT-based, full length).
    r_xx = np.empty( shape=[2*np.size(x,0)-1, np.size(x,1)], dtype=complex )
    for n in range(0, np.size(x,1)):
        r_xx[:,n] = signal.correlate(x[:,n], x[:,n], method='fft')
    if plot == True:
        # Fall back to numeric labels when none are supplied; previously a
        # missing 'label' kwarg crashed with a TypeError on plotLabel[i].
        if plotLabel is None:
            plotLabel = [str(i) for i in range(np.size(r_xx, 1))]
        tau = np.linspace(-np.floor(len(r_xx)/2)/Fs, np.floor(len(r_xx)/2)/Fs, len(r_xx))
        fig, ax = plt.subplots()
        yMin = 0
        for i, column in enumerate(r_xx.T):
            # Normalize to unit peak before converting to dB.
            column = np.absolute(column / abs(column).max())
            columndB = util.mag2db(column)
            yMin = min(yMin, np.min(columndB))
            ax.plot(tau, columndB, label=plotLabel[i])
        ax.ticklabel_format(useMathText=True, scilimits=(0,3))
        plt.legend()
        # Clip the axis at -100 dB; previously the limit was taken from the
        # raw complex correlation values rather than the plotted dB values.
        ax.set_ylim([max(-100, yMin), 0])
        ax.set_ylabel("Normalized Correlation [dB]")
        ax.set_xlabel("$t$ [s]")
        plt.tight_layout()
    return r_xx
class chirp:
    """
    Object for generating linear and non-linear frequency modulated (NLFM) chirps.

    Attributes such as t, T, c (instantaneous-frequency polynomial
    coefficients), targetOmega_t and window are populated by
    getCoefficients() / gamma_t_objective().
    """
    t = None
    T = None
    def __init__( self, Fs=1 ):
        """
        Fs is the intended sampling frequency [Hz]. Fs must be at least twice
        the highest frequency of the generated chirp.
        """
        self.Fs = Fs
        self.dt = 1/self.Fs
    def checkSampleRate(self):
        """
        Check that the sample rate satisfies the Nyquist criterion, i.e. that
        no phase difference between two consecutive samples exceeds pi.
        """
        errorVector = np.abs(np.gradient(self.targetOmega_t)*2*np.pi)-np.pi
        if 0 < np.max(errorVector):
            print("Warning, sample rate too low. Maximum phase change is", np.pi+np.max(errorVector), "Maximum allowed is pi." )
    def genFromPoly( self, direction = None ):
        """
        Generate a Non-Linear Frequency Modulated (NLFM) chirp from the phase
        polynomial coefficients in self.c.
        direction controls the chirp direction. 'inverted' inverts the chirp direction.
        """
        dt = 1/self.Fs # seconds
        polyOmega = np.poly1d(self.c)
        omega_t = polyOmega(self.t)
        if direction == 'inverted':
            # Mirror the frequency trajectory within its own span.
            omega_t = np.max(omega_t) - (omega_t-np.min(omega_t))
        # The phase is the integral of the instantaneous frequency.
        phi_t = util.indefIntegration( omega_t, dt )
        sig = np.exp(np.multiply(1j*2*np.pi, phi_t))
        return sig
    def genNumerical( self, direction = None ):
        """
        Generate a Non-Linear Frequency Modulated (NLFM) chirp from the
        numerically optimized frequency trajectory self.targetOmega_t.
        direction controls the chirp direction. 'inverted' inverts the chirp direction.
        """
        dt = 1/self.Fs # seconds
        omega_t = self.targetOmega_t
        if direction == 'inverted':
            omega_t = np.max(omega_t) - (omega_t-np.min(omega_t))
        phi_t = util.indefIntegration( omega_t, dt )
        sig = np.exp(np.multiply(1j*2*np.pi, phi_t))
        return sig
    def getInstFreq(self, poly=True, plot=True):
        """
        Calculate the instantaneous frequency as a function of time.
        poly selects the polynomial coefficients (True) or the phase vector
        self.phi_t (False) as the source.
        plot decides whether to plot the result.
        """
        if poly == True:
            # Instantaneous frequency from the polynomial coefficients.
            polyOmega = np.poly1d(self.c)
            omega_t = polyOmega(self.t)
        else:
            # Instantaneous frequency as the gradient of the phase vector.
            omega_t = np.gradient(self.phi_t, self.t)
        if plot == True:
            plt.figure()
            plt.plot(self.t, omega_t)
            plt.plot(self.t, self.targetOmega_t)
            plt.xlabel('t [s]')
            plt.ylabel('f [Hz]')
            plt.title("Instantaneous Frequency")
            plt.show()
        return omega_t
    def getChirpRate(self, poly=True, plot=True):
        """
        Calculate the chirp rate (time derivative of the instantaneous
        frequency) as a function of time.
        poly selects the polynomial coefficients (True) or the numerically
        estimated instantaneous frequency (False) as the source.
        plot decides whether to plot the result.
        """
        if poly == True:
            # The chirp rate is the derivative of the instantaneous-frequency
            # polynomial. Previously the polynomial evaluation result was
            # discarded inside a redundant loop and gamma_t stayed 0.
            polyOmega = np.poly1d(self.c)
            polyGamma = np.polyder(polyOmega)
            gamma_t = polyGamma(self.t)
        else:
            gamma_t = np.gradient(self.getInstFreq(plot=False), self.t)
        if plot == True:
            plt.figure()
            plt.plot(self.t, gamma_t)
            plt.xlabel('t [s]')
            plt.ylabel('f [Hz]')
            plt.title("Chirp Rate")
            plt.show()
        return gamma_t
    def PSD( self, sig_t, plot=False ):
        """
        Calculate the Power Spectral Density in dBW/Hz using Welch's method.
        sig_t is the time-domain signal.
        plot decides whether to plot the estimate.
        NOTE(review): relies on self.fftLen being set by the caller — confirm.
        """
        f, psd = signal.welch(sig_t, fs=self.Fs, nfft=self.fftLen, nperseg=self.fftLen, window = signal.blackmanharris(self.fftLen),
        noverlap = self.fftLen/4, return_onesided=False)
        if plot == True:
            psd_dB = util.pow2db(psd)
            plt.plot(f, psd_dB)
            plt.title("Welch's PSD Estimate")
            plt.ylabel("dBW/Hz")
            plt.xlabel("Frequency [Hz]")
            plt.show()
        return psd
    def W( self, omega ):
        """
        Lookup-table evaluation of the window function W. Takes instantaneous
        frequency (offset from the center frequency) as input.
        Out-of-range frequencies are clamped to the window edges.
        """
        delta_omega_W = self.omega_W[1]-self.omega_W[0]
        W_omega = np.empty((len(omega)))
        for i in range(0, len(omega)):
            # Map the frequency offset to a window index centered mid-window.
            index = np.intc(omega[i]/delta_omega_W)+np.intc(len(self.window)/2)
            if index<0:
                index = 0
            elif len(self.window)-1<index:
                index = len(self.window)-1
            W_omega[i] = self.window[index]
        return W_omega
    def gamma_t_objective( self, scale ):
        """
        Objective function for finding a chirp-rate curve gamma_t that meets
        the bandwidth constraint.
        scale scales the initial (LFM) chirp-rate curve.
        Returns the absolute deviation of the achieved bandwidth from the
        target bandwidth self.Omega.
        """
        self.iterationCount = self.iterationCount +1
        if self.iterationCount % 10 == 0:
            print("Iteration",self.iterationCount)
        # Calculate gamma_t for this iteration
        self.gamma_t = self.gamma_t_initial*scale
        # Cumulative-sum integration of the chirp rate gives the frequency trajectory.
        omega_t = np.cumsum(self.gamma_t)*self.dt
        # Place center frequency at omega_0
        omega_t = omega_t + (self.omega_0 - omega_t[np.intc(len(omega_t)/2)])
        # Scale W function to enclose omega_t
        self.omega_W = np.linspace(omega_t[0]-self.omega_0, omega_t[-1]-self.omega_0, len(self.window))
        # Calculate NLFM gamma function
        self.gamma_t = self.gamma_t[np.intc(len(self.gamma_t)/2)]/self.W(omega_t-self.omega_0)
        self.targetOmega_t = util.indefIntegration(self.gamma_t, self.dt)
        self.targetOmega_t = self.targetOmega_t + (self.omega_0 - self.targetOmega_t[np.intc(len(self.targetOmega_t)/2)])
        OmegaIteration = np.trapz(self.gamma_t, dx=self.dt)
        cost = np.abs(self.Omega - OmegaIteration)
        return cost
    def getCoefficients( self, window, T=1e-3, targetBw=10e3, centerFreq=20e3, order=48):
        """
        Calculate the polynomial coefficients needed to generate an NLFM chirp
        with a specific magnitude envelope (in the frequency domain).
        The coefficients are found through non-linear optimization.
        window is the window function for the target PSD. It is used as a LUT
        based function from -Omega/2 to Omega/2, where Omega=targetBw.
        T is the pulse duration [s].
        targetBw is the target bandwidth of the chirp [Hz].
        centerFreq is the center frequency of the chirp [Hz].
        order is the order of the phase polynomial [integer].
        Returns (and stores in self.c) the coefficient vector.
        """
        self.Omega = targetBw
        # Floor the window to keep the W lookup strictly positive.
        self.window = np.maximum(window, 1e-8)
        self.omega_0 = centerFreq
        self.T = T
        self.points = np.intc(self.Fs*T)
        self.t = np.linspace(-self.T/2, self.T/2, self.points)
        self.dt = self.T/(self.points-1)
        # Optimization routine; count iterations for progress reporting.
        self.iterationCount = 0
        # Calculate LFM chirp rate (initial gamma_t)
        self.gamma_t_initial = np.full(self.points, self.Omega/self.T)
        # Initial scaling
        p0 = np.array([1])
        # Optimize the gamma_t curve against the window constraint.
        print("Initiating chirp instantaneous frequency optimization.")
        optimize.minimize(self.gamma_t_objective, p0, method='L-BFGS-B')
        # Resample the time series to improve the polynomial fitting result.
        omegaFit = signal.decimate(self.targetOmega_t, 16, ftype='iir', zero_phase=True)
        timeFit = np.linspace(-self.T/2, self.T/2, len(omegaFit))
        self.c = np.polyfit(timeFit, omegaFit, order)
        return self.c
    def modulate( self, bitstream=np.array([1,0,1,0])):
        """
        Modulate a bit stream to chirps, one chirp per bit. A 1 is represented
        as a forward-time chirp; a 0 as a time-reversed (inverted) chirp.
        bitstream is the bit stream to be modulated (numpy array).
        """
        # Generate T and t if they don't exist yet.
        if self.T is None:
            self.T = (self.points-1)*self.dt
        if self.t is None:
            self.t = np.linspace(-self.T/2, self.T/2, self.points)
        # Calculate length of signal
        sigLen = len(bitstream)*self.points
        # Pre-allocate the output frame.
        waveform = np.empty([sigLen], dtype=complex)
        sig = self.genNumerical()
        sigInv = self.genNumerical('inverted')
        # Iterate through the bitstream, inserting one chirp per bit.
        for m, bit in enumerate(bitstream):
            if bit==1:
                waveform[m*self.points:(m+1)*self.points] = sig
            elif bit==0:
                waveform[m*self.points:(m+1)*self.points] = sigInv
        return waveform
import numpy as np
import scipy.optimize as optimize
def chamberImpedance( x ):
    """
    Cost function for open TEM-cell dimensioning.
    S. M. Satav et al., Do-it-Yourself Fabrication of an Open TEM Cell for EMC Pre-compliance, Indian Institute of Technology-Bombay, 2008
    x is a parameter vector [d, W, L] where:
        d is the height from the septum to the top plate [m] (cell is 2d high),
        W is the width of the center conductor (septum) [m],
        L is the length of the test area [m].
    Returns the squared deviation of the cell impedance from 50 Ohm.
    Chamber cross section below, with the septum in the center.
     _______
    /_______\\
    \\_______/
    """
    d, W, L = x[0], x[1], x[2]
    e_r = 1               # relative permittivity inside the chamber (air)
    targetImpedance = 50  # Ohm
    t = 1.6e-3            # thickness of the center conductor (septum)
    # Fringing capacitance per unit length [F/m] (0.053 pF/cm).
    C_f = L*0.053e-10
    Z0 = np.divide(94.15, np.sqrt(e_r)*(np.divide(W, 2*d*(1-np.divide(t, 2*d))) + np.divide(C_f, 0.0885*e_r)))
    return np.power(np.abs(targetImpedance - Z0), 2)
def chamberDimensions( minHeight=10e-2, minWidth=30e-2, minLength=30e-2 ):
    """
    Optimization routine solving for the physical chamber dimensions that
    satisfy a 50 ohm impedance.
    minHeight is the height of the intended test object.
    minWidth is the x and y dimension of the intended test area.
    minLength is the minimum length of the test area.
    Prints the resulting dimensions; nothing is returned.
    """
    # Initial guess [d, W, L]; see chamberImpedance for the meaning.
    x0 = np.array([minHeight, minWidth, minLength])
    # Lower-bounded search: the cell can only grow from the minimum dimensions.
    bnds = ((minHeight, None), (minWidth, None), (minLength, None))
    res = optimize.minimize( chamberImpedance, x0, bounds=bnds, method='L-BFGS-B' )
    x = res.x
    print("Test area height, d =", x[0])
    print("Septum Width (test area width), W =", x[1])
    print("Test area Length, L =", x[2])
try:
from typing import Any, Dict, List, Optional, Protocol, Union
except ImportError:
from typing import Dict, List, Optional, Union
from typing_extensions import Protocol
class Layer(Protocol):
    """This Protocol describes a simple information container for network layers.
    Args:
        name: name of the layer
        kernel_size: the kernel size of the convolution operation.
            Non-convolutional layers are treated as having
            an infinite kernel size.
        stride_size: The stride size of the convolution operation.
            Fully connected layers are treated as having
            a stride-size of 1.
        filters: The number of filters in the layer,
            only set if convolutional.
        units: The units of a fully connected layer, only
            set if the kernel size is infinite.
    """
    name: str
    kernel_size: Optional[int]
    stride_size: int
    filters: Optional[int]
    units: Optional[int]
    @staticmethod
    def from_dict(**config) -> "Layer":
        """Create a layer object from the dictionary.
        Args:
            **config: keyword arguments for the constructor.
        Returns:
            A layer instance.
        """
        ...
    def to_dict(self) -> Dict[str, Union[int, str]]:
        """Create a json-serializable dictionary from which
        the object can be reconstructed.
        Returns:
            A dictionary from which the layer can be reconstructed.
        """
        ...
class Node(Protocol):
    """This Protocol describes the core component that a graph is
    constructed from.
    Args:
        name: the name of the node
        layer_type: the layer information container
        predecessor_list: the list of predecessor nodes
    """
    name: str
    layer_type: Layer
    predecessor_list: List["Node"]
    def is_in(self, container: Union[List["Node"], Dict["Node", Any]]) -> bool:
        """Checks if this particular node is in a container based on the object-id.
        Args:
            container: the container to search through.
                The search is non-recursive and thus
                does not look into nested containers.
        Returns:
            True if this object is in the container, else False.
        """
        ...
from typing import Sequence, Union
import graphviz
import numpy as np
from rfa_toolbox.graphs import EnrichedNetworkNode
def node_id(node: EnrichedNetworkNode) -> str:
    """Provide a unique string for each node based on its name and object id.

    The id stays human readable (it contains the layer name) while remaining
    unique, since the object id is appended.

    Args:
        node: the EnrichedNetworkNode-instance the unique id shall be obtained

    Returns:
        A unique node id as a string of the following format ${node.name}-${id(node)}
    """
    return "-".join((node.name, str(id(node))))
def _feature_map_size_label(feature_map_size: Union[int, Sequence[int]]) -> str:
if not isinstance(feature_map_size, Sequence) and not isinstance(
feature_map_size, np.ndarray
):
return (
f"\\nFeature Map Res.: {max(feature_map_size, 1)} "
f"x {max(feature_map_size, 1)}"
)
else:
fm = np.asarray(feature_map_size)
fm[fm < 1] = 1
return "\\nFeature Map Res.: " f"{' x '.join(fm.astype(int).astype(str))}"
def visualize_node(
    node: EnrichedNetworkNode,
    dot: graphviz.Digraph,
    input_res: int,
    color_border: bool,
    color_critical: bool,
    include_rf_info: bool = True,
    filter_kernel_size_1: bool = False,
    include_fm_info: bool = True,
) -> None:
    """Create a node in a graphviz-graph based on an EnrichedNetworkNode instance.
    Also creates all edges that lead from predecessor nodes to this node.
    Args:
        node: The node in question
        dot: The graphviz-graph
        input_res: The input resolution of the model - required for
            coloring critical and border layers
        color_border: If True, border layers are colored red
        color_critical: If True, critical layers are colored orange/yellow
        include_rf_info: If True the receptive field information is
            included in the node description
        filter_kernel_size_1: forwarded to node.is_border when classifying
            border layers
        include_fm_info: If True the feature map resolution is included in
            the node description
    Returns:
        Nothing.
    """
    # Color precedence: border (red) beats critical-in-all-dims (orange),
    # which beats critical-in-any-dim (yellow); otherwise white.
    color = "white"
    if (
        node.is_border(
            input_resolution=input_res, filter_kernel_size_1=filter_kernel_size_1
        )
        and color_border
    ):
        color = "red"
    elif (
        np.all(np.asarray(node.receptive_field_min) > np.asarray(input_res))
        and color_critical
        and not node.is_border(
            input_resolution=input_res, filter_kernel_size_1=filter_kernel_size_1
        )
    ):
        color = "orange"
    elif (
        np.any(np.asarray(node.receptive_field_min) > np.asarray(input_res))
        and color_critical
        and not node.is_border(
            input_resolution=input_res, filter_kernel_size_1=filter_kernel_size_1
        )
    ):
        color = "yellow"
    # Assemble the label parts; "\n" here is a graphviz label line break.
    l_name = node.layer_info.name
    rf_info = (
        "\\n" + f"r(min)={node.receptive_field_min}, r(max)={node.receptive_field_max}"
    )
    filters = f"\\n{node.layer_info.filters} filters"
    units = f"\\n{node.layer_info.units} units"
    # Feature map resolution is only meaningful for finite kernel sizes.
    feature_map_size = (
        _feature_map_size_label(
            np.asarray(input_res) // np.asarray(node.get_maximum_scale_factor())
        )
        if node.kernel_size != np.inf
        else ""
    )
    label = l_name
    # Filters take precedence over units when both are set.
    if node.layer_info.filters is not None:
        label += filters
    elif node.layer_info.units is not None:
        label += units
    if include_rf_info:
        label += rf_info
    if include_fm_info:
        label += feature_map_size
    dot.node(
        f"{node.name}-{id(node)}",
        label=label,
        fillcolor=color,
        style="filled",
    )
    # Draw one incoming edge per predecessor.
    for pred in node.predecessors:
        dot.edge(node_id(pred), node_id(node), label="")
def visualize_architecture(
    output_node: EnrichedNetworkNode,
    model_name: str,
    input_res: int = 224,
    color_critical: bool = True,
    color_border: bool = True,
    include_rf_info: bool = True,
    filter_kernel_size_1: bool = False,
    include_fm_info: bool = True,
) -> graphviz.Digraph:
    """Visualize an architecture using graphviz
    and mark critical and border layers in the graph visualization.

    Args:
        output_node: an EnrichedNetworkNode-instance that belongs to the
            network graph to visualize. This function can handle
            architectures with arbitrarily many output nodes
            and one input node.
        model_name: the name of the model
        input_res: the input resolution (used for determining
            critical and border layers)
        color_critical: if True the critical layers are colored orange/yellow,
            True by default.
        color_border: if True the border layers are colored red, True by default.
        include_rf_info: if True the receptive field information is included
            in the node description
        filter_kernel_size_1: forwarded to the border-layer classification
        include_fm_info: if True the feature map resolution is included in
            the node description

    Returns:
        A graphviz.Digraph object that can visualize the network architecture.
    """
    graph = graphviz.Digraph(model_name, filename=".gv")
    graph.attr(rankdir="TB")
    graph.attr("node", shape="rectangle")
    # Render every layer reachable from the output node.
    for layer_node in output_node.all_layers:
        visualize_node(
            layer_node,
            dot=graph,
            input_res=input_res,
            color_border=color_border,
            color_critical=color_critical,
            include_rf_info=include_rf_info,
            filter_kernel_size_1=filter_kernel_size_1,
            include_fm_info=include_fm_info,
        )
    return graph
from typing import Callable, List
from rfa_toolbox.graphs import EnrichedNetworkNode, LayerDefinition
def conv_batch_norm_relu(
    predecessor: EnrichedNetworkNode, idx: str, strides: int = 1
) -> EnrichedNetworkNode:
    """Append a Conv3x3-BatchNorm-ReLU node after the given predecessor."""
    layer = LayerDefinition(
        name="Conv3x3-BatchNorm-ReLU", kernel_size=3, stride_size=strides
    )
    return EnrichedNetworkNode(
        name=f"{idx}-Conv3x3-BatchNorm-ReLU",
        layer_info=layer,
        predecessors=[predecessor],
    )
def conv_batch_norm_relu_squeeze(
    predecessor: EnrichedNetworkNode, idx: str, strides: int = 1
) -> EnrichedNetworkNode:
    """Append a 1x1 Conv-BatchNorm-ReLU (squeeze/expand) node after the predecessor."""
    layer = LayerDefinition(
        name="Conv1x1-BatchNorm-ReLU", kernel_size=1, stride_size=strides
    )
    return EnrichedNetworkNode(
        name=f"{idx}-Conv1x1-BatchNorm-ReLU",
        layer_info=layer,
        predecessors=[predecessor],
    )
def addition(predecessor: List[EnrichedNetworkNode], idx: str) -> EnrichedNetworkNode:
    """Merge several predecessor nodes with an elementwise addition node."""
    layer = LayerDefinition(name="Addition", kernel_size=1, stride_size=1)
    return EnrichedNetworkNode(
        name=f"{idx}-Addition",
        layer_info=layer,
        predecessors=predecessor,
    )
def skip_downsample(predecessor: EnrichedNetworkNode, idx: str) -> EnrichedNetworkNode:
    """Downsample the skip path with a strided 1x1 convolution projection."""
    layer = LayerDefinition(
        name="1x1Conv-Projection", kernel_size=1, stride_size=2
    )
    return EnrichedNetworkNode(
        name=f"{idx}-SkipDownsample",
        layer_info=layer,
        predecessors=[predecessor],
    )
def stem() -> EnrichedNetworkNode:
    """Build the standard ResNet stem: 7x7/2 conv followed by a 3x3/2 max-pool."""
    conv = EnrichedNetworkNode(
        name="StemConv",
        layer_info=LayerDefinition(name="Conv7x7", kernel_size=7, stride_size=2),
        predecessors=[],
    )
    return EnrichedNetworkNode(
        name="StemPool",
        layer_info=LayerDefinition(name="MaxPool3x3", kernel_size=3, stride_size=2),
        predecessors=[conv],
    )
def small_stem() -> EnrichedNetworkNode:
    """Build a small-input stem: a single stride-1 3x3 convolution."""
    return EnrichedNetworkNode(
        name="StemConv",
        layer_info=LayerDefinition(name="Conv3x3", kernel_size=3, stride_size=1),
        predecessors=[],
    )
def mediun_stem() -> EnrichedNetworkNode:
    """Build a medium-input stem: a single stride-2 3x3 convolution.

    NOTE(review): the function name contains a typo ("mediun"); it is kept
    unchanged for backward compatibility with existing callers.
    """
    return EnrichedNetworkNode(
        name="StemConv",
        layer_info=LayerDefinition(name="Conv3x3", kernel_size=3, stride_size=2),
        predecessors=[],
    )
def residual_block(
    input_node: EnrichedNetworkNode, idx: int, i: int, strides: int = 1
) -> EnrichedNetworkNode:
    """Build a basic (two-conv) residual block with an additive skip connection."""
    conv_a = conv_batch_norm_relu(input_node, f"Stage{idx}-Block{i}-{0}", strides)
    conv_b = conv_batch_norm_relu(conv_a, f"Stage{idx}-Block{i}-{1}", 1)
    # Project the skip path with a strided 1x1 conv when downsampling.
    if strides == 1:
        shortcut = input_node
    else:
        shortcut = skip_downsample(predecessor=input_node, idx=f"BlockSkip{idx}-")
    return addition([shortcut, conv_b], f"Stage{idx}-Block{i}")
def bottleneck(
    input_node: EnrichedNetworkNode, idx: int, i: int, strides: int = 1
) -> EnrichedNetworkNode:
    """Build a bottleneck residual block: 1x1 squeeze, 3x3 conv, 1x1 expand,
    merged with the skip path by addition.

    Leftover debug print statements were removed from library code.
    """
    conv1 = conv_batch_norm_relu_squeeze(
        input_node, f"Stage{idx}-Block{i}-{0}", strides
    )
    conv2 = conv_batch_norm_relu(conv1, f"Stage{idx}-Block{i}-{1}", 1)
    conv3 = conv_batch_norm_relu_squeeze(conv2, f"Stage{idx}-Block{i}-{2}", 1)
    # Project the skip path with a strided 1x1 conv when downsampling.
    residual = (
        input_node
        if strides == 1
        else skip_downsample(predecessor=input_node, idx=f"BlockSkip{idx}-")
    )
    add = addition([residual, conv3], f"Stage{idx}-Block{i}")
    return add
def head(feature_extractor: EnrichedNetworkNode) -> EnrichedNetworkNode:
    """Attach the classification head: global average pooling followed by a
    dense softmax layer.

    Leftover debug print statements were removed from library code.
    """
    readout = EnrichedNetworkNode(
        name="GlobalAveragePooling",
        layer_info=LayerDefinition(
            name="Global Average Pooling", kernel_size=1, stride_size=None
        ),
        predecessors=[feature_extractor],
    )
    softmax = EnrichedNetworkNode(
        name="Softmax",
        layer_info=LayerDefinition(
            name="DenseLayer", kernel_size=None, stride_size=None
        ),
        predecessors=[readout],
    )
    return softmax
def stage(
    predecessor: EnrichedNetworkNode,
    idx: int,
    num_blocks: int,
    strides: int,
    block: Callable[[EnrichedNetworkNode, int, int, int], EnrichedNetworkNode],
) -> EnrichedNetworkNode:
    """Build one ResNet stage consisting of num_blocks blocks.

    Only the first block applies the given stride (for downsampling); the
    remaining blocks use stride 1. A leftover debug print was removed.
    """
    current_block = block(predecessor, idx, 0, strides)
    for i in range(1, num_blocks):
        current_block = block(current_block, idx, i, 1)
    return current_block
def resnet(
    stage_config: List[int],
    block: Callable[[EnrichedNetworkNode, int, int], EnrichedNetworkNode],
    stem_factory=stem,
) -> EnrichedNetworkNode:
    """Assemble a ResNet graph: stem, configured stages, classification head.

    stage_config holds the number of blocks per stage; every stage after the
    first downsamples with stride 2.
    """
    current = stem_factory()
    for stage_idx, block_count in enumerate(stage_config):
        current = stage(
            predecessor=current,
            idx=stage_idx,
            num_blocks=block_count,
            strides=1 if stage_idx == 0 else 2,
            block=block,
        )
    return head(current)
def resnet18() -> EnrichedNetworkNode:
    """ResNet-18: four stages of two basic residual blocks each."""
    return resnet(stage_config=[2, 2, 2, 2], block=residual_block)
def resnet36() -> EnrichedNetworkNode:
    """Basic-block ResNet with a [3, 4, 6, 3] stage configuration.

    NOTE(review): this configuration is commonly known as ResNet-34
    (same stage config as ``resnet50`` but with basic blocks); the name
    ``resnet36`` is kept for backward compatibility.
    """
    return resnet(stage_config=[3, 4, 6, 3], block=residual_block)
def resnet50() -> EnrichedNetworkNode:
    """ResNet-50: [3, 4, 6, 3] stages of bottleneck blocks."""
    return resnet(stage_config=[3, 4, 6, 3], block=bottleneck)
def resnet101() -> EnrichedNetworkNode:
    """ResNet-101: [3, 4, 23, 3] stages of bottleneck blocks."""
    return resnet(stage_config=[3, 4, 23, 3], block=bottleneck)
def resnet152() -> EnrichedNetworkNode:
    """ResNet-152: [3, 8, 36, 3] stages of bottleneck blocks."""
    return resnet(stage_config=[3, 8, 36, 3], block=bottleneck)
from typing import Optional
from rfa_toolbox.graphs import EnrichedNetworkNode, LayerDefinition
def conv_batch_norm_relu(
    predecessor: EnrichedNetworkNode, idx: int, filters: Optional[int] = None
) -> EnrichedNetworkNode:
    """Append a 3x3 Conv-BatchNorm-ReLU node (stride 1) to ``predecessor``."""
    layer = LayerDefinition(
        name="Conv3x3-BatchNorm-ReLU", kernel_size=3, stride_size=1, filters=filters
    )
    return EnrichedNetworkNode(
        name=f"{idx}-Conv3x3-BatchNorm-ReLU",
        layer_info=layer,
        predecessors=[predecessor],
    )
def max_pooling(predecessor: EnrichedNetworkNode, idx: int) -> EnrichedNetworkNode:
    """Append a 2x2, stride-2 max-pooling node to ``predecessor``."""
    layer = LayerDefinition(name="MaxPooling", kernel_size=2, stride_size=2)
    return EnrichedNetworkNode(
        name=f"{idx}-MaxPooling",
        layer_info=layer,
        predecessors=[predecessor],
    )
def head(pool: EnrichedNetworkNode) -> EnrichedNetworkNode:
    """Append the VGG classification head (Readout -> Dense -> Softmax).

    All three nodes are dense layers; only their display names differ.
    """
    node = pool
    for node_name in ("Readout", "Dense", "Softmax"):
        node = EnrichedNetworkNode(
            name=node_name,
            layer_info=LayerDefinition(
                name="DenseLayer", kernel_size=None, stride_size=None
            ),
            predecessors=[node],
        )
    return node
def vgg11() -> EnrichedNetworkNode:
    """Build the VGG-11 architecture graph."""
    node = EnrichedNetworkNode(
        name="input",
        layer_info=LayerDefinition(name="Input", kernel_size=1, stride_size=1),
        predecessors=[],
    )
    conv_idx = 0
    # Number of convolutions per stage; each stage ends with a max-pooling.
    for pool_idx, n_convs in enumerate((1, 1, 2, 2, 2)):
        for _ in range(n_convs):
            node = conv_batch_norm_relu(node, conv_idx)
            conv_idx += 1
        node = max_pooling(node, pool_idx)
    return head(node)
def vgg13() -> EnrichedNetworkNode:
    """Build the VGG-13 architecture graph."""
    node = EnrichedNetworkNode(
        name="input",
        layer_info=LayerDefinition(name="Input", kernel_size=1, stride_size=1),
        predecessors=[],
    )
    conv_idx = 0
    # Number of convolutions per stage; each stage ends with a max-pooling.
    for pool_idx, n_convs in enumerate((2, 2, 2, 2, 2)):
        for _ in range(n_convs):
            node = conv_batch_norm_relu(node, conv_idx)
            conv_idx += 1
        node = max_pooling(node, pool_idx)
    return head(node)
def vgg16() -> EnrichedNetworkNode:
    """Build the VGG-16 architecture graph."""
    node = EnrichedNetworkNode(
        name="input",
        layer_info=LayerDefinition(name="Input", kernel_size=1, stride_size=1),
        predecessors=[],
    )
    conv_idx = 0
    # Number of convolutions per stage; each stage ends with a max-pooling.
    for pool_idx, n_convs in enumerate((2, 2, 3, 3, 3)):
        for _ in range(n_convs):
            node = conv_batch_norm_relu(node, conv_idx)
            conv_idx += 1
        node = max_pooling(node, pool_idx)
    return head(node)
def vgg19() -> EnrichedNetworkNode:
    """Build the VGG-19 architecture graph."""
    node = EnrichedNetworkNode(
        name="input",
        layer_info=LayerDefinition(name="Input", kernel_size=1, stride_size=1),
        predecessors=[],
    )
    conv_idx = 0
    # Number of convolutions per stage; each stage ends with a max-pooling.
    for pool_idx, n_convs in enumerate((2, 2, 4, 4, 4)):
        for _ in range(n_convs):
            node = conv_batch_norm_relu(node, conv_idx)
            conv_idx += 1
        node = max_pooling(node, pool_idx)
    return head(node)
from typing import List, Sequence, Tuple, Union
import numpy as np
from rfa_toolbox.graphs import EnrichedNetworkNode
def obtain_all_nodes(output_node: EnrichedNetworkNode) -> List[EnrichedNetworkNode]:
    """Fetch all nodes of the compute graph from a single node.

    Args:
        output_node: output node of the graph

    Returns:
        A List containing all EnrichedNetworkNodes.
    """
    nodes = output_node.all_layers
    return nodes
def obtain_border_layers(
    output_node: EnrichedNetworkNode, input_resolution: int, filter_dense: bool = True
) -> List[EnrichedNetworkNode]:
    """Obtain all border layers.

    Args:
        output_node: a node of the compute graph
        input_resolution: the input resolution for which the
            border layer should be computed
        filter_dense: exclude all layers with infinite receptive field size
            (essentially all layers that are fully connected
            or successors of fully connected layers).
            This is True by default.

    Returns:
        All layers predicted to be unproductive.
    """
    border = []
    for candidate in obtain_all_nodes(output_node):
        if candidate.is_border(input_resolution):
            border.append(candidate)
    if filter_dense:
        return filters_non_convolutional_node(border)
    return border
def obtain_all_critical_layers(
    output_node: EnrichedNetworkNode, input_resolution: int, filter_dense: bool = True
) -> List[EnrichedNetworkNode]:
    """Obtain all critical layers.

    A layer is defined as critical if it has a receptive field size LARGER
    than the input resolution. Critical layers have substantial
    probability of being unproductive.

    Args:
        output_node: a node of the compute graph
        input_resolution: the input resolution for which the critical
            layers shall be computed
        filter_dense: exclude all layers with infinite receptive field size
            (essentially all layers that are
            fully connected or successors of fully connected layers).
            This is True by default.

    Returns:
        All layers predicted to be critical.
    """
    critical = []
    for candidate in obtain_all_nodes(output_node):
        if candidate.receptive_field_min > input_resolution:
            critical.append(candidate)
    if filter_dense:
        return filters_non_convolutional_node(critical)
    return critical
def filters_non_convolutional_node(
    nodes: List[EnrichedNetworkNode],
) -> List[EnrichedNetworkNode]:
    """Filter all components that are not part of the feature extractor.

    Args:
        nodes: the list of nodes that shall be filtered.

    Returns:
        A list of all layers that are part of the feature extractor.
        This is decided by the minimum receptive field size, which is
        non-infinite for layers that are part of the feature extractor.
        Please note that layers like Dropout, BatchNormalization,
        which are agnostic towards the input shape,
        are treated like a convolutional layer with a kernel
        and stride size of 1.
    """
    result = []
    for node in nodes:
        rf_min = node.receptive_field_min
        # A tuple is already a Sequence, so the original extra tuple check
        # was redundant; ndarray is checked as well for consistency with
        # _func_rf_for_dim.
        if isinstance(rf_min, (Sequence, np.ndarray)):
            if not np.any(np.isinf(rf_min)):
                result.append(node)
        elif rf_min != np.inf:
            result.append(node)
    return result
def filters_non_infinite_rf_sizes(
    rf_sizes: List[Union[Tuple[int, ...], int]],
) -> List[Union[Tuple[int, ...], int]]:
    """Filter out all infinite receptive field sizes.

    Args:
        rf_sizes: receptive field sizes to filter; each entry is either a
            scalar or a per-dimension tuple.

    Returns:
        All entries of ``rf_sizes`` that are fully finite. A tuple entry
        is kept only if none of its components is infinite.
    """
    result = []
    for rf_size in rf_sizes:
        # A tuple is already a Sequence, so the original extra tuple check
        # was redundant.
        if isinstance(rf_size, Sequence):
            if not np.any(np.isinf(rf_size)):
                result.append(rf_size)
        elif rf_size != np.inf:
            result.append(rf_size)
    return result
def filter_layers_not_expanding_receptive_field(
    nodes: List[EnrichedNetworkNode],
) -> List[EnrichedNetworkNode]:
    """Keep only layers with a kernel size greater than 1.

    In essence, all layers that expand the receptive field; layers whose
    kernel is all ones or all infinite are dropped.
    """
    expanding = []
    for candidate in nodes:
        kernel = np.asarray(candidate.kernel_size)
        is_unit_kernel = bool(np.all(kernel == 1))
        is_infinite = bool(np.all(np.isinf(kernel)))
        if not (is_infinite or is_unit_kernel):
            expanding.append(candidate)
    return expanding
def _find_highest_cardinality(arrays: Union[int, Sequence, np.ndarray, Tuple]) -> int:
"""Find the highest cardinality of the given array.
Args:
arrays: a list of arrays or a single array
Returns:
The highest cardinality of the given array.
"""
return max([len(array) for array in arrays if hasattr(array, "__len__")] + [1])
def _func_rf_for_dim(
cardinality: int,
prev_rf: List[Union[int, Sequence[int]]],
func=min,
default: int = 0,
) -> int:
"""Find the minimum receptive field size for the given dim.
Args:
cardinality: the cardinality of the receptive field
prev_rf: the previous receptive field sizes
Returns:
The minimum receptive field size for the given cardinality.
"""
result: List[int] = []
for rf in prev_rf:
result.append(rf[cardinality]) if isinstance(rf, Sequence) or isinstance(
rf, np.ndarray
) else result.append(rf)
return func(result) if result else default
def unproductive_resolution(node: EnrichedNetworkNode) -> Union[int, Tuple[int, ...]]:
    """Obtain the resolution below which this layer is unproductive.

    Aggregates the minimum receptive field sizes of the node's
    predecessors per dimension; returns a scalar for 1D results,
    a tuple otherwise.
    """
    prev_rf = [pred.receptive_field_min for pred in node.predecessors]
    cardinality = max(_find_highest_cardinality(prev_rf), 1)
    per_dim = [_func_rf_for_dim(dim, prev_rf) for dim in range(cardinality)]
    if cardinality == 1:
        return per_dim[0]
    return tuple(per_dim)
def find_smallest_resolution_with_no_unproductive_layer(
    graph: EnrichedNetworkNode,
) -> Tuple[int, ...]:
    """Find the smallest resolution for which no layer is unproductive.

    This is the case if no layer with a kernel size greater than
    1 has predecessors with a receptive
    field size larger than the input resolution.
    """
    all_nodes = obtain_all_nodes(graph)
    expanding_nodes = filter_layers_not_expanding_receptive_field(all_nodes)
    unproductive_resolutions = [
        unproductive_resolution(node) for node in expanding_nodes
    ]
    # BUGFIX: use max(..., 1) so multi-dimensional receptive fields yield
    # one value per dimension; the original min(..., 1) always collapsed
    # the cardinality to 1 (cf. the analogous computation in
    # unproductive_resolution).
    cardinality = max(_find_highest_cardinality(unproductive_resolutions), 1)
    result = []
    for i in range(cardinality):
        result.append(_func_rf_for_dim(i, unproductive_resolutions, max))
    return tuple(result)
def input_resolution_range(
    graph: EnrichedNetworkNode,
    filter_all_inf_rf: bool = True,
    filter_kernel_size_1: bool = False,
    cardinality: int = 2,
    lower_bound: bool = False,
) -> Tuple[Tuple[int, ...], Tuple[int, ...]]:
    """Obtain the smallest and largest feasible input resolution.

    The smallest feasible input resolution is defined as the smallest input
    resolution with no unproductive convolutional layers.
    The largest feasible input resolution is defined as the input
    resolution with at least one convolutional layer with a maximum
    receptive field large enough to grasp the entire image.
    These can be considered upper and lower bound for potential input
    resolutions. Everything smaller than the provided input resolution will
    result in unproductive layers, any resolution larger than the largest
    feasible input resolution will result in potential patterns being
    undetectable due to a too small receptive field size.

    Args:
        graph: The neural network
        filter_all_inf_rf: filters ALL infinite receptive field sizes before
            computing the result, this may come in handy
            if you want to ignore the influence
            of attention mechanisms like SE-modules,
            which technically add a global context to the image
            (increasing the maximum receptive field
            size to infinity in the process).
            However this can be somewhat
            misleading, since these types of modules are not really
            built to extract features from the image.
            This functionality is disabled by default.
        filter_kernel_size_1: exclude layers with a kernel size of 1 from
            the computation. Disabled by default.
        cardinality: The tensor shape, which is 2D by default.
        lower_bound: Disabled by default. If disabled, returns the lowest
            resolution which utilizes the entire
            network receptive field expansion.
            If enabled it returns the lowest resolution expected
            to yield no unproductive, weighted layers.

    Returns:
        Smallest and largest feasible input resolution.
    """
    all_nodes = obtain_all_nodes(graph)
    all_nodes = filters_non_convolutional_node(all_nodes)
    # Per-node lower bound implied by kernel size and accumulated stride.
    kernel_rf_min = [
        np.asarray(node.kernel_size) * np.asarray(node.get_maximum_scale_factor())
        for node in all_nodes
    ]
    if filter_kernel_size_1:
        # NOTE(review): this comparison assumes scalar kernel sizes; a
        # tuple-valued kernel_size would raise here — confirm callers.
        all_nodes = [node for node in all_nodes if node.kernel_size > 1]
    if not filter_all_inf_rf:
        rf_min = [xi.receptive_field_min for x in all_nodes for xi in x.predecessors]
        rf_max = [x.receptive_field_max for x in all_nodes]
    else:
        rf_min = [
            x._apply_function_on_receptive_field_sizes(
                lambda rf: min(filters_non_infinite_rf_sizes(rf), default=0)
            )
            for x in all_nodes
        ]
        rf_max = [
            x._apply_function_on_receptive_field_sizes(
                lambda rf: max(filters_non_infinite_rf_sizes(rf), default=0)
            )
            for x in all_nodes
        ]
    rf_min = rf_min + kernel_rf_min

    def find_max(
        rf: List[Union[Tuple[int, ...], int]],
        axis: int = 0,
    ) -> int:
        """Find the maximum value of a list of tuples or integers.

        Args:
            rf: a list of tuples or integers
            axis: the axis along which the maximum shall be found

        Returns:
            The maximum value of the list.
        """
        rf_no_tuples = {
            x[axis] if isinstance(x, Sequence) or isinstance(x, np.ndarray) else x
            for x in rf
        }
        return max(rf_no_tuples)

    r_max = tuple(find_max(rf_max, i) for i in range(cardinality))
    if lower_bound:
        res = find_smallest_resolution_with_no_unproductive_layer(graph)
        kernel_r_min = tuple(find_max(kernel_rf_min, i) for i in range(cardinality))
        # BUGFIX: the original condition read ``len(res) < i``, which always
        # selected res[0]; dimensions beyond the cardinality of ``res`` now
        # fall back to res[0] as intended.
        rf_r_min = tuple(
            res[i] if i < len(res) else res[0] for i in range(cardinality)
        )
        r_min = tuple(max(x, y) for x, y in zip(rf_r_min, kernel_r_min))
    else:
        r_min = tuple(find_max(rf_min, i) for i in range(cardinality))
    return r_min, r_max
import warnings
from typing import Callable, Dict, List, Optional, Tuple, Union
import torch
from attr import attrib, attrs
from graphviz import Digraph as GraphVizDigraph
from rfa_toolbox.encodings.pytorch.domain import LayerInfoHandler, NodeSubstitutor
from rfa_toolbox.encodings.pytorch.layer_handlers import (
AnyAdaptivePool,
AnyConv,
AnyHandler,
AnyPool,
ConvNormActivationHandler,
FlattenHandler,
FunctionalKernelHandler,
LinearHandler,
SqueezeExcitationHandler,
)
from rfa_toolbox.encodings.pytorch.substitutors import (
input_substitutor,
numeric_substitutor,
output_substitutor,
)
from rfa_toolbox.graphs import (
KNOWN_FILTER_MAPPING,
EnrichedNetworkNode,
LayerDefinition,
ReceptiveFieldInfo,
)
# Handlers used to translate jit-graph nodes into LayerDefinitions, tried
# in order; the first handler whose ``can_handle`` matches wins (AnyHandler
# is last and appears to serve as the fallback).
RESOLVING_STRATEGY = [
    ConvNormActivationHandler(),
    SqueezeExcitationHandler(),
    AnyConv(),
    AnyPool(),
    AnyAdaptivePool(),
    FlattenHandler(),
    LinearHandler(),
    FunctionalKernelHandler(),
    AnyHandler(),
]
# Substitutors applied after graph construction to remove artifact nodes
# (numeric, input and output markers) from the extracted graph.
SUBSTITUTION_STRATEGY = [
    numeric_substitutor(),
    input_substitutor(),
    output_substitutor(),
]
@attrs(auto_attribs=True, slots=True)
class Digraph:
    """Transforms the jit-compiled digraph into the graph-representation
    of this library.

    Mimics the relevant parts of the graphviz.Digraph interface
    (``attr``/``edge``/``node``/``subgraph``) so it can be used as a
    drop-in target while tracing the jit graph.

    Args:
        ref_mod: the neural network model in a non-jit-compiled
            variant
    """

    # The original (non-jit-compiled) model; handlers use it to look up
    # the modules referenced by the jit graph.
    ref_mod: torch.nn.Module
    # Kept for GraphViz interface compatibility; unused here.
    format: str = ""
    # Kept for GraphViz interface compatibility; unused here.
    graph_attr: Dict[str, str] = attrib(factory=dict)
    # Directed edges as (source_id, target_id) pairs.
    edge_collection: List[Tuple[str, str]] = attrib(factory=list)
    raw_nodes: Dict[str, Tuple[str, str]] = attrib(factory=dict)
    # Maps node ids to their resolved LayerDefinition.
    layer_definitions: Dict[str, LayerDefinition] = attrib(factory=dict)
    # Handlers tried in order; the first whose can_handle() matches wins.
    layer_info_handlers: List[LayerInfoHandler] = attrib(
        factory=lambda: RESOLVING_STRATEGY
    )
    # Substitutors applied after graph construction to remove artifact nodes.
    layer_substitutors: List[NodeSubstitutor] = attrib(
        factory=lambda: SUBSTITUTION_STRATEGY
    )
    # Filter applied to the receptive field infos of newly created nodes.
    filter_rf: Callable[
        [Tuple[ReceptiveFieldInfo, ...]], Tuple[ReceptiveFieldInfo, ...]
    ] = KNOWN_FILTER_MAPPING[None]

    def _find_predecessors(self, name: str) -> List[str]:
        """Return the ids of all nodes with an edge pointing to ``name``."""
        return [e[0] for e in self.edge_collection if e[1] == name]

    def _get_layer_definition(
        self,
        label: str,
        kernel_size: Optional[Union[Tuple[int, ...], int]] = None,
        stride_size: Optional[Union[Tuple[int, ...], int]] = None,
    ) -> LayerDefinition:
        """Resolve ``label`` into a LayerDefinition via the first matching handler.

        Raises:
            ValueError: if no registered handler can handle the label.
        """
        resolvable = self._get_resolvable(label)
        name = self._get_name(label)
        for handler in self.layer_info_handlers:
            if handler.can_handle(label):
                return handler(
                    model=self.ref_mod,
                    resolvable_string=resolvable,
                    name=name,
                    kernel_size=kernel_size,
                    stride_size=stride_size,
                )
        raise ValueError(f"Did not find a way to handle the following layer: {name}")

    def attr(self, label: str) -> None:
        """This is a dummy function to mimic the behavior
        of a digraph-object from Graphviz with no functionality."""
        pass

    def edge(self, node_id1: str, node_id2: str) -> None:
        """Creates an directed edge in the compute graph
        from one node to the other in the current Digraph-Instance

        Args:
            node_id1: the id of the start node
            node_id2: the id of the target node

        Returns:
            Nothing.
        """
        self.edge_collection.append((node_id1, node_id2))

    def node(
        self,
        name: str,
        label: Optional[str] = None,
        shape: str = "box",
        style: Optional[str] = None,
        kernel_size: Optional[Union[Tuple[int, ...], int]] = None,
        stride_size: Optional[Union[Tuple[int, ...], int]] = None,
        units: Optional[int] = None,
        filters: Optional[int] = None,
    ) -> None:
        """Creates a node in the digraph-instance.

        Args:
            name: the name of the node, the name must be unique
                to properly identify the node.
            label: the label is descriptive for the functionality
                of the node
            shape: unused variable for compatibility with GraphViz
            style: unused variable for compatibility with GraphViz
            kernel_size: optional kernel size forwarded to the handler
            stride_size: optional stride size forwarded to the handler
            units: accepted for interface compatibility;
                currently unused in this method
            filters: accepted for interface compatibility;
                currently unused in this method

        Returns:
            Nothing.
        """
        label = name if label is None else label
        layer_definition = self._get_layer_definition(
            label, kernel_size=kernel_size, stride_size=stride_size
        )
        self.layer_definitions[name] = layer_definition

    def subgraph(self, name: str) -> GraphVizDigraph:
        """This is a dummy function to mimic the behavior
        of a digraph-object from Graphviz with no functionality."""
        return self

    def __enter__(self):
        # Context-manager support for GraphViz interface compatibility.
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Nothing to clean up; exceptions are not suppressed.
        return

    def _is_resolvable(
        self, predecessors: List[str], resolved_nodes: Dict[str, EnrichedNetworkNode]
    ) -> bool:
        """True if all predecessors are already resolved (or there are none)."""
        if not predecessors:
            return True
        else:
            return all([pred in resolved_nodes for pred in predecessors])

    def _find_resolvable_node(
        self,
        node_to_pred_map: Dict[str, List[str]],
        resolved_nodes: Dict[str, EnrichedNetworkNode],
    ) -> Optional[str]:
        """Return the name of any unresolved node whose predecessors are all
        resolved, or None if no such node exists."""
        for name, preds in node_to_pred_map.items():
            if name not in resolved_nodes and self._is_resolvable(
                preds, resolved_nodes
            ):
                return name
        return None

    def _substitute(self, node: EnrichedNetworkNode):
        """Apply all registered substitutors to the layers of the graph."""
        all_Layers = node.all_layers[:]
        for substitutor in self.layer_substitutors:
            for nd in all_Layers:
                if substitutor.can_handle(nd.layer_info.name):
                    substitutor(nd)
                    # NOTE(review): this ``continue`` is a no-op, since it is
                    # the last statement of the loop body — confirm whether
                    # ``break`` was intended.
                    continue
        return

    def _check_for_lone_node(self, resolved_nodes: Dict[str, EnrichedNetworkNode]):
        """Warn about nodes with neither predecessors nor successors."""
        # NOTE(review): ``name`` is unused; iterating .values() would suffice.
        for name, node in resolved_nodes.items():
            if len(node.predecessors) == 0 and len(node.succecessors) == 0:
                warnings.warn(
                    f"Found a node with no predecessors and no successors: "
                    f"'{node.layer_info.name}',"
                    f" this may be caused by some control-flow in "
                    f" this node disabling any processing"
                    f" within the node.",
                    UserWarning,
                )

    def to_graph(self) -> EnrichedNetworkNode:
        """Transforms the graph stored in the Digraph-Instance into
        a graph consisting of EnrichedNetworkNode-objects.

        Allowing the computation of border layers and the visualization of the
        graph using the visualize-Module.

        Returns:
            The output-node of the EnrichedNetworkNode-based graph
        """
        node_to_pred_map: Dict[str, List[str]] = {}
        for name in self.layer_definitions.keys():
            preds = self._find_predecessors(name)
            node_to_pred_map[name] = preds
        resolved_nodes: Dict[str, EnrichedNetworkNode] = {}
        resolved_node = None
        # Resolve nodes in topological order: repeatedly pick any node whose
        # predecessors are all resolved; stop early if no progress is possible.
        while len(resolved_nodes) != len(node_to_pred_map):
            resolvable_node_name = self._find_resolvable_node(
                node_to_pred_map, resolved_nodes
            )
            if resolvable_node_name is None:
                break
            resolved_node = self.create_enriched_node(
                resolved_nodes,
                node_to_pred_map[resolvable_node_name],
                self.layer_definitions[resolvable_node_name],
                resolvable_node_name,
            )
            resolved_nodes[resolvable_node_name] = resolved_node
        self._check_for_lone_node(resolved_nodes)
        # NOTE(review): if no node was resolved (empty graph),
        # ``resolved_node`` is None here and _substitute would fail —
        # assumes a non-empty graph.
        self._substitute(resolved_node)
        return resolved_node

    def _get_resolvable(self, name: str) -> str:
        """Extract the resolvable module-path part (first token) of a label."""
        return name.split(" ")[0]

    def _get_name(self, label: str) -> str:
        """Extract the human-readable name from a label of the form 'x (name)'."""
        if "(" in label:
            return label.split("(")[1].replace(")", "")
        else:
            return label

    def create_enriched_node(
        self,
        resolved_nodes: Dict[str, EnrichedNetworkNode],
        preds: List[str],
        layer_def: LayerDefinition,
        name: str,
    ) -> EnrichedNetworkNode:
        """Creates an enriched node from the current graph node.

        Args:
            resolved_nodes: a dictionary, mapping node-ids
                to their corresponding EnrichedNetworkNode instances
            preds: a list of the direct predecessor (ids)
            layer_def: the layer definition instance for this node.
            name: the name of the node, used as id

        Returns:
            The EnrichedNetworkNode instance of the same node
        """
        pred_nodes: List[EnrichedNetworkNode] = [resolved_nodes[p] for p in preds]
        node = EnrichedNetworkNode(
            name=name,
            layer_info=layer_def,
            predecessors=pred_nodes,
            receptive_field_info_filter=self.filter_rf,
        )
        return node
from rfa_toolbox.graphs import EnrichedNetworkNode, LayerDefinition
try:
from typing import Protocol
except ImportError:
from typing_extensions import Protocol
import torch
class LayerInfoHandler(Protocol):
    """Creates a LayerDefinition from the model and a resolvable string."""

    def can_handle(self, name: str) -> bool:
        """Checks if this handler can process the
        node in the compute graph of the model.

        Args:
            name: the name of the node in the compute graph.

        Returns:
            True if the node can be processed into a
            valid LayerDefinition by this handler.
        """
        ...

    def __call__(
        self, model: torch.nn.Module, resolvable_string: str, name: str
    ) -> LayerDefinition:
        """Transform a node in the JIT-compiled compute-graph into a valid LayerDefinition
        for further processing and visualization.

        NOTE(review): Digraph._get_layer_definition additionally passes
        ``kernel_size`` and ``stride_size`` keyword arguments to handlers;
        consider extending this protocol signature accordingly.

        Args:
            model: the entire model in a non-jit-compiled version
            resolvable_string: a string extracted from the jit-compiled
                version of the model.
                The string is formatted in a way that allows
                the extraction of the module the compute-node
                in the jit-compiled version is referencing.
                This string is only resolvable if and only if
                the node is actually referencing a module.
                This is not the case if the model uses for
                example calls from the functional-library
                of PyTorch.
                This is commonly the case for stateless elements
                of a model like activation functions or
                "logistical" operations like concatenations,
                additions etc.
                that are necessary for the flow of information.
                In such cases the resolvable string only holds some
                information on the position of the function call
                within the forward-call and the name of the
                called function.
            name: the name of the node in question.

        Returns:
            A LayerDefinition that reflects the properties of the layer.
        """
        ...
class NodeSubstitutor(Protocol):
    """This object handles the conditional substitution of adjacent
    nodes into one single node in the network graph.

    Substitution is not necessary. However, when extracting the
    compute-graph from the JIT-compiler, artifact nodes caused by
    technical details of the module structure may cause an unnecessarily
    busy graph-structure.
    """

    def can_handle(self, name: str) -> bool:
        """Check if this substitutor can handle the node in question.

        Args:
            name: the name of the EnrichedNetworkNode-object

        Returns:
            True if this substitutor can substitute the node in question.
        """
        ...

    def __call__(self, node: EnrichedNetworkNode) -> EnrichedNetworkNode:
        """Remove the node in question from the compute graph.

        Args:
            node: the node to remove

        Returns:
            another node from the repaired compute-graph.
        """
        ...
from rfa_toolbox.graphs import LayerDefinition
try:
from typing import Any, Dict, Protocol
except ImportError:
from typing_extensions import Protocol
from attr import attrs
class LayerInfoHandler(Protocol):
    """Creates a LayerDefinition from the json-representation of a
    tensorflow/Keras graph node."""

    def can_handle(self, node: Dict[str, Any]) -> bool:
        """Checks if this handler can process the
        node in the compute graph of the model.

        Args:
            node: the node in question (json-representation as a dict)

        Returns:
            True if the node can be processed into a
            valid LayerDefinition by this handler.
        """
        ...

    def __call__(self, node: Dict[str, Any]) -> LayerDefinition:
        """Transform the json-representation of a compute node in the tensorflow-graph
        into a LayerDefinition.

        Args:
            node: the node in question

        Returns:
            A LayerDefinition that reflects the properties of the layer.
        """
        ...
@attrs(frozen=True, slots=True, auto_attribs=True)
class KernelBasedHandler(LayerInfoHandler):
    def can_handle(self, node: Dict[str, Any]) -> bool:
        """Handles only layers featuring a kernel_size"""
        return "kernel_size" in node["config"]

    def __call__(self, node: Dict[str, Any]) -> LayerDefinition:
        """Transform the json-representation
        of a compute node in the tensorflow-graph"""
        config = node["config"]
        kernel_repr = "x".join(str(dim) for dim in config["kernel_size"])
        name = f"{node['class_name']} {kernel_repr} / {config['strides']}"
        return LayerDefinition(
            name=name,
            kernel_size=config["kernel_size"],
            stride_size=config["strides"],
            # dict.get yields None when no filters are configured.
            filters=config.get("filters"),
        )
@attrs(frozen=True, slots=True, auto_attribs=True)
class PoolingBasedHandler(LayerInfoHandler):
    def can_handle(self, node: Dict[str, Any]) -> bool:
        """Handles only layers featuring a pool_size"""
        return "pool_size" in node["config"]

    def __call__(self, node: Dict[str, Any]) -> LayerDefinition:
        """Transform the json-representation of a
        compute node in the tensorflow-graph"""
        config = node["config"]
        pool_repr = "x".join(str(dim) for dim in config["pool_size"])
        name = f"{node['class_name']} {pool_repr} / {config['strides']}"
        return LayerDefinition(
            name=name,
            kernel_size=config["pool_size"],
            stride_size=config["strides"],
        )
@attrs(frozen=True, slots=True, auto_attribs=True)
class DenseHandler(LayerInfoHandler):
    def can_handle(self, node: Dict[str, Any]) -> bool:
        """Handles only layers featuring units as attribute"""
        return "units" in node["config"]

    def __call__(self, node: Dict[str, Any]) -> LayerDefinition:
        """Transform the json-representation
        of a compute node in the tensorflow-graph"""
        return LayerDefinition(
            name=node["class_name"],
            kernel_size=1,
            stride_size=1,
            units=node["config"]["units"],
        )
@attrs(frozen=True, slots=True, auto_attribs=True)
class GlobalPoolingHandler(LayerInfoHandler):
    def can_handle(self, node: Dict[str, Any]) -> bool:
        """Handles only global pooling layers, i.e. class names containing
        both "Global" and "Pooling" (the previous docstring was a
        copy-paste from DenseHandler)."""
        return "Global" in node["class_name"] and "Pooling" in node["class_name"]

    def __call__(self, node: Dict[str, Any]) -> LayerDefinition:
        """Transform the json-representation
        of a compute node in the tensorflow-graph"""
        name = node["class_name"]
        return LayerDefinition(name=name)
@attrs(frozen=True, slots=True, auto_attribs=True)
class FlattenHandler(LayerInfoHandler):
    def can_handle(self, node: Dict[str, Any]) -> bool:
        """Handles only Flatten layers (the previous docstring was a
        copy-paste from DenseHandler)."""
        return "Flatten" in node["class_name"]

    def __call__(self, node: Dict[str, Any]) -> LayerDefinition:
        """Transform the json-representation
        of a compute node in the tensorflow-graph"""
        name = node["class_name"]
        return LayerDefinition(name=name, kernel_size=None)
@attrs(frozen=True, slots=True, auto_attribs=True)
class InputHandler(LayerInfoHandler):
    def can_handle(self, node: Dict[str, Any]) -> bool:
        """This is strictly meant for handling input nodes"""
        return node["class_name"] == "InputLayer"

    def __call__(self, node: Dict[str, Any]) -> LayerDefinition:
        """Transform the json-representation of
        a compute node in the tensorflow-graph"""
        layer_name = node["class_name"]
        return LayerDefinition(name=layer_name, kernel_size=1, stride_size=1)
@attrs(frozen=True, slots=True, auto_attribs=True)
class AnyHandler(LayerInfoHandler):
    def can_handle(self, node: Dict[str, Any]) -> bool:
        """This is a catch-all handler; it accepts every node."""
        return True

    def __call__(self, node: Dict[str, Any]) -> LayerDefinition:
        """Transform the json-representation of a
        compute node in the tensorflow-graph"""
        layer_name = node["class_name"]
        return LayerDefinition(name=layer_name, kernel_size=1, stride_size=1)
.. _getstarted:
Get started
============
The R-factor scripts can be used to:
1. Compute the erosivity :math:`EI_{30}` values for a number of stations and
years.
2. Use the computed :math:`EI_{30}` values to compute an R-value.
From 10' rain data to EI for a single station/year
--------------------------------------------------
The input of the erosivity algorithm is a rainfall time series with a 10'
resolution as a Pandas DataFrame, e.g.
+-----+---------------------+-----------+-----------+
| idx | datetime | rain_mm | station |
+=====+=====================+===========+===========+
| 0 | 2018-01-01 02:10:00 | 0.27 | P01 |
+-----+---------------------+-----------+-----------+
| 1 | 2018-01-01 02:20:00 | 0.02 | P01 |
+-----+---------------------+-----------+-----------+
| 2 | 2018-01-01 03:10:00 | 0.48 | P01 |
+-----+---------------------+-----------+-----------+
| ... | ... | ... | ... |
+-----+---------------------+-----------+-----------+
The data can be derived from any source and contain more columns (e.g. the
measurement station identifier), but the ``datetime``, ``rain_mm`` and
``station`` are required to apply the erosivity algorithm. The data can
contain data of either a single year/station or multiple years/stations (see
further down for how to calculate multiple stations together). Make sure that
the ``station`` column is present also for the single-station case.
Erosivity (EI30-values) for a single station/year combination can be computed
(make sure to activate the conda environment, ``conda activate rfactor``).
The :func:`rfactor.compute_erosivity` function applies the algorithm to a
DataFrame containing data for a single station/year, e.g. for the data in
DataFrame ``df_rain``.
.. code-block:: python
from rfactor import compute_erosivity, maximum_intensity
erosivity = compute_erosivity(df_rain, maximum_intensity)
.. note::
The :func:`rfactor.rfactor.maximum_intensity` is the default method to
derive the maximum 30min interval rain intensity of an event, but the user
can provide a custom function as well or the original Matlab
implementation version
:func:`rfactor.rfactor.maximum_intensity_matlab_clone` can be used to
compare with the corrected Matlab implementation.
The output is a DataFrame with the intermediate results and the cumulative
erosivity of each of the defined events:
+-------------+---------------------+--------+----------------------+-----------------+---------+------+
| event_idx | datetime | ... | all_event_rain_cum | erosivity_cum | station | year |
+=============+=====================+========+======================+=================+=========+======+
| 2 | 2018-01-01 14:30:00 | ... | 1.08 | 5.01878 | P01 | 2018 |
+-------------+---------------------+--------+----------------------+-----------------+---------+------+
| 3 | 2018-01-02 16:30:00 | ... | 12.37 | 8.00847 | P01 | 2018 |
+-------------+---------------------+--------+----------------------+-----------------+---------+------+
| 4 | 2018-01-04 09:10:00 | ... | 20.13 | 8.33275 | P01 | 2018 |
+-------------+---------------------+--------+----------------------+-----------------+---------+------+
| 5 | 2018-01-05 02:20:00 | ... | 22.47 | 8.61462 | P01 | 2018 |
+-------------+---------------------+--------+----------------------+-----------------+---------+------+
| ... | ... | ... | ... | ... | ... | ... |
+-------------+---------------------+--------+----------------------+-----------------+---------+------+
As the "erosivity_cum" column contains the cumulative erosivity over the
events, the last value is the R-factor of the year/station:
.. code-block:: python
erosivity["erosivity_cum"].iloc[-1]
Other relevant derived statistics, such as the monthly and biweekly based
R-factor can be calculated using the existing Pandas functionalities:
.. code-block:: python
erosivity.resample("M", on="datetime")["erosivity"].sum() # Monthly value
erosivity.resample("SM", on="datetime")["erosivity"].sum() # Biweekly value
Calculating multiple station/year combinations
----------------------------------------------
When data are available from multiple stations over multiple years in a single
DataFrame, the :func:`rfactor.compute_erosivity` function applies the
erosivity algorithm on each year/station combination in the input rain
DataFrame. To do so, an additional column with the ``station`` name is
required:
+-----+---------------------+-----------+---------+
| | datetime | rain_mm | station |
+=====+=====================+===========+=========+
| 0 | 2018-01-01 02:10:00 | 0.27 | P01 |
+-----+---------------------+-----------+---------+
| 1 | 2018-01-01 02:20:00 | 0.02 | P01 |
+-----+---------------------+-----------+---------+
| 2 | 2018-01-01 03:10:00 | 0.48 | P01 |
+-----+---------------------+-----------+---------+
| ... | ... | ... | ... |
+-----+---------------------+-----------+---------+
| 10 | 2019-01-01 01:10:00 | 0.52 | P01 |
+-----+---------------------+-----------+---------+
| 11 | 2019-01-01 01:20:00 | 0.20 | P01 |
+-----+---------------------+-----------+---------+
| ... | ... | ... | ... |
+-----+---------------------+-----------+---------+
| 123 | 2018-01-01 00:10:00 | 0.02 | P02 |
+-----+---------------------+-----------+---------+
| 124 | 2018-01-01 00:20:00 | 0.32 | P02 |
+-----+---------------------+-----------+---------+
| ... | ... | ... | ... |
+-----+---------------------+-----------+---------+
.. code-block:: python
from rfactor import compute_erosivity, maximum_intensity
erosivity = compute_erosivity(df_rain, maximum_intensity)
The output is very similar to the previous section, but the data contains now
multiple years and/or stations:
+-------------+---------------------+--------+----------------------+-----------------+---------+------+
| event_idx | datetime | ... | all_event_rain_cum | erosivity_cum | station | year |
+=============+=====================+========+======================+=================+=========+======+
| 2 | 2018-01-01 14:30:00 | ... | 1.08 | 5.01878 | P01 | 2018 |
+-------------+---------------------+--------+----------------------+-----------------+---------+------+
| 3 | 2018-01-02 16:30:00 | ... | 12.37 | 8.00847 | P01 | 2018 |
+-------------+---------------------+--------+----------------------+-----------------+---------+------+
| ... | ... | ... | ... | ... | ... | ... |
+-------------+---------------------+--------+----------------------+-----------------+---------+------+
| 1 | 2019-01-04 09:10:00 | ... | 20.13 | 8.33275 | P01 | 2019 |
+-------------+---------------------+--------+----------------------+-----------------+---------+------+
| 2 | 2019-01-05 02:20:00 | ... | 22.47 | 8.61462 | P01 | 2019 |
+-------------+---------------------+--------+----------------------+-----------------+---------+------+
| ... | ... | ... | ... | ... | ... | ... |
+-------------+---------------------+--------+----------------------+-----------------+---------+------+
To derive the R-factor for each year/station in the data set, one can use the
existing Pandas functionalities:
.. code-block:: python
erosivity.groupby(["station", "year"])["erosivity_cum"].last().reset_index()
File handling
-------------
This package provides a number of processing functions in the
:mod:`rfactor.process` module to enable compatibility of the input format with
the required data format defined in this package (see previous section).
Currently, next processing functions are implemented:
- :func:`rfactor.process.load_rain_file_matlab_legacy`: This is the processing
function used to process the ``Matlab KU-Leuven`` file legacy.
- :func:`rfactor.process.load_rain_file_csv_vmm`: This is the processing
function used to process the ``VMM CSV`` file legacy.
Both file-formats can be loaded with the defined processing function, i.e.
.. code-block:: python
from pathlib import Path
from rfactor.process import load_rain_file_matlab_legacy,
load_rain_file_csv_vmm
# Load a Matlab-file
fname = Path("/PATH/TO/YOUR/RAINFALL/DATA/FOLDER/P01_001_2018.txt")
from_matlab = load_rain_file_matlab_legacy(fname)
# Load a VMM CSV
fname = Path("/PATH/TO/YOUR/RAINFALL/DATA/FOLDER/P01_001.CSV")
from_vmm = load_rain_file_csv_vmm(fname)
Or a folder containing multiple files can be loaded:
.. code-block:: python
from pathlib import Path
from rfactor.process import load_rain_file_matlab_legacy,
load_rain_file_csv_vmm, load_rain_folder
# Load an entire set of Matlab-legacy files
folder = Path("/PATH/FOLDER/CONTAINING/MATLABFORMAT/FILES")
from_matlab = load_rain_folder(folder, load_rain_file_matlab_legacy)
# Load an entire set of VMM CSV-legacy files
folder = Path("/PATH/FOLDER/CONTAINING/VMMCSVFORMAT/FILES")
from_vmm = load_rain_folder(folder, load_rain_file_csv_vmm)
.. note::
Do not forget to use a :py:class:`pathlib.Path` defined file name or
folder name.
In the next subsections, the specifics for every file-legacy format are
explained.
Matlab KU-Leuven legacy
~~~~~~~~~~~~~~~~~~~~~~~
The input files are defined by text files (extension: ``.txt``) that
hold non-zero rainfall timeseries. The data are split per station and
per year with a specific datafile tag (format: **SOURCE\_STATION\_YEAR.txt**):
- KMI\_6414\_2004.txt
- KMI\_6414\_2005.txt
- ...
- KMI\_6434\_2003.txt
- KMI\_6434\_2004.txt
- ...
The content of each of this file is a **non-zero** rainfall timeseries
(no header, space delimited):
::
9390 1.00
9470 0.20
9480 0.50
10770 0.10
... ...
with the first column being the timestamp from the start of the year
(minutes) , and second the rainfall depth (in mm).
VMM CSV legacy
~~~~~~~~~~~~~~
Starting from 2018, a new input format is defined and used for the analysis of
Flanders. The reason for this is twofold:
1. The new format is compatible with the VMM output from VMM's WISKI-system.
2. The format allows to compute timeseries coverage (see ``diagnostics`` in
:func:`rfactor.process.load_rain_file`). This was not possible with the
Matlab-format as required to be a **non-zero**-timeseries.
The input files are defined by text files (extension: ``.CSV``) that hold
rainfall timeseries. The data are split per station with a specific datafile
tag (format: **IDENTIFIER_STATION.CSV**):
- KMI\_6414.CSV
- KMI\_6434.CSV
- ...
- P07\_006.CSV
- ...
The content of each of this file is a rainfall timeseries (comma delimited):
::
Date/Time,Value [millimeter]
01/01/2019 00:00,"0"
01/01/2019 00:05,"0.03"
01/01/2019 00:10,"0.04"
01/01/2019 00:15,"0"
01/01/2019 00:20,"0"
01/01/2019 00:25,"---"
01/01/2019 00:30,"0"
... ...
Output erosivity
~~~~~~~~~~~~~~~~
To export the resulting DataFrame with erosivity values into the legacy output format:
.. code-block:: python
from pathlib import Path
from rfactor.process import write_erosivity_data
# Works both on a single station/year as multiple station/year combinations
write_erosivity_data(erosivity, Path("/PATH/TO/YOUR/EROSIVITY/OUTPUT"))
Analyse R-values
----------------
The R-value is determined by the number of years and stations the users wishes
to consider to compute the R value. By using Pandas DataFrame to store the
erosivity, all functionalities for slicing/filtering/plotting/... are available
directly.
For example, consider one wants to compute the R-value for 2017 and 2018, for
Ukkel (stations: KMI\_6447 and KMI\_FS3):
.. code-block:: python
erosivity_selected = erosivity[(erosivity["year"].isin([2017, 2018])) &
(erosivity["station"].isin(['KMI\_6447', 'KMI\_FS3']))]
| /rfactor-0.1.2.tar.gz/rfactor-0.1.2/docs/get-started.rst | 0.924747 | 0.809991 | get-started.rst | pypi |
from abc import ABC, abstractmethod
from typing import Dict, Counter, Iterable, Tuple, Optional, cast, Union
from dataclasses import dataclass, field
from rfb_mc.types import Params, RfBmcTask, RfBmcResult, BmcResult, BmcTask
from threading import Lock
@dataclass
class StoreData:
    """
    In-memory representation of all data managed by a :class:`Store`:
    the problem parameters and the accumulated model counting results.
    """

    # general and problem specific parameters for the hash based model counting framework
    params: Params
    # results from hashed bounded model counting calls; each task maps to a
    # multiset (Counter) of the results obtained for it
    rf_bmc_results_map: Dict[RfBmcTask, Counter[RfBmcResult]] = field(default_factory=dict)
    # result from the bounded model count with the highest parameter for "a"
    bmc_task_result: Optional[Tuple[BmcTask, BmcResult]] = None
class Store(ABC):
    """
    Abstract base class for persisting model counting results.

    Concrete stores keep an in-memory :class:`StoreData` copy and are
    responsible for synchronizing it with an external storage location.
    """

    def __init__(self, data: StoreData):
        self.data = data
        # guards all mutations of self.data performed by add_results
        self.data_lock = Lock()

    @abstractmethod
    def sync(self):
        """
        Synchronizes the memory with the storage location
        used by the store implementation.
        (Possibly causes a blocking operation)
        """
        raise NotImplementedError()

    @abstractmethod
    def _add_results(
        self,
        bmc_task_result: Optional[Tuple[BmcTask, BmcResult]],
        rf_bmc_task_results: Iterable[Tuple[RfBmcTask, RfBmcResult]],
    ):
        """
        Should implement adding the results and synchronizing the external store.
        """
        raise NotImplementedError()

    def add_results(self, task_results: Iterable[Union[Tuple[RfBmcTask, RfBmcResult], Tuple[BmcTask, BmcResult]]]):
        """
        Adds results of bmc and rf bmc calls to the in-memory data and forwards
        them to the store implementation via :meth:`_add_results`.
        (Possibly causes a blocking operation)
        """
        bmc_pairs: Iterable[Tuple[BmcTask, BmcResult]] = [
            cast(Tuple[BmcTask, BmcResult], entry)
            for entry in task_results
            if isinstance(entry[0], BmcTask)
        ]
        rf_pairs: Iterable[Tuple[RfBmcTask, RfBmcResult]] = [
            cast(Tuple[RfBmcTask, RfBmcResult], entry)
            for entry in task_results
            if isinstance(entry[0], RfBmcTask)
        ]

        # only the bmc task result with the highest "a" value can possibly be kept
        newest_bmc = max(bmc_pairs, key=lambda pair: pair[0].a, default=None)

        with self.data_lock:
            results_map = self.data.rf_bmc_results_map
            for task, result in rf_pairs:
                counter = results_map.setdefault(task, Counter[RfBmcResult]())
                counter[result] += 1

            current = self.data.bmc_task_result
            if newest_bmc is not None and (current is None or current[0].a <= newest_bmc[0].a):
                self.data.bmc_task_result = newest_bmc

        self._add_results(newest_bmc, rf_pairs)
from ast import literal_eval
from typing import Dict, Tuple, TypedDict, Any, Literal, Counter, Optional
from rfb_mc.restrictive_formula_module import get_restrictive_formula_module
from rfb_mc.store import StoreData
from rfb_mc.types import RfBmcTask, RfBmcResult, Params, BmcTask, BmcResult
def v1_encode_rf_bmc_task(task: RfBmcTask) -> str:
    """Serializes a :class:`RfBmcTask` into a ``repr``-encoded string (v1 layout)."""
    rfm = get_restrictive_formula_module(task.rfm_guid)
    encoded_params = rfm.encode_restrictive_formula_params(task.rfm_formula_params)
    return repr((task.rfm_guid, encoded_params, task.a, task.q))
def v1_decode_rf_bmc_task(task: str) -> RfBmcTask:
    """Inverse of :func:`v1_encode_rf_bmc_task`."""
    guid, encoded_params, a, q = literal_eval(task)
    rfm = get_restrictive_formula_module(guid)
    return RfBmcTask(
        rfm_guid=guid,
        rfm_formula_params=rfm.decode_restrictive_formula_params(encoded_params),
        a=a,
        q=q,
    )
def v1_encode_rf_bmc_result(result: RfBmcResult) -> str:
    """Serializes a :class:`RfBmcResult` as the ``repr`` of its field tuple."""
    fields = tuple(result)
    return repr(fields)
def v1_decode_rf_bmc_result(result: str) -> RfBmcResult:
    """Inverse of :func:`v1_encode_rf_bmc_result`."""
    values = literal_eval(result)
    return RfBmcResult(*values)
def v1_encode_rf_bmc_task_result(task_result: Tuple[RfBmcTask, RfBmcResult]) -> str:
    """Serializes a (task, result) pair as the ``repr`` of the pair of encoded strings."""
    task, result = task_result
    return repr((v1_encode_rf_bmc_task(task), v1_encode_rf_bmc_result(result)))
def v1_decode_rf_bmc_task_result(task_result: str) -> Tuple[RfBmcTask, RfBmcResult]:
    """Inverse of :func:`v1_encode_rf_bmc_task_result`."""
    encoded_task, encoded_result = literal_eval(task_result)
    return v1_decode_rf_bmc_task(encoded_task), v1_decode_rf_bmc_result(encoded_result)
def v1_encode_bmc_task(task: BmcTask) -> str:
    """Serializes a :class:`BmcTask` as the ``repr`` of the one-tuple ``(task.a,)``."""
    fields = (task.a,)
    return repr(fields)
def v1_decode_bmc_task(task: str) -> BmcTask:
    """Inverse of :func:`v1_encode_bmc_task`."""
    (a,) = literal_eval(task)
    return BmcTask(a=a)
def v1_encode_bmc_result(result: BmcResult) -> str:
    """Serializes a :class:`BmcResult` as the ``repr`` of its field tuple."""
    fields = tuple(result)
    return repr(fields)
def v1_decode_bmc_result(result: str) -> BmcResult:
    """Inverse of :func:`v1_encode_bmc_result`."""
    values = literal_eval(result)
    return BmcResult(*values)
def v1_encode_bmc_task_result(task_result: Tuple[BmcTask, BmcResult]) -> str:
    """Serializes a (task, result) pair as the ``repr`` of the pair of encoded strings."""
    task, result = task_result
    return repr((v1_encode_bmc_task(task), v1_encode_bmc_result(result)))
def v1_decode_bmc_task_result(task_result: str) -> Tuple[BmcTask, BmcResult]:
    """Inverse of :func:`v1_encode_bmc_task_result`."""
    encoded_task, encoded_result = literal_eval(task_result)
    return v1_decode_bmc_task(encoded_task), v1_decode_bmc_result(encoded_result)
SerializedV1RfBmcResultsMap = Dict[str, int]
def v1_encode_rf_bmc_results_map(
    rf_bmc_results_map: Dict[RfBmcTask, Counter[RfBmcResult]],
) -> SerializedV1RfBmcResultsMap:
    """Serializes the rf bmc results map; each (task, result) pair becomes one encoded key."""
    encoded: SerializedV1RfBmcResultsMap = {}
    for task, counter in rf_bmc_results_map.items():
        for result, count in counter.items():
            encoded[v1_encode_rf_bmc_task_result((task, result))] = count
    return encoded
def v1_decode_rf_bmc_results_map(
    rf_bmc_results_map: SerializedV1RfBmcResultsMap,
) -> Dict[RfBmcTask, Counter[RfBmcResult]]:
    """
    Inverse of :func:`v1_encode_rf_bmc_results_map`.

    Decodes each serialized (task, result) key exactly once and groups the
    counts by task. This replaces the previous quadratic approach, which
    decoded all keys, then re-encoded each (task, result) pair to look its
    count up again — an O(n^2) scan that also relied on re-encoding producing
    byte-identical keys.
    """
    decoded: Dict[RfBmcTask, Counter[RfBmcResult]] = {}
    for encoded_key, count in rf_bmc_results_map.items():
        task, result = v1_decode_rf_bmc_task_result(encoded_key)
        # setdefault creates the per-task counter on first occurrence;
        # += sums counts should two distinct keys decode to the same pair
        counter = decoded.setdefault(task, Counter[RfBmcResult]())
        counter[result] += int(count)
    return decoded
class SerializedV1Params(TypedDict):
    """Serialized form of :class:`Params` using only native python/json types."""

    # maps str(bit_width) -> number of variables with that bit width
    bit_width_counter: Dict[str, int]
def v1_encode_params(params: Params) -> SerializedV1Params:
    """Serializes :class:`Params`; bit width keys become strings for json compatibility."""
    bit_widths = {
        str(width): count
        for width, count in params.bit_width_counter.items()
    }
    return {"bit_width_counter": bit_widths}
def v1_decode_params(params: SerializedV1Params) -> Params:
    """Inverse of :func:`v1_encode_params`."""
    serialized = params["bit_width_counter"]
    bit_width_counter = Counter[int]({
        int(width): int(serialized[width])
        for width in serialized
    })
    return Params(bit_width_counter=bit_width_counter)
# Python representation of the store data with native types,
# i.e. processable using pickle, repr/literal_eval and json.
class SerializedV1StoreData(TypedDict):
    # serialization layout version tag; always 1 for this layout
    version: Literal[1]
    # serialized problem parameters, see v1_encode_params
    params: SerializedV1Params
    # serialized rf bmc results, see v1_encode_rf_bmc_results_map
    rf_bmc_results_map: SerializedV1RfBmcResultsMap
    # serialized (BmcTask, BmcResult) pair, or None if no bmc result exists
    bmc_task_result: Optional[str]
def v1_encode_store_data(data: StoreData) -> SerializedV1StoreData:
    """Serializes :class:`StoreData` into the version 1 native-type layout."""
    if data.bmc_task_result is None:
        encoded_bmc = None
    else:
        encoded_bmc = v1_encode_bmc_task_result(data.bmc_task_result)
    return SerializedV1StoreData(
        version=1,
        params=v1_encode_params(data.params),
        rf_bmc_results_map=v1_encode_rf_bmc_results_map(data.rf_bmc_results_map),
        bmc_task_result=encoded_bmc,
    )
def v1_decode_store_data(data: SerializedV1StoreData) -> StoreData:
    """Inverse of :func:`v1_encode_store_data`."""
    encoded_bmc = data["bmc_task_result"]
    if encoded_bmc is None:
        bmc_task_result = None
    else:
        bmc_task_result = v1_decode_bmc_task_result(encoded_bmc)
    return StoreData(
        params=v1_decode_params(data["params"]),
        rf_bmc_results_map=v1_decode_rf_bmc_results_map(data["rf_bmc_results_map"]),
        bmc_task_result=bmc_task_result,
    )
def decode_store_data(data: Any) -> Tuple[int, StoreData]:
    """
    Decodes a serialized store data item of any known version.

    Returns the version it was encoded in together with the decoded store data.

    :raises ValueError: if the version tag is not recognized
    """
    version = data["version"]
    if version == 1:
        typed_item: SerializedV1StoreData = data
        return version, v1_decode_store_data(typed_item)
    raise ValueError(f"Unexpected version \"{version}\" in store data entry")
from abc import ABC, abstractmethod
from typing import TypeVar, Generic, Dict, Any, Type
from rfb_mc.restrictive_formula_module import register_restrictive_formula_module
from rfb_mc.restrictive_formula_module_implementation import RestrictiveFormulaModuleImplementation, \
RestrictiveFormulaInstance
from rfb_mc.types import Params, RfBmcTask, RfBmcResult, BmcResult, BmcTask
# solver specific description of the formula whose models are counted
FormulaParams = TypeVar("FormulaParams")
# additional arguments a runner needs to build a restrictive formula instance
RestrictiveFormulaInstanceGenerationArgs = TypeVar("RestrictiveFormulaInstanceGenerationArgs")
class Runner(ABC, Generic[FormulaParams, RestrictiveFormulaInstanceGenerationArgs, RestrictiveFormulaInstance]):
    """
    Base class for solver specific runners that execute (restrictive) bounded
    model counting tasks for a fixed formula.
    """

    def __init__(self, params: Params, formula_params: FormulaParams):
        # fail fast if the parameter combination is invalid for this runner
        self.check_params_and_formula_params_compatibility(params, formula_params)
        self.params: Params = params
        self.formula_params: FormulaParams = formula_params

    # Map from restrictive formula module uid to implementation class.
    # NOTE(review): class-level mutable state shared by Runner and all subclasses.
    restrictive_formula_module_implementation_map: Dict[str, Type[RestrictiveFormulaModuleImplementation]] = {}

    @classmethod
    def register_restrictive_formula_module_implementation(cls, rfmi: Type[RestrictiveFormulaModuleImplementation]):
        """Registers the implementation and its underlying module under the module's guid."""
        module = rfmi.get_restrictive_formula_module()
        register_restrictive_formula_module(module)
        cls.restrictive_formula_module_implementation_map[module.get_guid()] = rfmi

    @classmethod
    @abstractmethod
    def check_params_and_formula_params_compatibility(cls, params: Params, formula_params: FormulaParams):
        """
        Raises an error if the params and formula_params are not compatible.
        """
        raise NotImplementedError()

    @abstractmethod
    def get_restrictive_formula_instance_generation_args(self, q: int) -> RestrictiveFormulaInstanceGenerationArgs:
        """
        Returns additional arguments required for generating the restrictive formula instance from the params.
        """
        raise NotImplementedError()

    def generate_restrictive_formula_instance(
        self, rfm_uid: str, rfm_formula_params: Any, q: int,
    ) -> RestrictiveFormulaInstance:
        """
        Builds a restrictive formula instance for the registered module identified
        by rfm_uid, using instance params generated by the module itself.

        :raises RuntimeError: if no implementation is registered for rfm_uid
        """
        implementations = self.restrictive_formula_module_implementation_map
        if rfm_uid not in implementations:
            raise RuntimeError(f"Restrictive Formula Module \"{rfm_uid}\" is not implemented")

        implementation = implementations[rfm_uid]
        module = implementation.get_restrictive_formula_module()
        instance_params = module.generate_restrictive_formula_instance_params(
            self.params, rfm_formula_params, q,
        )
        generation_args = self.get_restrictive_formula_instance_generation_args(q)
        return implementation.generate_restrictive_formula(self.params, instance_params, generation_args)

    @abstractmethod
    def rf_bmc(self, task: RfBmcTask) -> RfBmcResult:
        """
        Performs bounded model counting on the formula resulting from
        first replicating the original formula q-times and
        then introducing a restrictive formula condition.
        """
        raise NotImplementedError()

    @abstractmethod
    def bmc(self, task: BmcTask) -> BmcResult:
        """
        Performs bounded model counting.
        """
        raise NotImplementedError()
from abc import ABC, abstractmethod
from typing import Generic, TypeVar, Hashable, Any, Dict, Type
from rfb_mc.types import Params
# parameter that determines all formula generation related values;
# bound to Hashable so it can serve as part of task keys
RestrictiveFormulaParams = TypeVar("RestrictiveFormulaParams", bound=Hashable)
# properties of the restrictive formula that are determined by the parameters
RestrictiveFormulaProperties = TypeVar("RestrictiveFormulaProperties")
# values that parameterize a specific instance generated by the module and will be used by
# the runner implementations to reconstruct the formula in the format of whichever solver is used
RestrictiveFormulaInstanceParams = TypeVar("RestrictiveFormulaInstanceParams")
class RestrictiveFormulaModule(
    Generic[RestrictiveFormulaParams, RestrictiveFormulaProperties, RestrictiveFormulaInstanceParams],
    ABC,
):
    """
    Interface of a restrictive formula family: exposes a unique guid,
    (de)serialization of formula parameters, the properties derived from those
    parameters, and the generation of randomized instance parameters.
    """

    @classmethod
    @abstractmethod
    def get_guid(cls) -> str:
        """
        UID of the restrictive formula module that needs to be deterministic and
        unique across all other restrictive formula module implementations.
        An abbreviation of the name that is unlikely to be reused is suggested and possibly a
        version number in order to differentiate between different generations of the same module.
        """
        raise NotImplementedError()

    @classmethod
    @abstractmethod
    def encode_restrictive_formula_params(cls, params: RestrictiveFormulaParams) -> Any:
        """
        Encodes the formula parameters into a native python type
        (suitable for e.g. repr/literal_eval based serialization).
        """
        raise NotImplementedError()

    @classmethod
    @abstractmethod
    def decode_restrictive_formula_params(cls, params: Any) -> RestrictiveFormulaParams:
        """
        Decodes the formula parameters from the native python type generated by the encoder.
        """
        raise NotImplementedError()

    @classmethod
    @abstractmethod
    def get_restrictive_formula_properties(
        cls,
        params: Params,
        restrictive_formula_params: RestrictiveFormulaParams,
    ) -> RestrictiveFormulaProperties:
        """
        Returns properties that the restrictive formula generation posses with the given parameters.
        """
        raise NotImplementedError()

    @classmethod
    @abstractmethod
    def generate_restrictive_formula_instance_params(
        cls,
        params: Params,
        restrictive_formula_params: RestrictiveFormulaParams,
        q: int,
    ) -> RestrictiveFormulaInstanceParams:
        """
        Generate the restrictive formula instance params for the given params using randomness from the random runner
        class.
        """
        raise NotImplementedError()
# Registry consulted by get_restrictive_formula_module and populated via
# register_restrictive_formula_module.
restrictive_formula_module_map: Dict[str, Type[RestrictiveFormulaModule]] = {}
"""
Map from restrictive formula module uid to module class.
"""
def register_restrictive_formula_module(rfm: Type[RestrictiveFormulaModule]):
    """Makes the given restrictive formula module retrievable via its guid."""
    guid = rfm.get_guid()
    restrictive_formula_module_map[guid] = rfm
def get_restrictive_formula_module(uid: str) -> Type[RestrictiveFormulaModule]:
    """
    Looks up a previously registered restrictive formula module by its uid.

    :raises RuntimeError: if no module is registered under the given uid
    """
    module = restrictive_formula_module_map.get(uid)
    if module is None:
        raise RuntimeError(f"Restrictive formula module \"{uid}\" is required but is not registered")
    return module
import z3
from typing import List, NamedTuple, Dict, Optional
# Result of clone_expression:
# - clones: the q copies of the input expression, each over fresh variables
# - var_map: original variable -> its replacement in each clone (index i = clone i)
CloneExpressionOutput = NamedTuple("CloneExpressionOutput", [
    ("clones", List[z3.BoolRef]), ("var_map", Dict[z3.ExprRef, List[z3.ExprRef]])
])
def serialize_expression(expression: z3.ExprRef) -> str:
    """Serializes the expression into an SMT-LIB 2 string via a throwaway solver."""
    solver = z3.Solver()
    solver.add(expression)
    return solver.sexpr()
def deserialize_expression(serialized_expression: str, ctx: Optional[z3.Context] = None) -> z3.ExprRef:
    """Parses an SMT-LIB 2 string (see serialize_expression) into a single conjunction."""
    assertions = z3.parse_smt2_string(serialized_expression, ctx=ctx)
    return z3.And(assertions)
def get_variables(expression: z3.ExprRef) -> List[z3.ExprRef]:
    """
    Returns all uninterpreted constants (i.e. variables) contained in the expression.

    :param expression: Expression from which variables are extracted
    """

    class _HashableAst:
        # wrapper that makes z3 ast nodes usable as set members
        def __init__(self, node):
            self.node = node

        def __hash__(self):
            return self.node.hash()

        def __eq__(self, other):
            return self.node.eq(other.node)

        def __repr__(self):
            return str(self.node)

    found = set()

    def visit(node):
        if z3.is_const(node):
            # constants with uninterpreted declarations are the variables
            if node.decl().kind() == z3.Z3_OP_UNINTERPRETED:
                found.add(_HashableAst(node))
        else:
            for child in node.children():
                visit(child)

    visit(expression)
    return [wrapper.node for wrapper in found]
def recreate_variable(key: str, variable: z3.ExprRef) -> z3.ExprRef:
    """
    Returns a fresh variable of the same sort whose name is the original name
    prefixed with the given key, making it distinct from the original.

    :param key: prefix that makes the new name distinct
    :param variable: variable to recreate
    """
    renamed = f"{key}_{variable}"
    return z3.Const(renamed, variable.sort())
def clone_expression(
    expression: z3.ExprRef,
    q: int,
    required_variables: Optional[List[z3.ExprRef]] = None,
) -> CloneExpressionOutput:
    """
    Clones the expression q times, substituting every variable in every clone
    by a freshly generated one.

    The returned value lists the clones together with a dictionary mapping each
    original variable to its q replacements, ordered like the clone list.

    :param expression: Expression to be cloned
    :param q: Amount of clones created
    :param required_variables: Variables that should be cloned and put into the var_map even if they are not
                               contained in the expression.
    """
    all_variables = set(get_variables(expression))
    if required_variables is not None:
        all_variables.update(required_variables)

    var_map = {}
    for variable in all_variables:
        var_map[variable] = [
            recreate_variable(f"clone{{{index}}}", variable) for index in range(q)
        ]

    clones = []
    for index in range(q):
        substitutions = [(variable, var_map[variable][index]) for variable in all_variables]
        clones.append(z3.substitute(expression, substitutions))

    return CloneExpressionOutput(
        clones=clones,
        var_map=var_map,
    )
from dataclasses import dataclass
from fractions import Fraction
from functools import lru_cache
from math import log2, ceil, floor, prod
from typing import Tuple, Optional, List, Union
from rfb_mc.component.eamp.eamp_edge_scheduler import EampEdgeScheduler
from rfb_mc.component.eamp.eamp_edge_scheduler_base import EampEdgeSchedulerBase
from rfb_mc.component.eamp.eamp_rfm import EampParams, EampTransformMethod, EampRfm
from rfb_mc.component.eamp.primes import get_closest_prime, get_lowest_prime_above_or_equal_power_of_power_of_two
from rfb_mc.store import Store
@dataclass
class EampEdgeSchedulerSPPartialEampParams:
    """Compact state from which EampEdgeSchedulerSP derives full EampParams."""

    # index of the coefficient currently being adjusted
    j: int
    # coefficients c[i] of the factors (2 ** (2 ** i)) ** c[i]
    c: List[int]
    # (lower, mid, upper) bounds of the final binary search over prime range
    # sizes, or None while the coefficient adjustment phase is still running
    p2_lmu: Optional[Tuple[int, int, int]] = None
class EampEdgeSchedulerSP(EampEdgeSchedulerBase[EampEdgeSchedulerSPPartialEampParams]):
    """
    Eamp edge scheduler variant that adjusts one coefficient at a time and
    finishes with a binary search over prime range sizes.
    """

    def __init__(
        self,
        store: Store,
        confidence: Union[Fraction, float],
        a: int,
        q: int,
        min_model_count: Optional[int] = None,
        max_model_count: Optional[int] = None,
    ):
        """
        :param store: store providing the problem params and collecting results
        :param confidence: desired confidence, at least 0 and less than 1
        :param a: precision parameter, at least 1 (see EampEdgeSchedulerBase)
        :param q: amount of formula copies per estimate, at least 1
        :param min_model_count: optional known lower bound on the model count
        :param max_model_count: optional known upper bound on the model count
        """
        super().__init__(store, confidence, a, q, min_model_count, max_model_count)

        # amount of coefficients c[0..cn-1]; derived from how many of the doubly
        # exponential factors 2**(2**i) fit below max_model_count**q / lg,
        # falling back to a single coefficient when that ratio is below 1
        self._cn: int = int(
            floor(log2(log2(self.max_model_count ** self.q / self.lg) + 1) + 1)
        ) if self.max_model_count ** self.q / self.lg >= 1 else 1

        # smallest prime >= 2**(2**j) for each coefficient index j
        # NOTE(review): appears unused within this class — verify external usage
        self._p: Tuple[int, ...] = tuple([
            get_lowest_prime_above_or_equal_power_of_power_of_two(j)
            for j in range(self._cn)
        ])

        # per-instance memoized prime lookup (avoids caching on the class itself)
        self._get_closest_prime = lru_cache(get_closest_prime)

    # NOTE(review): lru_cache on an instance method keeps the instance alive
    # for the lifetime of the cache (ruff B019)
    @lru_cache(1)
    def _get_upper_bound_on_estimate_iteration_count(self) -> int:
        return self._cn - 1 + 10  # TODO: replace by correct formula

    @lru_cache(1)
    def _get_required_minimal_min_model_count_when_no_lower_bound_could_be_established(self):
        # q-th root of g, rounded up (see EampEdgeSchedulerBase for g)
        return int(ceil(self.g ** (1 / self.q)))

    def _make_eamp_params(self, partial_eamp_params: EampEdgeSchedulerSPPartialEampParams) -> EampParams:
        """Expands the scheduler's partial parameters into full :class:`EampParams`."""
        j, c, p2_lmu = partial_eamp_params.j, partial_eamp_params.c, partial_eamp_params.p2_lmu
        if p2_lmu is not None:
            # binary search phase: only the midpoint prime defines the range
            rl, rm, ru = p2_lmu
            return EampParams(p=(rm,), c=(1,), transform_method=EampTransformMethod.SORTED_ROLLING_WINDOW)
        else:
            # combined product of all factors above index 0
            cp = prod([(2 ** (2 ** i)) ** c[i] for i in range(1, len(c))])
            # replace that product by the closest prime (None if the product is trivial)
            x = self._get_closest_prime(cp) if cp >= 2 else None
            em_c: Tuple[int, ...]
            em_p: Tuple[int, ...]
            if x is None:
                # only the power-of-two factor 2**c[0] remains
                em_c = (c[0],)
                em_p = (2,)
            elif c[0] == 0:
                # only the prime factor remains
                em_c = (1,)
                em_p = (x,)
            else:
                # prime factor combined with the power-of-two factor
                em_c = (1, c[0])
                em_p = (x, 2)
            return EampParams(p=em_p, c=em_c, transform_method=EampTransformMethod.SORTED_ROLLING_WINDOW)

    def _make_initial_partial_eamp_params(self) -> EampEdgeSchedulerSPPartialEampParams:
        # start with only the highest coefficient set
        return EampEdgeSchedulerSPPartialEampParams(
            j=self._cn - 1,
            c=[0] * (self._cn - 1) + [1],
        )

    def _advance_partial_eamp_params(
        self,
        partial_eamp_params: EampEdgeSchedulerSPPartialEampParams,
        estimate_result: bool
    ) -> Optional[EampEdgeSchedulerSPPartialEampParams]:
        """Computes the next partial params after an estimate call; None ends the search."""
        j, c, p2_lmu = partial_eamp_params.j, partial_eamp_params.c, partial_eamp_params.p2_lmu

        def range_size(p: EampEdgeSchedulerSPPartialEampParams) -> int:
            # range size of the restrictive formula described by p
            return EampRfm.get_restrictive_formula_properties(
                self.store.data.params, self._make_eamp_params(p),
            ).range_size

        if p2_lmu is not None:
            # binary search phase over prime range sizes
            rl, rm, ru = p2_lmu
            # a positive estimate moves the lower bound up, a negative one the upper bound down
            rnl = rm if estimate_result else rl
            rnu = ru if estimate_result else rm
            rnm = self._get_closest_prime(int(round(float(rnu + rnl) / 2)), (rnl + 1, rnu - 1))
            if rnm is None or rnm == rm:
                # no usable midpoint prime left: search exhausted
                return None
            neg = EampParams(p=(rnu,), c=(1,), transform_method=EampTransformMethod.SORTED_ROLLING_WINDOW)
            mid = EampParams(p=(rnm,), c=(1,), transform_method=EampTransformMethod.SORTED_ROLLING_WINDOW)
            pos = EampParams(p=(rnl,), c=(1,), transform_method=EampTransformMethod.SORTED_ROLLING_WINDOW)
            # expected reduction of the multiplicative gap by splitting at rnm
            gap = self._multiplicative_gap(pos, neg) - min(
                self._multiplicative_gap(pos, mid), self._multiplicative_gap(mid, neg)
            )
            if gap < 0.01:
                # further splitting would barely narrow the interval
                return None
            return EampEdgeSchedulerSPPartialEampParams(
                j=0,
                c=c.copy(),
                p2_lmu=(rnl, rnm, rnu),
            )
        else:
            # coefficient adjustment phase
            c_next = c.copy()
            if estimate_result is True:
                if j == 0:
                    # grow the power-of-two coefficient
                    c_next[0] += 1
                    return EampEdgeSchedulerSPPartialEampParams(j=0, c=c_next)
                else:
                    # move down to the next lower coefficient
                    c_next[j - 1] = 1
                    return EampEdgeSchedulerSPPartialEampParams(j=j - 1, c=c_next)
            else:
                if j == 0:
                    # switch to the binary search phase between half and full range size
                    ru = range_size(partial_eamp_params)
                    rl = ru // 2
                    rm = self._get_closest_prime(int(round(float(ru + rl) / 2)), (rl + 1, ru - 1))
                    return EampEdgeSchedulerSPPartialEampParams(
                        j=0,
                        c=c.copy(),
                        p2_lmu=(rl, rm, ru),
                    ) if rm is not None else None
                else:
                    # reset the current coefficient and continue one index lower
                    c_next[j] = 0
                    c_next[j - 1] = 1
                    return EampEdgeSchedulerSPPartialEampParams(j=j - 1, c=c_next)

    @staticmethod
    def get_upper_bound_for_multiplicative_gap_of_result(a: int, q: int) -> float:
        # same guarantee as the non-SP scheduler
        return EampEdgeScheduler.get_upper_bound_for_multiplicative_gap_of_result(a, q)
from abc import abstractmethod
from fractions import Fraction
from math import sqrt, prod, ceil, floor
from typing import Tuple, Optional, Union, Generic, TypeVar, List, Counter
from rfb_mc.component.eamp.eamp_rfm import EampParams, EampRfm
from rfb_mc.component.eamp.types import ProbabilisticInterval
from rfb_mc.component.eamp.utility import majority_vote_error_probability, \
multi_majority_vote_iteration_count_to_ensure_beta, probability_of_correctness
from rfb_mc.scheduler import Scheduler, SchedulerAlgorithmYield
from rfb_mc.store import Store
from rfb_mc.types import RfBmcTask, RfBmcResult, BmcTask, BmcResult
PartialEampParams = TypeVar("PartialEampParams")
class EampEdgeSchedulerBase(Generic[PartialEampParams], Scheduler[ProbabilisticInterval, ProbabilisticInterval]):
    def __init__(
        self,
        store: Store,
        confidence: Union[Fraction, float],
        a: int,
        q: int,
        min_model_count: Optional[int] = None,
        max_model_count: Optional[int] = None,
    ):
        """
        :param store: store providing the problem params and collecting results
        :param confidence: desired confidence, at least 0 and less than 1
        :param a: precision parameter, at least 1
        :param q: amount of formula copies per estimate, at least 1
        :param min_model_count: optional known lower bound on the model count
        :param max_model_count: optional known upper bound on the model count
        """
        super().__init__(store)

        # amount of models that are at most possible for a formula having the amount of bits specified
        theoretical_max_model_count = prod([
            2 ** (bit_width * amount) for bit_width, amount in store.data.params.bit_width_counter.items()
        ])

        assert 0 <= confidence < 1, "Parameter 'confidence' is at least 0 and less than 1"
        assert 1 <= a, "Parameter 'a' is at least 1"
        assert 1 <= q, "Parameter 'q' is at least 1"
        assert min_model_count is None or 0 <= min_model_count, "Parameter 'min_model_count' is at least 0"
        assert min_model_count is None or max_model_count is None or min_model_count <= max_model_count, \
            "Parameter 'min_model_count' is less than or equal parameter 'max_model_count'"
        assert min_model_count is None or min_model_count <= theoretical_max_model_count, \
            "Parameter 'min_model_count' is less than or equal the theoretical maximal model count of the formula," \
            " i.e. 2**(amount of bits in variables)"

        # missing bounds are replaced by the trivial ones
        self.min_model_count: int = min_model_count if min_model_count is not None else 0
        self.max_model_count: int = \
            min(max_model_count, theoretical_max_model_count) if max_model_count is not None \
            else theoretical_max_model_count
        self.a: int = a
        self.q: int = q
        self.confidence: Fraction = Fraction(confidence)
        self.store: Store = store

        # derived quantities used by the schedulers; see get_g_and_lg for their definition
        g, lg = EampEdgeSchedulerBase.get_g_and_lg(a)
        self.g: float = g
        self.lg: float = lg
@abstractmethod
def _get_upper_bound_on_estimate_iteration_count(self) -> int:
"""
Returns an upper bound on the necessary amount of majority vote counting estimate calls that
will need to be performed.
"""
raise NotImplementedError()
@abstractmethod
def _get_required_minimal_min_model_count_when_no_lower_bound_could_be_established(self) -> int:
"""
Returns the minimal min_model_count required to ensure that, if estimate never responded with True,
the lower bound, established by min_model_count, will satisfy the multiplicative gap constraints on
the final interval.
"""
raise NotImplementedError()
@abstractmethod
def _make_eamp_params(self, partial_eamp_params: PartialEampParams) -> EampParams:
raise NotImplementedError()
@abstractmethod
def _make_initial_partial_eamp_params(self) -> PartialEampParams:
"""
Returns the initial partial eamp parameters with which the algorithm will
start the estimate iterations.
"""
raise NotImplementedError()
@abstractmethod
def _advance_partial_eamp_params(
self,
partial_eamp_params: PartialEampParams,
estimate_result: bool
) -> Optional[PartialEampParams]:
"""
Returns the next partial eamp parameters as a response to the estimate result.
If the return value is None, it is assumed the iteration procedure has finished.
"""
raise NotImplementedError()
def _range_size(self, partial_eamp_params: Union[PartialEampParams, EampParams]) -> int:
"""
Returns range size of the given eamp params.
"""
return EampRfm.get_restrictive_formula_properties(
self.store.data.params,
partial_eamp_params if isinstance(partial_eamp_params, EampParams)
else self._make_eamp_params(partial_eamp_params),
).range_size
def _multiplicative_gap(
self,
positive_eamp_params: Union[PartialEampParams, EampParams],
negative_eamp_params: Union[PartialEampParams, EampParams]
):
"""
Returns multiplicative gap of interval given by eamp params that caused a positive and negative estimate result.
"""
return (
float(self._range_size(negative_eamp_params)) / float(self._range_size(positive_eamp_params))
* (self.lg / self.g)
) ** (1 / self.q)
def _run_algorithm_once(self):
if self.min_model_count == self.max_model_count:
return ProbabilisticInterval(
lower_bound=self.min_model_count,
upper_bound=self.max_model_count,
confidence=Fraction(1)
)
g, lg = EampEdgeSchedulerBase.get_g_and_lg(self.a)
mv_estimate_count_upper_bound = self._get_upper_bound_on_estimate_iteration_count()
# maximal allowed error probability of the algorithm
beta = 1 - self.confidence
r = multi_majority_vote_iteration_count_to_ensure_beta(
Fraction(1, 4),
beta,
mv_estimate_count_upper_bound,
)
def make_rf_bmc_task(eamp_params: EampParams):
return RfBmcTask(
rfm_guid=EampRfm.get_guid(),
rfm_formula_params=eamp_params,
a=self.a,
q=self.q,
)
def pre_estimate(p: PartialEampParams) -> Optional[bool]:
if self.max_model_count ** self.q < self._range_size(p) * lg:
return False
elif self.min_model_count ** self.q > self._range_size(p) * g:
return True
elif p_neg is not None and self._range_size(p_neg) <= self._range_size(p):
return False
elif p_pos is not None and self._range_size(p) <= self._range_size(p_pos):
return True
else:
return None
p_pos: Optional[PartialEampParams] = None
p_neg: Optional[PartialEampParams] = None
# error probability of the independent probabilistic execution that have occurred
error_probabilities: List[Fraction] = []
min_model_count = self.min_model_count
max_model_count = self.max_model_count
def get_edge_interval():
if p_pos is not None:
lower_bound = max(int(ceil((self._range_size(p_pos) * g) ** (1 / self.q))), min_model_count)
else:
lower_bound = min_model_count
if p_neg is not None:
upper_bound = min(int(floor((self._range_size(p_neg) * lg) ** (1 / self.q))), max_model_count)
else:
upper_bound = max_model_count
upper_bound = max(lower_bound, upper_bound)
return ProbabilisticInterval(
lower_bound=lower_bound,
upper_bound=upper_bound,
confidence=probability_of_correctness(error_probabilities),
)
def majority_vote_estimate(p: PartialEampParams):
while True:
rf_bmc_task = make_rf_bmc_task(self._make_eamp_params(p))
# copies the required results data in order for it not to be modified while using them
rf_bmc_results: Counter[RfBmcResult] = \
self.store.data.rf_bmc_results_map.get(rf_bmc_task, Counter[RfBmcResult]()).copy()
positive_voters = sum([
count
for result, count in rf_bmc_results.items()
if result.bmc is None
])
negative_voters = sum([
count
for result, count in rf_bmc_results.items()
if result.bmc is not None
])
remaining = max(0, r - (positive_voters + negative_voters))
if positive_voters >= negative_voters and positive_voters >= negative_voters + remaining:
return True, majority_vote_error_probability(Fraction(1, 4), r)
if negative_voters > positive_voters and negative_voters > positive_voters + remaining:
return False, majority_vote_error_probability(Fraction(1, 4), r)
yield SchedulerAlgorithmYield[ProbabilisticInterval](
required_tasks=Counter[Union[RfBmcTask, BmcTask]](remaining * [rf_bmc_task]),
predicted_required_tasks=Counter[Union[RfBmcTask, BmcTask]](),
intermediate_result=get_edge_interval(),
)
p = self._make_initial_partial_eamp_params()
mv_estimate_count = 0
while True:
while pre_estimate(p) is False:
next_p = self._advance_partial_eamp_params(p, False)
if next_p is None:
break
else:
p = next_p
if pre_estimate(p) is False and self._advance_partial_eamp_params(p, False) is None:
break
if mv_estimate_count + 1 > mv_estimate_count_upper_bound and False:
raise RuntimeError(
"Estimate iteration upper bound was incorrect. "
"This error is caused by an incorrect implementation "
"of _get_upper_bound_on_estimate_iteration_count."
)
else:
mv_estimate_count += 1
mv_estimate, mv_error_prob = yield from majority_vote_estimate(p)
error_probabilities.append(mv_error_prob)
if mv_estimate:
p_pos = p
next_p = self._advance_partial_eamp_params(p, True)
if next_p is None:
if p_neg is None:
raise RuntimeError(
"Iteration procedure cannot be terminated when no negative estimate result has been found. "
"This error is caused by an incorrect implementation of _advance_partial_eamp_params."
)
else:
break
else:
p = next_p
else:
p_neg = p
next_p = self._advance_partial_eamp_params(p, False)
if next_p is None:
break
else:
p = next_p
if p_pos is None:
s = self._get_required_minimal_min_model_count_when_no_lower_bound_could_be_established()
if min_model_count < s:
bmc_task_result: Optional[Tuple[BmcTask, BmcResult]] = self.store.data.bmc_task_result
while bmc_task_result is None or bmc_task_result[0].a < s:
yield SchedulerAlgorithmYield[ProbabilisticInterval](
required_tasks=Counter[Union[RfBmcTask, BmcTask]]([BmcTask(a=s)]),
predicted_required_tasks=Counter[Union[RfBmcTask, BmcTask]](),
intermediate_result=get_edge_interval(),
)
bmc_task_result = self.store.data.bmc_task_result
if bmc_task_result[1].bmc is not None and bmc_task_result[1].bmc < s:
return ProbabilisticInterval(
lower_bound=bmc_task_result[1].bmc,
upper_bound=bmc_task_result[1].bmc,
confidence=Fraction(1),
)
else:
min_model_count = s
return get_edge_interval()
def _run_algorithm(self):
yield from self._run_algorithm_once()
# second iteration ensures updated results are used
return (yield from self._run_algorithm_once())
@staticmethod
def get_g_and_lg(a: int) -> Tuple[float, float]:
"""
Returns the internal parameters g and G for the given a.
"""
return (sqrt(a + 1) - 1) ** 2, (sqrt(a + 1) + 1) ** 2
@staticmethod
@abstractmethod
def get_upper_bound_for_multiplicative_gap_of_result(a: int, q: int) -> float:
"""
Returns an upper bound on the multiplicative gap of the final edge interval returned
by the eamp edge scheduler.
"""
raise NotImplementedError() | /rfb_mc-0.0.23.tar.gz/rfb_mc-0.0.23/rfb_mc/component/eamp/eamp_edge_scheduler_base.py | 0.934073 | 0.505432 | eamp_edge_scheduler_base.py | pypi |
import random
from enum import Enum, unique
from math import prod, log2, ceil
from typing import NamedTuple, Tuple, Any, List
from rfb_mc.restrictive_formula_module import RestrictiveFormulaModule
from rfb_mc.types import Params
@unique
class EampTransformMethod(Enum):
    # method for slicing formula variables into hash-input chunks
    SORTED_ROLLING_WINDOW = "SRW"
# EAMP restrictive-formula parameters:
#   c: exponent vector — c[j] copies of the hash modulo p[j] are used
#   p: tuple of prime moduli
#   transform_method: how variables are sliced into hash-input domains
EampParams = NamedTuple("EampParams", [
    ("c", Tuple[int, ...]),
    ("p", Tuple[int, ...]),
    ("transform_method", EampTransformMethod),
])

# Derived properties of EampParams; range_size is the size of the hash range,
# i.e. the product of p[j] ** c[j] (see get_range_size).
EampParamProperties = NamedTuple("EampParamProperties", [
    ("range_size", int),
])

# Concrete randomized hash-instance parameters: coefficients[j] holds, for each
# of the c[j] copies, a (slice multiplier vector, additive constant, target value)
# triple with values in [0, p[j] - 1].
EampInstanceParams = NamedTuple("EampInstanceParams", [
    ("params", EampParams),
    ("coefficients", Tuple[Tuple[Tuple[Tuple[int, ...], int, int], ...], ...]),
    ("p", Tuple[int, ...]),
])
class EampRfm(RestrictiveFormulaModule[EampParams, EampParamProperties, EampInstanceParams]):
    """
    Restrictive formula module for the EAMP hash family: encodes/decodes the
    formula parameters, exposes their range size and samples randomized
    hash-instance parameters.
    """

    @classmethod
    def get_guid(cls):
        # stable identifier under which results for this module are keyed
        return "eamp-rfm"

    @classmethod
    def encode_restrictive_formula_params(
        cls,
        params: EampParams,
    ) -> Any:
        # flatten to a plain tuple so the params can be serialized
        return (
            params.c,
            params.p,
            params.transform_method.value
        )

    @classmethod
    def decode_restrictive_formula_params(
        cls,
        params: Any,
    ) -> EampParams:
        # inverse of encode_restrictive_formula_params
        c, p, transform_method = params
        return EampParams(
            c=c,
            p=p,
            transform_method=EampTransformMethod(transform_method)
        )

    @classmethod
    def get_restrictive_formula_properties(
        cls,
        params: Params,
        restrictive_formula_params: EampParams,
    ) -> EampParamProperties:
        # the range size depends only on the c and p vectors
        return EampParamProperties(
            range_size=get_range_size(restrictive_formula_params.c, restrictive_formula_params.p)
        )

    @classmethod
    def generate_restrictive_formula_instance_params(
        cls,
        params: Params,
        restrictive_formula_params: EampParams,
        q: int,
    ) -> EampInstanceParams:
        """
        Randomly samples hash coefficients for each of the c[j] copies of the
        hash modulo p[j]; the number of sampled slice multipliers matches the
        slice layout the transform method produces for the q-fold cloned
        variables.
        """
        variables: List[int] = []
        for size in sorted(params.bit_width_counter.keys()):
            # add amount of variables with size q-times as they are cloned q-times
            variables += [size] * params.bit_width_counter[size] * q

        def get_slice_count_sorted_rolling_window(domain_bit_count: int) -> int:
            # counts how many slices the sorted-rolling-window transform yields
            # for the given domain width; must mirror the slicing performed by
            # the formula-generation implementation
            slice_count = 0
            queue = sorted(variables)
            while len(queue) > 0:
                x = queue.pop(0)
                if x >= domain_bit_count:
                    # wide variables are cut into full-width chunks ...
                    for i in range(x // domain_bit_count):
                        slice_count += 1
                    # ... plus one remainder chunk if the width does not divide evenly
                    if (x // domain_bit_count) * domain_bit_count != x:
                        slice_count += 1
                else:
                    # narrow variables are packed together while they fit the domain
                    slice_item = [x]
                    while len(queue) > 0 and sum([y for y in slice_item]) + queue[0] <= domain_bit_count:
                        slice_item.append(queue.pop(0))
                    slice_count += 1
            return slice_count

        def get_slice_count(domain_bit_count: int) -> int:
            if restrictive_formula_params.transform_method == EampTransformMethod.SORTED_ROLLING_WINDOW:
                return get_slice_count_sorted_rolling_window(domain_bit_count)
            else:
                raise RuntimeError(f"Not implemented transform method {restrictive_formula_params.transform_method}")

        def generate_coefficients(j: int) -> Tuple[Tuple[int, ...], int, int]:
            # samples (slice multipliers, additive constant, target residue),
            # each uniform in [0, p[j] - 1]
            pj = restrictive_formula_params.p[j]
            return (
                tuple([
                    random.randint(0, pj - 1) for _ in range(
                        get_slice_count(
                            int(ceil(log2(pj)))
                        )
                    )
                ]),
                random.randint(0, pj - 1),
                random.randint(0, pj - 1),
            )

        return EampInstanceParams(
            params=restrictive_formula_params,
            coefficients=tuple([
                tuple([
                    generate_coefficients(j) for _ in range(restrictive_formula_params.c[j])
                ]) for j in range(len(restrictive_formula_params.c))
            ]),
            p=restrictive_formula_params.p,
        )
def get_range_size(c: Tuple[int, ...], p: Tuple[int, ...]) -> int:
    """
    Return the size of the range of the hash family for the given c and p
    parameters, i.e. the product over all j of ``p[j] ** c[j]``.
    """
    size = 1
    for index, exponent in enumerate(c):
        size *= p[index] ** exponent
    return size
import z3
from math import log2, ceil
from typing import Type, List, Tuple
from rfb_mc.component.eamp.eamp_rfm import EampInstanceParams, EampRfm, EampTransformMethod
from rfb_mc.component.runner_z3 import RfmiGenerationArgsZ3
from rfb_mc.restrictive_formula_module_implementation import RestrictiveFormulaModuleImplementation
from rfb_mc.types import Params
class EampRfmiZ3(RestrictiveFormulaModuleImplementation[EampInstanceParams, RfmiGenerationArgsZ3, z3.BoolRef]):
    """
    Z3 implementation of the EAMP restrictive formula module: builds the
    conjunction of hash equations described by EampInstanceParams over the
    z3 bit-vector variables supplied in the generation arguments.
    """

    @classmethod
    def get_restrictive_formula_module(cls) -> Type[EampRfm]:
        return EampRfm

    @classmethod
    def generate_restrictive_formula(
        cls,
        params: Params,
        instance_params: EampInstanceParams,
        args: RfmiGenerationArgsZ3,
    ) -> z3.BoolRef:
        def get_slices_sorted_rolling_window(domain_bit_count: int) -> List[z3.BitVecRef]:
            # slices the variables into bit vectors of exactly domain_bit_count
            # bits, processing variables in ascending size order; must mirror the
            # slice counting used when the coefficients were generated
            slices = []
            queue = sorted(args.variables, key=lambda x: x.size())
            while len(queue) > 0:
                x = queue.pop(0)
                if x.size() >= domain_bit_count:
                    # wide variables: full-width chunks from the low bits upward
                    for i in range(x.size() // domain_bit_count):
                        slices.append(
                            z3.Extract(i * domain_bit_count + domain_bit_count - 1, i * domain_bit_count, x)
                        )
                    # remainder chunk of the top bits, zero-extended to the domain width
                    if (x.size() // domain_bit_count) * domain_bit_count != x.size():
                        rem_slice_size = x.size() % domain_bit_count
                        slices.append(
                            z3.ZeroExt(
                                domain_bit_count - rem_slice_size,
                                z3.Extract(x.size() - 1, x.size() - rem_slice_size, x)
                            )
                        )
                else:
                    # narrow variables: concatenated together while they fit the domain
                    slice_item = [x]
                    while len(queue) > 0 and sum([y.size() for y in slice_item]) + queue[0].size() <= domain_bit_count:
                        slice_item.append(queue.pop(0))
                    slices.append(z3.Concat(slice_item) if len(slice_item) > 1 else slice_item[0])
            return slices

        def get_slices(domain_bit_count: int) -> List[z3.BitVecRef]:
            if instance_params.params.transform_method == EampTransformMethod.SORTED_ROLLING_WINDOW:
                return get_slices_sorted_rolling_window(domain_bit_count)
            else:
                raise RuntimeError(f"Not implemented transform method {instance_params.params.transform_method}")

        def make_hash_equation(pj: int, var_b: Tuple[int, ...], b1: int, b2: int):
            # encodes (sum_i var_b[i] * slice_i + b1) mod pj == b2 on bit vectors
            # of bc = ceil(log2(pj + 1)) bits (wide enough to represent pj itself)
            domain_bit_count = int(ceil(log2(pj)))
            bc = int(ceil(log2(pj + 1)))
            slices = get_slices(domain_bit_count)
            return z3.URem(
                z3.Sum([
                    z3.ZeroExt(bc - s.size(), s) * z3.BitVecVal(var_b[i], bc) for i, s in enumerate(slices)
                ]) + z3.BitVecVal(b1, bc),
                z3.BitVecVal(pj, bc)
            ) == z3.BitVecVal(b2, bc)

        # conjunction over every copy of the hash for every prime p[j]
        return z3.And([
            make_hash_equation(instance_params.p[j], var_b, b1, b2)
            for j in range(len(instance_params.p))
            for var_b, b1, b2 in instance_params.coefficients[j]
        ])
from fractions import Fraction
from functools import lru_cache
from math import sqrt, prod, log2, ceil, floor, log
from typing import Tuple, Optional, List, Union
from rfb_mc.component.eamp.eamp_edge_scheduler_base import EampEdgeSchedulerBase
from rfb_mc.component.eamp.primes import get_lowest_prime_above_or_equal_power_of_power_of_two
from rfb_mc.component.eamp.eamp_rfm import EampParams, EampTransformMethod
from rfb_mc.store import Store
class EampEdgeScheduler(EampEdgeSchedulerBase[Tuple[int, List[int]]]):
    """
    Concrete EAMP edge scheduler whose partial parameters are a pair (j, c)
    of the currently active prime index j and the exponent vector c.
    """

    def __init__(
        self,
        store: Store,
        confidence: Union[Fraction, float],
        a: int,
        q: int,
        min_model_count: Optional[int] = None,
        max_model_count: Optional[int] = None,
    ):
        super().__init__(store, confidence, a, q, min_model_count, max_model_count)

        # number of prime levels; derived from max_model_count ** q / lg
        self._cn: int = int(
            floor(log2(log2(self.max_model_count ** self.q / self.lg) + 1) + 1)
        ) if self.max_model_count ** self.q / self.lg >= 1 else 1

        # per-level prime moduli (presumably the lowest prime >= 2 ** (2 ** j),
        # per the helper's name — confirm against its implementation)
        self._p: Tuple[int, ...] = tuple([
            get_lowest_prime_above_or_equal_power_of_power_of_two(j)
            for j in range(self._cn)
        ])

    # NOTE(review): lru_cache on an instance method keeps the instance alive for
    # the cache's lifetime; acceptable here only if schedulers are short-lived
    @lru_cache(1)
    def _get_upper_bound_on_estimate_iteration_count(self) -> int:
        # maximum amount of values that need to be iterated for c[0]
        max_c0 = int(ceil(max([
            log2(self._p[i] / prod([self._p[j] for j in range(1, i)]))
            for i in range(1, self._cn)
        ]))) - 1 if self._cn > 1 else 1

        # maximum amount of required estimate iterations
        return self._cn - 1 + max_c0

    @lru_cache(1)
    def _get_required_minimal_min_model_count_when_no_lower_bound_could_be_established(self):
        # smallest lower bound that still satisfies the gap constraint: ceil(g ** (1/q))
        return int(ceil(self.g ** (1 / self.q)))

    def _make_eamp_params(self, partial_eamp_params: Tuple[int, List[int]]) -> EampParams:
        # only the exponent vector c is needed here; the level index j is ignored
        j, c = partial_eamp_params
        return EampParams(
            p=self._p,
            c=tuple(c),
            transform_method=EampTransformMethod.SORTED_ROLLING_WINDOW,
        )

    def _make_initial_partial_eamp_params(self) -> Tuple[int, List[int]]:
        # start at the highest prime level with exponent vector (0, ..., 0, 1)
        return self._cn - 1, [0] * (self._cn - 1) + [1]

    def _advance_partial_eamp_params(
        self,
        partial_eamp_params: Tuple[int, List[int]],
        estimate_result: bool
    ) -> Optional[Tuple[int, List[int]]]:
        """
        Advances (j, c) in response to the estimate: a positive result enables
        the next-lower level (or increments c[0] at the bottom); a negative
        result disables the current level and moves down, terminating (None)
        when level 0 is reached.
        """
        j, c = partial_eamp_params
        c_next = c.copy()

        if estimate_result is True:
            if j == 0:
                c_next[0] += 1
                return 0, c_next
            else:
                c_next[j - 1] = 1
                return j - 1, c_next
        else:
            if j == 0:
                return None
            else:
                c_next[j] = 0
                c_next[j - 1] = 1
                return j - 1, c_next

    @staticmethod
    def get_q_for_fixed_a_that_ensures_upper_bound_for_multiplicative_gap_of_result(
        a: int,
        epsilon: float,
    ) -> int:
        """
        Returns the minimal parameter q that ensures that for the given a we have,
        get_upper_bound_for_multiplicative_gap_of_result(a, q) <= (1 + epsilon) ** 2.
        That condition is equivalent to the statement that the geometric mean of the final edge interval
        is a multiplicative approximation with error epsilon i.e.
        model_count / (1 + epsilon) <= geometric_mean <= model_count * (1 + epsilon).
        """
        g, lg = EampEdgeScheduler.get_g_and_lg(a)
        return int(ceil(0.5 * log(2 * lg / g, 1 + epsilon)))

    @staticmethod
    def get_a_for_fixed_q_that_ensures_upper_bound_for_multiplicative_gap_of_result(
        q: int,
        epsilon: float,
    ) -> int:
        """
        Returns the minimal parameter a that ensures that for the given q we have,
        get_upper_bound_for_multiplicative_gap_of_result(a, q) <= (1 + epsilon) ** 2.
        That condition is equivalent to the statement that the geometric mean of the final edge interval
        is a multiplicative approximation with error epsilon i.e.
        model_count / (1 + epsilon) <= geometric_mean <= model_count * (1 + epsilon).
        """
        # as a grows the gap bound approaches 2 ** (1/q), so targets below that
        # limit are unreachable for this q
        if 2 ** (1 / q) >= (1 + epsilon) ** 2:
            raise ValueError(f"For epsilon={epsilon} and q={q} "
                             f"i.e. (1 + epsilon) ** 2 = {(1 + epsilon) ** 2}, higher a "
                             f"values will only be able to converge to {2 ** (1 / q)} thus epsilon "
                             f"{sqrt(2 ** (1 / q)) - 1}")

        # TODO: replace by proper formula
        a = 1
        while EampEdgeScheduler.get_upper_bound_for_multiplicative_gap_of_result(a, q) > (1 + epsilon) ** 2:
            a += 1
        return a

    @staticmethod
    def get_upper_bound_for_multiplicative_gap_of_result(a: int, q: int) -> float:
        # see EampEdgeSchedulerBase.get_upper_bound_for_multiplicative_gap_of_result
        g, lg = EampEdgeScheduler.get_g_and_lg(a)
        return (2 * lg / g) ** (1 / q)
from decimal import Decimal
from collections import Counter
from concurrent.futures import ThreadPoolExecutor
from typing import Optional, Iterable, Tuple, TypedDict, Literal, Any
import uuid
from rfb_mc.serialization import SerializedV1StoreData, v1_encode_rf_bmc_task_result, decode_store_data, \
v1_encode_store_data, v1_encode_params, v1_encode_bmc_task_result, SerializedV1RfBmcResultsMap, SerializedV1Params
from rfb_mc.store import Store, StoreData
from rfb_mc.types import RfBmcTask, RfBmcResult, Params, BmcTask, BmcResult
class DynamodbV1StoreItemBmcTask(TypedDict):
    # BMC bound "a", stored as a DynamoDB number (Decimal)
    a: Decimal
class DynamodbV1StoreItemBmcResult(TypedDict):
    # model count found by BMC, or None; stored as a DynamoDB number (Decimal)
    bmc: Optional[Decimal]
class DynamodbV1StoreItemBmcTaskResult(TypedDict):
    # a BMC task together with its result, as stored inside a store item
    task: DynamodbV1StoreItemBmcTask
    result: DynamodbV1StoreItemBmcResult
def v1_convert_dynamodb_store_item_bmc_task_result(
    task_result: DynamodbV1StoreItemBmcTaskResult,
) -> Tuple[BmcTask, BmcResult]:
    """Decode a DynamoDB item representation of a BMC task/result pair."""
    raw_task = task_result["task"]
    raw_bmc = task_result["result"]["bmc"]
    task = BmcTask(a=int(raw_task["a"]))
    result = BmcResult(bmc=None if raw_bmc is None else int(raw_bmc))
    return task, result
def v1_convert_bmc_task_result(
    task_result: Tuple[BmcTask, BmcResult],
) -> DynamodbV1StoreItemBmcTaskResult:
    """Encode a BMC task/result pair into its DynamoDB item representation."""
    task, result = task_result
    encoded_bmc = None if result.bmc is None else Decimal(result.bmc)
    return DynamodbV1StoreItemBmcTaskResult(
        task=DynamodbV1StoreItemBmcTask(a=Decimal(task.a)),
        result=DynamodbV1StoreItemBmcResult(bmc=encoded_bmc),
    )
class DynamodbV1StoreItem(TypedDict):
    # full version-1 store item as persisted in the DynamoDB table
    # partition key of the item
    id: str
    # data format version (always 1 for this shape)
    version: Literal[1]
    params: SerializedV1Params
    rf_bmc_results_map: SerializedV1RfBmcResultsMap
    bmc_task_result: Optional[DynamodbV1StoreItemBmcTaskResult]
def v1_convert_dynamodb_store_item(item: DynamodbV1StoreItem) -> SerializedV1StoreData:
    """Translate a raw DynamoDB store item into the serialized store-data format."""
    raw_bmc_task_result = item["bmc_task_result"]
    if raw_bmc_task_result is None:
        encoded_bmc_task_result = None
    else:
        decoded = v1_convert_dynamodb_store_item_bmc_task_result(raw_bmc_task_result)
        encoded_bmc_task_result = v1_encode_bmc_task_result(decoded)
    return SerializedV1StoreData(
        version=1,
        params=item["params"],
        rf_bmc_results_map=item["rf_bmc_results_map"],
        bmc_task_result=encoded_bmc_task_result,
    )
def v1_convert_store_data(ident: str, store_data: StoreData) -> DynamodbV1StoreItem:
    """Translate in-memory store data into a DynamoDB store item keyed by *ident*."""
    encoded = v1_encode_store_data(store_data)
    bmc_task_result = store_data.bmc_task_result
    encoded_bmc_task_result = (
        None if bmc_task_result is None else v1_convert_bmc_task_result(bmc_task_result)
    )
    return DynamodbV1StoreItem(
        id=ident,
        version=encoded["version"],
        params=encoded["params"],
        rf_bmc_results_map=encoded["rf_bmc_results_map"],
        bmc_task_result=encoded_bmc_task_result,
    )
class DynamodbStore(Store):
    """
    Store implementation backed by a DynamoDB table; each store corresponds to
    one table item identified by ``ident``.
    """

    # data format version written to / expected in the table items
    VERSION = 1

    def __init__(self, table, ident: str):
        """
        Initializes a dynamodb store, requires the identifier to point to
        an existing store data entry. It modifies the data format if the version is
        different as otherwise update methods will throw.
        """
        super().__init__(
            DynamodbStore.get_and_correct_store_data_entry(table, ident)
        )
        self.table = table
        self.ident = ident

    def sync(self):
        # reload the item from DynamoDB and replace the in-memory data under the lock
        data = self.get_store_data_entry(self.table, self.ident)[1]
        with self.data_lock:
            self.data = data

    def _add_results(
        self,
        bmc_task_result: Optional[Tuple[BmcTask, BmcResult]],
        rf_bmc_task_results: Iterable[Tuple[RfBmcTask, RfBmcResult]],
    ):
        # dynamodb request to increment rf bmc task result counters
        def send_rf_bmc_request(
            task_result: Tuple[RfBmcTask, RfBmcResult],
            count: int,
        ):
            # the encoded task/result pair is used as the map key inside the item
            expression_attribute_names = {
                "#task_result": v1_encode_rf_bmc_task_result(task_result),
            }

            expression_attribute_values = {
                ":version": DynamodbStore.VERSION,
                ":inc": count,
            }

            update_expression = "ADD rf_bmc_results_map.#task_result :inc"

            # increments the necessary counters
            return self.table.update_item(
                Key={"id": self.ident},
                UpdateExpression=update_expression,
                ConditionExpression="attribute_exists(id) AND version = :version",
                ExpressionAttributeValues=expression_attribute_values,
                ExpressionAttributeNames=expression_attribute_names,
            )

        # dynamodb request to update the bmc task result
        def send_bmc_request(task_result: Tuple[BmcTask, BmcResult]):
            dyn_task_result = v1_convert_bmc_task_result(task_result)

            try:
                # applies the bmc task result; the condition only lets the write
                # through when no result, a null result or one with a smaller or
                # equal bound a is currently stored
                return self.table.update_item(
                    Key={"id": self.ident},
                    UpdateExpression="SET bmc_task_result = :bmc_task_result",
                    ConditionExpression="attribute_exists(id) AND version = :version AND "
                                        "(attribute_not_exists(bmc_task_result) "
                                        "OR bmc_task_result = :null "
                                        "OR bmc_task_result.task.a <= :bmc_task_a)",
                    ExpressionAttributeValues={
                        ":null": None,
                        ":version": DynamodbStore.VERSION,
                        ":bmc_task_result": dyn_task_result,
                        ":bmc_task_a": dyn_task_result["task"]["a"],
                    },
                )
            except self.table.meta.client.exceptions.ConditionalCheckFailedException:
                # a result with a higher bound is already stored; nothing to do
                return None

        with ThreadPoolExecutor() as executor:
            rf_bmc_task_result_counter = Counter(rf_bmc_task_results)

            fs = []

            if bmc_task_result:
                fs.append(executor.submit(send_bmc_request, bmc_task_result))

            for task_result in rf_bmc_task_result_counter:
                fs.append(executor.submit(send_rf_bmc_request, task_result, rf_bmc_task_result_counter[task_result]))

            # wait for all requests and propagate any request errors
            for fut in fs:
                fut.result()

        # refresh the in-memory copy with the merged remote state
        self.sync()

    @classmethod
    def get_and_correct_store_data_entry(
        cls,
        table,
        ident: str,
    ) -> StoreData:
        """
        Retrieves the store data and updates the data format if the version is
        different.
        """
        version, data = DynamodbStore.get_store_data_entry(table, ident)

        # ensures the data format is correct in order for class method to
        # update the data correctly
        if version != cls.VERSION:
            cls.replace_store_data_entry(table, ident, data)

        return data

    @staticmethod
    def get_store_data_entry(table: Any, ident: str) -> Tuple[int, StoreData]:
        """
        Retrieves the store data entry with the given identifier from
        the table and decodes it.
        """
        item: DynamodbV1StoreItem = table.get_item(
            Key={
                "id": ident,
            }
        )["Item"]

        return decode_store_data(v1_convert_dynamodb_store_item(item))

    @staticmethod
    def replace_store_data_entry(
        table,
        ident: str,
        store_data: StoreData,
    ):
        """
        Removes the store entry and then puts the provided data in the entry.
        """
        # NOTE(review): delete followed by put is not atomic — a concurrent
        # reader may observe a missing item in between; confirm acceptable
        table.delete_item(
            Key={
                "id": ident,
            },
        )

        item: DynamodbV1StoreItem = v1_convert_store_data(ident, store_data)

        table.put_item(
            Item=item
        )

    @staticmethod
    def create_store_data_entry(
        table,
        params: Params,
        ident: Optional[str] = None,
        accept_existing: bool = False,
    ) -> str:
        """
        Creates an empty store entry.
        If the ident is specified it will be used, otherwise a uuid4 id will be generated.
        If accept_existing is True and the ident is specified it will not raise an error if there already
        exist a store entry with the given ident.
        Note that if ident is not specified, this method will retry until an ident is generated that does not already
        exist.
        """
        ident_specified = ident is not None

        # a generated uuid4 id is highly unlikely to collide with existing ids
        ident = ident if ident is not None else str(uuid.uuid4())

        item: DynamodbV1StoreItem = DynamodbV1StoreItem(
            id=ident,
            version=1,
            params=v1_encode_params(params),
            rf_bmc_results_map={},
            bmc_task_result=None,
        )

        try:
            # the condition makes the put fail instead of overwriting an existing item
            table.put_item(
                Item=item,
                ConditionExpression="attribute_not_exists(id)",
            )
        except table.meta.client.exceptions.ConditionalCheckFailedException:
            if ident_specified:
                if accept_existing:
                    return ident
                else:
                    raise RuntimeError(f"Store entry with ident \"{ident}\" already exists")
            else:
                # retry creating a store entry since the id was already generated before
                return DynamodbStore.create_store_data_entry(
                    table, params, None, accept_existing
                )

        return ident
# Deprecation Warning
<span style="color:red; font-size:4em;">This version of the project has been deprecated.</span> <br/>
<span style="color:green; font-size:4em;">Please use <a href="https://github.com/iluxonchik/rfc-bibtex/">rfc-bibtex</a> instead.</span>
The package has **changed name on PyPi** from `rfc-bibtex` to `rfcbibtex`.
You can install the new version with:
`pip install rfcbibtex`
or
`pipenv install rfcbibtex`
# RFCBibTex
A command line tool that creates `BibTex` entries for IETF `RFC`s and `Internet Drafts`.
It can read the list of `RFC`s and `Internet Drafts` to parse from various sources:
* directly from `.tex` files
* directly from `.aux` files
* from a text file (one ID per line)
* from command-line arguments
Duplicate entries are filtered out.
# Installation/Requirements
You can use `pip` to install this command-line tool:
pip install rfcbibtex
or `pipenv`:
pipenv install rfcbibtex
<span style="color:red;">Please note, that <span style="color:green">rfcbibtex</span> is the new package name on PyPi. The deprecated version used <span style="color:green">rfc-bibtex</span>.</span>
Alternatively, you can clone this repository or download the `rfc-bibtex.py` file directly. This tool has no
external dependencies, so as long as you have `Python 3.x` installed, everything
should work fine.
**Requirements**:
* `Python 3.x`
* internet connection
## Testing
First, install the test dependencies:
`pipenv install --dev`
or
`pip install -r dev-requirements.txt`
and then run:
`python -m unittest discover tests`
from the project root.
# Usage
This tool automates the requests to the `https://datatracker.ietf.org/doc/<id>/<version>/bibtex/` and `https://datatracker.ietf.org/doc/<id>/bibtex/` endpoints.
```
usage: rfcbibtex [-h] [-f FILE_NAME] [-o FILE_NAME]
[inline_args [inline_args ...]]
Generate BibTex entries for IETF RFCs and Internet Drafts. The list of IDs can
be read from a file (including .tex and .aux) or directly from command-line
arguments.
positional arguments:
inline_args list of RFC and/or Internet Draft IDs, in any order.
optional arguments:
-h, --help show this help message and exit
-f FILE_NAME, --file FILE_NAME
read list of RFC and/or Internet Draft IDs from a
file. Supported file formats are the following: .tex,
.aux and .txt (one ID per line). If a file with any
other extension is provided, the tool attempts to read
it as a .txt file, containing one ID per line.
-o FILE_NAME, --output FILE_NAME
output the resulting BibTex to a file
```
## Identifier Format
The identifier format of RFCs is `rfc<rfc_num>`, where `<rfc_num>` is the RFC number.
For example, for the [RFC specifying TLS 1.2](https://tools.ietf.org/html/rfc5246) you
would write `rfc5246` (**NOTE**: the identifiers are case-insensitive, so `RFC5246` and `rFc5246` are also accepted).
The input format of Internet-Drafts(I-Ds) is `draft-<rest>`, where `<rest>` is the rest of
the draft's name ([all Internet-Drafts begin with "draft"](https://www.ietf.org/id-info/guidelines.html#naming)). For example, for the
[TLS 1.3 Draft 21](https://datatracker.ietf.org/doc/html/draft-ietf-tls-tls13-21) you would write `draft-ietf-tls-tls13-21`.
## Read Inputs From Command Line
Example command:
`rfcbibtex RFC5246 draft-ietf-tls-tls13-21`
**Output**:
@misc{rfc5246,
series = {Request for Comments},
number = 5246,
howpublished = {RFC 5246},
publisher = {RFC Editor},
doi = {10.17487/RFC5246},
url = {https://rfc-editor.org/rfc/rfc5246.txt},
author = {Eric Rescorla and Tim Dierks},
title = {{The Transport Layer Security (TLS) Protocol Version 1.2}},
pagetotal = 104,
year = 2008,
month = aug,
abstract = {This document specifies Version 1.2 of the Transport Layer Security (TLS) protocol. The TLS protocol provides communications security over the Internet. The protocol allows client/server applications to communicate in a way that is designed to prevent eavesdropping, tampering, or message forgery. {[}STANDARDS-TRACK{]}},
}
@techreport{draft-ietf-tls-tls13-21,
number = {draft-ietf-tls-tls13-21},
type = {Internet-Draft},
institution = {Internet Engineering Task Force},
publisher = {Internet Engineering Task Force},
note = {Work in Progress},
url = {https://datatracker.ietf.org/doc/html/draft-ietf-tls-tls13-21},
author = {Eric Rescorla},
title = {{The Transport Layer Security (TLS) Protocol Version 1.3}},
pagetotal = 143,
year = ,
month = ,
day = ,
abstract = {This document specifies version 1.3 of the Transport Layer Security (TLS) protocol. TLS allows client/server applications to communicate over the Internet in a way that is designed to prevent eavesdropping, tampering, and message forgery.},
}
## Read Identifiers From A file
Option: `-f <file_name_1> ... <file_name_N>`
Alternatively, identifiers can be specified in a file. The following input formats are accepted:
* `.tex`: if you specify a `.tex` file, the program will search for \cite{} commands in the file and
extract those that appear to contain RFCs or Internet Drafts identifiers.
* `.aux`: if you specify an `.aux` file, the program will search for \citation{} commands in the file
and extract those that appear to contain RFCs or Internet Drafts identifiers.
* `.txt`: the program defaults to this file type if the file's extension is neither `.tex`, nor `.aux`.
This file type must contain a list of RFCs or Internet Drafts identifiers, one per line.
Please note that the identifiers must be in the format specified in the [Identifier Format](#identifier-format) section.
### Reading Identifiers From A .tex File
<a id="tex-file"></a>Consider that you have a file called `rfcs_and_ids.tex` with the following content:
\documentclass{article}
\usepackage[utf8]{inputenc}
\title{This Is A Simple Tex File For The RFCBibtex Project Demo}
\author{Illya Gerasymchuk}
\date{March 2019}
\usepackage{natbib}
\usepackage{graphicx}
\begin{document}
\maketitle
\section{Introduction}
There is nothing special here, nothing fancy, just a document with a few citations, like
\cite{RFC5246} this one. This one here \cite{the-documentary-2005} should not be parsed.
While this one \cite{draft-ietf-tls-tls13-21} should. And finally, let's cite the
TLS 1.3 RFC \cite{RFC8446}. Well, that's it folks. At least for now... This is a very basic
file, just to test if \textbf{the basic} latex parsing is working.
You can find the RFCBitex project here: https://github.com/iluxonchik/rfc-bibtex
\begin{figure}[h!]
\centering
\includegraphics[scale=1.7]{universe}
\caption{The Universe}
\label{fig:universe}
\end{figure}
\section{Conclusion}
As you can see, your .tex file may have various citations, but only the ones that are RFCs
and/or Internet Draft IDs are parsed.
\bibliographystyle{plain}
\bibliography{references}
\end{document}
If you run:
`rfcbibtex -f rfcs_and_ids.tex`
<a id="example-output"></a>You will get the following output:
@misc{rfc5246,
series = {Request for Comments},
number = 5246,
howpublished = {RFC 5246},
publisher = {RFC Editor},
doi = {10.17487/RFC5246},
url = {https://rfc-editor.org/rfc/rfc5246.txt},
author = {Eric Rescorla and Tim Dierks},
title = {{The Transport Layer Security (TLS) Protocol Version 1.2}},
pagetotal = 104,
year = 2008,
month = aug,
abstract = {This document specifies Version 1.2 of the Transport Layer Security (TLS) protocol. The TLS protocol provides communications security over the Internet. The protocol allows client/server applications to communicate in a way that is designed to prevent eavesdropping, tampering, or message forgery. {[}STANDARDS-TRACK{]}},
}
@techreport{draft-ietf-tls-tls13-21,
number = {draft-ietf-tls-tls13-21},
type = {Internet-Draft},
institution = {Internet Engineering Task Force},
publisher = {Internet Engineering Task Force},
note = {Work in Progress},
url = {https://datatracker.ietf.org/doc/html/draft-ietf-tls-tls13-21},
author = {Eric Rescorla},
title = {{The Transport Layer Security (TLS) Protocol Version 1.3}},
pagetotal = 143,
year = ,
month = ,
day = ,
abstract = {This document specifies version 1.3 of the Transport Layer Security (TLS) protocol. TLS allows client/server applications to communicate over the Internet in a way that is designed to prevent eavesdropping, tampering, and message forgery.},
}
@misc{rfc8446,
series = {Request for Comments},
number = 8446,
howpublished = {RFC 8446},
publisher = {RFC Editor},
doi = {10.17487/RFC8446},
url = {https://rfc-editor.org/rfc/rfc8446.txt},
author = {Eric Rescorla},
title = {{The Transport Layer Security (TLS) Protocol Version 1.3}},
pagetotal = 160,
year = 2018,
month = aug,
abstract = {This document specifies version 1.3 of the Transport Layer Security (TLS) protocol. TLS allows client/server applications to communicate over the Internet in a way that is designed to prevent eavesdropping, tampering, and message forgery. This document updates RFCs 5705 and 6066, and obsoletes RFCs 5077, 5246, and 6961. This document also specifies new requirements for TLS 1.2 implementations.},
}
### Reading Identifiers From a .aux File
Consider that you have a file called `rfcs_and_ids.aux` with the following content:
\relax
\citation{RFC5246}
\citation{the-documentary-2005}
\citation{draft-ietf-tls-tls13-21}
\citation{RFC8446}
\bibstyle{plain}
\bibdata{references}
\@writefile{toc}{\contentsline {section}{\numberline {1}Introduction}{1}}
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces The Universe}}{1}}
\newlabel{fig:universe}{{1}{1}}
\@writefile{toc}{\contentsline {section}{\numberline {2}Conclusion}{1}}
If you run:
`rfcbibtex -f rfcs_and_ids.aux`
You will get the [same output as in the previous section](#example-output).
### Reading Identifiers From a .txt File
Consider that you have a file called `rfcs_and_ids.txt` with the following content:
RFC5246
the-documentary-2005
draft-ietf-tls-tls13-21
RFC8446
If you run:
`rfcbibtex -f rfcs_and_ids.txt`
You will get the [same output as in the previous section](#example-output).
### Combining Multiple Files
You can also combine multiple files with different types. You can even combine files and command line arguments.
Let's assume you have a file called `rfcs.txt` with the following content:
RFC5246
rFc7231
We will also use the [rfcs_and_ids.tex from a previous example](#tex-file). If you run:
`rfcbibtex rfc1234 -f rfcs.txt rfcs_and_ids.tex`
<a id="mixed-files-output"></a>You will get the following output:
@misc{rfc7231,
series = {Request for Comments},
number = 7231,
howpublished = {RFC 7231},
publisher = {RFC Editor},
doi = {10.17487/RFC7231},
url = {https://rfc-editor.org/rfc/rfc7231.txt},
author = {Roy T. Fielding and Julian Reschke},
title = {{Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content}},
pagetotal = 101,
year = 2014,
month = jun,
abstract = {The Hypertext Transfer Protocol (HTTP) is a stateless \textbackslash{}\%application- level protocol for distributed, collaborative, hypertext information systems. This document defines the semantics of HTTP/1.1 messages, as expressed by request methods, request header fields, response status codes, and response header fields, along with the payload of messages (metadata and body content) and mechanisms for content negotiation.},
}
@techreport{draft-ietf-tls-tls13-21,
number = {draft-ietf-tls-tls13-21},
type = {Internet-Draft},
institution = {Internet Engineering Task Force},
publisher = {Internet Engineering Task Force},
note = {Work in Progress},
url = {https://datatracker.ietf.org/doc/html/draft-ietf-tls-tls13-21},
author = {Eric Rescorla},
title = {{The Transport Layer Security (TLS) Protocol Version 1.3}},
pagetotal = 143,
year = ,
month = ,
day = ,
abstract = {This document specifies version 1.3 of the Transport Layer Security (TLS) protocol. TLS allows client/server applications to communicate over the Internet in a way that is designed to prevent eavesdropping, tampering, and message forgery.},
}
@misc{rfc1234,
series = {Request for Comments},
number = 1234,
howpublished = {RFC 1234},
publisher = {RFC Editor},
doi = {10.17487/RFC1234},
url = {https://rfc-editor.org/rfc/rfc1234.txt},
author = {Don Provan},
title = {{Tunneling IPX traffic through IP networks}},
pagetotal = 6,
year = 1991,
month = jun,
abstract = {This memo describes a method of encapsulating IPX datagrams within UDP packets so that IPX traffic can travel across an IP internet. {[}STANDARDS-TRACK{]} This memo defines objects for managing DS1 Interface objects for use with the SNMP protocol. {[}STANDARDS-TRACK{]}},
}
@misc{rfc5246,
series = {Request for Comments},
number = 5246,
howpublished = {RFC 5246},
publisher = {RFC Editor},
doi = {10.17487/RFC5246},
url = {https://rfc-editor.org/rfc/rfc5246.txt},
author = {Eric Rescorla and Tim Dierks},
title = {{The Transport Layer Security (TLS) Protocol Version 1.2}},
pagetotal = 104,
year = 2008,
month = aug,
abstract = {This document specifies Version 1.2 of the Transport Layer Security (TLS) protocol. The TLS protocol provides communications security over the Internet. The protocol allows client/server applications to communicate in a way that is designed to prevent eavesdropping, tampering, or message forgery. {[}STANDARDS-TRACK{]}},
}
@misc{rfc8446,
series = {Request for Comments},
number = 8446,
howpublished = {RFC 8446},
publisher = {RFC Editor},
doi = {10.17487/RFC8446},
url = {https://rfc-editor.org/rfc/rfc8446.txt},
author = {Eric Rescorla},
title = {{The Transport Layer Security (TLS) Protocol Version 1.3}},
pagetotal = 160,
year = 2018,
month = aug,
abstract = {This document specifies version 1.3 of the Transport Layer Security (TLS) protocol. TLS allows client/server applications to communicate over the Internet in a way that is designed to prevent eavesdropping, tampering, and message forgery. This document updates RFCs 5705 and 6066, and obsoletes RFCs 5077, 5246, and 6961. This document also specifies new requirements for TLS 1.2 implementations.},
}
Note that **duplicate** entries have been removed.
### Output Contents To A File
Option: `-o <file_name>`
Considering `rfcs.txt` and `rfcs_and_ids.tex` from the examples above.
If you run:
`rfcbibtex rfc1234 -f rfcs.txt rfcs_and_ids.tex -o output.bib`
A file `output.bib` would be created **or overridden** with the [the same content as in the above output](#mixed-files-output).
## Error Handling and Warnings
The tool will print a warning in the following cases:
* no explicit version defined for a draft id
* drafts which have a new draft version update
* drafts which have been assigned an `RFC` number
* invalid identifier name provided as a **command-line argument** (invalid identifier names from files are simply not parsed)
* errors in fetching from URLs
It's important to note, that such errors **DO NOT break the correct functionality of the tool**. Those errors and warnings are printed out,
but **IGNORED**. The generated BibTex files are valid, even when errors are found. Errors and warnings are only printed on the console
(into the standard error output stream) and **never to the output files** (`-o` option).
Here is an example of an output of errors and warnings:
<img src="https://i.imgur.com/1YDLsBN.png" alt="RFCBibTex Errors and Warnings Example" width="50%"> | /rfc-bibtex-0.3.2.tar.gz/rfc-bibtex-0.3.2/README.md | 0.426322 | 0.916147 | README.md | pypi |
from unidecode import unidecode
from num2words import num2words
from .homoclave import Homoclave
from .verification_digit import VerificationDigit
import re
class RFC_PM:
    """Generates the Mexican RFC (tax id) for a juristic person (empresa).

    The 12-character RFC is composed of a three-letter code derived from
    the company name, a six-digit foundation-date code (YYMMDD), a
    two-character homoclave and a verification digit, per the SAT rules.
    """

    # Foundation date must look like YYYY-MM-DD.
    _REGEX_DATE_FORMAT = re.compile(r"^\d{4}-\d{2}-\d{2}$")
    # Standard roman-numeral shape (I .. MMMCMXCIX).
    _ROMAN_NUMBER_REGEX = "^M{0,3}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$"
    _ROMAN_VALUES = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    # Juristic-person type abbreviations (S.A., C.V., ...) to strip from the
    # name.  The original pattern joined the branches with '||', which adds an
    # empty alternative; regex alternation takes the first matching branch and
    # the empty branch always matches, so every branch after the first was
    # unreachable.  Joined with single '|' so all abbreviations are ignored.
    _JURISTIC_PERSON_TYPE_REGEX = (
        r"(^S+\.+N+\.+C+\.)|(^S+\.+C+\.+L+\.)|(^S+\.+C+\.+S+\.)|(^A+\.+C+\.)"
        r"|(^N+\.+C+\.)|(^S+\.+A+\.)|(^S+\.+C+\.)|(^R+\.+L+\.)|(^C+\.+V+\.)"
        r"|(^S+\.)|(^R+\.)|(^C+\.)|(^V+\.)|(^L+\.)|(^A+\.)|(^N+\.)|(^P+\.)"
    )
    # Connector/corporate words that never contribute to the name code.
    _FORBIDDEN_WORDS = [
        "EL", "LA", "DE", "LOS", "LAS", "Y", "DEL", "MI", "POR", "CON",
        "AL", "SUS", "E", "PARA", "EN", "MC", "VON", "MAC", "VAN",
        "COMPANIA", "CIA", "CIA.", "SOCIEDAD", "SOC", "SOC.", "COMPANY",
        "CO", "COOPERATIVA", "COOP", "SC", "SCL", "SCS", "SNC", "SRL",
        "CV", "SA", "THE", "OF", "AND", "A",
    ]

    def __init__(self, nombre_empresa: str, fecha_constitucion: str) -> None:
        """:arg nombre_empresa: legal company name (at least 2 characters).
        :arg fecha_constitucion: foundation date as ``YYYY-MM-DD``.
        :raises Exception: on a too-short name or a malformed date.
        """
        if len(nombre_empresa) <= 1:
            raise Exception("Error, company name should have at least 2 characters.")
        if not self._validate_date_format(fecha_constitucion):
            raise Exception("Incorrect foundation date format, should be YYYY-MM-DD")
        # Commas act as word separators in the SAT rules.
        self.company_name = nombre_empresa.replace(",", " ").upper()
        self.year, self.month, self.day = fecha_constitucion.split("-")
        self.homoclave = Homoclave()
        self.verification_digit = VerificationDigit()

    def _validate_date_format(self, date_text) -> bool:
        """Return True when *date_text* matches ``YYYY-MM-DD``."""
        return bool(self._REGEX_DATE_FORMAT.match(date_text))

    def generate(self) -> str:
        """Build and return the full 12-character RFC string."""
        # Normalization pipeline: each pass mirrors one SAT rule, with
        # empty-word cleanup after every step.
        words = self.company_name.replace(",", "").split(" ")
        words = list(map(self._normalize, words))
        words = list(map(self._ignoreJuristicPersonTypeAbbreviations, words))
        words = self._remove_empty_words(words)
        words = list(map(self._ignoreForbiddenWords, words))
        words = self._remove_empty_words(words)
        words = list(map(self._markOneLetterAbbreviations, words))
        words = self._remove_empty_words(words)
        words = list(map(self._expandSpecialCharactersInSingletonWord, words))
        words = self._remove_empty_words(words)
        words = self._splitOneLetterAbbreviations(words)
        words = self._remove_empty_words(words)
        words = list(map(self._ignoreSpecialCharactersInWords, words))
        words = self._remove_empty_words(words)
        words = self._expandArabicNumerals(words)
        words = self._remove_empty_words(words)
        words = list(map(self._expandRomanNumerals, words))
        words = self._remove_empty_words(words)
        three_digit_code = self._threeDigitsCode(words)
        foundation_code = self._foundationCode()
        homoclave = self.homoclave.calculate(" ".join(words))
        verification_digit = self.verification_digit.calculate(
            three_digit_code + foundation_code + homoclave
        )
        return three_digit_code + foundation_code + homoclave + verification_digit

    def _normalize(self, word) -> str:
        """ASCII-fold (strip accents), upper-case and trim a word."""
        return unidecode(word).upper().strip() if len(word) > 0 else word

    def _ignoreJuristicPersonTypeAbbreviations(self, word) -> str:
        """Strip a leading juristic-person abbreviation (S.A., C.V., ...)."""
        return re.sub(self._JURISTIC_PERSON_TYPE_REGEX, "", word).strip()

    def _remove_empty_words(self, words) -> list:
        """Drop empty strings left behind by the preceding filter."""
        return [word for word in words if len(word) > 0]

    def _ignoreForbiddenWords(self, word) -> str:
        """Blank out connector/corporate words (DE, LA, SA, ...)."""
        if word in self._FORBIDDEN_WORDS:
            return ""
        return word

    def _markOneLetterAbbreviations(self, word) -> str:
        """Tag 'X.'-style abbreviations with a sentinel for later splitting."""
        return re.sub(r"^([^.])\.", r"\1.AABBRREEVVIIAATTIIOONN", word)

    def _expandSpecialCharactersInSingletonWord(self, word) -> str:
        """Spell out a special character standing alone as a word."""
        if len(word) == 1:
            return (
                word.replace("@", "ARROBA")
                .replace("´", "APOSTROFE")
                .replace("%", "PORCIENTO")
                .replace("#", "NUMERO")
                .replace("!", "ADMIRACION")
                .replace(".", "PUNTO")
                .replace("$", "PESOS")
                .replace('"', "COMILLAS")
                .replace("-", "GUION")
                .replace("/", "DIAGONAL")
                .replace("+", "SUMA")
                .replace("(", "ABRE PARENTESIS")
                .replace(")", "CIERRA PARENTESIS")
            )
        return word

    def _ignoreSpecialCharactersInWords(self, word) -> str:
        """Drop a special character appearing inside a word.

        NOTE(review): inside the character class, '"-/' is a range ('"'..'/')
        that also swallows & ' * and comma — presumably intended; confirm
        against the SAT specification before tightening.
        """
        return re.sub('(.+?)[@´%#!.$"-/+\\(\\)](.+?)', r"\1\2", word)

    def _splitOneLetterAbbreviations(self, words) -> list:
        """Split the sentinel-tagged abbreviations back into separate words."""
        temp_words = "**********SPLIT**********".join(words)
        temp_words = temp_words.split("AABBRREEVVIIAATTIIOONN")
        final_words = list()
        for word in temp_words:
            final_words += word.split("**********SPLIT**********")
        return final_words

    def _expandArabicNumerals(self, words) -> list:
        """Replace purely numeric words with their Spanish spelling."""
        final_words = list()
        for word in words:
            # fullmatch: the original used re.match, which also fired on mixed
            # tokens such as "12AB" and then crashed inside num2words.
            if re.fullmatch("[0-9]+", word):
                number = self._normalize(num2words(word, lang="es"))
                final_words += number.split(" ")
            else:
                final_words.append(word)
        return final_words

    def _expandRomanNumerals(self, word) -> str:
        """Replace a roman-numeral word with its decimal digit string."""
        if re.match(self._ROMAN_NUMBER_REGEX, word):
            # str(): the original returned the bare int, which crashed the
            # later len()/indexing steps of the pipeline.
            return str(self._romanToInt(word))
        return word

    def _romanToInt(self, word) -> int:
        """Convert a (validated) roman numeral to its integer value."""
        int_val = 0
        for i in range(len(word)):
            # A smaller value before a larger one is subtractive (IV = 4):
            # it was added on the previous step, so subtract it twice.
            if i > 0 and self._ROMAN_VALUES[word[i]] > self._ROMAN_VALUES[word[i - 1]]:
                int_val += (
                    self._ROMAN_VALUES[word[i]] - 2 * self._ROMAN_VALUES[word[i - 1]]
                )
            else:
                int_val += self._ROMAN_VALUES[word[i]]
        return int_val

    def _threeDigitsCode(self, words) -> str:
        """First initials (3+ words), initial + two letters (2 words), or
        first word padded to three characters (1 word)."""
        if len(words) >= 3:
            return words[0][0] + words[1][0] + words[2][0]
        elif len(words) == 2:
            return words[0][0] + words[1][0:2]
        return self._firstThreeCharactersWithRightPad(words[0])

    def _firstThreeCharactersWithRightPad(self, word) -> str:
        """First three characters, right-padded with 'X' when shorter."""
        if len(word) >= 3:
            return word[0:3]
        return word.ljust(3, "X")

    def _foundationCode(self) -> str:
        """Six-digit foundation-date code: YYMMDD."""
        return (
            self._lastTwoDigitsOf(self.year)
            + self._formattedInTwoDigits(self.month)
            + self._formattedInTwoDigits(self.day)
        )

    def _lastTwoDigitsOf(self, number) -> str:
        """Last two characters of a numeric string ('1998' -> '98')."""
        return number[-2:]

    def _formattedInTwoDigits(self, number) -> str:
        """Left-pad a numeric string to two digits ('7' -> '07')."""
        return number.rjust(2, "0")
from unidecode import unidecode
class Homoclave:
    """Computes the two-character "homoclave" of a Mexican RFC.

    The homoclave disambiguates taxpayers whose name and date codes
    coincide.  Per the SAT procedure: each character of the normalized
    full name maps to a two-digit code, overlapping digit pairs are
    multiplied and summed, and the last three digits of that sum select
    two characters from the 34-character ``_HOMOCLAVE_DIGITS`` alphabet.
    """
    # SAT character -> two-digit code table.  The gaps (no 20, 30, 31)
    # are part of the official table.
    _FULL_NAME_MAPPING = {
        " ": "00",
        "0": "00",
        "1": "01",
        "2": "02",
        "3": "03",
        "4": "04",
        "5": "05",
        "6": "06",
        "7": "07",
        "8": "08",
        "9": "09",
        "&": "10",
        "A": "11",
        "B": "12",
        "C": "13",
        "D": "14",
        "E": "15",
        "F": "16",
        "G": "17",
        "H": "18",
        "I": "19",
        "J": "21",
        "K": "22",
        "L": "23",
        "M": "24",
        "N": "25",
        "O": "26",
        "P": "27",
        "Q": "28",
        "R": "29",
        "S": "32",
        "T": "33",
        "U": "34",
        "V": "35",
        "W": "36",
        "X": "37",
        "Y": "38",
        "Z": "39",
        "Ñ": "40",
    }
    # 34-character alphabet for the two homoclave characters (no 'O', '0').
    _HOMOCLAVE_DIGITS = "123456789ABCDEFGHIJKLMNPQRSTUVWXYZ"
    def calculate(self, fullname) -> str:
        """Return the two-character homoclave for *fullname*."""
        full_name = self._normalizeFullName(fullname)
        mapped_full_name = self._mapFullNameToDigitsCode(full_name)
        pairs_of_digits_sum = self._sumPairsOfDigits(mapped_full_name)
        homoclave = self._buildHomoclave(pairs_of_digits_sum)
        return homoclave
    def _normalizeFullName(self, fullname) -> str:
        """Upper-case, ASCII-fold and strip punctuation from the name."""
        full_name = fullname.upper()
        full_name = self._normalize(full_name)
        full_name = (
            full_name.replace(",", "")
            .replace(".", "")
            .replace("'", "")
            .replace("-", "")
        )
        full_name = self._addMissingCharToFullName(full_name, "Ñ")
        return full_name
    def _normalize(self, string) -> str:
        """Transliterate accented characters to plain ASCII."""
        return unidecode(string)
    def _addMissingCharToFullName(self, full_name, missing_char) -> str:
        """Intended to restore occurrences of *missing_char* (e.g. 'Ñ').

        NOTE(review): this looks like dead code in practice -- unidecode()
        has already replaced 'Ñ' with 'N', so find() returns -1 and the
        loop never runs.  If the loop ever did run, the item assignment
        ``full_name[index] = missing_char`` would raise TypeError (str is
        immutable); confirm the intent before relying on this path.
        """
        index = full_name.find(missing_char)
        if index == -1:
            return full_name
        while index >= 0:
            full_name[index] = missing_char
            index = full_name.find(missing_char)
        return str(full_name)
    def _mapFullNameToDigitsCode(self, full_name) -> str:
        """Map each character to its two-digit code, prefixed with '0'."""
        mapped_full_name = "0"
        for i in range(len(full_name)):
            mapped_full_name += self._mapCharacterToTwoDigitCode(full_name[i])
        return mapped_full_name
    def _mapCharacterToTwoDigitCode(self, c) -> str:
        """Look up *c* in the SAT table; raise when unmapped."""
        if c not in self._FULL_NAME_MAPPING:
            raise BaseException("No two-digit-code mapping for char: " + c)
        else:
            return self._FULL_NAME_MAPPING[c]
    def _sumPairsOfDigits(self, mapped_full_name) -> int:
        """Sum, over a sliding window, (two-digit number at i) * (digit at i+1)."""
        pairs_of_digits_sum = 0
        for i in range(len(mapped_full_name) - 1):
            # int_num1: the two-digit window starting at i.
            int_num1 = int(mapped_full_name[i : i + 2])
            # int_num2: the single digit at position i+1.
            int_num2 = int(mapped_full_name[i + 1 : i + 2])
            pairs_of_digits_sum += int_num1 * int_num2
        return pairs_of_digits_sum
    def _buildHomoclave(self, pairs_of_digits_sum) -> str:
        """Select two alphabet characters from the sum's last three digits."""
        last_three_digits = pairs_of_digits_sum % 1000
        # Quotient and remainder mod 34 index into the 34-char alphabet.
        quo = int(last_three_digits / 34)
        reminder = int(last_three_digits % 34)
        homoclave = str(self._HOMOCLAVE_DIGITS[quo]) + str(
            self._HOMOCLAVE_DIGITS[reminder]
        )
        return homoclave
from typing import Callable, Dict, List
from .methods import REGISTERED_METHODS
class ValidatorUi:
    """Callback interface through which the validator reports results.

    The base implementation silently ignores every event; subclasses
    override the hooks they care about (e.g. to print to a console).
    """

    def status(self, message: str) -> None:
        """Report a neutral progress message."""

    def skip(self, subject: str, message: str) -> None:
        """Report that *subject* was skipped, with a reason."""

    def success(self, subject: str, message: str) -> None:
        """Report that *subject* validated successfully."""

    def error(self, subject: str, message: str) -> None:
        """Report a validation error for *subject*."""

    def fatal_error(self, message: str) -> None:
        """Report an unrecoverable error."""
class RfcHttpValidator:
    """Validates the structured-field headers of an HTTP/1.1 message.

    For each header whose lower-cased name has an entry in ``typemap``,
    the corresponding structured-type parser is instantiated and run;
    outcomes are reported through the ``ValidatorUi`` callbacks.
    """
    def __init__(self, typemap: Dict[str, Callable], ui: ValidatorUi):
        """:param typemap: lower-cased field name -> structured-type factory.
        :param ui: callback sink for skip/success/error reporting.
        """
        self.typemap = typemap
        self.ui = ui
        # Set per validate() call: formats a source location for messages.
        # NOTE(review): annotated Callable[..., str] but initialized to
        # None, so it is effectively Optional until validate() runs.
        self.location: Callable[..., str] = None
    def validate(self, http_message: str, location: Callable[..., str]) -> None:
        """Validate one HTTP message (start line, headers, optional body).

        Unwraps RFC 8792 line folding, checks the request/status line,
        folds header continuations/duplicates, then parses each known
        header with its structured-field type.
        """
        self.location = location
        lines = http_message.strip("\n").split("\n")
        if len(lines) == 0:
            self.ui.error(self.location(), "Empty http-message")
            return
        lines = self.combine_8792(lines)
        # skip_lines is 1 when line 0 really was a start line, 0 when it
        # turned out to be a header line.
        skip_lines = self.check_start_line(lines[0])
        try:
            headers = self.combine_headers(lines[skip_lines:])
        except ValueError as why:
            self.ui.error(self.location(), str(why))
            return
        for hname, hvalue in headers.items():
            header_type = self.typemap.get(hname)
            if header_type:
                subject = f"{hname}: {hvalue}"
                try:
                    header_type().parse(hvalue.encode("ascii"))
                    self.ui.success(self.location(subject), "valid")
                except ValueError as why:
                    self.ui.error(self.location(subject), str(why))
            else:
                self.ui.skip(self.location(hname), "no type information")
    def check_start_line(self, start_line: str) -> int:
        """Validate the request/status line.

        Returns the number of lines consumed: 1 for a genuine start line,
        0 when the first line is actually a header field line.
        """
        if start_line[0].isspace():
            self.ui.error(
                self.location(start_line), "Start line starts with whitespace"
            )
            return 0
        parts = start_line.split(" ")
        if parts[0][-1] == ":":
            return 0  # it must be a header line
        if "http" in parts[0].lower():
            # Looks like a status line: HTTP/1.1 <code> <phrase>.
            if parts[0] != "HTTP/1.1":
                self.ui.error(
                    self.location(start_line),
                    "Status line doesn't start with 'HTTP/1.1'",
                )
            elif len(parts) < 3:
                self.ui.error(
                    self.location(),
                    f"Status line '{start_line}' isn't 'HTTP/1.1 [status_code] [status_phrase]'",
                )
            else:
                if not parts[1].isdigit():
                    self.ui.error(self.location(parts[1]), "Non-numeric status code")
                elif not 99 < int(parts[1]) < 600:
                    self.ui.error(self.location(parts[1]), "Status code out of range")
        else:
            # Otherwise a request line: <method> <url> HTTP/1.1.
            if len(parts) < 3:
                self.ui.error(
                    self.location(), "Request line isn't '[method] [url] HTTP/1.1'"
                )
            else:
                if parts[0] not in REGISTERED_METHODS:
                    self.ui.error(self.location(parts[0]), "Method not recognised")
                if parts[2] != "HTTP/1.1":
                    self.ui.error(
                        self.location(),
                        f"Request line '{start_line}' doesn't end with 'HTTP/1.1'",
                    )
                if len(parts) > 3:
                    self.ui.error(
                        self.location(start_line), "Request line has extra text"
                    )
        return 1
    def combine_8792(self, lines: List[str]) -> List[str]:
        """Undo RFC 8792 single-backslash line wrapping, if declared.

        When the first line carries the RFC 8792 note, drops the two
        header lines and joins each '\\'-terminated line with the
        (left-stripped) line that follows it.
        """
        if not "NOTE: '\\' line wrapping per RFC 8792" in lines[0]:
            return lines
        lines = lines[2:]
        output = []  # type: List[str]
        continuation = False
        for line in lines:
            prev_continuation = continuation
            if line.endswith("\\"):
                continuation = True
                line = line[:-1]
            else:
                continuation = False
            if prev_continuation:
                output[-1] += line.lstrip()
            else:
                output.append(line)
        return output
    def combine_headers(self, lines: List[str]) -> Dict[str, str]:
        """Fold header field lines into a {lower-name: value} dict.

        Obsolete line folding (leading space) is appended to the previous
        field; duplicate field names are comma-joined; everything after
        the first blank line (the body) is ignored.

        :raises ValueError: on a body with no headers, a leading
            continuation line, or a non-field line in the header section.
        """
        headers = {}  # type: Dict[str, str]
        prev_name: str = None
        in_body = False
        for line in lines:
            if len(line.strip()) == 0:
                if not headers:  # a blank line before seeing any headers
                    raise ValueError("Body without headers")
                in_body = True
            if in_body:
                continue
            if line[0] == " ":
                if prev_name:
                    headers[prev_name] += f" {line.strip()}"
                    continue
                raise ValueError(
                    f"First header field line '{line}' starts with whitespace"
                )
            try:
                name, value = line.split(":", 1)
            except ValueError as why:
                raise ValueError(f"Non-field line '{line}' in content") from why
            if " " in name:
                self.ui.error(self.location(name), "Whitespace in field name")
            name = name.lower()
            value = value.strip()
            if name in headers:
                headers[name] += f", {value}"
            else:
                headers[name] = value
            prev_name = name
        return headers
# Mapping of lower-cased HTTP field names to their RFC 8941 structured-field
# top-level type ("item", "list" or "dict").  Used to select the right
# structured-field parser for each header during validation.
typemap = {
    # Compatible Fields
    "accept": "list",
    "accept-encoding": "list",
    "accept-language": "list",
    "accept-patch": "list",
    "accept-post": "list",
    "accept-ranges": "list",
    "access-control-allow-credentials": "item",
    "access-control-allow-headers": "list",
    "access-control-allow-methods": "list",
    "access-control-allow-origin": "item",
    "access-control-expose-headers": "list",
    "access-control-max-age": "item",
    "access-control-request-headers": "list",
    "access-control-request-method": "item",
    "age": "item",
    "allow": "list",
    "alpn": "list",
    "alt-svc": "dict",
    "alt-used": "item",
    "cache-control": "dict",
    "cdn-loop": "list",
    "clear-site-data": "list",
    "connection": "list",
    "content-encoding": "list",
    "content-language": "list",
    "content-length": "list",
    "content-type": "item",
    "cross-origin-resource-policy": "item",
    "expect": "dict",
    "expect-ct": "dict",
    "host": "item",
    "keep-alive": "dict",
    "max-forwards": "item",
    "origin": "item",
    "pragma": "dict",
    "prefer": "dict",
    "preference-applied": "dict",
    "retry-after": "item",
    "sec-websocket-extensions": "list",
    "sec-websocket-protocol": "list",
    "sec-websocket-version": "item",
    "server-timing": "list",
    "surrogate-control": "dict",
    "te": "list",
    "timing-allow-origin": "list",
    "trailer": "list",
    "transfer-encoding": "list",
    "vary": "list",
    "x-content-type-options": "item",
    "x-frame-options": "item",
    "x-xss-protection": "list",
    # Mapped Fields (sf-* variants carrying structured-field syntax)
    "sf-content-location": "item",
    "sf-cookie": "list",
    "sf-date": "item",
    "sf-etag": "item",
    "sf-expires": "item",
    "sf-if-match": "list",
    "sf-if-modified-since": "item",
    "sf-if-none-match": "list",
    "sf-if-unmodified-since": "item",
    "sf-link": "list",
    "sf-last-modified": "item",
    "sf-location": "item",
    "sf-referer": "item",
    "sf-set-cookie": "list",
    # Defined elsewhere (fields specified as structured in their own RFCs)
    "accept-ch": "list",
    "cache-status": "list",
    "cdn-cache-control": "dict",
    "cross-origin-embedder-policy": "item",
    "cross-origin-embedder-policy-report-only": "item",
    "cross-origin-opener-policy": "item",
    "cross-origin-opener-policy-report-only": "item",
    "origin-agent-cluster": "item",
    "priority": "dict",
    "proxy-status": "list",
}
from __future__ import absolute_import
from six.moves import range
import collections
"""
def py_zeros(dim, pytype):
assert len(dim) == 2
return [[pytype for y in range(dim[1])]
for x in range(dim[0])]
"""
try:
    from editdist import distance as strdist
except ImportError:
    # Fallback when the optional C extension is unavailable:
    # zero cost for identical labels, unit cost otherwise.
    def strdist(a, b):
        return 0 if a == b else 1
class EditItem(object):
    """One node of the edit script built by the tree-distance computation.

    An item is either a primitive operation (insert/delete/rename/match)
    or an OP_COMBINE node whose ``left``/``right`` children are further
    EditItems (or caller-supplied cost objects); ``cost`` accumulates the
    children's costs.
    """
    __slots__ = ['operation', 'left', 'right', 'cost']
    # Operation codes.
    OP_INSERT = 1
    OP_DELETE = 2
    OP_RENAME = 3
    OP_MATCH = 4
    OP_COMBINE = 5
    def __init__(self):
        # operation == 0 marks an unused/reset cell in the DP matrices.
        self.cost = 0
        self.operation = 0
        self.left = None
        self.right = None
        pass
    def reset(self):
        """Return the item to its pristine (unused) state for matrix reuse."""
        self.operation = 0
        self.cost = 0
        self.left = None
        self.right = None
    def setOperation(self, operation, left, right):
        """Set this item's operation and children, re-deriving cost.

        Only children that are EditItems contribute their .cost; other
        (caller-supplied) child objects are kept but not summed here.
        """
        self.operation = operation
        self.left = left
        self.right = right
        self.cost = 0
        if left is not None and isinstance(left, EditItem):
            self.cost += left.cost
        if right is not None and isinstance(right, EditItem):
            self.cost += right.cost
    def clone(self):
        """Deep-copy this item iteratively (the trees can be deep).

        OP_COMBINE children are copied recursively via an explicit stack;
        primitive EditItems and non-EditItem children are shared, and
        children still in the reset state (operation == 0) are skipped.
        """
        cloneOut = EditItem()
        returnValue = cloneOut
        cloneOut.operation = self.operation
        cloneOut.cost = self.cost
        # Stack entries: (side flag: 1 = left / 0 = right, clone parent,
        # original parent).
        stack = []
        if self.left is not None:
            stack.append((1, cloneOut, self))
        if self.right is not None:
            stack.append((0, cloneOut, self))
        while len(stack) > 0:
            side, node, node1 = stack.pop()
            xx = node1.left if side else node1.right
            if xx is not None:
                if isinstance(xx, EditItem):
                    if xx.operation == 0:
                        continue
                    if xx.operation == EditItem.OP_COMBINE:
                        newNode = EditItem()
                        newNode.operation = xx.operation
                        newNode.cost = xx.cost
                        if xx.left is not None:
                            stack.append((1, newNode, xx))
                        if xx.right is not None:
                            stack.append((0, newNode, xx))
                    else:
                        newNode = xx
                else:
                    newNode = xx
                if side:
                    node.left = newNode
                else:
                    node.right = newNode
        return returnValue
    def toList(self):
        """Flatten the OP_COMBINE tree into a list of primitive operations."""
        list = []
        self._toList(list)
        return list
    def _toList(self, list):
        # Primitive operations are appended; OP_COMBINE nodes recurse
        # left-then-right so the list preserves edit order.
        if self.operation != EditItem.OP_COMBINE:
            list.append(self)
            return
        if self.left is not None and isinstance(self.left, EditItem):
            self.left._toList(list)
        if self.right is not None and isinstance(self.right, EditItem):
            self.right._toList(list)
    def toString(self):
        """Debug rendering of this item.

        NOTE(review): this reads ``self.left.index`` / ``self.right.index``,
        but EditItem declares no 'index' slot — presumably the children here
        are caller-supplied cost objects that carry .index; calling this on
        EditItem children would raise AttributeError.  Confirm before use.
        """
        left = ""
        right = ""
        if self.left:
            left = "left= {:>3}".format(self.left.index)
        if self.right:
            right = " right= {:>3}".format(self.right.index)
        txt = "OP={0} {1:>9}{2}".format(str(self.operation), left, right)
        return txt
class AnnotatedTree(object):
    """Tree annotated with the data the Zhang-Shasha algorithm needs.

    A single traversal produces:

    * ``nodes``    -- a post-order enumeration of the nodes,
    * ``ids``      -- the matching pre-order ids,
    * ``lmds``     -- the left-most descendant (post-order index) per node,
    * ``keyroots`` -- sorted post-order indices k such that no k' > k has
      lmd(k') == lmd(k) (see the Zhang-Shasha paper for details).
    """

    def __init__(self, root, get_children):
        """:param root: root node of the tree.
        :param get_children: function mapping a node to its child list.
        """
        self.get_children = get_children
        self.root = root
        self.nodes = list()  # a post-order enumeration of the nodes in the tree
        self.ids = list()  # a matching list of pre-order ids
        self.lmds = list()  # left-most descendants (post-order indices)
        self.keyroots = None
        # First pass: pre-order walk, assigning each node an id and
        # recording, for each node, the ids of its ancestors (nearest
        # ancestor at the left of the deque).
        stack = list()
        pstack = list()
        stack.append((root, collections.deque()))
        j = 0
        while len(stack) > 0:
            n, anc = stack.pop()
            nid = j
            for c in self.get_children(n):
                a = collections.deque(anc)
                a.appendleft(nid)
                stack.append((c, a))
            pstack.append(((n, nid), anc))
            j += 1
        # Second pass: popping pstack yields the nodes in post-order.
        lmds = dict()  # pre-order id -> post-order index of its left-most leaf
        keyroots = dict()  # lmd -> largest post-order index sharing that lmd
        i = 0
        while len(pstack) > 0:
            (n, nid), anc = pstack.pop()
            self.nodes.append(n)
            self.ids.append(nid)
            if not self.get_children(n):
                # A leaf is its own left-most descendant; propagate it to
                # every ancestor that has not yet recorded one (stop at the
                # first ancestor that already has — its lmd lies further left).
                lmd = i
                for a in anc:
                    if a not in lmds:
                        lmds[a] = i
                    else:
                        break
            else:
                # An internal node is visited after all of its descendants,
                # so its lmd was recorded by its left-most leaf.  (The
                # original code dropped into pdb on a miss; a missing entry
                # is an invariant violation, so let the KeyError propagate
                # instead of hanging in an interactive debugger.)
                lmd = lmds[nid]
            self.lmds.append(lmd)
            keyroots[lmd] = i
            i += 1
        self.keyroots = sorted(keyroots.values())
def distance(A, B, get_children, insert_cost, remove_cost, update_cost):
    '''Computes the exact tree edit distance between trees A and B with a
    richer API than :py:func:`zss.simple_distance`.
    Use this function if either of these things are true:
    * The cost to insert a node is **not** equivalent to the cost of changing
      an empty node to have the new node's label
    * The cost to remove a node is **not** equivalent to the cost of changing
      it to a node with an empty label
    Otherwise, use :py:func:`zss.simple_distance`.
    :param A: The root of a tree.
    :param B: The root of a tree.
    :param get_children:
        A function ``get_children(node) == [node children]``. Defaults to
        :py:func:`zss.Node.get_children`.
    :param insert_cost:
        A function ``insert_cost(node) == cost to insert node >= 0``.
    :param remove_cost:
        A function ``remove_cost(node) == cost to remove node >= 0``.
    :param update_cost:
        A function ``update_cost(a, b) == cost to change a into b >= 0``.
    :return: An integer distance [0, inf+)
    '''
    # Zhang-Shasha dynamic program over the two post-order-annotated trees.
    A, B = AnnotatedTree(A, get_children), AnnotatedTree(B, get_children)
    # treedists[i][j]: edit script for the subtrees rooted at post-order
    # indices i (in A) and j (in B); filled in increasing keyroot order.
    # treedists = zeros((len(A.nodes), len(B.nodes)), int)
    treedists = [[None for x in range(len(B.nodes))] for y in range(len(A.nodes))]
    # fd: reusable forest-distance matrix of EditItems (reset per keyroot pair).
    fd = [[EditItem() for x in range(len(B.nodes)+2)] for y in range(len(A.nodes)+2)]
    # Pre-computed per-node insert/remove cost objects.
    A_remove = [remove_cost(A.nodes[i]) for i in range(len(A.nodes))]
    B_insert = [insert_cost(B.nodes[i]) for i in range(len(B.nodes))]
    def treedist(i, j):
        # Forest distance for the keyroot pair (i, j); see the recurrences
        # quoted inline below (from the Zhang-Shasha paper).
        Al = A.lmds
        Bl = B.lmds
        An = A.nodes
        Bn = B.nodes
        m = i - Al[i] + 2
        n = j - Bl[j] + 2
        for x in range(1, m):
            fd1 = fd[x]
            for y in range(1, n):
                fd1[y].reset()
        ioff = Al[i] - 1
        joff = Bl[j] - 1
        for x in range(1, m):  # δ(l(i1)..i, θ) = δ(l(1i)..1-1, θ) + γ(v → λ)
            fd[x][0].setOperation(EditItem.OP_COMBINE, fd[x-1][0], A_remove[x+ioff])
        fd1 = fd[0]
        for y in range(1, n):  # δ(θ, l(j1)..j) = δ(θ, l(j1)..j-1) + γ(λ → w)
            fd1[y].setOperation(EditItem.OP_COMBINE, fd1[y-1], B_insert[y+joff])
        for x in range(1, m):  # the plus one is for the xrange impl
            fd1 = fd[x]
            fdm1 = fd[x-1]
            x_ioff = x + ioff
            t = Al[i] == Al[x_ioff]
            remove = A_remove[x_ioff]
            treedists_1 = treedists[x_ioff]
            left = An[x_ioff]
            p = Al[x_ioff]-1-ioff
            fd_p = fd[p]
            y_joff = joff
            for y in range(1, n):
                y_joff += 1
                # only need to check if x is an ancestor of i
                # and y is an ancestor of j
                if t and Bl[j] == Bl[y_joff]:
                    # +-
                    # | δ(l(i1)..i-1, l(j1)..j) + γ(v → λ)
                    # δ(F1 , F2 ) = min-+ δ(l(i1)..i , l(j1)..j-1) + γ(λ → w)
                    # | δ(l(i1)..i-1, l(j1)..j-1) + γ(v → w)
                    # +-
                    insert = B_insert[y_joff]
                    update = update_cost(left, Bn[y_joff])
                    op1Cost = fdm1[y].cost + remove.cost
                    op2Cost = fd1[y-1].cost + insert.cost
                    op3Cost = fdm1[y-1].cost + update.cost
                    # Pick the cheapest of remove / insert / update.
                    if op1Cost < op2Cost:
                        if op1Cost < op3Cost:
                            fd1[y].setOperation(EditItem.OP_COMBINE, fdm1[y], remove)
                        elif op2Cost < op3Cost:
                            fd1[y].setOperation(EditItem.OP_COMBINE, fd1[y-1], insert)
                        else:
                            fd1[y].setOperation(EditItem.OP_COMBINE, fdm1[y-1], update)
                    else:
                        if op2Cost < op3Cost:
                            fd1[y].setOperation(EditItem.OP_COMBINE, fd1[y-1], insert)
                        else:
                            fd1[y].setOperation(EditItem.OP_COMBINE, fdm1[y-1], update)
                    # fd[x][y] = min(
                    #     fd[x-1][y] + remove_cost(An[x+ioff]),
                    #     fd[x][y-1] + insert_cost(Bn[y+joff]),
                    #     fd[x-1][y-1] + update_cost(An[x+ioff], Bn[y+joff]),
                    # )
                    # Both prefixes are whole subtrees here, so the forest
                    # distance is also the subtree distance: memoize it.
                    treedists_1[y_joff] = fd1[y].clone()
                else:
                    # +-
                    # | δ(l(i1)..i-1, l(j1)..j) + γ(v → λ)
                    # δ(F1 , F2 ) = min-+ δ(l(i1)..i , l(j1)..j-1) + γ(λ → w)
                    # | δ(l(i1)..l(i)-1, l(j1)..l(j)-1)
                    # | + treedist(i1,j1)
                    # +-
                    q = Bl[y_joff]-1-joff
                    insert = B_insert[y_joff]
                    op1Cost = fdm1[y].cost + remove.cost
                    op2Cost = fd1[y-1].cost + insert.cost
                    op3Cost = fd_p[q].cost + treedists_1[y_joff].cost
                    if op1Cost < op2Cost:
                        if op1Cost < op3Cost:
                            fd1[y].setOperation(EditItem.OP_COMBINE, fdm1[y], remove)
                        elif op2Cost < op3Cost:
                            fd1[y].setOperation(EditItem.OP_COMBINE, fd1[y-1], insert)
                        else:
                            fd1[y].setOperation(EditItem.OP_COMBINE, fd_p[q], treedists_1[y_joff])
                    else:
                        if op2Cost < op3Cost:
                            fd1[y].setOperation(EditItem.OP_COMBINE, fd1[y-1], insert)
                        else:
                            fd1[y].setOperation(EditItem.OP_COMBINE, fd_p[q], treedists_1[y_joff])
                    # fd[x][y] = min(
                    #     fd[x-1][y] + remove_cost(An[x+ioff]),
                    #     fd[x][y-1] + insert_cost(Bn[y+joff]),
                    #     fd[p][q] + treedists[x+ioff][y+joff]
                    # )
    # Evaluate every keyroot pair in increasing order; the final entry is
    # the distance between the two whole trees (their roots).
    for i in A.keyroots:
        for j in B.keyroots:
            treedist(i, j)
    return treedists[-1][-1]
import configparser
import functools
import logging
import os
import re
import shutil
import tarfile
import time
from datetime import datetime, timedelta
import click
import requests
from peewee import IntegrityError
from rfcpy.helpers.config import Config
from rfcpy.models import Data, DataIndex, create_tables, db
logging.basicConfig(level=logging.INFO)
def timer(function):
    """Decorator that reports a function's wall-clock runtime in seconds.

    Args:
        function: the callable to be timed.

    Returns:
        A wrapper that echoes the elapsed seconds and returns the wrapped
        function's result unchanged.
    """

    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        started = time.time()
        outcome = function(*args, **kwargs)
        elapsed = time.time() - started
        click.echo(f"Download completed in {elapsed:.2f} seconds")
        return outcome

    return wrapper
def get_categories(text):
    """Search the head of an RFC text file for its IETF category.

    :arg text: full text of an RFC .txt file.
    :return: the first matching category (title-cased), or "Uncategorised"
        when the document does not declare one.
    """
    # Categories always appear near the top; title-case both sides so the
    # comparison is effectively case-insensitive.
    header = text[:500].title()
    known_categories = (
        "Standards Track",
        "Informational",
        "Experimental",
        "Historic",
        "Best Current Practice",
        "Proposed Standard",
        "Internet Standard",
    )
    for category in known_categories:
        found = re.search(category.title(), header)
        if found:
            return found.group(0)
    return "Uncategorised"
def get_title_list():
    """Parse all current RFC titles from the rfc-index.txt file so each
    title can be written to the database easily.

    :return: list of RFC title strings (number plus first sentence).
    """
    list_of_titles = []
    with open(os.path.join(Config.STORAGE_PATH, "rfc-index.txt"), "r") as f:
        contents = f.read().strip()
    # Each index entry starts (at a line beginning, re.M) with the RFC
    # number and runs up to the first period.  The original pattern used
    # the character class "[\d{1,4}]", which matches a single character
    # from the set {digit, '{', '1', ',', '4', '}'} instead of 1-4 digits,
    # and left the trailing '.' unescaped.
    search_regex = r"^(\d{1,4})([^.]*)\."
    for title in re.finditer(search_regex, contents, re.M):
        list_of_titles.append(title[0])
    return list_of_titles
def map_title_from_list(number, title_list):
    """Look up the index entry that mentions an RFC number.

    Used during the iterative inserts in fn:write_to_db - the first
    entry of title_list containing the number is written to the db.

    :arg number: string containing RFC number
    :arg title_list: list of all rfc titles from fn:get_title_list
    :returns: string containing the title of the RFC, or None when no
        entry mentions the number.
    """
    return next((entry for entry in title_list if number in entry), None)
def strip_extensions():
    """Filter the RFC download directory down to plain-text RFC files.

    Any file whose name contains one of the known unwanted substrings
    (index files, PDFs, PostScript, etc.) is dropped.

    :return: generator of file names with unwanted items removed.
    """
    # NOTE(review): this keeps breaking as the IETF adds file types;
    # matching positives (".txt" only) would be more robust than
    # filtering out known negatives.
    _, _, files = next(os.walk(Config.STORAGE_PATH))
    unwanted = (
        "a.txt",
        "rfc-index.txt",
        ".pdf",
        ".ps",
        ".ta",
        ".html",
        ".json",
    )

    def is_clean(name):
        return not any(marker in name for marker in unwanted)

    return (name for name in files if is_clean(name))
def remove_rfc_files():
    """Remove the downloaded and unzipped RFC files and folders after they
    have been written to the database."""
    # Deletes the whole storage directory tree; it is recreated by the
    # next download/uncompress cycle.
    shutil.rmtree(Config.STORAGE_PATH)
def sanitize_inputs(inputs):
    """Restrict a search string to the characters a-zA-Z0-9, which are
    considered safe for searching the database.

    :arg inputs: user provided search string to be sanitized.
    :return: the string with every non-approved character replaced by a
        single space.
    """
    return re.sub("[^a-zA-Z0-9]", " ", inputs)
def create_config(testing=False):
    """Create the basic config file.

    options: 1. Database Name
             2. Last Update

    :arg testing: when True, write the file into the tests folder
        instead of the regular config location.
    """
    if not os.path.exists(Config.ROOT_FOLDER):
        os.mkdir(Config.ROOT_FOLDER)
    config = configparser.ConfigParser()
    config.add_section("Settings")
    config.set("Settings", "Database Name", f"{Config.DATABASE_PATH}")
    timestamp = datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S.%f")
    config.set("Settings", "Last Update", f"{timestamp}")
    if testing is True:
        target = os.path.join(Config.TESTS_FOLDER, "rfc.cfg")
    else:
        target = os.path.join(Config.ROOT_FOLDER, Config.CONFIG_FILE)
    with open(target, "w") as config_file:
        config.write(config_file)
def read_config(testing=False):
    """Check if config file exists, if not create it and prompt user to
    download the database.

    :arg testing: when True, additionally read the tests-folder config
        and skip the first-run checks.
    :return config: ConfigParser populated from the config file."""
    config = configparser.ConfigParser()
    config.read(Config.CONFIG_FILE)
    if testing is True:
        config.read(os.path.join(Config.TESTS_FOLDER, "rfc.cfg"))
        return config
    if not os.path.exists(Config.CONFIG_FILE):
        create_config()
        # BUGFIX: re-read after creating, otherwise the empty parser
        # from the earlier read() (which silently ignores a missing
        # file) is returned and callers such as read_last_conf_update()
        # would raise NoSectionError on first run.
        config.read(Config.CONFIG_FILE)
    if not os.path.exists(Config.DATABASE_PATH):
        first_run_update()
    return config
def read_last_conf_update(testing=False):
    """Reads the 'Last Update' value in the config file.

    :return: the stored timestamp string.
    """
    return read_config(testing).get("Settings", "Last Update")
def update_config(testing=False):
    """Updates the Last Update value once a new database has been initialised
    after initial install or weekly update.

    :arg testing: when True, write to the tests-folder config instead.
    """
    config = read_config(testing)
    # BUGFIX: format explicitly. str(datetime) / f"{datetime.utcnow()}"
    # drops the ".%f" microseconds part when microsecond happens to be 0,
    # which would later make check_last_update()'s
    # strptime("%Y-%m-%d %H:%M:%S.%f") raise ValueError. This also
    # matches the format used by create_config().
    now = datetime.strftime(datetime.utcnow(), "%Y-%m-%d %H:%M:%S.%f")
    config.set("Settings", "Last Update", f"{now}")
    if testing is True:
        with open(os.path.join(Config.TESTS_FOLDER, "rfc.cfg"), "w") as config_file:
            config.write(config_file)
        return
    with open(Config.CONFIG_FILE, "w") as config_file:
        config.write(config_file)
def check_last_update():
    """Uses timedelta to see if one week has elapsed since the last update;
    if so, prompt the user to retrieve the new list.
    """
    last_checked = datetime.strptime(
        read_last_conf_update(), "%Y-%m-%d %H:%M:%S.%f"
    )
    due_date = last_checked + timedelta(weeks=1)
    if datetime.utcnow() > due_date:
        ask_user_to_update()
def ask_user_to_update():
    """If an update is available, give the user the option to download the
    new files and rebuild the database."""
    print("[!] RFC's are updated weekly [!]")
    print("[!] Do you wish to check for updates?")
    answer = input("rfc.py ~# [Y/n] ")
    # Pressing Enter (empty input) counts as "yes".
    if answer in ("y", "Y", ""):
        print("updating...")
        download_rfc_tar()
        uncompress_tar()
        write_to_db()
        update_config()
def first_run_update():
    """Checks if the database and/or config file exists and will ask the user
    to update based on which is missing.

    Triggered on one condition: no config file found. If the cfg file is
    missing but the database is found, the user has the option to update
    the database or not.
    """
    try:
        # BUGFIX: use Config.DATABASE_PATH, the attribute every other
        # function in this module uses (see read_config/create_config);
        # Config.DATABASE raised AttributeError instead of performing
        # the intended missing-database check.
        if not os.path.exists(Config.DATABASE_PATH):
            print("[!] Database Not Found! [!]")
            print("The database will now be setup...")
            download_rfc_tar()
            uncompress_tar()
            write_to_db()
            update_config()
    except OSError:
        raise
@timer
def download_rfc_tar():
    """
    Download all RFC's from the IETF in a tar.gz for offline sorting.

    Download progress is tracked via click.progressbar; the @timer
    decorator echoes the total elapsed time afterwards.
    """
    r = requests.get(Config.URL, stream=True)
    # NOTE(review): assumes the server always sends a Content-Length
    # header -- a chunked response would raise KeyError here; confirm
    # against the IETF endpoint.
    dl_length = r.headers["Content-Length"]
    if r.status_code == 200:
        with open(
            os.path.join(Config.ROOT_FOLDER, Config.FILENAME), "wb"
        ) as f, click.progressbar(length=int(dl_length)) as bar:
            # Decompress on the fly so written byte counts line up with
            # the progress bar length.
            r.raw.decode_content = True
            for chunk in r.iter_content(1024):
                f.write(chunk)
                bar.update(len(chunk))
    # NOTE(review): printed even on a non-200 response, where nothing
    # was written to disk.
    print("..\n[*] Download complete [*]")
def uncompress_tar():
    """Uncompress the downloaded tarball into the folder and then delete it."""
    # Clear any leftovers from a previous run first.
    if os.path.exists(Config.STORAGE_PATH):
        remove_rfc_files()
    file_location = os.path.join(Config.ROOT_FOLDER, Config.FILENAME)
    print("..uncompressing tar.gz...")
    # NOTE(review): extractall() trusts the archive; a malicious tarball
    # containing "../" members could write outside STORAGE_PATH. Only
    # acceptable because the archive comes from the IETF.
    with tarfile.open(os.path.join(Config.ROOT_FOLDER, Config.FILENAME)) as f:
        f.extractall(Config.STORAGE_PATH)
    os.remove(file_location)
    print("..Done!")
def write_to_db():
    """Write the contents of files to the sqlite database.

    The function runs each time the database is updated. It relies on the
    RFC number as the Primary Key to issue a Unique Key Constraint which
    prohibits duplicate RFC's being written to the DB.

    Writes the following to models.Data (and its Virtual Table; DataIndex)

    :arg number: RFC number taken from filename <rfc1918.txt>
    :arg title: RFC Title taken from rfc-index.txt and mapped against number
    :arg text: body of the document parsed for reading in terminal
    :arg category: category type taken from document
    :arg bookmark: boolean, if bookmarked returns 1 (True), default=0

    Removes the folder containing all text files post write.
    """
    create_tables()
    print("..Beginning database writes..")
    title_list = get_title_list()
    for file in strip_extensions():
        with open(os.path.join(Config.STORAGE_PATH, file), errors="ignore") as f:
            f = f.read().strip()
        try:
            # str.strip() removes a *set of characters* from both ends,
            # not a substring; it happens to work for names like
            # "rfc1918.txt" because the digits are not in either set.
            number = file.strip(".txt").strip("rfc")
            title = map_title_from_list(number, title_list)
            body = f
            category = get_categories(f)
            bookmark = False
            with db.atomic():
                Data.create(
                    number=number,
                    title=title,
                    text=body,
                    category=category,
                    bookmark=bookmark,
                )
                DataIndex.create(
                    rowid=number, title=title, text=body, category=category
                )
        except IntegrityError as e:
            # Duplicate RFC number -- already in the database, skip it.
            logging.debug(f"Integrity Error: {e} Raised at {number}")
        except (AttributeError, ValueError) as e:
            # BUGFIX: "except AttributeError or ValueError" evaluated the
            # expression "AttributeError or ValueError" to AttributeError
            # alone, so ValueError was silently never caught.
            logging.debug(f"{e}: hit at RFC {file}")
    else:
        # for/else: runs once the loop completes without break.
        remove_rfc_files()
        print("Successfully finished importing all files to database.")
        print("Now removing unnecessary files from disk....")
        print("...Done!")
# RFC BibTex
A command line tool that creates `BibTex` entries for IETF `RFC`s and `Internet Drafts`.
It can read the list of `RFC`s and `Internet Drafts` to parse from various sources:
* directly from `.tex` files
* directly from `.aux` files
* from a text file (one ID per line)
* from command-line arguments
Duplicate entries are filtered out.
# Installation/Requirements
You can use `pip` to install this command-line tool:
pip install rfcbibtex
or `pipenv`:
pipenv install rfcbibtex
Alternatively, you can clone this repository or download the `rfc-bibtex.py`. This tool has no
external dependencies, so as long as you have `Python 3.x` installed, everything
should work fine.
**Requirements**:
* `Python 3.x`
* Internet connection
## Testing
First, install the test dependencies:
`pipenv install --dev`
or
`pip install -r dev-requirements.txt`
and then run:
`python -m unittest discover tests`
from the project root.
# Usage
This tool automates the requests to the `https://datatracker.ietf.org/doc/<id>/<version>/bibtex/` and `https://datatracker.ietf.org/doc/<id>/bibtex/` endpoints.
```
usage: rfcbibtex [-h] [-f FILE_NAME] [-o FILE_NAME]
[inline_args [inline_args ...]]
Generate BibTex entries for IETF RFCs and Internet Drafts. The list of IDs can
be read from a file (including .tex and .aux) or directly from command-line
arguments.
positional arguments:
inline_args list of RFC and/or Internet Draft IDs, in any order.
optional arguments:
-h, --help show this help message and exit
-f FILE_NAME, --file FILE_NAME
read list of RFC and/or Internet Draft IDs from a
file. Supported file formats are the following: .tex,
.aux and .txt (one ID per line). If a file with any
other extension is provided, the tool attempts to read
it as a .txt file, containing one ID per line.
-o FILE_NAME, --output FILE_NAME
output the resulting BibTex to a file
```
## Identifier Format
The identifier format of RFCs is `rfc<rfc_num>`, where `<rfc_num>` is the RFC number.
For example, for the [RFC specifying TLS 1.2](https://tools.ietf.org/html/rfc5246) you
would write `rfc5246` (**NOTE**: the identifiers are case-insensitive, so `RFC5246` and `rFc5246` are also accepted).
The input format of Internet-Drafts(I-Ds) is `draft-<rest>`, where `<rest>` is the rest of
the draft's name ([all Internet-Drafts begin with "draft"](https://www.ietf.org/id-info/guidelines.html#naming)). For example, for the
[TLS 1.3 Draft 21]() you would write `draft-ietf-tls-tls13-21`.
## Read Inputs From Command Line
Example command:
`rfcbibtex RFC5246 draft-ietf-tls-tls13-21`
**Output**:
@misc{rfc5246,
series = {Request for Comments},
number = 5246,
howpublished = {RFC 5246},
publisher = {RFC Editor},
doi = {10.17487/RFC5246},
url = {https://rfc-editor.org/rfc/rfc5246.txt},
author = {Eric Rescorla and Tim Dierks},
title = {{The Transport Layer Security (TLS) Protocol Version 1.2}},
pagetotal = 104,
year = 2008,
month = aug,
abstract = {This document specifies Version 1.2 of the Transport Layer Security (TLS) protocol. The TLS protocol provides communications security over the Internet. The protocol allows client/server applications to communicate in a way that is designed to prevent eavesdropping, tampering, or message forgery. {[}STANDARDS-TRACK{]}},
}
@techreport{draft-ietf-tls-tls13-21,
number = {draft-ietf-tls-tls13-21},
type = {Internet-Draft},
institution = {Internet Engineering Task Force},
publisher = {Internet Engineering Task Force},
note = {Work in Progress},
url = {https://datatracker.ietf.org/doc/html/draft-ietf-tls-tls13-21},
author = {Eric Rescorla},
title = {{The Transport Layer Security (TLS) Protocol Version 1.3}},
pagetotal = 143,
year = ,
month = ,
day = ,
abstract = {This document specifies version 1.3 of the Transport Layer Security (TLS) protocol. TLS allows client/server applications to communicate over the Internet in a way that is designed to prevent eavesdropping, tampering, and message forgery.},
}
## Read Identifiers From A file
Option: `-f <file_name_1> ... <file_name_N>`
Alternatively, identifiers can be specified in a file. The following input formats are accepted:
* `.tex`: if you specify an `.tex` file, the program will search for \cite{} commands in the file and
extract those that appear to contain RFCs or Internet Drafts identifiers.
* `.aux`: if you specify an `.aux` file, the program will search for \citation{} commands in the file
and extract those that appear to contain RFCs or Internet Drafts identifiers.
* `.txt`: the program defaults to this file type if the file's extension is neither `.tex`, nor `.aux`.
This file type must contain a list of RFCs or Internet Drafts identifiers, one per line.
Please note that the identifiers must be in the format specified in the [Identifier Format](#identifier-format) section.
### Reading Identifiers From A .tex File
<a id="tex-file"></a>Consider that you have a file called `rfcs_and_ids.tex` with the following content:
\documentclass{article}
\usepackage[utf8]{inputenc}
\title{This Is A Simple Tex File For The RFCBibtex Project Demo}
\author{Illya Gerasymchuk}
\date{March 2019}
\usepackage{natbib}
\usepackage{graphicx}
\begin{document}
\maketitle
\section{Introduction}
There is nothing special here, nothing fancy, just a document with a few citations, like
\cite{RFC5246} this one. This one here \cite{the-documentary-2005} should not be parsed.
While this one \cite{draft-ietf-tls-tls13-21} should. And finally, let's cite the
TLS 1.3 RFC \cite{RFC8446}. Well, that's it folks. At least for now... This is a very basic
file, just to test if \textbf{the basic} latex parsing is working.
You can find the RFCBitex project here: https://github.com/iluxonchik/rfc-bibtex
\begin{figure}[h!]
\centering
\includegraphics[scale=1.7]{universe}
\caption{The Universe}
\label{fig:universe}
\end{figure}
\section{Conclusion}
As you can see, your .tex file may have various citations, but only the ones that are RFCs
and/or Internet Draft IDs are parsed.
\bibliographystyle{plain}
\bibliography{references}
\end{document}
If you run:
`rfcbibtex -f rfcs_and_ids.tex`
<a id="example-output"></a>You will get the following output:
@misc{rfc5246,
series = {Request for Comments},
number = 5246,
howpublished = {RFC 5246},
publisher = {RFC Editor},
doi = {10.17487/RFC5246},
url = {https://rfc-editor.org/rfc/rfc5246.txt},
author = {Eric Rescorla and Tim Dierks},
title = {{The Transport Layer Security (TLS) Protocol Version 1.2}},
pagetotal = 104,
year = 2008,
month = aug,
abstract = {This document specifies Version 1.2 of the Transport Layer Security (TLS) protocol. The TLS protocol provides communications security over the Internet. The protocol allows client/server applications to communicate in a way that is designed to prevent eavesdropping, tampering, or message forgery. {[}STANDARDS-TRACK{]}},
}
@techreport{draft-ietf-tls-tls13-21,
number = {draft-ietf-tls-tls13-21},
type = {Internet-Draft},
institution = {Internet Engineering Task Force},
publisher = {Internet Engineering Task Force},
note = {Work in Progress},
url = {https://datatracker.ietf.org/doc/html/draft-ietf-tls-tls13-21},
author = {Eric Rescorla},
title = {{The Transport Layer Security (TLS) Protocol Version 1.3}},
pagetotal = 143,
year = ,
month = ,
day = ,
abstract = {This document specifies version 1.3 of the Transport Layer Security (TLS) protocol. TLS allows client/server applications to communicate over the Internet in a way that is designed to prevent eavesdropping, tampering, and message forgery.},
}
@misc{rfc8446,
series = {Request for Comments},
number = 8446,
howpublished = {RFC 8446},
publisher = {RFC Editor},
doi = {10.17487/RFC8446},
url = {https://rfc-editor.org/rfc/rfc8446.txt},
author = {Eric Rescorla},
title = {{The Transport Layer Security (TLS) Protocol Version 1.3}},
pagetotal = 160,
year = 2018,
month = aug,
abstract = {This document specifies version 1.3 of the Transport Layer Security (TLS) protocol. TLS allows client/server applications to communicate over the Internet in a way that is designed to prevent eavesdropping, tampering, and message forgery. This document updates RFCs 5705 and 6066, and obsoletes RFCs 5077, 5246, and 6961. This document also specifies new requirements for TLS 1.2 implementations.},
}
### Reading Identifiers From a .aux File
Consider that you have a file called `rfcs_and_ids.aux` with the following content:
\relax
\citation{RFC5246}
\citation{the-documentary-2005}
\citation{draft-ietf-tls-tls13-21}
\citation{RFC8446}
\bibstyle{plain}
\bibdata{references}
\@writefile{toc}{\contentsline {section}{\numberline {1}Introduction}{1}}
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces The Universe}}{1}}
\newlabel{fig:universe}{{1}{1}}
\@writefile{toc}{\contentsline {section}{\numberline {2}Conclusion}{1}}
If you run:
`rfcbibtex -f rfcs_and_ids.aux`
You will get the [same output as in the previous section](#example-output).
### Reading Identifiers From a .txt File
Consider that you have a file called `rfcs_and_ids.txt` with the following content:
RFC5246
the-documentary-2005
draft-ietf-tls-tls13-21
RFC8446
If you run:
`rfcbibtex -f rfcs_and_ids.aux`
You will get the [same output as in the previous section](#example-output).
### Combining Multiple Files
You can also combine multiple files with different types. You can even combine files and command line arguments.
Let's assume you have a file called `rfcs.txt` with the following content:
RFC5246
rFc7231
We will also use the [rfcs_and_ids.tex from a previous example](#tex-file). If you run:
`rfcbibtex rfc1234 -f rfcs.txt rfcs_and_ids.tex`
<a id="mixed-files-output"></a>You will get the following output:
@misc{rfc7231,
series = {Request for Comments},
number = 7231,
howpublished = {RFC 7231},
publisher = {RFC Editor},
doi = {10.17487/RFC7231},
url = {https://rfc-editor.org/rfc/rfc7231.txt},
author = {Roy T. Fielding and Julian Reschke},
title = {{Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content}},
pagetotal = 101,
year = 2014,
month = jun,
abstract = {The Hypertext Transfer Protocol (HTTP) is a stateless \textbackslash{}\%application- level protocol for distributed, collaborative, hypertext information systems. This document defines the semantics of HTTP/1.1 messages, as expressed by request methods, request header fields, response status codes, and response header fields, along with the payload of messages (metadata and body content) and mechanisms for content negotiation.},
}
@techreport{draft-ietf-tls-tls13-21,
number = {draft-ietf-tls-tls13-21},
type = {Internet-Draft},
institution = {Internet Engineering Task Force},
publisher = {Internet Engineering Task Force},
note = {Work in Progress},
url = {https://datatracker.ietf.org/doc/html/draft-ietf-tls-tls13-21},
author = {Eric Rescorla},
title = {{The Transport Layer Security (TLS) Protocol Version 1.3}},
pagetotal = 143,
year = ,
month = ,
day = ,
abstract = {This document specifies version 1.3 of the Transport Layer Security (TLS) protocol. TLS allows client/server applications to communicate over the Internet in a way that is designed to prevent eavesdropping, tampering, and message forgery.},
}
@misc{rfc1234,
series = {Request for Comments},
number = 1234,
howpublished = {RFC 1234},
publisher = {RFC Editor},
doi = {10.17487/RFC1234},
url = {https://rfc-editor.org/rfc/rfc1234.txt},
author = {Don Provan},
title = {{Tunneling IPX traffic through IP networks}},
pagetotal = 6,
year = 1991,
month = jun,
abstract = {This memo describes a method of encapsulating IPX datagrams within UDP packets so that IPX traffic can travel across an IP internet. {[}STANDARDS-TRACK{]} This memo defines objects for managing DS1 Interface objects for use with the SNMP protocol. {[}STANDARDS-TRACK{]}},
}
@misc{rfc5246,
series = {Request for Comments},
number = 5246,
howpublished = {RFC 5246},
publisher = {RFC Editor},
doi = {10.17487/RFC5246},
url = {https://rfc-editor.org/rfc/rfc5246.txt},
author = {Eric Rescorla and Tim Dierks},
title = {{The Transport Layer Security (TLS) Protocol Version 1.2}},
pagetotal = 104,
year = 2008,
month = aug,
abstract = {This document specifies Version 1.2 of the Transport Layer Security (TLS) protocol. The TLS protocol provides communications security over the Internet. The protocol allows client/server applications to communicate in a way that is designed to prevent eavesdropping, tampering, or message forgery. {[}STANDARDS-TRACK{]}},
}
@misc{rfc8446,
series = {Request for Comments},
number = 8446,
howpublished = {RFC 8446},
publisher = {RFC Editor},
doi = {10.17487/RFC8446},
url = {https://rfc-editor.org/rfc/rfc8446.txt},
author = {Eric Rescorla},
title = {{The Transport Layer Security (TLS) Protocol Version 1.3}},
pagetotal = 160,
year = 2018,
month = aug,
abstract = {This document specifies version 1.3 of the Transport Layer Security (TLS) protocol. TLS allows client/server applications to communicate over the Internet in a way that is designed to prevent eavesdropping, tampering, and message forgery. This document updates RFCs 5705 and 6066, and obsoletes RFCs 5077, 5246, and 6961. This document also specifies new requirements for TLS 1.2 implementations.},
}
Note that **duplicate** entries have been removed.
### Output Contents To A File
Option: `-o <file_name>`
Considering `rfcs.txt` and `rfcs_and_ids.tex` from the examples above.
If you run:
`rfcbibtex rfc1234 -f rfcs.txt rfcs_and_ids.tex -o output.bib`
A file `output.bib` would be created **or overridden** with the [the same content as in the above output](#mixed-files-output).
## Error Handling and Warning
The tool will print a warning in the following cases:
* no explicit version defined for a draft id
* drafts which have a new draft version update
* drafts which have been assigned an `RFC` number
* invalid identifier name provided as a **command-line argument** (invalid identifier names from files are simply not parsed)
* errors in fetching from URLs
It's important to note, that such errors **DO NOT break the correct functionality of the tool**. Those errors and warnings are printed out,
but **IGNORED**. The generated BibTex files are valid, even when errors are found. Errors and warnings are only printed on the console
(into the standard error output stream) and **never to the output files** (`-o` option).
Here is an example of an output of errors and warnings:
<img src="https://i.imgur.com/1YDLsBN.png" alt="RFCBibTex Errors and Warnings Example" width="50%"> | /rfcbibtex-0.3.2.tar.gz/rfcbibtex-0.3.2/README.md | 0.426322 | 0.826817 | README.md | pypi |
# Python RFCC - Data understanding, clustering and outlier detection for regression and classification tasks
Random forests are invariant and robust estimators that can fit complex interactions between input data of different types and binary, categorical, or continuous outcome variables, including those with multiple dimensions. In addition to these desirable properties, random forests impose a structure on the observations from which researchers and data analysts can infer clusters or groups of interest.
You can use these clusters to:
- structure your data,
- elucidate new patterns of how features influence outcomes,
- define subgroups for further analysis,
- derive prototypical observations,
- identify outlier observations,
- catch mislabeled data,
- evaluate the performance of the estimation model in more detail.
Random Forest Consensus Clustering is implemented in the Scikit-Learn / SciPy data science ecosystem. This algorithm differs from prior approaches by making use of the entire tree structure. Observations become proximate if they follow similar decision paths across trees of a random forest.
More info in here:
```
Marquart, Ingo and Koca Marquart, Ebru, RFCC: Random Forest Consensus Clustering for Regression and Classification (March 19, 2021). Available at SSRN: https://ssrn.com/abstract=3807828 or http://dx.doi.org/10.2139/ssrn.3807828
```
# Installation
Install via pip!
```python
pip install rfcc
```
# Usage
Let's illustrate the approach with a simple example. We will be regressing the miles-per-gallon in the city (__cty__) performance of a set of
cars on the class (compact, pick-up etc.), the number of cylinders and the engine displacement.
The data is available in the pydataset package
```python
dataset=data("mpg")
y_col=["cty"]
x_col=['displ', 'class' , 'cyl']
Y=dataset[y_col]
X=dataset[x_col]
print(X.head(5))
```
```python
displ class cyl
1 1.8 compact 4
2 1.8 compact 4
3 2.0 compact 4
4 2.0 compact 4
5 2.8 compact 6
```
We want __class__ and __cyl__ to be treated as categorical variables, so we'll keep track of these columns.
## Initialization and model choice
The first step is to initialize the model, much like one would initialize an scikit-learn model.
The main class is __cluster_model__ from the rfcc package.
We only need to pass an appropriate ensemble model (RandomForestClassifier, RandomForestRegressor) and specify the options we'd like to use.
Since miles-per-gallon is a continuous measure, we'll be using a random forest regression.
```python
from sklearn.ensemble import RandomForestRegressor
from rfcc.model import cluster_model
model=cluster_model(model=RandomForestRegressor,max_clusters=20,random_state=1)
```
We have two options to specify the size and number of clusters to be returned.
The parameter __max_clusters__ sets the maximum amount of leafs in each decision tree. It ensures that the model does not return too many or too few clusters, but it does change the estimation of the random forest.
Another option is to set __max_clusters__ to a high value, or leave it unspecified, and use the hierarchical clustering algorithm to extract clusters of the desired size. See below for __t_param__ in the fit method.
## Fitting and optional parameters
Now we need to fit our model to the data.
```python
model.fit(X,Y)
```
The following optional parameters can be passed
- **encode** (list): A list of columns that we'd like to encode before fitting the model. Note that all non-numerical columns will be encoded automatically. However, you can also encode numerical data by passing it in the __encode__ parameter.
- **encode_y** (bool): You can choose to ordinally encode the outcome variables. If you do a classification, scikit learn will choose how to encode the outcome variables. If the variable is continuous, this will usually lead to a rather bad fit, in which case you may want to encode.
- **linkage_method** (str): Linkage method used in the clustering algorithm (average, single, complete, ward)
- **clustering_type** (str): "rfcc" (default) our path based clustering, or "binary" as in prior approaches
- **t_param** (float): If None, number of clusters corresponds to average number of leafs. If __t_param__ is specified,
pick that level of the clustering hierarchy where the distance between members of the group is less than __t_param__. The higher the value, the larger the average size of a cluster.
Let's check how well our model does on our training set
```python
model.score(X,Y)
```
```python
0.9231321010907177
```
## Cluster compositions
Once the model is fit, we can extract the composition of clusters.
Let's see which car types and cylinders have the best and worst miles-per-gallon performance.
First, we use the cluster_descriptions method to return the compositions for each cluster.
```python
clusters=model.cluster_descriptions(variables_to_consider=['class','manufacturer'], continuous_measures="mean")
```
The optional parameters are:
- **variables_to_consider** (list): List of columns in X to take into account.
- **continuous_measures** (str, list): Measures to compute for each continuous feature (mean, std, median, max, min, skew)
We will sort our clusters by the average mpg and return the clusters with the two highest and two lowest mpg performances.
```python
clusters=clusters.sort_values(by="cty-mean")
print(clusters.head(2))
print(clusters.tail(2))
```
```python
Nr_Obs cty-mean class manufacturer
7 11.85 suv: 1.0% ford: 0.29%, land rover: 0.57%, mercury: 0.14%
49 12.02 pickup: 0.35%, suv: 0.63% chevrolet: 0.18%, dodge: 0.43%, ford: 0.12%, jeep: 0.1%, lincoln: 0.06%, mercury: 0.02%, nissan: 0.02%, toyota: 0.06%
```
```python
Nr_Obs cty-mean class manufacturer
15 24.4 compact: 0.33%, midsize: 0.13%, subcompact: 0.53% honda: 0.53%, toyota: 0.33%, volkswagen: 0.13%
3 32.3 compact: 0.33%, subcompact: 0.67% volkswagen: 1.0%
```
## Decision Path Analysis
Cluster descriptions return the proportions of values for any feature we are interested in. However, we also may want to know how a decision tree classifies an observation. For example, it may be that the feature __manufacturer__ has
no predictive value, whereas the number of cylinders or the displacement does.
Another reason to do a decision path analysis is to check whether the model splits on the features you expect.
Currently, path analyses are queried for each estimator in the random forest.
In the future patch, the path analysis will be available for the entire random forest.
Let's see how the first decision tree (index 0) classifies the observations with the lowest miles-per-gallon performance
```python
paths=model.path_analysis(estimator_id=0)
paths.sort_values(by="Output_cty")
print(paths.head(5))
```
```python
Nr_Obs Output_cty class displ manufacturer
17 [11.4] class is not: 2seater, compact displ between 5.25 and 4.4 manufacturer: audi, chevrolet, dodge
21 [12.4] class: suv displ larger than: 4.4 manufacturer is not: audi, chevrolet, dodge
5 [12.6] class: midsize, minivan, pickup displ larger than: 4.4 manufacturer is not: audi, chevrolet, dodge
13 [12.6] class is not: 2seater, compact displ larger than: 5.25 manufacturer: audi, chevrolet, dodge
5 [13.4] class: minivan displ between 3.75 and 3.15 -
22 [14.1] - displ between 4.4 and 3.85 -
```
## Detection of outliers and mislabelled data
Outliers are observations that are unusual - not necessarily because their features differ, but rather because their implications for the outcome variable are different from other comparable observations. Mislabelled data may appear as
outlier, since the relationships between outcome and feature values may not make much sense.
Since outliers follow distinct decision paths in the random forest, RFCC does not cluster them with other observations.
We can therefore find outliers by analyzing clusters that have very few observations.
Let's see what outliers exist in the mpg data.
```python
clusters=model.cluster_descriptions(continuous_measures="mean")
clusters=clusters.sort_values(by="Nr_Obs")
outliers=clusters.head(2)
print(outliers)
```
```python
Cluster_ID Nr_Obs cty-mean class cyl manufacturer displ-mean
16 1 16.0 minivan: 1.0% 6: 1.0% dodge: 1.0% 4.0
3 2 18.0 midsize: 1.0% 6: 1.0% hyundai: 1.0% 2.5
```
It seems we have one cluster (id=16) with a dodge minivan, and a cluster (id=3) with two observations.
We can get the constituent observations directly from our model.
```python
ids=model.get_observations(cluster_id=16)
print(dataset.iloc[ids,:])
ids=model.get_observations(cluster_id=3)
print(dataset.iloc[ids,:])
```
```python
manufacturer model displ year cyl trans
48 dodge caravan 2wd 4.0 2008 6 auto(l6)
manufacturer model displ year cyl trans
113 hyundai sonata 2.5 1999 6 auto(l4)
114 hyundai sonata 2.5 1999 6 manual(m5)
```
| /rfcc-1.0.1.tar.gz/rfcc-1.0.1/README.md | 0.644784 | 0.989419 | README.md | pypi |
# rfcontrolpy
rfcontrolpy is a Python library and port of the node.js [rfcontrolpy](https://github.com/rrooggiieerr/rfcontrolpy)
module for parsing and constructing 433mhz On-Off Keying (OOK) signals for various devices,
switches and weather stations.
It works together with the [RFControl](https://github.com/rrooggiieerr/RFControl) Arduino library
for receiving the signals.
The Python port now contains a working controller and a dozen protocols. Not all protocols are
ported yet due to low demand or lack of hardware.
You can find a list of all supported protocols [here](protocols.md).
The Processing Pipeline
-----------------------
### 1. Receiving
The arduino is connected via serial bus to the processing computer (for example a raspberry pi)
and waits for rf signal.
> Almost all 433 MHz OOK signals from devices are sent multiple times directly in a row and have a
> longer footer pulse in between. They differ by the pulse lengths used to encode the data and footer
> and the pulse count.
[RFControl](https://github.com/rrooggiieerr/RFControl) running on the Arduino detects the start of a
signal by its longer footer pulse and verifies it one time by comparing it with the next signal.
It is unaware of the specific protocol, it just uses the stated fact above. Also we are
not interested in it if the pulse was a high or low pulse (presence or absence of a carrier wave),
because the information is decoded in the pulse lengths.
We will call the received sequence of pulse lengths now **timings sequence**. For example a timing
sequence in microseconds could look like this:
```
288 972 292 968 292 972 292 968 292 972 920 344 288 976 920 348
284 976 288 976 284 976 288 976 288 976 916 348 284 980 916 348
284 976 920 348 284 976 920 348 284 980 280 980 284 980 916 348
284 9808
```
You can clearly see the two different pulse lengths (around 304 and 959 microseconds) for the data
encoding and the longer footer pulse (9808 microseconds).
All observed protocols have fewer than 8 different pulse lengths and all pulse lengths differ by at
least a factor of 2. This makes a further compression and simplification possible: We map each
pulse length to a number from 0 to 7 (a bucket) and calculate for a better accuracy the average of
all timings mapped to each of the bucket. The result is something like that:
```
buckets: 304, 959, 9808
pulses: 01010101011001100101010101100110011001100101011002
```
To make the representation unique, we choose the buckets in ascending order (respectively we are
sorting it after receiving from the Arduino).
We call the sorted buckets **pulse lengths**, the compressed timings **pulse sequence** and the
length of the pulse sequence (inclusive footer) **pulse count**.
### 2. Protocol Matching
We detect possible protocols by two criteria. The pulse length must match with a small tolerance
and the pulse count must match.
### 3. Protocol Parsing
If a protocol matches, its `parse` function is called with the pulse sequence. Most protocols are
parsed almost the same way. First the pulse sequence must be converted to a binary representation.
In almost all cases there exist a mapping from pulse sequences to a binary `0` and `1`. In this
example the pulse sequence `0110` represents a binary `0` and `0101` maps to a binary `1`:
```Python
pulses2binary_mapping = [
    ["0110", "0"],  # Binary 0
    ["0101", "1"],  # Binary 1
    ["02", ""],  # Footer
]
binary = helpers.pulses2binary(pulses, pulses2binary_mapping)
```
The binary representation now looks like this:
```
110011000010
```
As last step the protocol dependent information must be extracted from the binary representation:
```Python
decoded = {
"id": int(binary[:6], 2),
"unit": int(binary[6:11], 2),
"state": binary[12] == "1"
}
```
Details
--------
RFControl is more sensitive than needed for most protocols.
So we get sometimes, depending of the accuracy of the sender/remote, different bucket counts.
This is by design, to catch up further protocols that maybe need a higher sensitivity. The specific
protocol has not to deal with this issue, because `rfcontrolpy` auto merges similar buckets before
calling the `decodePulses` function of each protocol.
The algorithm is the following:
1. Record the (maybe to many) buckets and compressed pulses with [RFControl](https://github.com/pimatic/RFControl) (Arduino / c++)
2. Sort the buckets in `rfcontrolpy` `prepare_compressed_pulses`
3. Try to find a matching protocol in rfcontrolpy `decode_pulses`
4. If we have more than 3 buckets and two of the buckets are similar (`b1*2 < b2`) we merge them to just one bucket by averaging and adapting the pulses in rfcontrolpy `fix_pulses`
5. Go to step 3
Adding a new Protocol
--------------------
## Preparation
1. Fork the rfcontrolpy repository and clone your fork into a local directory.
2. We are using [unittest](https://docs.python.org/3/library/unittest.html) for automating tests.
3. You should be able to run the tests with `python3 -m unittest discover`.
4. Running `python3 -m build` lets it compile all files and watch for changes.
## Protocol development
1. Create a new protocol file (like the others) in `rfcontrol/protocols/`.
2. Add a test case in `tests/protocols` with the data from the Arduino.
3. Adapt the protocol file so that the tests pass.
| /rfcontrolpy-0.0.5.tar.gz/rfcontrolpy-0.0.5/README.md | 0.413596 | 0.964921 | README.md | pypi |
import glob
import logging
from os.path import basename, dirname, isfile, join
import rfcontrol.protocols
from rfcontrol.protocols import *
logger = logging.getLogger(__name__)
# Auto-discover the protocol implementations: collect every non-private
# module in rfcontrol/protocols/ (imported above via the wildcard import).
protocols = [
    getattr(rfcontrol.protocols, basename(f)[:-3])
    for f in glob.glob(join(dirname(__file__), "protocols/*.py"))
    if isfile(f) and not basename(f).startswith("_")
]
def does_protocol_match(pulse_lengths, pulse_sequence, protocol) -> bool:
    """
    Test if a protocol matches the pulse lengths and pulse sequence.

    The pulse count and bucket count must match exactly; every measured
    pulse length must lie within 40 % of the protocol's nominal length.
    """
    if len(pulse_sequence) != protocol.pulse_count:
        return False
    if len(pulse_lengths) != len(protocol.pulse_lengths):
        return False
    return all(
        abs(nominal - measured) <= measured * 0.4
        for measured, nominal in zip(pulse_lengths, protocol.pulse_lengths)
    )
def sort_indices(array: list) -> list:
    """
    Return the indices that would sort *array* ascending by value.

    The sort is stable: equal values keep their original relative order.
    (The previous ``array: []`` annotation was a meaningless list literal.)
    """
    sorted_indices = sorted(range(len(array)), key=lambda i: array[i])
    # logger.debug("sorted indices: %s", sorted_indices)
    return sorted_indices
def compress_timings(timings: list):
    """
    Compress raw timings into buckets and a pulse sequence.

    Each timing is assigned to the first bucket within 50 % of the bucket's
    seed value; otherwise a new bucket is created.  Bucket values are then
    replaced by the average of all timings assigned to them.

    Returns a (buckets, pulses) tuple where *pulses* is a string of bucket
    indices, one digit per timing.
    """
    pulses = ""
    buckets = []
    sums = []
    counts = []
    for timing in timings:
        # Search for a matching bucket; stop at the first match so a timing
        # close to two buckets is only recorded once (previously the loop
        # kept going and appended one digit per matching bucket).
        for j, bucket in enumerate(buckets):
            if abs(bucket - timing) < bucket * 0.5:
                pulses += str(j)
                sums[j] += timing
                counts[j] += 1
                break
        else:
            # No bucket matched: create a new one seeded with this timing.
            pulses += str(len(buckets))
            buckets.append(timing)
            sums.append(timing)
            counts.append(1)
    # Refine every bucket to the average of its member timings.
    for j in range(len(buckets)):
        buckets[j] = round(sums[j] / counts[j])
    return (buckets, pulses)
def prepare_compressed_pulses(input: str):
    """
    Parse one raw line received from the Arduino.

    The line looks like:
    ``268 2632 1282 10168 0 0 0 0 010002000202000002000200...``
    i.e. 8 pulse lengths followed by the pulse sequence.  Zero-length
    (unused) buckets are discarded, then the buckets are sorted ascending
    and the sequence digits remapped accordingly.
    """
    parts = input.split(" ")
    raw_lengths = (int(field) for field in parts[:8])
    # Drop the unused zero-length buckets.
    pulse_lengths = [length for length in raw_lengths if length > 0]
    pulse_sequence = parts[8]
    return sort_compressed_pulses(pulse_lengths, pulse_sequence)
def sort_compressed_pulses(pulse_lengths: list, pulse_sequence: str):
    """
    Sort the pulse lengths ascending (in place) and remap the digits of the
    pulse sequence so they keep pointing at the same bucket.
    """
    order = sorted(range(len(pulse_lengths)), key=lambda i: pulse_lengths[i])
    # old bucket index (as a digit) -> new index after sorting
    remap = {str(old): str(new) for new, old in enumerate(order)}
    pulse_lengths.sort()
    pulse_sequence = "".join(remap[digit] for digit in pulse_sequence)
    # logger.debug("reindexed pulse sequence: %s", pulse_sequence)
    return (pulse_lengths, pulse_sequence)
def fix_pulses(pulse_lengths: list, pulse_sequence: str):
    """
    Merge pulse timings with similar length.

    Two adjacent (sorted) pulse lengths are considered the same if they
    differ by less than a factor of 2.  At most one pair is merged per call;
    the caller retries decoding and calls again if needed.

    Returns a (new_pulse_lengths, new_pulse_sequence) tuple, or None when
    there was nothing to merge.
    """
    # If we have 3 or fewer different pulse lengths there is nothing to fix.
    if len(pulse_lengths) <= 3:
        return None
    # Find the first adjacent pair that differs by less than a factor of 2.
    i = 1
    while i < len(pulse_lengths):
        if pulse_lengths[i - 1] * 2 < pulse_lengths[i]:
            i += 1
            continue
        # Merge pulse_lengths[i-1] and pulse_lengths[i] into their average.
        new_pulse_length = int((pulse_lengths[i - 1] + pulse_lengths[i]) / 2)
        # Replace the old two pulse lengths with the new one.
        new_pulse_lengths = pulse_lengths[: i - 1]
        new_pulse_lengths.append(new_pulse_length)
        new_pulse_lengths.extend(pulse_lengths[i + 1 :])
        break
    # Nothing to do.  (Was `i is len(...)`: int identity comparison only
    # works by the CPython small-int cache accident; use equality.)
    if i == len(pulse_lengths):
        return None
    # Shift every bucket index above the merged pair down by one.
    new_pulse_sequence = pulse_sequence
    while i < len(pulse_lengths):
        new_pulse_sequence = new_pulse_sequence.replace(f"{i}", f"{i-1}")
        i += 1
    return (new_pulse_lengths, new_pulse_sequence)
def decode_pulses(pulse_lengths: list, pulse_sequence: str):
    """
    Decode a pulse sequence against all known protocols.

    Zero-length buckets are dropped and the remaining buckets sorted before
    matching; returns a list of {"protocol", "values"} dicts.
    """
    # Filter out 0 length pulses.
    non_zero_lengths = [length for length in pulse_lengths if length > 0]
    # logger.debug("Non 0 pulse lengths: %s", non_zero_lengths)
    non_zero_lengths, pulse_sequence = sort_compressed_pulses(
        non_zero_lengths, pulse_sequence
    )
    return _decode_pulses(non_zero_lengths, pulse_sequence)
def _decode_pulses(pulse_lengths: list, pulse_sequence: str):
    """
    Try every discovered protocol against the (sorted) pulse data, then
    recurse with merged buckets until fix_pulses() finds nothing to merge.

    Returns a list of {"protocol": name, "values": decoded} dicts; the list
    may contain results from several bucket-merge levels.
    """
    results = []
    for protocol in protocols:
        if does_protocol_match(pulse_lengths, pulse_sequence, protocol):
            decoded = protocol.decode(pulse_sequence)
            # decode() returns None when the sequence is not valid for
            # this protocol despite matching its length signature.
            if decoded is not None:
                logger.debug("Protocol %s matches", protocol.name)
                logger.debug("Decoded reception: %s", decoded)
                results.append({"protocol": protocol.name, "values": decoded})
    # Try to fix pulses.
    fixed_pulses = fix_pulses(pulse_lengths, pulse_sequence)
    if fixed_pulses is not None:
        # We have fixes so try again with the fixed pulse lengths...
        results.extend(_decode_pulses(fixed_pulses[0], fixed_pulses[1]))
    return results
def encode_pulses(protocol_name: str, message: dict):
    """
    Encode protocol to pulse sequence.

    *message* is unpacked as keyword arguments into the protocol's
    ``encode()`` function (the old ``message: str`` annotation was wrong).

    Returns a dict with the encoded "pulses" string and the protocol's
    "pulse_lengths".  Raises when the protocol is unknown or cannot encode.
    """
    protocol = get_protocol(protocol_name)
    if protocol is None:
        raise Exception(f"Could not find a protocol named {protocol_name}")
    if protocol.encode is None:
        raise Exception("The protocol has no send report")
    return {
        "pulses": protocol.encode(**message),
        "pulse_lengths": protocol.pulse_lengths,
    }
def get_all_protocols() -> list:
    """
    Get all implemented protocols.

    Returns the list of protocol modules discovered at import time.
    """
    return protocols
def get_protocol(protocol_name: str):
    """
    Get the protocol module with the given name, or None when unknown.
    """
    # NOTE(review): this returns a module from rfcontrol.protocols (or
    # None), not a str; the previous `-> str | None` annotation was
    # misleading and has been removed.
    return getattr(rfcontrol.protocols, protocol_name, None)
import logging
from rfcontrol.helpers import binary2pulses, pulses2binary
from rfcontrol.protocols import RFControlProtocolTypes
logger = logging.getLogger(__name__)
# Mapping for decoding: pulse-index pairs -> binary digit
# (consumed by rfcontrol.helpers.pulses2binary).
pulses2binary_mapping = [
    ["00", "0"],  # binary 0
    ["01", "1"],  # binary 1
    ["02", ""],  # footer
]
# Mapping for encoding (inverse of the above, minus the footer).
binary2pulses_mapping = {
    "0": "00",
    "1": "01",
}
# Protocol metadata used by the controller for matching.
name = "switch10"
type = RFControlProtocolTypes.SWITCH  # shadows builtin `type`; protocol API convention
brands = ["Easy Home Advanced"]
pulse_lengths = [271, 1254, 10092]  # microseconds: short, long, footer
pulse_count = 116
def decode(pulses):
    """Decode a switch10 pulse sequence into id/unit/all/state, or None."""
    # Pulses is something like:
    # 01010000000101010100000100010101010000000101010100000101000100000101000101010001000100010001010001000101000000010102
    # We first map the sequences to binary.
    binary = pulses2binary(pulses, pulses2binary_mapping)
    logger.debug(binary)
    # Binary is now something like:
    # 11000111100 1011110001111001101001101110101010110101100011
    if binary is None:
        return None
    # Now we extract the data from that string.
    # | 11000111100 10111100011110011010011011101010 1011 01 01 100011
    # | ?           | systemcode                     | Group | State | group2 | Unit
    groupcode1 = int(binary[43:47], 2)
    groupcode2 = int(binary[49:51], 2)
    # Group codes 11/1 mean "single unit"; 12/3 mean "all units".
    group_res = True
    if groupcode1 == 11 and groupcode2 == 1:
        group_res = False
    elif groupcode1 == 12 and groupcode2 == 3:
        # Redundant with the default, kept to mirror encode()'s two cases.
        group_res = True
    decoded = {
        "id": int(binary[11:43], 2),
        "unit": int(binary[51:57], 2),
        "all": group_res,
        "state": binary[47] == "1",
    }
    logger.debug(decoded)
    return decoded
def encode(id: int, unit: int, state: bool, all: bool = False):
    """
    Encode a switch10 command into a pulse sequence.

    ``id`` and ``all`` shadow builtins; the parameter names are part of the
    protocol API (encode_pulses() unpacks the message dict by keyword).
    """
    # Group codes: 12/3 address all units, 11/1 address a single unit
    # (mirrors the constants checked in decode()).
    if all:
        groupcode1 = binary2pulses(f"{12:04b}", binary2pulses_mapping)
        groupcode2 = binary2pulses(f"{3:02b}", binary2pulses_mapping)
    else:
        groupcode1 = binary2pulses(f"{11:04b}", binary2pulses_mapping)
        groupcode2 = binary2pulses(f"{1:02b}", binary2pulses_mapping)
    # Fixed preamble (the unknown "?" bits observed in receptions).
    encoded = "0101000000010101010000"
    encoded += binary2pulses(f"{id:032b}", binary2pulses_mapping)
    encoded += groupcode1
    if state:
        encoded += binary2pulses("10", binary2pulses_mapping)
    else:
        encoded += binary2pulses("01", binary2pulses_mapping)
    encoded += groupcode2
    encoded += binary2pulses(f"{unit:06b}", binary2pulses_mapping)
    # Footer pulse.
    encoded += "02"
    logger.debug(encoded)
    return encoded
import logging
from rfcontrol.helpers import binary2pulses, pulses2binary
from rfcontrol.protocols import RFControlProtocolTypes
logger = logging.getLogger(__name__)
# Mapping for decoding: pulse-index groups -> binary digit
# (consumed by rfcontrol.helpers.pulses2binary).
pulses2binary_mapping = [
    ["02", ""],  # Header
    ["0001", "0"],  # Binary 0
    ["0100", "1"],  # Binary 1
    ["0000", "N"],  # State = don't change
    ["03", ""],  # Footer
]
# Mapping for encoding (inverse of the above, minus header/footer).
binary2pulses_mapping = {
    "0": "0001",
    "1": "0100",
    "N": "0000",
}
# Protocol metadata used by the controller for matching.
name = "dimmer1"
type = RFControlProtocolTypes.DIMMER  # shadows builtin `type`; protocol API convention
brands = ["CoCo Technologies", "D-IO (Chacon)", "Intertechno", "KlikAanKlikUit", "Nexa"]
pulse_lengths = [260, 1300, 2700, 10400]  # microseconds; last entry is the footer
pulse_count = 148
def decode(pulses: str):
    """Decode a dimmer1 pulse sequence into id/unit/all/state/dimlevel."""
    # Pulses is something like:
    # 0200010001010000010001010000010001000101000100010001000100000101000100010000010001000100010001010001000001000100000001000100010001010001000100010003
    # We first map the sequences to binary.
    binary = pulses2binary(pulses, pulses2binary_mapping)
    # Binary is now something like:
    # 001000111101001000100110100100000001
    # Now we extract the data from that string.
    # | 00100100011111011100000110 | 0 | N | 0000 | 1111 |
    # | 00100011110100100010011010 | 0 | 1 | 0000 | 0001 |
    # | ID | All | State | unit | level |
    # "N" in the state bit means "don't change the on/off state"
    # (dim-only command) and is reported as state=None.
    state = None
    if binary[27] != "N":
        state = binary[27] == "1"
    decoded = {
        "id": int(binary[:26], 2),
        "unit": int(binary[28:32], 2),
        "all": binary[26] == "1",
        "state": state,
        "dimlevel": int(binary[32:36], 2),
    }
    logger.debug(decoded)
    return decoded
def encode(
    id: int, unit: int, state: bool = None, dimlevel: int = None, all: bool = False
):
    """
    Encode a dimmer1 command into a pulse sequence.

    ``state=None`` encodes the "don't change" tri-state ("N").
    """
    encoded = "02"  # header
    encoded += binary2pulses(f"{id:026b}", binary2pulses_mapping)
    encoded += binary2pulses_mapping["1"] if all else binary2pulses_mapping["0"]
    if state is None:
        encoded += binary2pulses_mapping["N"]
    else:
        encoded += binary2pulses_mapping["1"] if state else binary2pulses_mapping["0"]
    encoded += binary2pulses(f"{unit:04b}", binary2pulses_mapping)
    # NOTE(review): dimlevel defaults to None but is formatted
    # unconditionally; f"{None:04b}" raises TypeError -- presumably callers
    # always pass a dimlevel. Confirm and either require it or handle None.
    encoded += binary2pulses(f"{dimlevel:04b}", binary2pulses_mapping)
    encoded += "03"  # footer
    logger.debug(encoded)
    return encoded
import logging
from rfcontrol.helpers import binary2pulses, pulses2binary
from rfcontrol.protocols import RFControlProtocolTypes
logger = logging.getLogger(__name__)
# Mapping for decoding: pulse-index groups -> tri-state digit
# (consumed by rfcontrol.helpers.pulses2binary).
pulses2binary_mapping = [
    ["0101", "1"],  # Binary 1
    ["1010", "2"],  # Binary tri-state
    ["0110", "0"],  # Binary 0
    ["02", ""],  # Footer
]
# Mapping for encoding (inverse of the above, minus the footer).
binary2pulses_mapping = {"0": "0110", "1": "0101", "2": "1010"}
# Protocol metadata used by the controller for matching.
name = "switch8"
type = RFControlProtocolTypes.SWITCH  # shadows builtin `type`; protocol API convention
brands = ["Rev"]
pulse_lengths = [189, 547, 5720]  # microseconds: short, long, footer
pulse_count = 50
def binaryToChar(data):
    """
    Map a tri-state program code to its letter ('A', 'B', ...).

    The letter index is the number of digits after the last "2" in *data*;
    when no "2" is present it is len(data).
    """
    # str.rfind returns -1 when "2" is absent, which makes the tail length
    # equal len(data) -- exactly the behavior of scanning from the end.
    tail_length = len(data) - 1 - data.rfind("2")
    return chr(65 + tail_length)
def decode(pulses):
    """Decode a switch8 pulse sequence into id/unit/state, or None."""
    # Pulses is something like:
    # 01010101010101010110011001101010010101010101101002
    # We first map the sequences to binary.
    binary = pulses2binary(pulses, pulses2binary_mapping)
    # Binary is now something like:
    # 111100021112
    if binary is None:
        return None
    # Now we extract the data from that string.
    # | 11110 | 00211 | 1 | 2
    # | SystemCode | ProgramCode | inverse state | state
    # First we save the tri-state as a char.
    unit = binaryToChar(binary[5:10])
    state = True if binary[11] == "2" else False
    # For the rest we don't need the third state. Set all 2 to 0
    binary = binary.replace("2", "0")
    # Building the unit code to something like this 'E10'
    unit += str(int(binary[5:10], 2))
    decoded = {
        "id": int(binary[:5], 2),
        "unit": unit,
        "state": state,
    }
    logger.debug(decoded)
    return decoded
def encode(id: int, unit: int, state: bool, all: bool = False):
    """
    Encode a switch8 command into a pulse sequence.

    ``unit`` is a string like "E10": a letter (tri-state position) followed
    by the numeric program code -- the same format decode() produces.
    ``all`` is accepted for API symmetry but is unused by this protocol.
    """
    encoded = binary2pulses(f"{id:05b}", binary2pulses_mapping)
    # Split the unit into its letter (tri-state position) and number part.
    unit_char = ord(unit[0]) - 65
    unit = int(unit[1:])
    programcode1 = f"{unit:05b}"
    # Insert the tri-state "2" digit at the position encoded by the letter.
    programcode2 = programcode1[: 4 - unit_char] + "2" + programcode1[5 - unit_char :]
    encoded += binary2pulses(programcode2, binary2pulses_mapping)
    # State is sent as a complementary pair: (1,2) for on, (2,1) for off.
    encoded += binary2pulses_mapping["1"] if state else binary2pulses_mapping["2"]
    encoded += binary2pulses_mapping["2"] if state else binary2pulses_mapping["1"]
    # Footer pulse.
    encoded += "02"
    logger.debug(encoded)
    return encoded
from __future__ import annotations
import gi
gi.require_version("Gtk", "4.0")
gi.require_version("Soup", "2.4")
from gi.repository import Gio, GLib, Gtk, Soup
from typing import (
Callable,
Optional,
Final,
Any,
Dict,
Union,
)
import logging
import os
import platform
from threading import Thread
import time
from pathlib import PurePath
# Convenience kwargs making a Gtk widget expand and fill in both directions.
EXPAND_AND_FILL: Final[Dict[str, Any]] = dict(
    hexpand=True, vexpand=True, halign=Gtk.Align.FILL, valign=Gtk.Align.FILL
)
logger = logging.getLogger(__name__)
def get_border_width(width: int) -> Dict[str, int]:
    """Replicate Gtk3's border_width property as Gtk4 margin kwargs.

    Args:
        width (int): the border width

    Returns:
        Dict[str, int]: margin_start/end/top/bottom, all set to *width*
    """
    return {
        f"margin_{side}": width for side in ("start", "end", "top", "bottom")
    }
def add_action_entries(
    map: Gio.ActionMap,
    action: str,
    callback: Callable[[Gio.ActionMap, Gio.SimpleAction, GLib.Variant], None],
    param: Optional[str] = None,
    state: Optional[GLib.Variant] = None,
    callback_arg: Optional[Any] = None,
) -> None:
    """Create a Gio.SimpleAction, wire up its "activate" signal and add it
    to *map*.

    When *state* is given the action is stateful; *param* is a GVariant
    type string for the action's parameter; *callback_arg* is an optional
    extra argument forwarded to the activate handler.
    """
    parameter_type = GLib.VariantType.new(param) if param else None
    if state:
        simple_action = Gio.SimpleAction.new_stateful(
            action, parameter_type, state
        )
    else:
        simple_action = Gio.SimpleAction.new(action, parameter_type)
    if callback_arg:
        simple_action.connect("activate", callback, callback_arg)
    else:
        simple_action.connect("activate", callback)
    map.add_action(simple_action)
def get_file_creation_timestamp(file_path: Union[os.PathLike, str]):
    """Best-effort creation timestamp of *file_path*.

    Windows: st_ctime is the creation time; retried once after a one second
    pause because the file may not be visible yet, returning None if it
    still cannot be found.  macOS/BSD: st_birthtime.  Linux: falls back to
    st_mtime (no birth time in os.stat).
    See https://stackoverflow.com/a/39501288
    """
    if platform.system() != "Windows":
        stat_result = os.stat(file_path)
        # st_birthtime only exists on some platforms (e.g. macOS).
        return getattr(stat_result, "st_birthtime", stat_result.st_mtime)
    for attempt in (0, 1):
        try:
            return os.stat(file_path).st_ctime
        except FileNotFoundError:
            if attempt == 0:
                time.sleep(1)
    return None
class LongTaskWindow(Gtk.Window):
    """
    Undecorated modal window shown while a long-running task is in
    progress.  The main message is set via set_text(); a fixed
    "This may take a while..." hint is shown underneath.
    """

    def __init__(
        self, parent_window: Optional[Gtk.Window] = None, *args, **kwargs
    ):
        # Force modal, undecorated behavior regardless of caller kwargs.
        kwargs.update(
            dict(
                transient_for=parent_window,
                modal=True,
                default_width=250,
                default_height=100,
                destroy_with_parent=True,
                decorated=False,
            )
        )
        Gtk.Window.__init__(self, *args, **kwargs)
        main_grid = Gtk.Grid(
            column_spacing=10,
            row_spacing=10,
            **EXPAND_AND_FILL,
            **get_border_width(5),
        )
        # Main message label; populated later through set_text().
        self._label = Gtk.Label(wrap=True, **EXPAND_AND_FILL)
        main_grid.attach(self._label, 0, 0, 1, 1)
        label = Gtk.Label(
            label="This may take a while...",
        )
        main_grid.attach(label, 0, 1, 1, 1)
        self.set_child(main_grid)
        main_grid.show()

    def set_text(self, text: str):
        """Set the window's main message (Pango markup is allowed)."""
        self._label.set_markup(text)
class ExitableThread(Thread):
    """A Thread with a cooperative shutdown flag.

    The thread's run loop is expected to poll :py:attr:`should_exit` and
    stop once another thread has set it to True.
    """

    def __init__(self):
        super().__init__()
        # Flag polled by the run loop; set from another thread to request exit.
        self._should_exit: bool = False

    @property
    def should_exit(self) -> bool:
        """Whether this thread has been asked to stop."""
        return self._should_exit

    @should_exit.setter
    def should_exit(self, value: bool):
        self._should_exit = value
class Session(Soup.Session):
    """
    Soup.Session configured for use from worker threads
    (use_thread_context=True), with a workaround for TLS certificates when
    running under conda on Windows.
    """

    def __init__(self, **kwargs):
        super().__init__(use_thread_context=True, **kwargs)
        # Use conda OpenSSL certificates on Windows: the system CA store is
        # not usable there, so point libsoup at conda's cacert.pem instead.
        if os.name == "nt" and "CONDA_PREFIX" in os.environ:
            ca_file = PurePath(
                os.environ["CONDA_PREFIX"], "Library", "ssl", "cacert.pem"
            )
            try:
                db = Gio.TlsFileDatabase.new(str(ca_file))
            except GLib.Error as e:
                # Keep going with the default CA handling on failure.
                logger.warning(
                    f"Could not create TLS database for {str(ca_file)} -> {e.message}"
                )
            else:
                self.props.tls_database = db
                self.props.ssl_use_system_ca_file = False
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, GLib, Gio
from enum import auto, IntEnum, unique
import logging
from pathlib import PurePath
from typing import Final, Dict, Any, Optional
from abc import ABC, abstractmethod
logger = logging.getLogger(__name__)
@unique
class FileStatus(IntEnum):
    """Lifecycle states of a monitored file, in rough chronological order."""

    CREATED = auto()
    SAVED = auto()
    QUEUED = auto()
    RUNNING = auto()
    SUCCESS = auto()
    FAILURE = auto()
    REMOVED_FROM_LIST = auto()
    SKIPPED = auto()

    def __str__(self):
        # Human-readable form: "REMOVED_FROM_LIST" -> "Removed from list".
        return self.name.replace("_", " ").capitalize()
class File(ABC):
    """
    Abstract base class for a file tracked by the monitor.

    Each instance is linked to a row in the GUI's Gtk.TreeModel through
    ``row_reference``: the parent row represents the file as a whole, its
    child rows represent the individual operations run on the file.
    Progress and status updates are marshalled onto the GTK main loop with
    GLib.idle_add.
    """

    @abstractmethod
    def __init__(
        self,
        filename: str,
        relative_filename: PurePath,
        created: float,
        status: FileStatus,
    ):
        self._filename = filename
        self._relative_filename = relative_filename
        # Creation timestamp; immutable after construction.
        self._created: Final[float] = created
        self._status = status
        # Reference to this file's row in the GUI treemodel (set externally).
        self._row_reference: Gtk.TreeRowReference = None
        # Per-operation scratch space, keyed by operation index.
        self._operation_metadata: Final[Dict[int, Dict[str, Any]]] = dict()
        self._cancellable = Gio.Cancellable()
        self._saved: float = 0
        self._requeue: bool = False
        self._succeeded: float = 0

    @property
    def cancellable(self) -> Gio.Cancellable:
        """Cancellable used to abort in-flight operations on this file."""
        return self._cancellable

    @property
    def operation_metadata(self) -> Dict[int, Dict[str, Any]]:
        """Mutable metadata dicts, one per operation index."""
        return self._operation_metadata

    @property
    def filename(self) -> str:
        """Absolute filename of the monitored file."""
        return self._filename

    @property
    def relative_filename(self) -> PurePath:
        """Filename relative to the monitored directory."""
        return self._relative_filename

    @property
    def created(self) -> float:
        """Timestamp at which this File object was created."""
        return self._created

    @property
    def saved(self) -> float:
        """Timestamp of the last observed save event."""
        return self._saved

    @saved.setter
    def saved(self, value: int):
        self._saved = value

    @property
    def succeeded(self) -> float:
        """Timestamp at which all operations last succeeded."""
        return self._succeeded

    @succeeded.setter
    def succeeded(self, value: int):
        self._succeeded = value

    @property
    def status(self) -> FileStatus:
        """Current lifecycle status of the file."""
        return self._status

    @status.setter
    def status(self, value: FileStatus):
        self._status = value

    @property
    def requeue(self) -> bool:
        """Whether the file should be queued again after processing."""
        return self._requeue

    @requeue.setter
    def requeue(self, value: bool):
        self._requeue = value

    @property
    def row_reference(self) -> Gtk.TreeRowReference:
        """Reference to this file's parent row in the GUI treemodel."""
        return self._row_reference

    @row_reference.setter
    def row_reference(self, value: Gtk.TreeRowReference):
        self._row_reference = value

    def __str__(self):
        return f"{type(self).__name__}: {self._filename} -> {str(self._status)}"

    def _update_progressbar_worker_cb(self, index: int, value: float):
        # Runs on the GTK main loop (scheduled via GLib.idle_add).
        if not self.row_reference.valid():
            logger.warning(
                f"_update_progressbar_worker_cb: {self.filename} is invalid!"
            )
            return GLib.SOURCE_REMOVE
        model = self.row_reference.get_model()
        path = self.row_reference.get_path()
        parent_iter = model.get_iter(path)
        n_children = model.iter_n_children(parent_iter)
        # Parent row shows the average progress over all operations:
        # completed operations count as 100 each, plus this one's value.
        # Columns: 4 = numeric progress (0-100), 5 = display text.
        cumul_value = (index * 100.0 + value) / n_children
        model[parent_iter][4] = cumul_value
        model[parent_iter][5] = f"{cumul_value:.1f} %"
        child_iter = model.iter_nth_child(parent_iter, index)
        model[child_iter][4] = value
        model[child_iter][5] = f"{value:.1f} %"
        return GLib.SOURCE_REMOVE

    def _update_status_worker_cb(self, index: int, status: FileStatus, message):
        # Runs on the GTK main loop (scheduled via GLib.idle_add).
        if not self.row_reference.valid():
            logger.warning(
                f"_update_status_worker_cb: {self.filename} is invalid!"
            )
            return GLib.SOURCE_REMOVE
        model = self.row_reference.get_model()
        path = self.row_reference.get_path()
        iter = model.get_iter(path)
        if index == -1:  # parent
            self.status = status
            # NOTE(review): this get_iter() repeats the one above -- redundant.
            iter = model.get_iter(path)
        else:
            iter = model.iter_nth_child(iter, index)
        # Columns: 2 = status code, 4/5 = progress, 6 = color, 7 = tooltip text.
        model[iter][2] = int(status)
        # When the operation succeeds, ensure that the progressbars go
        # to 100 %, which is necessary when the operation doesn't
        # do any progress updates (which would be unfortunate!)
        if status == FileStatus.SUCCESS:
            model[iter][4] = 100.0
            model[iter][5] = "100.0 %"
        elif status == FileStatus.FAILURE:
            model[iter][6] = "red"
            model[iter][7] = GLib.markup_escape_text(message)
        elif status == FileStatus.SKIPPED:
            model[iter][6] = "grey"
            model[iter][7] = GLib.markup_escape_text(message)
        return GLib.SOURCE_REMOVE

    def update_status(
        self, index: int, status: FileStatus, message: Optional[str] = None
    ):
        """
        When an operation has finished, update the status of the corresponding
        entry in the treemodel.

        An index of -1 refers to the parent entry, 0 or higher refers to a child.
        """
        GLib.idle_add(self._update_status_worker_cb, index, status, message)

    def update_progressbar(self, index: int, value: float):
        """
        This method will update the progressbar of the current operation,
        defined by index, as well as the global one.

        value must be between 0 and 100.

        Try not to use this function too often, as it may slow the GUI
        down considerably. I recommend to use it only when value is a whole number
        """
        GLib.idle_add(self._update_progressbar_worker_cb, index, value)
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, Sequence, Dict, Optional, NamedTuple, Type
import importlib.resources
import yaml
from munch import Munch
from .operation import Operation
from .engine import Engine
class Preference(ABC):
    """
    Abstract base class for a single application preference: an immutable
    key, a default value, and an optional human-readable description.
    """

    @abstractmethod
    def __init__(self, key: str, default: Any, description: Optional[str]):
        self._key: str = key
        self._default: Any = default
        self._description = description

    @property
    def key(self) -> str:
        """Unique identifier of the preference (shown in the UI)."""
        return self._key

    @property
    def default(self) -> Any:
        """Value used when the user has not set this preference."""
        return self._default

    @property
    def description(self):
        """Optional help text shown alongside the preference, or None."""
        return self._description
class BooleanPreference(Preference):
    """A preference holding a True/False value; defaults to False."""

    def __init__(
        self, key: str, default: bool = False, description: Optional[str] = None
    ):
        super().__init__(key, default, description)
# Module-level sample preferences -- presumably used for manual testing of
# the preferences UI; confirm they are not referenced by production code.
TestBooleanPreference1 = BooleanPreference(
    key="Boolean Pref1",
    default=True,
    description="This is a description for Boolean Pref1",
)
TestBooleanPreference2 = BooleanPreference(key="Boolean Pref2", default=False)
TestBooleanPreference3 = BooleanPreference(
    key="Boolean Pref3", description="This is a description for Boolean Pref3"
)
class ListPreference(Preference):
    """
    A preference whose value is chosen from a fixed sequence of strings.

    When no default is given, the first entry of *values* is used.
    Raises ValueError if *default* is not one of *values*.
    """

    def __init__(
        self,
        key: str,
        values: Sequence[str],
        default: Optional[str] = None,
        description: Optional[str] = None,
    ):
        # Compare against None rather than truthiness: previously an empty
        # string default skipped validation and was silently replaced.
        if default is not None and default not in values:
            raise ValueError("default has to be within values array!")
        if default is None:
            default = values[0]
        super().__init__(key, default, description)
        self._values = values

    @property
    def values(self) -> Sequence[str]:
        """The allowed options, in display order."""
        return self._values
# Module-level sample preferences -- presumably used for manual testing of
# the preferences UI; confirm they are not referenced by production code.
TestListPreference1 = ListPreference(
    key="List Pref1",
    values=("Option1", "Option2", "Option3"),
    description="This is a description for List Pref1",
)
TestListPreference2 = ListPreference(
    key="List Pref2",
    values=("Option1", "Option2", "Option3"),
    default="Option3",
)
class DictPreference(Preference):
    """
    A preference whose value is chosen among the keys of a dict; the
    selected key maps to an arbitrary payload in :py:attr:`values`.

    When no default is given, the first key of *values* is used.
    Raises ValueError if *default* is not a key of *values*.
    """

    def __init__(
        self,
        key: str,
        values: Dict[str, Any],
        default: Optional[str] = None,
        description: Optional[str] = None,
    ):
        # Compare against None rather than truthiness: previously an empty
        # string default skipped validation and was silently replaced.
        if default is not None and default not in values:
            raise ValueError("default has to be within values dict!")
        if default is None:
            # Dicts preserve insertion order: pick the first key.
            default = list(values)[0]
        super().__init__(key, default, description)
        self._values = values

    @property
    def values(self) -> Dict[str, Any]:
        """Mapping of option name to its payload."""
        return self._values

    @classmethod
    def from_file(
        cls,
        key: str,
        yaml_file,
        default: Optional[str] = None,
        description: Optional[str] = None,
    ):
        """Alternate constructor: load the values dict from a YAML file."""
        with open(yaml_file, "r") as f:
            yaml_dict = yaml.safe_load(stream=f)
        return cls(key, yaml_dict, default, description)
# Module-level sample preferences -- presumably used for manual testing of
# the preferences UI; confirm they are not referenced by production code.
TestDictPreference1 = DictPreference(
    key="Dict Pref1",
    values=dict(
        option1="option1",
        option2=dict(option2="option2"),
        option3=list("option3"),
    ),
    default="option2",
    description="This is a description for Dict Pref1",
)
TestDictPreference2 = DictPreference(
    key="Dict Pref2",
    values=dict(
        option1="option1",
        option2=dict(option2="option2"),
        option3=list("option3"),
    ),
)
# Sample preference loaded from a YAML data file bundled with the package.
with importlib.resources.path(
    "rfi_file_monitor.data", "rfi-instruments.yaml"
) as f:
    TestDictPreference3 = DictPreference.from_file(
        key="Dict Pref3 From File",
        yaml_file=f,
        description="This is a description for Dict Pref3 From File",
    )
class StringPreference(Preference):
    """A preference holding a free-form string; defaults to the empty string."""

    def __init__(
        self, key: str, default: str = "", description: Optional[str] = None
    ):
        super().__init__(key, default, description)
# Sample string preferences -- presumably for UI testing; confirm unused.
TestStringPreference1 = StringPreference(
    key="String Pref1",
    description="This is a description for String Pref1",
)
TestStringPreference2 = StringPreference(
    key="String Pref2",
    default="String Pref2 default value",
)
# Real, application-wide preferences consumed by operations and engines.
AllowedFilePatternsPreference = StringPreference(
    key="Allowed File Patterns",
    description="These comma separated file patterns will be added automatically to the allowed patterns of all operations and engines that support them. Use cautiously!!",
)
IgnoredFilePatternsPreference = StringPreference(
    key="Ignored File Patterns",
    description="These comma separated file patterns will be added automatically to the ignored patterns of all operations and engines that support them.",
    default="*.swp,*.swx,*.DS_Store",
)
class Preferences(NamedTuple):
    """
    Snapshot of the application's preference state: the value of every
    setting, plus the enabled/disabled flag of each operation and engine.

    The subscripted Munch annotations are documentation-only; they are
    never evaluated thanks to ``from __future__ import annotations``.
    """

    settings: Munch[Preference, Any]
    operations: Munch[Type[Operation], bool]
    engines: Munch[Type[Engine], bool]
from __future__ import annotations
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, GLib
from ..engine import Engine, EngineThread
from ..files.regular_file import RegularFile
from ..file import FileStatus
from ..utils.decorators import exported_filetype, with_pango_docs
import logging
from tempfile import TemporaryDirectory
from pathlib import Path, PurePath
import os
from time import sleep, time
logger = logging.getLogger(__name__)
# Multipliers converting the user-selected unit to bytes (decimal units).
SIZE_UNITS = {
    "B": 1,
    "KB": 1000,
    "MB": 1000000,
    "GB": 1000000000,
}
@with_pango_docs(filename="temporary_file_engine.pango")
@exported_filetype(filetype=RegularFile)
class TemporaryFileEngine(Engine):
    """Engine that periodically generates random temporary files.

    Useful for exercising operations without a real file source. The UI
    exposes the generated filesize, the delay between creation events,
    the starting index and the filename prefix.
    """
    NAME = "Temporary File Generator"
    # NOTE(review): DEBUG is set but not referenced in this class —
    # presumably consumed by the Engine base class; confirm.
    DEBUG = 1
    def __init__(self, appwindow):
        # The engine is backed by FileGeneratorThread (defined below).
        super().__init__(appwindow, FileGeneratorThread, "")
        # Set filesize
        filesize_grid = Gtk.Grid(
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
            column_spacing=5,
        )
        self.attach(filesize_grid, 0, 0, 1, 1)
        label = Gtk.Label(
            label="Filesize: ",
            halign=Gtk.Align.START,
            valign=Gtk.Align.CENTER,
            hexpand=False,
            vexpand=False,
        )
        filesize_grid.attach(label, 1, 0, 1, 1)
        # Numeric part of the filesize; combined with the unit combobox below.
        filesize_number_spinbutton = self.register_widget(
            Gtk.SpinButton(
                adjustment=Gtk.Adjustment(
                    lower=1, upper=1024, value=10, page_size=0, step_increment=1
                ),
                value=10,
                update_policy=Gtk.SpinButtonUpdatePolicy.IF_VALID,
                numeric=True,
                climb_rate=5,
                halign=Gtk.Align.START,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            "filesize_number",
            desensitized=True,
        )
        filesize_grid.attach(filesize_number_spinbutton, 2, 0, 1, 1)
        # Unit selector (keys of SIZE_UNITS); first entry ("B") is the default.
        filesize_unit_combobox = Gtk.ComboBoxText(
            halign=Gtk.Align.START,
            valign=Gtk.Align.CENTER,
            hexpand=False,
            vexpand=False,
        )
        for unit in SIZE_UNITS:
            filesize_unit_combobox.append_text(unit)
        filesize_unit_combobox.set_active(0)
        self.register_widget(
            filesize_unit_combobox, "filesize_unit", desensitized=True
        )
        filesize_grid.attach(filesize_unit_combobox, 3, 0, 1, 1)
        # Add horizontal separator
        separator = Gtk.Separator(
            orientation=Gtk.Orientation.HORIZONTAL,
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
        )
        self.attach(separator, 0, 1, 3, 1)
        # set time between files being created
        time_grid = Gtk.Grid(
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
            column_spacing=5,
        )
        self.attach(time_grid, 0, 2, 1, 1)
        label = Gtk.Label(
            label="Time between file creation events: ",
            halign=Gtk.Align.START,
            valign=Gtk.Align.CENTER,
            hexpand=False,
            vexpand=False,
        )
        time_grid.attach(label, 1, 0, 1, 1)
        # Delay in seconds between two consecutive generated files (max 1 day).
        time_number_spinbutton = self.register_widget(
            Gtk.SpinButton(
                adjustment=Gtk.Adjustment(
                    lower=1,
                    upper=3600 * 24,
                    value=5,
                    page_size=0,
                    step_increment=1,
                ),
                value=5,
                update_policy=Gtk.SpinButtonUpdatePolicy.IF_VALID,
                numeric=True,
                climb_rate=5,
                halign=Gtk.Align.START,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            "creation_delay",
            desensitized=True,
        )
        time_grid.attach(time_number_spinbutton, 2, 0, 1, 1)
        # Add vertical separator
        separator = Gtk.Separator(
            orientation=Gtk.Orientation.VERTICAL,
            halign=Gtk.Align.CENTER,
            valign=Gtk.Align.FILL,
            hexpand=False,
            vexpand=True,
        )
        self.attach(separator, 1, 0, 1, 3)
        # start index
        start_index_grid = Gtk.Grid(
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
            column_spacing=5,
        )
        self.attach(start_index_grid, 2, 0, 1, 1)
        label = Gtk.Label(
            label="Start index: ",
            halign=Gtk.Align.START,
            valign=Gtk.Align.CENTER,
            hexpand=False,
            vexpand=False,
        )
        start_index_grid.attach(label, 0, 0, 1, 1)
        # First numeric suffix used when naming generated files.
        start_index_spinbutton = self.register_widget(
            Gtk.SpinButton(
                adjustment=Gtk.Adjustment(
                    lower=0, upper=10000, value=0, page_size=0, step_increment=1
                ),
                value=0,
                update_policy=Gtk.SpinButtonUpdatePolicy.IF_VALID,
                numeric=True,
                climb_rate=5,
                halign=Gtk.Align.START,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            "start_index",
            desensitized=False,
        )
        start_index_grid.attach(start_index_spinbutton, 1, 0, 1, 1)
        # prefix
        prefix_grid = Gtk.Grid(
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
            column_spacing=5,
        )
        self.attach(prefix_grid, 2, 2, 1, 1)
        label = Gtk.Label(
            label="File prefix: ",
            halign=Gtk.Align.START,
            valign=Gtk.Align.CENTER,
            hexpand=False,
            vexpand=False,
        )
        prefix_grid.attach(label, 0, 0, 1, 1)
        self._prefix_entry = self.register_widget(
            Gtk.Entry(
                text="test_",
                halign=Gtk.Align.START,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            "file_prefix",
            desensitized=False,
        )
        prefix_grid.attach(self._prefix_entry, 1, 0, 1, 1)
        # this starts out as valid
        self._valid = True
    def _file_prefix_entry_changed_cb(self, entry):
        """Keep engine validity in sync with the prefix entry: an empty
        prefix makes the engine invalid."""
        if self.params.file_prefix:
            self._valid = True
        else:
            self._valid = False
        self.notify("valid")
class FileGeneratorThread(EngineThread):
    """Engine thread that writes random temporary files at a fixed interval
    and feeds them into the application's queue manager.

    Filesize, filename prefix, start index and creation delay are taken
    from the engine's registered widget parameters.
    """

    SUFFIX = ".dat"

    def run(self):
        # Close the task window right away: this engine has no start-up work
        # to report on.
        GLib.idle_add(
            self._engine.kill_task_window,
            self._task_window,
            priority=GLib.PRIORITY_HIGH,
        )
        # sleep for 1 sec to not have the task window flash
        sleep(1)
        index = int(self.params.start_index)
        with TemporaryDirectory() as tempdir:
            while True:
                if self.should_exit:
                    self._engine.cleanup()
                    return
                basename = f"{self.params.file_prefix}{index}{self.SUFFIX}"
                path = Path(tempdir, basename)
                # Fill the file with random bytes of the configured size.
                path.write_bytes(
                    os.urandom(
                        int(
                            self.params.filesize_number
                            * SIZE_UNITS[self.params.filesize_unit]
                        )
                    )
                )
                index = index + 1
                # Only enqueue the file when both the engine and the queue
                # manager are running.
                if (
                    self._engine.props.running
                    and self._engine._appwindow._queue_manager.props.running
                ):
                    _file = RegularFile(
                        str(path),
                        PurePath(basename),
                        time(),
                        FileStatus.CREATED,
                    )
                    GLib.idle_add(
                        self._engine._appwindow._queue_manager.add,
                        _file,
                        priority=GLib.PRIORITY_HIGH,
                    )
                # Sleep in 1 s slices so that stopping the engine does not
                # have to wait for the full creation delay to elapse.
                for _ in range(int(self._engine.params.creation_delay)):
                    sleep(1)
                    if self.should_exit:
                        self._engine.cleanup()
                        return
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from ..engine_advanced_settings import EngineAdvancedSettings
from ..engine import Engine
from ..utils import PATTERN_PLACEHOLDER_TEXT
class DirectoryWatchdogEngineAdvancedSettings(EngineAdvancedSettings):
    """Advanced settings pane for the directory watchdog engine.

    Exposes a checkbox to process directories already present in the
    monitored directory, plus entries for allowed/ignored filename
    patterns and allowed/ignored directory patterns.
    """

    def __init__(self, engine: Engine):
        super().__init__(engine)
        # Next free grid row; bumped after every widget/separator appended.
        self._row_counter = 0
        # Process existing directories in monitored directory
        self._process_existing_directories_checkbutton = engine.register_widget(
            Gtk.CheckButton(
                label="Process existing directories in target directory",
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=False,
                active=False,
            ),
            "process_existing_directories",
        )
        self.attach(
            self._process_existing_directories_checkbutton,
            0,
            self._row_counter,
            1,
            1,
        )
        self._row_counter += 1
        self._add_horizontal_separator()
        # Specify allowed file patterns
        allowed_file_patterns_grid = Gtk.Grid(
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
            column_spacing=5,
        )
        self.attach(allowed_file_patterns_grid, 0, self._row_counter, 1, 1)
        self._row_counter += 1
        allowed_file_patterns_grid.attach(
            Gtk.Label(
                label="Allowed filename patterns",
                halign=Gtk.Align.START,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            0,
            0,
            1,
            1,
        )
        self._allowed_file_patterns_entry = engine.register_widget(
            Gtk.Entry(
                placeholder_text=PATTERN_PLACEHOLDER_TEXT,
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=False,
            ),
            "allowed_file_patterns",
        )
        allowed_file_patterns_grid.attach(
            self._allowed_file_patterns_entry, 1, 0, 1, 1
        )
        self._add_horizontal_separator()
        # Specify ignored file patterns
        ignore_file_patterns_grid = Gtk.Grid(
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
            column_spacing=5,
        )
        self.attach(ignore_file_patterns_grid, 0, self._row_counter, 1, 1)
        self._row_counter += 1
        ignore_file_patterns_grid.attach(
            Gtk.Label(
                label="Ignored filename patterns",
                halign=Gtk.Align.START,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            0,
            0,
            1,
            1,
        )
        self._ignored_file_patterns_entry = engine.register_widget(
            Gtk.Entry(
                placeholder_text=PATTERN_PLACEHOLDER_TEXT,
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=False,
            ),
            "ignore_file_patterns",
        )
        ignore_file_patterns_grid.attach(
            self._ignored_file_patterns_entry, 1, 0, 1, 1
        )
        self._add_horizontal_separator()
        # Specify allowed directory patterns
        allowed_directory_patterns_grid = Gtk.Grid(
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
            column_spacing=5,
        )
        self.attach(allowed_directory_patterns_grid, 0, self._row_counter, 1, 1)
        self._row_counter += 1
        allowed_directory_patterns_grid.attach(
            Gtk.Label(
                label="Allowed directory patterns",
                halign=Gtk.Align.START,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            0,
            0,
            1,
            1,
        )
        self._allowed_directory_patterns_entry = engine.register_widget(
            Gtk.Entry(
                placeholder_text=PATTERN_PLACEHOLDER_TEXT,
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=False,
            ),
            "allowed_directory_patterns",
        )
        allowed_directory_patterns_grid.attach(
            self._allowed_directory_patterns_entry, 1, 0, 1, 1
        )
        self._add_horizontal_separator()
        # Specify ignored directory patterns
        ignore_directory_patterns_grid = Gtk.Grid(
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
            column_spacing=5,
        )
        self.attach(ignore_directory_patterns_grid, 0, self._row_counter, 1, 1)
        self._row_counter += 1
        ignore_directory_patterns_grid.attach(
            Gtk.Label(
                label="Ignored directory patterns",
                halign=Gtk.Align.START,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            0,
            0,
            1,
            1,
        )
        self._ignored_directory_patterns_entry = engine.register_widget(
            Gtk.Entry(
                placeholder_text=PATTERN_PLACEHOLDER_TEXT,
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=False,
            ),
            "ignore_directory_patterns",
        )
        ignore_directory_patterns_grid.attach(
            self._ignored_directory_patterns_entry, 1, 0, 1, 1
        )

    def _add_horizontal_separator(self):
        """Append a full-width horizontal separator and advance the row counter."""
        self.attach(
            Gtk.Separator(
                orientation=Gtk.Orientation.HORIZONTAL,
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=True,
            ),
            0,
            self._row_counter,
            1,
            1,
        )
        self._row_counter += 1
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from ..engine_advanced_settings import EngineAdvancedSettings
from ..engine import Engine
from ..utils import PATTERN_PLACEHOLDER_TEXT
class AWSS3BucketEngineAdvancedSettings(EngineAdvancedSettings):
    """Advanced settings pane for the AWS S3 bucket engine: entries for
    allowed and ignored filename patterns."""

    def __init__(self, engine: Engine):
        super().__init__(engine)
        # Next free grid row; bumped after every widget/separator appended.
        self._row_counter = 0
        # Specify allowed file patterns
        allowed_patterns_grid = Gtk.Grid(
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
            column_spacing=5,
        )
        self.attach(allowed_patterns_grid, 0, self._row_counter, 1, 1)
        self._row_counter += 1
        allowed_patterns_grid.attach(
            Gtk.Label(
                label="Allowed filename patterns",
                halign=Gtk.Align.START,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            0,
            0,
            1,
            1,
        )
        self._allowed_patterns_entry = engine.register_widget(
            Gtk.Entry(
                placeholder_text=PATTERN_PLACEHOLDER_TEXT,
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=False,
            ),
            "allowed_patterns",
        )
        allowed_patterns_grid.attach(self._allowed_patterns_entry, 1, 0, 1, 1)
        self._add_horizontal_separator()
        # Specify ignored file patterns
        ignore_patterns_grid = Gtk.Grid(
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
            column_spacing=5,
        )
        # NOTE(review): height of 2 differs from the other attach calls (1) —
        # presumably harmless since this is the last row; confirm.
        self.attach(ignore_patterns_grid, 0, self._row_counter, 1, 2)
        self._row_counter += 1
        ignore_patterns_grid.attach(
            Gtk.Label(
                label="Ignored filename patterns",
                halign=Gtk.Align.START,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            0,
            0,
            1,
            1,
        )
        self._ignored_patterns_entry = engine.register_widget(
            Gtk.Entry(
                placeholder_text=PATTERN_PLACEHOLDER_TEXT,
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=False,
            ),
            "ignore_patterns",
        )
        ignore_patterns_grid.attach(self._ignored_patterns_entry, 1, 0, 1, 1)

    def _add_horizontal_separator(self):
        """Append a full-width horizontal separator and advance the row counter."""
        self.attach(
            Gtk.Separator(
                orientation=Gtk.Orientation.HORIZONTAL,
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=True,
            ),
            0,
            self._row_counter,
            1,
            1,
        )
        self._row_counter += 1
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from ..engine_advanced_settings import EngineAdvancedSettings
from ..engine import Engine
from ..utils import PATTERN_PLACEHOLDER_TEXT
class FileWatchdogEngineAdvancedSettings(EngineAdvancedSettings):
    """Advanced settings pane for the file watchdog engine.

    Exposes checkboxes for recursive monitoring and for processing files
    already present in the monitored directory, plus entries for allowed
    and ignored filename patterns.
    """

    def __init__(self, engine: Engine):
        super().__init__(engine)
        # Next free grid row; bumped after every widget/separator appended.
        self._row_counter = 0
        # Monitor recursively
        self._monitor_recursively_checkbutton = engine.register_widget(
            Gtk.CheckButton(
                label="Monitor target directory recursively",
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=False,
                active=True,
            ),
            "monitor_recursively",
        )
        self.attach(
            self._monitor_recursively_checkbutton, 0, self._row_counter, 1, 1
        )
        self._row_counter += 1
        self._add_horizontal_separator()
        # Process existing files in monitored directory
        self._process_existing_files_checkbutton = engine.register_widget(
            Gtk.CheckButton(
                label="Process existing files in target directory",
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=False,
                active=False,
            ),
            "process_existing_files",
        )
        self.attach(
            self._process_existing_files_checkbutton, 0, self._row_counter, 1, 1
        )
        self._row_counter += 1
        self._add_horizontal_separator()
        # Specify allowed file patterns
        allowed_patterns_grid = Gtk.Grid(
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
            column_spacing=5,
        )
        self.attach(allowed_patterns_grid, 0, self._row_counter, 1, 1)
        self._row_counter += 1
        allowed_patterns_grid.attach(
            Gtk.Label(
                label="Allowed filename patterns",
                halign=Gtk.Align.START,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            0,
            0,
            1,
            1,
        )
        self._allowed_patterns_entry = engine.register_widget(
            Gtk.Entry(
                placeholder_text=PATTERN_PLACEHOLDER_TEXT,
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=False,
            ),
            "allowed_patterns",
        )
        allowed_patterns_grid.attach(self._allowed_patterns_entry, 1, 0, 1, 1)
        self._add_horizontal_separator()
        # Specify ignored file patterns
        ignore_patterns_grid = Gtk.Grid(
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
            column_spacing=5,
        )
        # NOTE(review): height of 2 differs from the other attach calls (1) —
        # presumably harmless since this is the last row; confirm.
        self.attach(ignore_patterns_grid, 0, self._row_counter, 1, 2)
        self._row_counter += 1
        ignore_patterns_grid.attach(
            Gtk.Label(
                label="Ignored filename patterns",
                halign=Gtk.Align.START,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            0,
            0,
            1,
            1,
        )
        self._ignored_patterns_entry = engine.register_widget(
            Gtk.Entry(
                placeholder_text=PATTERN_PLACEHOLDER_TEXT,
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=False,
            ),
            "ignore_patterns",
        )
        ignore_patterns_grid.attach(self._ignored_patterns_entry, 1, 0, 1, 1)

    def _add_horizontal_separator(self):
        """Append a full-width horizontal separator and advance the row counter."""
        self.attach(
            Gtk.Separator(
                orientation=Gtk.Orientation.HORIZONTAL,
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=True,
            ),
            0,
            self._row_counter,
            1,
            1,
        )
        self._row_counter += 1
from threading import current_thread
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from ..operation import Operation
from ..files.directory import Directory
from ..utils import ExitableThread, get_random_string
from ..utils.decorators import supported_filetypes, with_pango_docs
from ..utils.exceptions import SkippedOperation
import logging
from dataclasses import dataclass, field
from typing import Callable, List, Dict, Any, Tuple
import tarfile
import zipfile
from pathlib import Path
import os.path
logger = logging.getLogger(__name__)
@dataclass
class Compressor:
    """Recipe describing how to build one archive format."""
    opener: Callable  # e.g. tarfile.open or zipfile.ZipFile
    mode: str  # mode string passed to the opener, e.g. "w:gz"
    adder: str  # 'add' (tar) or 'write' (zip)
    suffix: str  # archive filename suffix, e.g. ".tar.gz"
    opener_args: Dict[str, Any] = field(default_factory=dict)  # extra opener kwargs
# Mapping from the compression type shown in the UI combobox to the
# corresponding archiving recipe.
COMPRESSORS: Dict[str, Compressor] = {
    "TAR + GZIP": Compressor(tarfile.open, "w:gz", "add", ".tar.gz"),
    "TAR + BZIP2": Compressor(tarfile.open, "w:bz2", "add", ".tar.bz2"),
    "TAR + LZMA": Compressor(tarfile.open, "w:xz", "add", ".tar.xz"),
    "TAR": Compressor(tarfile.open, "w", "add", ".tar"),
    "ZIP": Compressor(
        zipfile.ZipFile,
        "w",
        "write",
        ".zip",
        opener_args=dict(compression=zipfile.ZIP_DEFLATED),
    ),
}
@supported_filetypes(filetypes=Directory)
@with_pango_docs(filename="directory_compressor.pango")
class DirectoryCompressorOperation(Operation):
    """Operation that compresses a monitored directory into a single
    archive (tar variants or zip) in a user-selected destination folder.

    An existing, up-to-date archive is detected and skipped via
    SkippedOperation.
    """

    NAME = "Directory Compressor"

    def __init__(self, *args, **kwargs):
        Operation.__init__(self, *args, **kwargs)
        grid = Gtk.Grid(
            border_width=5,
            row_spacing=5,
            column_spacing=5,
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
        )
        self.add(grid)
        # boxes are needed for
        # destination folder
        # compression type
        label = Gtk.Label(
            label="Destination",
            halign=Gtk.Align.START,
            valign=Gtk.Align.CENTER,
            hexpand=False,
            vexpand=False,
        )
        grid.attach(label, 0, 0, 1, 1)
        directory_chooser_button = self.register_widget(
            Gtk.FileChooserButton(
                title="Select a directory to copy compressed files to",
                action=Gtk.FileChooserAction.SELECT_FOLDER,
                create_folders=True,
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=False,
            ),
            "destination_directory",
        )
        grid.attach(directory_chooser_button, 1, 0, 1, 1)
        grid.attach(
            Gtk.Separator(
                orientation=Gtk.Orientation.HORIZONTAL,
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=False,
            ),
            0,
            1,
            2,
            1,
        )
        label = Gtk.Label(
            label="Compression Type",
            halign=Gtk.Align.START,
            valign=Gtk.Align.CENTER,
            hexpand=False,
            vexpand=False,
        )
        grid.attach(label, 0, 2, 1, 1)
        compression_type_combo_box = self.register_widget(
            Gtk.ComboBoxText(
                halign=Gtk.Align.START,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            "compression_type",
        )
        grid.attach(compression_type_combo_box, 1, 2, 1, 1)
        # Offer all known archive formats; default to the first (TAR + GZIP).
        for compression_type in COMPRESSORS:
            compression_type_combo_box.append_text(compression_type)
        compression_type_combo_box.set_active(0)

    def preflight_check(self):
        """Validate the configuration before the operation runs.

        Raises ValueError when the destination is missing, coincides with or
        lies inside the monitored directory, or is not writable.
        """
        # ensure destination is not None
        if self.params.destination_directory is None:
            raise ValueError("Destination folder cannot be empty")
        # ensure we are not writing into the monitored directory
        from ..engines.directory_watchdog_engine import DirectoryWatchdogEngine

        if not isinstance(
            self.appwindow.active_engine, DirectoryWatchdogEngine
        ):
            raise ValueError(
                "DirectoryCompressor currently only works with DirectoryWatchdog engines!"
            )
        self._monitored_directory = (
            self.appwindow.active_engine._get_params().monitored_directory
        )
        if Path(self.params.destination_directory).samefile(
            self._monitored_directory
        ):
            raise ValueError(
                "Destination folder cannot be the same as the monitored directory"
            )
        # relative_to() succeeding means destination is INSIDE the monitored
        # directory, which would make the archive feed back into the monitor.
        try:
            Path(self.params.destination_directory).resolve().relative_to(
                Path(self._monitored_directory)
            )
        except ValueError:
            pass
        else:
            raise ValueError(
                "The destination directory cannot be a subdirectory of the monitored directory."
            )
        # ensure the destination is writable
        tempfile = Path(
            self.params.destination_directory, get_random_string(10)
        )
        try:
            with tempfile.open("w") as f:
                f.write("teststring")
        except Exception as e:
            raise ValueError(f"Cannot write to destination folder: {str(e)}")
        else:
            tempfile.unlink(missing_ok=True)

    def _check_existing_zipfile(
        self, _zipfile: Path, file_list: List[Tuple[str, int]]
    ):
        """Raise SkippedOperation when _zipfile already contains exactly the
        files in file_list, with matching sizes, and is newer than all of
        them. Returns silently otherwise."""
        if not _zipfile.exists() or not zipfile.is_zipfile(_zipfile):
            return
        with zipfile.ZipFile(_zipfile) as f:
            # Map absolute source paths to their ZipInfo entries.
            zipped_files = dict(
                zip(
                    map(
                        lambda x: os.path.join(self._monitored_directory, x),
                        f.namelist(),
                    ),
                    f.infolist(),
                )
            )
        if len(zipped_files) != len(file_list):
            return
        for _filename, _size in file_list:
            if _filename not in zipped_files:
                return
            zipped_file = zipped_files[_filename]
            if _size != zipped_file.file_size:
                return
            elif os.stat(_filename).st_mtime > _zipfile.stat().st_mtime:
                return
        raise SkippedOperation("Zipfile contents are equal to directory")

    def _check_existing_tarfile(
        self, _tarfile: Path, file_list: List[Tuple[str, int]]
    ):
        """Raise SkippedOperation when _tarfile already contains exactly the
        files in file_list, with matching sizes, and each member is at least
        as recent as its source file. Returns silently otherwise."""
        if not _tarfile.exists() or not tarfile.is_tarfile(_tarfile):
            return
        with tarfile.open(_tarfile) as f:
            # Map absolute source paths to their TarInfo entries.
            zipped_files = dict(
                zip(
                    map(
                        lambda x: os.path.join(self._monitored_directory, x),
                        f.getnames(),
                    ),
                    f.getmembers(),
                )
            )
        if len(zipped_files) != len(file_list):
            return
        for _filename, _size in file_list:
            if _filename not in zipped_files:
                return
            zipped_file = zipped_files[_filename]
            if _size != zipped_file.size:
                return
            elif os.stat(_filename).st_mtime > zipped_file.mtime:
                return
        raise SkippedOperation("Tarball contents are equal to directory")

    def run(self, dir: Directory):  # type: ignore[override]
        """Compress `dir` into the configured archive format, updating the
        directory's progress bar as members are added.

        Returns an error string when the job is aborted, None on success.
        """
        compressor = COMPRESSORS[self.params.compression_type]
        destination_filename = Path(
            self.params.destination_directory, dir.relative_filename.name
        ).with_suffix(compressor.suffix)
        our_thread = current_thread()
        total_size = dir.total_size
        file_list = list(dir)
        number_of_files = len(dir)
        size_seen = 0
        # Skip the whole operation when an up-to-date archive already exists.
        if "tar" in compressor.suffix:
            self._check_existing_tarfile(destination_filename, file_list)
        elif "zip" in compressor.suffix:
            self._check_existing_zipfile(destination_filename, file_list)
        with compressor.opener(
            destination_filename, compressor.mode, **compressor.opener_args
        ) as f:
            for _file_index, (_file, _size) in enumerate(dir):
                if (
                    isinstance(our_thread, ExitableThread)
                    and our_thread.should_exit
                ):
                    return "Job aborted"
                # Store members relative to the monitored directory.
                arcname = os.path.relpath(_file, self._monitored_directory)
                getattr(f, compressor.adder)(_file, arcname)
                # Progress is weighted by filesize, falling back to the file
                # index when the combined size is zero.
                if total_size == 0:
                    dir.update_progressbar(
                        self.index, 100.0 * (_file_index + 1) / number_of_files
                    )
                else:
                    size_seen += _size
                    dir.update_progressbar(
                        self.index, 100.0 * size_seen / total_size
                    )
from __future__ import annotations
from rfi_file_monitor.utils import ExitableThread
from gi.repository import Gio
from ..engine_advanced_settings import EngineAdvancedSettings
from ..engine import Engine
from ..utils.exceptions import SkippedOperation
from ..file import File, FileStatus
from ..files.regular_file import RegularFile, WeightedRegularFile
from ..files.directory import Directory
from ..operation import Operation
from typing import Type, Union, Sequence, Callable, Optional
import logging
import inspect
from pathlib import Path
import collections.abc
import functools
import threading
logger = logging.getLogger(__name__)
_app = Gio.Application.get_default()
def with_pango_docs(filename: str):
    """Decorator for engines and operations, used to set the name of the file
    whose contents should be used to populate the associated Help dialog.
    Provide the basename of the file only, and make sure it is placed in a folder
    called `docs`, which must be a subfolder within the folder containing the engine or operation"""

    def _with_pango_docs(cls: Type[Union[Operation, Engine]]):
        # No-op when there is no running application (e.g. during tests).
        if _app is None:
            return cls
        if not issubclass(cls, Operation) and not issubclass(cls, Engine):
            logger.error(
                "with_pango_docs can only be used to decorate classes that extend Engine, Operation or QueueManager"
            )
            return cls
        try:
            module = inspect.getmodule(cls)
            if not module:
                raise Exception(f"module for class {cls.NAME} not found")
            contents = (
                Path(module.__file__)
                .parent.joinpath("docs", filename)
                .read_text()
            )
        except Exception:
            # Include the offending filename in the log instead of "(unknown)".
            logger.exception(
                f"with_pango_docs: could not open {filename} for reading"
            )
        else:
            _app.pango_docs_map[cls] = contents
        return cls

    return _with_pango_docs
def with_advanced_settings(
    engine_advanced_settings: Type[EngineAdvancedSettings],
):
    """Decorator for Engine classes, to be used when some of their
    settings have been delegated to an advanced settings window
    OPTIONAL."""
    def _with_advanced_settings(cls: Type[Engine]):
        # No-op when there is no running application (e.g. during tests).
        if _app is None:
            return cls
        if not issubclass(cls, Engine):
            logger.error(
                f"with_advanced_settings can only be used to decorate classes that extend Engine"
            )
            return cls
        # Register the advanced-settings class for this engine with the app.
        _app.engines_advanced_settings_map[cls] = engine_advanced_settings
        return cls
    return _with_advanced_settings
# This may need to be changed later, if an engine can record multiple filetypes...
def exported_filetype(filetype: Type[File]):
    """Decorator for Engine classes that declares which filetype
    it will be looking out for. MANDATORY. Without this decorator,
    the engine cannot be tied to operations."""
    def _exported_filetype(cls: Type[Engine]):
        # No-op when there is no running application (e.g. during tests).
        if _app is None:
            return cls
        if not issubclass(cls, Engine):
            logger.error(
                f"exported_filetype can only be used to decorate classes that extend Engine"
            )
            return cls
        # Register the engine's produced filetype with the app.
        _app.engines_exported_filetype_map[cls] = filetype
        return cls
    return _exported_filetype
def supported_filetypes(filetypes: Union[Type[File], Sequence[Type[File]]]):
    """Decorator for Operation classes that should be used to declare
    which filetype(s) it supports. OPTIONAL. If unused, then the operation
    will be assumed to support regular files only!"""

    def _supported_filetypes(cls: Type[Operation]):
        # No-op when there is no running application (e.g. during tests).
        if _app is None:
            return cls
        if not issubclass(cls, Operation):
            logger.error(
                "supported_filetypes can only be used to decorate classes that extend Operation"
            )
            return cls
        # Accept either a single filetype or a sequence of them.
        if isinstance(filetypes, collections.abc.Sequence):
            _filetypes = filetypes
        else:
            _filetypes = [filetypes]
        for filetype in _filetypes:
            # Group operations per filetype, creating the list on first use.
            _app.filetypes_supported_operations_map.setdefault(
                filetype, []
            ).append(cls)
        return cls

    return _supported_filetypes
# We are using filesize to determine weight in progressbar changes
# However, if the combined filesize is 0, then the file index will be used instead.
def add_directory_support(run: Callable[[Operation, File], Optional[str]]):
    """Decorator that lets an Operation's `run` method, written for regular
    files, also handle Directory inputs by invoking it once per contained
    file with progressbar offsets weighted by filesize (or by file index
    when the combined size is zero)."""

    @functools.wraps(run)
    def wrapper(self: Operation, file: File):
        current_thread = threading.current_thread()
        if isinstance(file, RegularFile):
            # Regular files go straight through to the wrapped method.
            return run(self, file)
        elif isinstance(file, Directory):
            # get all files contained with Directory, as well as their sizes
            _path = Path(file.filename)
            _parent = _path.parent
            total_size = file.total_size
            size_seen = 0
            file.operation_metadata[self.index] = {}
            for file_index, (filename, size) in enumerate(file):
                # abort if job has been cancelled
                if (
                    isinstance(current_thread, ExitableThread)
                    and current_thread.should_exit
                ):
                    return "Thread killed"
                # Weight progress by filesize; fall back to the file index
                # when the combined size is zero.
                if total_size == 0:
                    offset = file_index / len(file)
                    weight = 1 / len(file)
                else:
                    offset = size_seen / total_size
                    size_seen += size
                    weight = size / total_size
                _file = WeightedRegularFile(
                    filename,
                    Path(filename).relative_to(_parent),
                    0,
                    FileStatus.CREATED,
                    offset,
                    weight,
                )
                # reuse the row_reference to ensure the progress bars are updated
                _file.row_reference = file.row_reference
                # run the wrapped method, and do the usual exception and return value handling
                try:
                    rv = run(self, _file)
                    if rv:
                        return rv
                except SkippedOperation:
                    pass
                # Collect per-file operation metadata under the directory.
                if self.index in _file.operation_metadata:
                    file.operation_metadata[self.index][
                        _file.filename
                    ] = _file.operation_metadata[self.index]
            return None
        else:
            raise NotImplementedError(f"{type(file)} is currently unsupported")

    return wrapper
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from pathlib import Path
from typing import Final, Any, List, final, Optional
from munch import Munch, munchify
import logging
logger = logging.getLogger(__name__)
class WidgetParams:
    """
    Mixin that binds Gtk widgets to named parameters.

    Subclasses register widgets with :meth:`register_widget`; the current
    widget values are then kept in sync automatically in :attr:`params`
    via per-widget-type signal callbacks.
    """

    # pylint: disable=unsubscriptable-object
    def __init__(self, params: Optional[Munch] = None):
        if params:
            self._params = params
        else:
            self._params: Final[Munch[str, Any]] = Munch()
        # Signal handler ids per parameter (used to block during updates),
        # the registered widgets, the names of exportable parameters, and
        # the widgets excluded from set_sensitive().
        self._signal_ids: Final[Munch[str, int]] = Munch()
        self._widgets: Final[Munch[str, Gtk.Widget]] = Munch()
        self._exportable_params: Final[List[str]] = list()
        self._desensitized_widgets: Final[List[Gtk.Widget]] = list()

    @final
    def set_sensitive(self, sensitive: bool):
        """Toggle sensitivity of all registered widgets, except those that
        were registered with desensitized=True."""
        for widget in self.widgets.values():
            if widget not in self._desensitized_widgets:
                widget.set_sensitive(sensitive)

    @final
    def _entry_changed_cb(self, entry: Gtk.Entry, param_name: str):
        self._params[param_name] = entry.get_text().strip()

    @final
    def _checkbutton_toggled_cb(
        self, checkbutton: Gtk.CheckButton, param_name: str
    ):
        self._params[param_name] = checkbutton.get_active()

    @final
    def _filechooserbutton_selection_changed_cb(
        self, filechooserbutton: Gtk.FileChooserButton, param_name: str
    ):
        self._params[param_name] = filechooserbutton.get_filename()

    @final
    def _spinbutton_value_changed_cb(
        self, spinbutton: Gtk.SpinButton, param_name: str
    ):
        self._params[param_name] = spinbutton.get_value()

    @final
    def _combobox_changed_cb(self, combobox: Gtk.ComboBoxText, param_name: str):
        self._params[param_name] = combobox.get_active_text()

    @final
    def _switch_value_changed_cb(self, switch: Gtk.Switch, _, param_name: str):
        self._params[param_name] = switch.get_active()

    @final
    def register_widget(
        self,
        widget: Gtk.Widget,
        param_name: str,
        exportable: bool = True,
        desensitized: bool = False,
    ):
        """Bind `widget` to parameter `param_name`, record its initial value
        and connect the appropriate change signal. Returns the widget.

        Raises ValueError when the parameter already exists and
        NotImplementedError for unsupported widget types.
        """
        if param_name in self._params:
            raise ValueError(
                "register_widget cannot overwrite existing parameters!"
            )
        if isinstance(widget, Gtk.Switch):
            self._params[param_name] = widget.get_active()
            self._signal_ids[param_name] = widget.connect(
                "notify::active", self._switch_value_changed_cb, param_name
            )
        elif isinstance(widget, Gtk.SpinButton):
            self._params[param_name] = widget.get_value()
            self._signal_ids[param_name] = widget.connect(
                "value-changed", self._spinbutton_value_changed_cb, param_name
            )
        elif isinstance(widget, Gtk.CheckButton):
            self._params[param_name] = widget.get_active()
            self._signal_ids[param_name] = widget.connect(
                "toggled", self._checkbutton_toggled_cb, param_name
            )
        elif isinstance(widget, Gtk.FileChooserButton):
            self._params[param_name] = widget.get_filename()
            self._signal_ids[param_name] = widget.connect(
                "selection-changed",
                self._filechooserbutton_selection_changed_cb,
                param_name,
            )
        elif isinstance(widget, Gtk.Entry):
            self._params[param_name] = widget.get_text().strip()
            self._signal_ids[param_name] = widget.connect(
                "changed", self._entry_changed_cb, param_name
            )
        elif isinstance(widget, Gtk.ComboBoxText):
            self._params[param_name] = widget.get_active_text()
            self._signal_ids[param_name] = widget.connect(
                "changed", self._combobox_changed_cb, param_name
            )
        else:
            raise NotImplementedError(
                f"register_widget: no support for {type(widget).__name__}"
            )
        self._widgets[param_name] = widget
        if exportable:
            self._exportable_params.append(param_name)
        if desensitized:
            self._desensitized_widgets.append(widget)
        return widget

    def update_from_dict(self, yaml_dict: dict):
        """Restore parameter values (e.g. loaded from YAML) into both
        self._params and the corresponding widgets, blocking each widget's
        change signal while it is updated."""
        for param_name, value in yaml_dict.items():
            if param_name not in self._params:
                logger.warning(
                    f"update_from_dict: {param_name} not found in widget params!"
                )
                continue
            widget = self._widgets[param_name]
            with widget.handler_block(self._signal_ids[param_name]):
                self._params[param_name] = value
                if isinstance(widget, Gtk.SpinButton):
                    widget.set_value(value)
                elif isinstance(widget, Gtk.CheckButton) or isinstance(
                    widget, Gtk.Switch
                ):
                    widget.set_active(value)
                elif isinstance(widget, Gtk.FileChooserButton):
                    # BUGFIX: exists() was previously referenced without
                    # calling it, so missing paths were never skipped.
                    if value is None or not Path(value).exists():
                        continue
                    widget.set_filename(value)
                elif isinstance(widget, Gtk.Entry):
                    widget.set_text(value)
                elif isinstance(widget, Gtk.ComboBoxText):
                    # Select the model row whose text matches the value.
                    for row in widget.get_model():
                        if row[0] == value:
                            widget.set_active_iter(row.iter)
                            break
                else:
                    raise NotImplementedError(
                        f"update_from_dict: no support for {type(widget).__name__}"
                    )

    @property
    def params(self) -> Munch:
        """
        A Munch dict containing the parameters that will be used by run
        """
        return self._params

    @property
    def exportable_params(self) -> Munch:
        """
        A Munch dict containing those parameters that have been considered safe for exporting.
        This will typically exclude widgets meant to hold passwords and other secrets.
        """
        return munchify(
            {param: self._params[param] for param in self._exportable_params}
        )

    @property
    def widgets(self) -> Munch:
        """
        A Munch dict containing the registered widgets
        """
        return self._widgets
# RFInder
**Installation instructions**
```
pip install rfinder
```
To create a local repository, type:
```
git clone https://github.com/Fil8/RFInder
```
***
**Requisites**
For a successful installation, make sure the following packages are installed.
- RFInder makes use of the most common `python` packages (e.g. `numpy`, `scipy`, `astropy`).
- The parameter file is in `yaml` format, hence [`pyaml`](https://anaconda.org/anaconda/pyyaml), and [`json`](https://anaconda.org/conda-forge/json-c) packages should be installed,
- The `logging` module is used to print out warnings.
- `.gif` file of multiple plots can be created if `ffmpeg` is installed.
- `casacore` is utilized to open casa tables.
- install it with `python_casacore`: `pip install python-casacore` or `conda install -c conda-forge python-casacore`
- `texmaker` to plot latex fancy formulae
- `dvipng`
- `python tk`
***
**Description**
This is a set of tools that have been developed in preparation of the Apertif & MeerKAT surveys.
The main function of `rfinder` is to identify the presence of RFI in an observation and visualize it according to different parameters. Its two main modes of operation are:
- estimate the RFI present in an MS file through a sigma clipping (`rms_clip`)
- read the `FLAG` column of an MS file (`use_flags`) and summarize how RFI affects the data products of an observation.
These are the products that `rfinder` provides and summarizes in an `.html` file:
- presence of RFI per frequency channel and baseline length.
- percentage flagged visibilities due to RFI per frequency channel.
- increase in noise due to RFI per frequency channel.
- estimated noise per frequency channel, assuming natural weighting.
check out the [WiKi](https://github.com/Fil8/RFInder/wiki) for a complete illustration of `RFInder`.
***
**Usage**
RFInder takes its variables from a default parameter file and from terminal, if any are given.
From your current working directory, typing `rfinder` will show this message:
```
------ Reading default installation parameter file ------
MSNAME & telescope missing
please edit rfinder_default.yml in your current directory
or run: rfinder -i msname -fl <num> -tel <meerkat,apertif,wsrt>
(assuming the observation is located in your current directory)
------ RFInder out ------
```
Hence, you have to set the name of the MS file you wish to analyse. There are two ways to do this. The first is by specifying from the terminal the path to the MS file relative to your current directory, the field number of the source you wish to analyse, and the telescope of the observation:
```
rfinder -i msname -fl <num> -tel <meerkat,apertif,wsrt>
```
or, editing the `rfinder_default.yml` configuration file that has been copied in your current directory (workdir, in the configuration file).
This configuration file is read automatically by RFInder through the command `rfinder`. A short explanation of the parameters is given in the configuration file, and by typing `rfinder -h` (see below).
If you wish to use a different configuration file (at your own risk!!), type: `rfinder -c <path_to_configuration_file>`.
**Minimal instructions**
- By default, `rfinder` will scan the MS file in chunks of 10 minutes, averaging 10 channels together. The output product will be an `html` file where the `gif`s scan through the time steps to show the identified RFI/flags.
- Running `rfinder -noCh` after `rfinder` will produce a `full_report.html` file containing both the analysis over time steps and the analysis of the dataset as a whole.
- Running `rfinder -noCh -noMov` will analyse the full dataset as a whole and generate the `full_report.html` without embedded movies.
_Attention_: the option `rfinder -noCh` will end with a report successfully generated, only if it is run after `rfinder`. Otherwise run `rfinder -noCh -noMov`.
(These [tutorials](https://github.com/Fil8/RFInder/tree/master/tutorials) show the different capabilities of `rfinder`. **outdated**)
**Output products**
If `rfinder` runs correctly, you will find the following output products in your current directory:
- the folder `rfi_pol` in your current directory, or in the directory specified by the `-odir` parameter (`pol` is the stokes parameters for which you analysed RFI).
- Within, there are the `.html` reports that you wished to generate.
- The configuration file `rfinder_default.yml` contains the parameters of the last run.
- A `log` of the commands run by the program is stored in `log-rfinder.log`, in your working directory.
**Help**
`rfinder -h` will show you a (minimal) help:
```
usage: rfinder [-h] [-v] [-c CONFIG] [-w WORKING_DIR] [-odir OUTPUT_DIR]
[-i INPUT] [-fl FIELD] [-tel TELESCOPE] [-mode RFIMODE]
[-pol POLARIZATION]
[-fint [FREQUENCY_INTERVAL [FREQUENCY_INTERVAL ...]]]
[-spwAv SPW_AV] [-tStep TIME_STEP] [-sig SIGMA_CLIP]
[-baseCut BASELINE_CUT] [-noCh] [-yesCh] [-noSpw] [-yesSpw]
[-noClp] [-yesClp]
RFInder: package to visualize the flagged RFI in a dataset
version 1.0.3
install path /home/maccagni/programs/RFInder/rfinder
Filippo Maccagni <filippo.maccagni@gmial.com>
optional arguments:
-h, --help Print help message and exit
-v, --version show program's version number and exit
-c CONFIG, --config CONFIG
RFInder configuration file (YAML format)
-idir INPUT_DIR, --input_dir WORKING_DIR
select working directory (MS file assumed to be here)
-odir OUTPUT_DIR, --output_dir OUTPUT_DIR
select output directory
-i INPUT, --input INPUT
input ['MS'] file
-fl FIELD, --field FIELD
select field of MS file to analyze
-tel TELESCOPE, --telescope TELESCOPE
select telescope: meerkat, apertif, wsrt
-mode RFIMODE, --rfimode RFIMODE
select mode where to investigate RFI: use_flags or
rms_clip
-pol POLARIZATION, --polarization POLARIZATION
select stokes parameter: xx, yy, xy, yx, q (also in
CAPS)
-fint [FREQUENCY_INTERVAL [FREQUENCY_INTERVAL ...]], --frequency_interval [FREQUENCY_INTERVAL [FREQUENCY_INTERVAL ...]]
select frequency interval where to measure noise in
GHz
-spwAv SPW_AV, --spw_av SPW_AV
select number of channels to average
-tStep TIME_STEP, --time_step TIME_STEP
select time step in minutes in which divide the
analysis of the MSfile
-sig SIGMA_CLIP, --sigma_clip SIGMA_CLIP
select sigma clip for rms_clip mode to find RFI
-baseCut BASELINE_CUT, --baseline_cut BASELINE_CUT
select cut in baseline lenght [m] for differential RFI
analysis
-noCh, --no_chunks desable chunking in time
-yesCh, --yes_chunks enable chunking in time
-noSpw, --no_spw_av desable averaging in channels
-yesSpw, --yes_spw_av
enable averaging in channels
-noClp, --no_cleanup desable cleanup of intermediate products
-yesClp, --yes_cleanup
enable cleanup of intermediate products
Run a command. This can be:
rfinder
rfinder -c path_to_config_file.yml
rfinder -i <ngc1399.ms> -fl <num> -tel <meerkat/apertif/wsrt>
```
***
**License**
This project is licensed under the GNU General Public License v3.0 - see [license](https://github.com/Fil8/RFInder/blob/master/LICENSE.md) for details.
***
<p>© <sub> Filippo M. Maccagni 2018 </sub></p>
| /rfinder-1.0.5.tar.gz/rfinder-1.0.5/README.md | 0.410993 | 0.958069 | README.md | pypi |
# pytype: skip-file
from __future__ import absolute_import
import struct
import sys
from builtins import chr
from builtins import object
from typing import List
class OutputStream(object):
  """For internal use only; no backwards-compatibility guarantees.

  A pure Python implementation of stream.OutputStream: an append-only byte
  buffer supporting Beam's wire primitives (var-ints and big-endian fields).
  """
  def __init__(self):
    # Chunks are collected in a list and joined lazily in get().
    self.data = []  # type: List[bytes]
    self.byte_count = 0

  def write(self, b, nested=False):
    # type: (bytes, bool) -> None
    """Appends raw bytes; a nested write is prefixed with its var-int length."""
    assert isinstance(b, bytes)
    if nested:
      self.write_var_int64(len(b))
    self.data.append(b)
    self.byte_count += len(b)

  def write_byte(self, val):
    """Appends a single byte value (expected range 0..255)."""
    self.data.append(chr(val).encode('latin-1'))
    self.byte_count += 1

  def write_var_int64(self, v):
    # type: (int) -> None
    """Appends ``v`` in base-128 var-int form (negatives via 64-bit two's
    complement)."""
    if v < 0:
      v += 1 << 64
      if v <= 0:
        raise ValueError('Value too large (negative).')
    remaining = v
    while True:
      group = remaining & 0x7F
      remaining >>= 7
      if remaining:
        group |= 0x80  # continuation bit: more groups follow
      self.write_byte(group)
      if not remaining:
        break

  def _write_packed(self, fmt, v):
    """Packs ``v`` with struct format ``fmt`` and appends the result."""
    self.write(struct.pack(fmt, v))

  def write_bigendian_int64(self, v):
    self._write_packed('>q', v)

  def write_bigendian_uint64(self, v):
    self._write_packed('>Q', v)

  def write_bigendian_int32(self, v):
    self._write_packed('>i', v)

  def write_bigendian_double(self, v):
    self._write_packed('>d', v)

  def get(self):
    # type: () -> bytes
    """Returns everything written so far as a single bytes object."""
    return b''.join(self.data)

  def size(self):
    # type: () -> int
    return self.byte_count

  def _clear(self):
    # type: () -> None
    self.data = []
    self.byte_count = 0
class ByteCountingOutputStream(OutputStream):
  """For internal use only; no backwards-compatibility guarantees.

  A pure Python implementation of stream.ByteCountingOutputStream: counts how
  many bytes *would* be written without retaining any of them.
  """
  def __init__(self):
    # The parent constructor sets up a buffer list that is deliberately left
    # unused; only the running byte count matters here.
    super(ByteCountingOutputStream, self).__init__()
    self.count = 0

  def write(self, byte_array, nested=False):
    # type: (bytes, bool) -> None
    num_bytes = len(byte_array)
    if nested:
      self.write_var_int64(num_bytes)
    self.count += num_bytes

  def write_byte(self, _):
    self.count += 1

  def get_count(self):
    return self.count

  def get(self):
    # The raw bytes are never retained, so there is nothing to return.
    raise NotImplementedError

  def __str__(self):
    return '<%s %s>' % (self.__class__.__name__, self.count)
class InputStream(object):
  """For internal use only; no backwards-compatibility guarantees.

  A pure Python implementation of stream.InputStream: a positional reader
  over an immutable bytes object."""
  def __init__(self, data):
    # type: (bytes) -> None
    self.data = data
    self.pos = 0
    # Indexing a byte-string yields different things on Python 2 vs 3:
    #   b = b'\xff\x01'
    #   Python 2: b[0] == '\xff', ord(b[0]) == 255
    #   Python 3: b[0] == 255
    # Pick the right accessor once, at construction time.
    if sys.version_info[0] >= 3:
      self.read_byte = self.read_byte_py3
    else:
      self.read_byte = self.read_byte_py2

  def size(self):
    """Returns the number of unread bytes remaining."""
    return len(self.data) - self.pos

  def read(self, size):
    # type: (int) -> bytes
    start = self.pos
    self.pos = start + size
    return self.data[start:self.pos]

  def read_all(self, nested):
    # type: (bool) -> bytes
    """Reads the rest of the stream, or a var-int-prefixed slice if nested."""
    count = self.read_var_int64() if nested else self.size()
    return self.read(count)

  def read_byte_py2(self):
    # type: () -> int
    self.pos += 1
    # mypy tests against python 3.x, where this is an error:
    return ord(self.data[self.pos - 1])  # type: ignore[arg-type]

  def read_byte_py3(self):
    # type: () -> int
    self.pos += 1
    return self.data[self.pos - 1]

  def read_var_int64(self):
    """Decodes a base-128 var-int (64-bit two's complement for negatives)."""
    shift = 0
    result = 0
    while True:
      byte = self.read_byte()
      if byte < 0:
        raise RuntimeError('VarLong not terminated.')
      bits = byte & 0x7F
      if shift >= 64 or (shift >= 63 and bits > 1):
        raise RuntimeError('VarLong too long.')
      result |= bits << shift
      shift += 7
      if not byte & 0x80:
        break
    if result >= 1 << 63:
      result -= 1 << 64
    return result

  def _read_unpacked(self, fmt, num_bytes):
    """Reads ``num_bytes`` and unpacks them with struct format ``fmt``."""
    (value, ) = struct.unpack(fmt, self.read(num_bytes))
    return value

  def read_bigendian_int64(self):
    return self._read_unpacked('>q', 8)

  def read_bigendian_uint64(self):
    return self._read_unpacked('>Q', 8)

  def read_bigendian_int32(self):
    return self._read_unpacked('>i', 4)

  def read_bigendian_double(self):
    return self._read_unpacked('>d', 8)
def get_varint_size(v):
  """For internal use only; no backwards-compatibility guarantees.

  Returns the size of the given integer value when encoded as a VarInt."""
  # Negative values are encoded via their 64-bit two's complement.
  if v < 0:
    v += 1 << 64
    if v <= 0:
      raise ValueError('Value too large (negative).')
  varint_size = 0
  # One byte is emitted per 7-bit group, so count the groups.
  while True:
    varint_size += 1
    v >>= 7
    if not v:
      break
  return varint_size | /rflow-apache-beam-2.28.0.tar.gz/rflow-apache-beam-2.28.0/apache_beam/coders/slow_stream.py | 0.701509 | 0.404272 | slow_stream.py | pypi |
# pytype: skip-file
from __future__ import absolute_import
import itertools
from array import array
from apache_beam.coders import typecoders
from apache_beam.coders.coder_impl import StreamCoderImpl
from apache_beam.coders.coders import BooleanCoder
from apache_beam.coders.coders import BytesCoder
from apache_beam.coders.coders import Coder
from apache_beam.coders.coders import FastCoder
from apache_beam.coders.coders import FloatCoder
from apache_beam.coders.coders import IterableCoder
from apache_beam.coders.coders import MapCoder
from apache_beam.coders.coders import NullableCoder
from apache_beam.coders.coders import StrUtf8Coder
from apache_beam.coders.coders import TupleCoder
from apache_beam.coders.coders import VarIntCoder
from apache_beam.portability import common_urns
from apache_beam.portability.api import schema_pb2
from apache_beam.typehints import row_type
from apache_beam.typehints.schemas import PYTHON_ANY_URN
from apache_beam.typehints.schemas import LogicalType
from apache_beam.typehints.schemas import named_tuple_from_schema
from apache_beam.typehints.schemas import schema_from_element_type
from apache_beam.utils import proto_utils
__all__ = ["RowCoder"]
class RowCoder(FastCoder):
  """Coder for `typing.NamedTuple` instances.

  Implements the beam:coder:row:v1 standard coder spec.
  """
  def __init__(self, schema):
    """Initializes a :class:`RowCoder`.

    Args:
      schema (apache_beam.portability.api.schema_pb2.Schema): The protobuf
        representation of the schema of the data that the RowCoder will be
        used to encode/decode.
    """
    self.schema = schema
    # Null values are tracked out-of-band (in a null bitmap), so each field
    # gets the coder for its non-null type.
    self.components = [
        _nonnull_coder_from_type(field.type) for field in self.schema.fields
    ]

  def _create_impl(self):
    return RowCoderImpl(self.schema, self.components)

  def is_deterministic(self):
    # Deterministic only if every field coder is deterministic.
    for component in self.components:
      if not component.is_deterministic():
        return False
    return True

  def to_type_hint(self):
    return named_tuple_from_schema(self.schema)

  def __hash__(self):
    return hash(self.schema.SerializeToString())

  def __eq__(self, other):
    if type(self) != type(other):
      return False
    return self.schema == other.schema

  def to_runner_api_parameter(self, unused_context):
    return (common_urns.coders.ROW.urn, self.schema, [])

  @staticmethod
  @Coder.register_urn(common_urns.coders.ROW.urn, schema_pb2.Schema)
  def from_runner_api_parameter(schema, components, unused_context):
    return RowCoder(schema)

  @staticmethod
  def from_type_hint(type_hint, registry):
    return RowCoder(schema_from_element_type(type_hint))

  @staticmethod
  def from_payload(payload):
    # type: (bytes) -> RowCoder
    return RowCoder(proto_utils.parse_Bytes(payload, schema_pb2.Schema))

  def __reduce__(self):
    # schema_pb2.Schema objects cannot be pickled directly, so pickle the
    # serialized bytes representation instead.
    return (RowCoder.from_payload, (self.schema.SerializeToString(), ))
# Make RowCoder the coder used for row-typed (schema'd) PCollections.
typecoders.registry.register_coder(row_type.RowTypeConstraint, RowCoder)
def _coder_from_type(field_type):
  """Returns a coder for ``field_type``, wrapped in a NullableCoder when the
  field is declared nullable."""
  base_coder = _nonnull_coder_from_type(field_type)
  return NullableCoder(base_coder) if field_type.nullable else base_coder
def _nonnull_coder_from_type(field_type):
  """Returns the coder for ``field_type``, ignoring its nullability flag."""
  type_info = field_type.WhichOneof("type_info")
  if type_info == "atomic_type":
    atomic_coders = {
        schema_pb2.INT32: VarIntCoder,
        schema_pb2.INT64: VarIntCoder,
        schema_pb2.DOUBLE: FloatCoder,
        schema_pb2.STRING: StrUtf8Coder,
        schema_pb2.BOOLEAN: BooleanCoder,
        schema_pb2.BYTES: BytesCoder,
    }
    if field_type.atomic_type in atomic_coders:
      return atomic_coders[field_type.atomic_type]()
  elif type_info == "array_type":
    return IterableCoder(_coder_from_type(field_type.array_type.element_type))
  elif type_info == "map_type":
    return MapCoder(
        _coder_from_type(field_type.map_type.key_type),
        _coder_from_type(field_type.map_type.value_type))
  elif type_info == "logical_type":
    # Special case for the Any logical type: just use the default coder for
    # an unknown Python object.
    if field_type.logical_type.urn == PYTHON_ANY_URN:
      return typecoders.registry.get_coder(object)
    logical_type = LogicalType.from_runner_api(field_type.logical_type)
    return LogicalTypeCoder(
        logical_type, _coder_from_type(field_type.logical_type.representation))
  elif type_info == "row_type":
    return RowCoder(field_type.row_type.schema)
  # The Java SDK supports several more types, but the coders are not yet
  # standard, and are not implemented in Python.
  raise ValueError(
      "Encountered a type that is not currently supported by RowCoder: %s" %
      field_type)
class RowCoderImpl(StreamCoderImpl):
  """For internal use only; no backwards-compatibility guarantees.

  Stream implementation of the row wire format: a var-int field count, a
  length-prefixed null bitmap (bit i%8 of byte i//8 marks field i as null),
  then each non-null field encoded with its component coder.
  """
  SIZE_CODER = VarIntCoder().get_impl()
  NULL_MARKER_CODER = BytesCoder().get_impl()

  def __init__(self, schema, components):
    self.schema = schema
    self.constructor = named_tuple_from_schema(schema)
    self.components = list(c.get_impl() for c in components)
    self.has_nullable_fields = any(
        field.type.nullable for field in self.schema.fields)

  def encode_to_stream(self, value, out, nested):
    nvals = len(self.schema.fields)
    self.SIZE_CODER.encode_to_stream(nvals, out, True)
    attrs = [getattr(value, f.name) for f in self.schema.fields]
    # An empty bitmap is written when no field can be (or is) null.
    words = array('B')
    if self.has_nullable_fields:
      nulls = list(attr is None for attr in attrs)
      if any(nulls):
        words = array('B', itertools.repeat(0, (nvals + 7) // 8))
        for i, is_null in enumerate(nulls):
          words[i // 8] |= is_null << (i % 8)
    # BUG FIX: array.tostring() was deprecated and removed in Python 3.9;
    # tobytes() is the exact replacement.
    self.NULL_MARKER_CODER.encode_to_stream(words.tobytes(), out, True)
    for c, field, attr in zip(self.components, self.schema.fields, attrs):
      if attr is None:
        if not field.type.nullable:
          raise ValueError(
              "Attempted to encode null for non-nullable field \"{}\".".format(
                  field.name))
        continue
      c.encode_to_stream(attr, out, True)

  def decode_from_stream(self, in_stream, nested):
    nvals = self.SIZE_CODER.decode_from_stream(in_stream, True)
    words = array('B')
    # BUG FIX: array.fromstring() was removed in Python 3.9; frombytes() is
    # the exact replacement.
    words.frombytes(self.NULL_MARKER_CODER.decode_from_stream(in_stream, True))
    if words:
      nulls = ((words[i // 8] >> (i % 8)) & 0x01 for i in range(nvals))
    else:
      nulls = itertools.repeat(False, nvals)
    # If this coder's schema has more attributes than the encoded value, then
    # the schema must have changed. Populate the unencoded fields with nulls.
    if len(self.components) > nvals:
      nulls = itertools.chain(
          nulls, itertools.repeat(True, len(self.components) - nvals))
    # Note that if this coder's schema has *fewer* attributes than the encoded
    # value, we just need to ignore the additional values, which will occur
    # here because we only decode as many values as we have coders for.
    return self.constructor(
        *(
            None if is_null else c.decode_from_stream(in_stream, True) for c,
            is_null in zip(self.components, nulls)))

  def _make_value_coder(self, nulls=itertools.repeat(False)):
    # Builds a tuple coder over only the non-null components. The default
    # ``nulls`` is an infinite all-False iterator; sharing it across calls is
    # safe because it is never exhausted.
    components = [
        component for component,
        is_null in zip(self.components, nulls) if not is_null
    ] if self.has_nullable_fields else self.components
    return TupleCoder(components).get_impl()
class LogicalTypeCoder(FastCoder):
  """Coder for logical types.

  Values are converted to their representation type before encoding and back
  to the language type after decoding; the actual byte work is delegated to
  the representation coder.
  """
  def __init__(self, logical_type, representation_coder):
    self.logical_type = logical_type
    self.representation_coder = representation_coder

  def _create_impl(self):
    return LogicalTypeCoderImpl(self.logical_type, self.representation_coder)

  def is_deterministic(self):
    # Determinism is inherited from the underlying representation coder.
    return self.representation_coder.is_deterministic()

  def to_type_hint(self):
    return self.logical_type.language_type()
class LogicalTypeCoderImpl(StreamCoderImpl):
  # Round-trips logical-type values through their representation type and the
  # corresponding representation coder.
  def __init__(self, logical_type, representation_coder):
    self.logical_type = logical_type
    self.representation_coder = representation_coder.get_impl()
  def encode_to_stream(self, value, out, nested):
    # Convert to the representation type, then encode that.
    return self.representation_coder.encode_to_stream(
        self.logical_type.to_representation_type(value), out, nested)
  def decode_from_stream(self, in_stream, nested):
    # Decode the representation, then convert back to the language type.
    return self.logical_type.to_language_type(
        self.representation_coder.decode_from_stream(in_stream, nested)) | /rflow-apache-beam-2.28.0.tar.gz/rflow-apache-beam-2.28.0/apache_beam/coders/row_coder.py | 0.782413 | 0.221983 | row_coder.py | pypi |
# pytype: skip-file
"""Common utility class to help SDK harness to execute an SDF. """
from __future__ import absolute_import
from __future__ import division
import logging
import threading
from builtins import object
from typing import TYPE_CHECKING
from typing import Any
from typing import NamedTuple
from typing import Optional
from typing import Tuple
from typing import Union
from apache_beam.transforms.core import WatermarkEstimatorProvider
from apache_beam.utils.timestamp import Duration
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import WindowedValue
if TYPE_CHECKING:
from apache_beam.io.iobase import RestrictionProgress
from apache_beam.io.iobase import RestrictionTracker
from apache_beam.io.iobase import WatermarkEstimator
_LOGGER = logging.getLogger(__name__)
# Primary half of a split: the portion of work the current bundle remains
# responsible for processing.
SplitResultPrimary = NamedTuple(
    'SplitResultPrimary', [('primary_value', WindowedValue)])
# Residual half of a split: work handed back to the runner for rescheduling,
# together with the output watermark and an optional resume delay.
SplitResultResidual = NamedTuple(
    'SplitResultResidual',
    [('residual_value', WindowedValue), ('current_watermark', Timestamp),
     ('deferred_timestamp', Optional[Duration])])
class ThreadsafeRestrictionTracker(object):
  """A thread-safe wrapper which wraps a `RestrictionTracker`.

  This wrapper guarantees synchronization of modifying restrictions across
  multiple threads: delegated calls are made while holding a reentrant lock.
  """
  def __init__(self, restriction_tracker):
    # type: (RestrictionTracker) -> None
    from apache_beam.io.iobase import RestrictionTracker
    if not isinstance(restriction_tracker, RestrictionTracker):
      raise ValueError(
          'Initialize ThreadsafeRestrictionTracker requires'
          'RestrictionTracker.')
    self._restriction_tracker = restriction_tracker
    # Records an absolute timestamp when defer_remainder is called.
    self._timestamp = None
    # RLock (not Lock): defer_remainder() calls try_split() while already
    # holding the lock.
    self._lock = threading.RLock()
    self._deferred_residual = None
    self._deferred_timestamp = None  # type: Optional[Union[Timestamp, Duration]]
  def current_restriction(self):
    with self._lock:
      return self._restriction_tracker.current_restriction()
  def try_claim(self, position):
    with self._lock:
      return self._restriction_tracker.try_claim(position)
  def defer_remainder(self, deferred_time=None):
    """Performs self-checkpoint on current processing restriction with an
    expected resuming time.

    Self-checkpoint could happen during processing elements. When executing an
    DoFn.process(), you may want to stop processing an element and resume
    later if the current element has been processed for quite a long time or
    you also want to have some outputs from other elements.
    ``defer_remainder()`` can be called per element if needed.

    Args:
      deferred_time: A relative ``Duration`` that indicates the ideal time gap
        between now and resuming, or an absolute ``Timestamp`` for resuming
        execution time. If the time_delay is None, the deferred work will be
        executed as soon as possible.
    """
    # Record current time for calculating deferred_time later.
    with self._lock:
      self._timestamp = Timestamp.now()
      if deferred_time and not isinstance(deferred_time, (Duration, Timestamp)):
        raise ValueError(
            'The timestamp of deter_remainder() should be a '
            'Duration or a Timestamp, or None.')
      self._deferred_timestamp = deferred_time
      # try_split(0) checkpoints at the current position; the residual half
      # is stashed for deferred_status().
      checkpoint = self.try_split(0)
      if checkpoint:
        _, self._deferred_residual = checkpoint
  def check_done(self):
    with self._lock:
      return self._restriction_tracker.check_done()
  def current_progress(self):
    # type: () -> RestrictionProgress
    with self._lock:
      return self._restriction_tracker.current_progress()
  def try_split(self, fraction_of_remainder):
    with self._lock:
      return self._restriction_tracker.try_split(fraction_of_remainder)
  def deferred_status(self):
    # type: () -> Optional[Tuple[Any, Duration]]
    """Returns deferred work which is produced by ``defer_remainder()``.

    When there is a self-checkpoint performed, the system needs to fulfill the
    DelayedBundleApplication with deferred_work for a ProcessBundleResponse.
    The system calls this API to get deferred_residual with watermark together
    to help the runner to schedule a future work.

    Returns: (deferred_residual, time_delay) if having any residual, else None.
    """
    # NOTE(review): this method reads and mutates the deferred state without
    # taking self._lock — presumably it is only called after element
    # processing has finished; confirm before relying on concurrent use.
    if self._deferred_residual:
      # If _deferred_timestamp is None, create Duration(0).
      if not self._deferred_timestamp:
        self._deferred_timestamp = Duration()
      # If an absolute timestamp is provided, calculate the delta between
      # the absolute time and the time deferred_status() is called.
      elif isinstance(self._deferred_timestamp, Timestamp):
        self._deferred_timestamp = (self._deferred_timestamp - Timestamp.now())
      # If a Duration is provided, the deferred time should be:
      # provided duration - the spent time since the defer_remainder() is
      # called.
      elif isinstance(self._deferred_timestamp, Duration):
        self._deferred_timestamp -= (Timestamp.now() - self._timestamp)
      return self._deferred_residual, self._deferred_timestamp
    return None
  def is_bounded(self):
    return self._restriction_tracker.is_bounded()
class RestrictionTrackerView(object):
  """A DoFn view of thread-safe RestrictionTracker.

  The RestrictionTrackerView wraps a ThreadsafeRestrictionTracker and only
  exposes APIs that will be called by a ``DoFn.process()``. During execution
  time, the RestrictionTrackerView will be fed into the ``DoFn.process`` as a
  restriction_tracker.
  """
  def __init__(self, threadsafe_restriction_tracker):
    # type: (ThreadsafeRestrictionTracker) -> None
    if not isinstance(threadsafe_restriction_tracker,
                      ThreadsafeRestrictionTracker):
      raise ValueError(
          'Initialize RestrictionTrackerView requires '
          'ThreadsafeRestrictionTracker.')
    self._threadsafe_restriction_tracker = threadsafe_restriction_tracker

  def current_restriction(self):
    """Returns the restriction currently being processed."""
    return self._threadsafe_restriction_tracker.current_restriction()

  def try_claim(self, position):
    """Attempts to claim ``position``; returns whether the claim succeeded."""
    return self._threadsafe_restriction_tracker.try_claim(position)

  def defer_remainder(self, deferred_time=None):
    """Self-checkpoints, deferring the unprocessed remainder of the work."""
    self._threadsafe_restriction_tracker.defer_remainder(deferred_time)

  def is_bounded(self):
    """Returns whether the underlying restriction is bounded.

    BUG FIX: the delegated call's result was previously discarded (missing
    ``return``), so this method always returned None.
    """
    return self._threadsafe_restriction_tracker.is_bounded()
class ThreadsafeWatermarkEstimator(object):
  """A threadsafe wrapper which wraps a WatermarkEstimator with a locking
  mechanism to guarantee multi-thread safety.
  """
  def __init__(self, watermark_estimator):
    # type: (WatermarkEstimator) -> None
    from apache_beam.io.iobase import WatermarkEstimator
    if not isinstance(watermark_estimator, WatermarkEstimator):
      raise ValueError('Initializing Threadsafe requires a WatermarkEstimator')
    self._watermark_estimator = watermark_estimator
    self._lock = threading.Lock()
  def __getattr__(self, attr):
    # Fallback for estimator methods without an explicit wrapper below:
    # delegate to the wrapped estimator, acquiring the lock per call.
    # (__getattr__ is only consulted for names not found on this object.)
    if hasattr(self._watermark_estimator, attr):
      def method_wrapper(*args, **kw):
        with self._lock:
          return getattr(self._watermark_estimator, attr)(*args, **kw)
      return method_wrapper
    raise AttributeError(attr)
  def get_estimator_state(self):
    with self._lock:
      return self._watermark_estimator.get_estimator_state()
  def current_watermark(self):
    # type: () -> Timestamp
    with self._lock:
      return self._watermark_estimator.current_watermark()
  def observe_timestamp(self, timestamp):
    # type: (Timestamp) -> None
    # Validate outside the lock; only the delegated call needs protection.
    if not isinstance(timestamp, Timestamp):
      raise ValueError(
          'Input of observe_timestamp should be a Timestamp '
          'object')
    with self._lock:
      self._watermark_estimator.observe_timestamp(timestamp)
class NoOpWatermarkEstimatorProvider(WatermarkEstimatorProvider):
  """A WatermarkEstimatorProvider which creates a NoOpWatermarkEstimator for
  the framework, used when no custom estimator is supplied.
  """
  def initial_estimator_state(self, element, restriction):
    # The no-op estimator carries no state.
    return None
  def create_watermark_estimator(self, estimator_state):
    from apache_beam.io.iobase import WatermarkEstimator
    class _NoOpWatermarkEstimator(WatermarkEstimator):
      """A No-op WatermarkEstimator which is provided for the framework if
      there is no custom one.
      """
      def observe_timestamp(self, timestamp):
        pass
      def current_watermark(self):
        return None
      def get_estimator_state(self):
        return None
    return _NoOpWatermarkEstimator() | /rflow-apache-beam-2.28.0.tar.gz/rflow-apache-beam-2.28.0/apache_beam/runners/sdf_utils.py | 0.89481 | 0.216943 | sdf_utils.py | pypi |
# pytype: skip-file
# mypy: disallow-untyped-defs
from __future__ import absolute_import
from builtins import object
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import FrozenSet
from typing import Generic
from typing import Iterable
from typing import Mapping
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import Union
from typing_extensions import Protocol
from apache_beam import coders
from apache_beam import pipeline
from apache_beam import pvalue
from apache_beam.internal import pickler
from apache_beam.pipeline import ComponentIdMap
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.transforms import core
from apache_beam.transforms import environments
from apache_beam.typehints import native_type_compatibility
if TYPE_CHECKING:
from google.protobuf import message # pylint: disable=ungrouped-imports
from apache_beam.coders.coder_impl import IterableStateReader
from apache_beam.coders.coder_impl import IterableStateWriter
# Type variable bound to PortableObject; parameterizes _PipelineContextMap
# over the kind of component it stores.
PortableObjectT = TypeVar('PortableObjectT', bound='PortableObject')
class PortableObject(Protocol):
  """Structural interface for pipeline components that round-trip through the
  Beam runner API (serialize via to_runner_api, rebuild via from_runner_api).
  """
  def to_runner_api(self, __context):
    # type: (PipelineContext) -> Any
    pass
  @classmethod
  def from_runner_api(cls, __proto, __context):
    # type: (Any, PipelineContext) -> Any
    pass
class _PipelineContextMap(Generic[PortableObjectT]):
  """This is a bi-directional map between objects and ids.

  Under the hood it encodes and decodes these objects into runner API
  representations.
  """
  def __init__(self,
               context,  # type: PipelineContext
               obj_type,  # type: Type[PortableObjectT]
               namespace,  # type: str
               proto_map=None  # type: Optional[Mapping[str, message.Message]]
              ):
    # type: (...) -> None
    self._pipeline_context = context
    self._obj_type = obj_type
    self._namespace = namespace
    # Caches in both directions; protos are kept separately so an id may have
    # a proto without a deserialized object (and vice versa).
    self._obj_to_id = {}  # type: Dict[Any, str]
    self._id_to_obj = {}  # type: Dict[str, Any]
    self._id_to_proto = dict(proto_map) if proto_map else {}
  def populate_map(self, proto_map):
    # type: (Mapping[str, message.Message]) -> None
    # Copies every known proto into the given (mutable) proto map.
    for id, proto in self._id_to_proto.items():
      proto_map[id].CopyFrom(proto)
  def get_id(self, obj, label=None):
    # type: (PortableObjectT, Optional[str]) -> str
    # On first sight, assigns a fresh id and serializes obj to its proto.
    if obj not in self._obj_to_id:
      id = self._pipeline_context.component_id_map.get_or_assign(
          obj, self._obj_type, label)
      self._id_to_obj[id] = obj
      self._obj_to_id[obj] = id
      self._id_to_proto[id] = obj.to_runner_api(self._pipeline_context)
    return self._obj_to_id[obj]
  def get_proto(self, obj, label=None):
    # type: (PortableObjectT, Optional[str]) -> message.Message
    return self._id_to_proto[self.get_id(obj, label)]
  def get_by_id(self, id):
    # type: (str) -> PortableObjectT
    # Deserializes lazily and memoizes the resulting object.
    if id not in self._id_to_obj:
      self._id_to_obj[id] = self._obj_type.from_runner_api(
          self._id_to_proto[id], self._pipeline_context)
    return self._id_to_obj[id]
  def get_by_proto(self, maybe_new_proto, label=None, deduplicate=False):
    # type: (message.Message, Optional[str], bool) -> str
    # With deduplicate=True, a linear scan reuses the id of an identical
    # existing proto instead of registering a duplicate.
    if deduplicate:
      for id, proto in self._id_to_proto.items():
        if proto == maybe_new_proto:
          return id
    return self.put_proto(
        self._pipeline_context.component_id_map.get_or_assign(
            label, obj_type=self._obj_type),
        maybe_new_proto)
  def get_id_to_proto_map(self):
    # type: () -> Dict[str, message.Message]
    return self._id_to_proto
  def get_proto_from_id(self, id):
    # type: (str) -> message.Message
    return self.get_id_to_proto_map()[id]
  def put_proto(self, id, proto, ignore_duplicates=False):
    # type: (str, message.Message, bool) -> str
    # ignore_duplicates only tolerates re-inserting an *identical* proto
    # under an existing id; a different proto still raises.
    if not ignore_duplicates and id in self._id_to_proto:
      raise ValueError("Id '%s' is already taken." % id)
    elif (ignore_duplicates and id in self._id_to_proto and
          self._id_to_proto[id] != proto):
      raise ValueError(
          'Cannot insert different protos %r and %r with the same ID %r',
          self._id_to_proto[id],
          proto,
          id)
    self._id_to_proto[id] = proto
    return id
  def __getitem__(self, id):
    # type: (str) -> Any
    return self.get_by_id(id)
  def __contains__(self, id):
    # type: (str) -> bool
    return id in self._id_to_proto
class PipelineContext(object):
  """For internal use only; no backwards-compatibility guarantees.

  Used for accessing and constructing the referenced objects of a Pipeline.
  """

  def __init__(self,
               proto=None, # type: Optional[Union[beam_runner_api_pb2.Components, beam_fn_api_pb2.ProcessBundleDescriptor]]
               component_id_map=None, # type: Optional[pipeline.ComponentIdMap]
               default_environment=None, # type: Optional[environments.Environment]
               use_fake_coders=False, # type: bool
               iterable_state_read=None, # type: Optional[IterableStateReader]
               iterable_state_write=None, # type: Optional[IterableStateWriter]
               namespace='ref', # type: str
               requirements=(), # type: Iterable[str]
              ):
    # type: (...) -> None
    # Normalize a ProcessBundleDescriptor to a Components proto so the
    # generic handling below is uniform. Only the component maps the
    # descriptor carries (coders, windowing strategies, environments) are
    # copied; its transforms/pcollections are intentionally not copied here.
    if isinstance(proto, beam_fn_api_pb2.ProcessBundleDescriptor):
      proto = beam_runner_api_pb2.Components(
          coders=dict(proto.coders.items()),
          windowing_strategies=dict(proto.windowing_strategies.items()),
          environments=dict(proto.environments.items()))
    self.component_id_map = component_id_map or ComponentIdMap(namespace)
    assert self.component_id_map.namespace == namespace
    # One bidirectional object <-> id <-> proto map per component type.
    self.transforms = _PipelineContextMap(
        self,
        pipeline.AppliedPTransform,
        namespace,
        proto.transforms if proto is not None else None)
    self.pcollections = _PipelineContextMap(
        self,
        pvalue.PCollection,
        namespace,
        proto.pcollections if proto is not None else None)
    self.coders = _PipelineContextMap(
        self,
        coders.Coder,
        namespace,
        proto.coders if proto is not None else None)
    self.windowing_strategies = _PipelineContextMap(
        self,
        core.Windowing,
        namespace,
        proto.windowing_strategies if proto is not None else None)
    self.environments = _PipelineContextMap(
        self,
        environments.Environment,
        namespace,
        proto.environments if proto is not None else None)
    # Registering the default environment must happen after the environments
    # map above has been created.
    if default_environment:
      self._default_environment_id = self.environments.get_id(
          default_environment,
          label='default_environment') # type: Optional[str]
    else:
      self._default_environment_id = None
    self.use_fake_coders = use_fake_coders
    self.iterable_state_read = iterable_state_read
    self.iterable_state_write = iterable_state_write
    self._requirements = set(requirements)

  def add_requirement(self, requirement):
    # type: (str) -> None
    """Records a (string) requirement for this pipeline."""
    self._requirements.add(requirement)

  def requirements(self):
    # type: () -> FrozenSet[str]
    """Returns the recorded requirements as an immutable set."""
    return frozenset(self._requirements)

  # If fake coders are requested, return a pickled version of the element type
  # rather than an actual coder. The element type is required for some runners,
  # as well as performing a round-trip through protos.
  # TODO(BEAM-2717): Remove once this is no longer needed.
  def coder_id_from_element_type(self, element_type):
    # type: (Any) -> str
    if self.use_fake_coders:
      return pickler.dumps(element_type).decode('ascii')
    else:
      return self.coders.get_id(coders.registry.get_coder(element_type))

  def element_type_from_coder_id(self, coder_id):
    # type: (str) -> Any
    # Ids not present in the coders map are assumed to be fake coders, i.e.
    # pickled element types (see coder_id_from_element_type above).
    if self.use_fake_coders or coder_id not in self.coders:
      return pickler.loads(coder_id)
    else:
      return native_type_compatibility.convert_to_beam_type(
          self.coders[coder_id].to_type_hint())

  @staticmethod
  def from_runner_api(proto):
    # type: (beam_runner_api_pb2.Components) -> PipelineContext
    """Creates a context pre-populated from the given Components proto."""
    return PipelineContext(proto)

  def to_runner_api(self):
    # type: () -> beam_runner_api_pb2.Components
    """Serializes all registered components into a Components proto."""
    context_proto = beam_runner_api_pb2.Components()
    self.transforms.populate_map(context_proto.transforms)
    self.pcollections.populate_map(context_proto.pcollections)
    self.coders.populate_map(context_proto.coders)
    self.windowing_strategies.populate_map(context_proto.windowing_strategies)
    self.environments.populate_map(context_proto.environments)
    return context_proto

  def default_environment_id(self):
    # type: () -> Optional[str]
    """Returns the id of the default environment, if one was provided."""
    return self._default_environment_id
# This module is experimental. No backwards-compatibility guarantees.
# pytype: skip-file
from __future__ import absolute_import
from builtins import object
from typing import Optional
from apache_beam.runners import common
from apache_beam.utils import counters
class StateSampler(object):
  """Tracks the currently-executing state via an explicit stack.

  This pure-Python fallback performs no actual sampling (start/stop/reset
  are no-ops); it only records state transitions on the execution thread.
  """

  def __init__(self, sampling_period_ms):
    # sampling_period_ms is accepted for interface parity with the sampling
    # implementation but is unused here.
    unknown_state = ScopedState(self, counters.CounterName('unknown'), None)
    self._state_stack = [unknown_state]
    self.state_transition_count = 0
    self.time_since_transition = 0

  def current_state(self):
    # type: () -> ScopedState
    """Returns the current execution state.

    This operation is not thread safe, and should only be called from the
    execution thread."""
    return self._state_stack[-1]

  def _scoped_state(self,
                    counter_name, # type: counters.CounterName
                    name_context, # type: common.NameContext
                    output_counter,
                    metrics_container=None):
    # type: (...) -> ScopedState
    """Creates a ScopedState bound to this sampler."""
    assert isinstance(name_context, common.NameContext)
    return ScopedState(
        self, counter_name, name_context, output_counter, metrics_container)

  def update_metric(self, typed_metric_name, value):
    """Updates a metric cell on the current state's metrics container."""
    container = self.current_state().metrics_container
    if container is None:
      return
    container.get_metric_cell(typed_metric_name).update(value)

  def _enter_state(self, state):
    # type: (ScopedState) -> None
    self._state_stack.append(state)
    self.state_transition_count += 1

  def _exit_state(self):
    # type: () -> None
    self._state_stack.pop()
    self.state_transition_count += 1

  def start(self):
    # type: () -> None
    # Sampling not yet supported. Only state tracking at the moment.
    pass

  def stop(self):
    # type: () -> None
    pass

  def reset(self):
    # type: () -> None
    pass
class ScopedState(object):
  """A context manager representing one entry on the sampler's state stack.

  Entering pushes this state onto the owning sampler; exiting pops it.
  ``nsecs`` accumulates the (externally-assigned) time spent in this state.
  """

  def __init__(self,
               sampler, # type: StateSampler
               name, # type: counters.CounterName
               step_name_context, # type: Optional[common.NameContext]
               counter=None,
               metrics_container=None):
    self.state_sampler = sampler
    self.name = name
    self.name_context = step_name_context
    self.counter = counter
    self.nsecs = 0
    self.metrics_container = metrics_container

  def sampled_seconds(self):
    # type: () -> float
    """Time spent in this state, in (fractional) seconds."""
    return self.nsecs * 1e-9

  def sampled_msecs_int(self):
    # type: () -> int
    """Time spent in this state, truncated to whole milliseconds."""
    return int(self.nsecs * 1e-6)

  def __repr__(self):
    return "ScopedState[{}, {}]".format(self.name, self.nsecs)

  def __enter__(self):
    self.state_sampler._enter_state(self)

  def __exit__(self, exc_type, exc_value, traceback):
    self.state_sampler._exit_state()
"""A module for caching state reads/writes in Beam applications."""
# pytype: skip-file
# mypy: disallow-untyped-defs
from __future__ import absolute_import
import collections
import logging
import threading
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import Generic
from typing import Hashable
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple
from typing import TypeVar
from apache_beam.metrics import monitoring_infos
if TYPE_CHECKING:
from apache_beam.portability.api import metrics_pb2
_LOGGER = logging.getLogger(__name__)
CallableT = TypeVar('CallableT', bound='Callable')
KT = TypeVar('KT')
VT = TypeVar('VT')
class Metrics(object):
  """Metrics container for state cache metrics."""

  # A set of all metric names ever registered via the decorators below.
  ALL_METRICS = set() # type: Set[Hashable]
  PREFIX = "beam:metric:statecache:"

  def __init__(self):
    # type: () -> None
    # Counters are kept per thread so bundle-processing threads do not
    # contend on a shared map.
    self._context = threading.local()

  def initialize(self):
    # type: () -> None
    """Needs to be called once per thread to initialize the local metrics cache.
    """
    if not hasattr(self._context, 'metrics'):
      self._context.metrics = collections.defaultdict(int)

  def count(self, name):
    # type: (str) -> None
    """Increments the counter for ``name``."""
    self._context.metrics[name] += 1

  def hit_miss(self, total_name, hit_miss_name):
    # type: (str, str) -> None
    """Increments both the total counter and the hit/miss counter."""
    metrics = self._context.metrics
    metrics[total_name] += 1
    metrics[hit_miss_name] += 1

  def get_monitoring_infos(self, cache_size, cache_capacity):
    # type: (int, int) -> List[metrics_pb2.MonitoringInfo]
    """Returns the metrics scoped to the current bundle."""
    metrics = self._context.metrics
    if not metrics:
      # No metrics collected; nothing to report.
      return []
    # Report an explicit zero for every registered metric never incremented.
    for key in Metrics.ALL_METRICS:
      metrics.setdefault(key, 0)
    # Gauges reflect the state since last queried.
    gauges = [
        monitoring_infos.int64_gauge(self.PREFIX + name, val)
        for name, val in metrics.items()
    ]
    gauges.append(
        monitoring_infos.int64_gauge(self.PREFIX + 'size', cache_size))
    gauges.append(
        monitoring_infos.int64_gauge(self.PREFIX + 'capacity', cache_capacity))
    # Counters summarize totals across bundles.
    counters = [
        monitoring_infos.int64_counter(self.PREFIX + name + '_total', val)
        for name, val in metrics.items()
    ]
    # Reset for the next bundle on this thread.
    metrics.clear()
    return gauges + counters

  @staticmethod
  def counter_hit_miss(total_name, hit_name, miss_name):
    # type: (str, str, str) -> Callable[[CallableT], CallableT]
    """Decorator counting calls and classifying a None result as a miss."""
    Metrics.ALL_METRICS.update([total_name, hit_name, miss_name])

    def decorator(function):
      # type: (CallableT) -> CallableT
      def reporter(self, *args, **kwargs):
        # type: (StateCache, Any, Any) -> Any
        result = function(self, *args, **kwargs)
        outcome = miss_name if result is None else hit_name
        self._metrics.hit_miss(total_name, outcome)
        return result

      return reporter # type: ignore[return-value]

    return decorator

  @staticmethod
  def counter(metric_name):
    # type: (str) -> Callable[[CallableT], CallableT]
    """Decorator for counting function calls."""
    Metrics.ALL_METRICS.add(metric_name)

    def decorator(function):
      # type: (CallableT) -> CallableT
      def reporter(self, *args, **kwargs):
        # type: (StateCache, Any, Any) -> Any
        self._metrics.count(metric_name)
        return function(self, *args, **kwargs)

      return reporter # type: ignore[return-value]

    return decorator
class StateCache(object):
  """ Cache for Beam state access, scoped by state key and cache_token.
  Assumes a bag state implementation.

  For a given state_key, caches a (cache_token, value) tuple and allows to

  a) read from the cache (get),
     if the currently stored cache_token matches the provided
  b) write to the cache (put),
     storing the new value alongside with a cache token
  c) append to the currently cache item (extend),
     if the currently stored cache_token matches the provided
  d) empty a cached element (clear),
     if the currently stored cache_token matches the provided
  e) evict a cached element (evict)

  The operations on the cache are thread-safe for use by multiple workers.

  :arg max_entries The maximum number of entries to store in the cache.
  TODO Memory-based caching: https://issues.apache.org/jira/browse/BEAM-8297
  """
  def __init__(self, max_entries):
    # type: (int) -> None
    _LOGGER.info('Creating state cache with size %s', max_entries)
    # Miss sentinel. Deliberately None: the hit/miss decorator on get()
    # classifies a None return value as a cache miss.
    self._missing = None
    self._cache = self.LRUCache[Tuple[bytes, Optional[bytes]],
                                Any](max_entries, self._missing)
    # Reentrant lock serializing all access to the underlying LRU cache.
    self._lock = threading.RLock()
    self._metrics = Metrics()

  @Metrics.counter_hit_miss("get", "hit", "miss")
  def get(self, state_key, cache_token):
    # type: (bytes, Optional[bytes]) -> Any
    """Returns the cached value for (state_key, cache_token), or None."""
    assert cache_token and self.is_cache_enabled()
    with self._lock:
      return self._cache.get((state_key, cache_token))

  @Metrics.counter("put")
  def put(self, state_key, cache_token, value):
    # type: (bytes, Optional[bytes], Any) -> None
    """Stores ``value`` under (state_key, cache_token)."""
    assert cache_token and self.is_cache_enabled()
    with self._lock:
      return self._cache.put((state_key, cache_token), value)

  @Metrics.counter("clear")
  def clear(self, state_key, cache_token):
    # type: (bytes, Optional[bytes]) -> None
    # Bag-state semantics: clearing caches an empty bag rather than
    # removing the entry.
    assert cache_token and self.is_cache_enabled()
    with self._lock:
      self._cache.put((state_key, cache_token), [])

  @Metrics.counter("evict")
  def evict(self, state_key, cache_token):
    # type: (bytes, Optional[bytes]) -> None
    """Removes the entry for (state_key, cache_token), if present."""
    assert self.is_cache_enabled()
    with self._lock:
      self._cache.evict((state_key, cache_token))

  def evict_all(self):
    # type: () -> None
    """Removes all cached entries."""
    with self._lock:
      self._cache.evict_all()

  def initialize_metrics(self):
    # type: () -> None
    # Must be called once per processing thread (see Metrics.initialize).
    self._metrics.initialize()

  def is_cache_enabled(self):
    # type: () -> bool
    # A capacity of 0 disables caching entirely.
    return self._cache._max_entries > 0

  def size(self):
    # type: () -> int
    return len(self._cache)

  def get_monitoring_infos(self):
    # type: () -> List[metrics_pb2.MonitoringInfo]
    """Retrieves the monitoring infos and resets the counters."""
    with self._lock:
      size = len(self._cache)
      capacity = self._cache._max_entries
    return self._metrics.get_monitoring_infos(size, capacity)

  class LRUCache(Generic[KT, VT]):
    # Simple least-recently-used cache built on an ordered dict; the first
    # entry is always the least recently used one.
    def __init__(self, max_entries, default_entry):
      # type: (int, VT) -> None
      self._max_entries = max_entries
      self._default_entry = default_entry
      self._cache = collections.OrderedDict(
      ) # type: collections.OrderedDict[KT, VT]

    def get(self, key):
      # type: (KT) -> VT
      # Pop and re-insert to move the entry to the most-recently-used end.
      # NOTE(review): a stored value that compares *equal* to the default
      # entry is treated as a miss and dropped here; with the None default
      # used by StateCache this only affects explicitly cached None values.
      value = self._cache.pop(key, self._default_entry)
      if value != self._default_entry:
        self._cache[key] = value
      return value

    def put(self, key, value):
      # type: (KT, VT) -> None
      self._cache[key] = value
      # Evict least-recently-used entries until within capacity.
      while len(self._cache) > self._max_entries:
        self._cache.popitem(last=False)

    def evict(self, key):
      # type: (KT) -> None
      self._cache.pop(key, self._default_entry)

    def evict_all(self):
      # type: () -> None
      self._cache.clear()

    def __len__(self):
      # type: () -> int
      return len(self._cache)
# pytype: skip-file
from __future__ import absolute_import
import collections
from apache_beam import coders
from apache_beam.runners import common
# This module is experimental. No backwards-compatibility guarantees.
def build_worker_instruction(*args):
  """Create an object representing a ParallelInstruction protobuf.

  This will be a collections.namedtuple with a custom __str__ method.
  Alas, this wrapper is not known to pylint, which thinks it creates
  constants. You may have to put a disable=invalid-name pylint
  annotation on any use of this, depending on your names.

  Args:
    *args: first argument is the name of the type to create. Should
      start with "Worker". Second argument is a list of the
      attributes of this object.

  Returns:
    A new class, a subclass of tuple, that represents the protobuf.
  """
  instruction_class = collections.namedtuple(*args)
  # Replace the default namedtuple rendering with a compact one that omits
  # bulky or uninteresting fields (see worker_printable_fields).
  instruction_class.__str__ = worker_object_to_string
  instruction_class.__repr__ = worker_object_to_string
  return instruction_class
def worker_printable_fields(workerproto):
  """Returns the interesting fields of a Worker* object."""
  # Fields that are too bulky or too low-level to be useful in a summary.
  uninteresting = (
      'coder',
      'coders',
      'output_coders',
      'elements',
      'combine_fn',
      'serialized_fn',
      'window_fn',
      'append_trailing_newlines',
      'strip_trailing_newlines',
      'compression_type',
      'context',
      'start_shuffle_position',
      'end_shuffle_position',
      'shuffle_reader_config',
      'shuffle_writer_config')
  fields = []
  # _asdict is the only way and cannot subclass this generated class
  # pylint: disable=protected-access
  for name, value in workerproto._asdict().items():
    # Keep a value of 0 but drop None and empty containers.
    if (value or value == 0) and name not in uninteresting:
      fields.append('%s=%s' % (name, value))
  return fields
def worker_object_to_string(worker_object):
  """Returns a string compactly representing a Worker* object."""
  class_name = worker_object.__class__.__name__
  field_summary = ', '.join(worker_printable_fields(worker_object))
  return '%s(%s)' % (class_name, field_summary)
# All the following Worker* definitions will have these lint problems:
# pylint: disable=invalid-name
# pylint: disable=pointless-string-statement
WorkerRead = build_worker_instruction('WorkerRead', ['source', 'output_coders'])
"""Worker details needed to read from a source.
Attributes:
source: a source object.
output_coders: 1-tuple of the coder for the output.
"""
WorkerSideInputSource = build_worker_instruction(
'WorkerSideInputSource', ['source', 'tag'])
"""Worker details needed to read from a side input source.
Attributes:
source: a source object.
tag: string tag for this side input.
"""
WorkerGroupingShuffleRead = build_worker_instruction(
'WorkerGroupingShuffleRead',
[
'start_shuffle_position',
'end_shuffle_position',
'shuffle_reader_config',
'coder',
'output_coders'
])
"""Worker details needed to read from a grouping shuffle source.
Attributes:
start_shuffle_position: An opaque string to be passed to the shuffle
source to indicate where to start reading.
end_shuffle_position: An opaque string to be passed to the shuffle
source to indicate where to stop reading.
shuffle_reader_config: An opaque string used to initialize the shuffle
reader. Contains things like connection endpoints for the shuffle
server appliance and various options.
coder: The KV coder used to decode shuffle entries.
output_coders: 1-tuple of the coder for the output.
"""
WorkerUngroupedShuffleRead = build_worker_instruction(
'WorkerUngroupedShuffleRead',
[
'start_shuffle_position',
'end_shuffle_position',
'shuffle_reader_config',
'coder',
'output_coders'
])
"""Worker details needed to read from an ungrouped shuffle source.
Attributes:
start_shuffle_position: An opaque string to be passed to the shuffle
source to indicate where to start reading.
end_shuffle_position: An opaque string to be passed to the shuffle
source to indicate where to stop reading.
shuffle_reader_config: An opaque string used to initialize the shuffle
reader. Contains things like connection endpoints for the shuffle
server appliance and various options.
coder: The value coder used to decode shuffle entries.
"""
WorkerWrite = build_worker_instruction(
'WorkerWrite', ['sink', 'input', 'output_coders'])
"""Worker details needed to write to a sink.
Attributes:
sink: a sink object.
input: A (producer index, output index) tuple representing the
ParallelInstruction operation whose output feeds into this operation.
The output index is 0 except for multi-output operations (like ParDo).
output_coders: 1-tuple, coder to use to estimate bytes written.
"""
WorkerInMemoryWrite = build_worker_instruction(
'WorkerInMemoryWrite',
['output_buffer', 'write_windowed_values', 'input', 'output_coders'])
"""Worker details needed to write to a in-memory sink.
Used only for unit testing. It makes worker tests less cluttered with code like
"write to a file and then check file contents".
Attributes:
output_buffer: list to which output elements will be appended
write_windowed_values: whether to record the entire WindowedValue outputs,
or just the raw (unwindowed) value
input: A (producer index, output index) tuple representing the
ParallelInstruction operation whose output feeds into this operation.
The output index is 0 except for multi-output operations (like ParDo).
output_coders: 1-tuple, coder to use to estimate bytes written.
"""
WorkerShuffleWrite = build_worker_instruction(
'WorkerShuffleWrite',
['shuffle_kind', 'shuffle_writer_config', 'input', 'output_coders'])
"""Worker details needed to write to a shuffle sink.
Attributes:
shuffle_kind: A string describing the shuffle kind. This can control the
way the worker interacts with the shuffle sink. The possible values are:
'ungrouped', 'group_keys', and 'group_keys_and_sort_values'.
shuffle_writer_config: An opaque string used to initialize the shuffle
write. Contains things like connection endpoints for the shuffle
server appliance and various options.
input: A (producer index, output index) tuple representing the
ParallelInstruction operation whose output feeds into this operation.
The output index is 0 except for multi-output operations (like ParDo).
output_coders: 1-tuple of the coder for input elements. If the
shuffle_kind is grouping, this is expected to be a KV coder.
"""
WorkerDoFn = build_worker_instruction(
'WorkerDoFn',
['serialized_fn', 'output_tags', 'input', 'side_inputs', 'output_coders'])
"""Worker details needed to run a DoFn.
Attributes:
serialized_fn: A serialized DoFn object to be run for each input element.
output_tags: The string tags used to identify the outputs of a ParDo
operation. The tag is present even if the ParDo has just one output
(e.g., ['out'].
output_coders: array of coders, one for each output.
input: A (producer index, output index) tuple representing the
ParallelInstruction operation whose output feeds into this operation.
The output index is 0 except for multi-output operations (like ParDo).
side_inputs: A list of Worker...Read instances describing sources to be
used for getting values. The types supported right now are
WorkerInMemoryRead and WorkerTextRead.
"""
WorkerReifyTimestampAndWindows = build_worker_instruction(
'WorkerReifyTimestampAndWindows', ['output_tags', 'input', 'output_coders'])
"""Worker details needed to run a WindowInto.
Attributes:
output_tags: The string tags used to identify the outputs of a ParDo
operation. The tag is present even if the ParDo has just one output
(e.g., ['out'].
output_coders: array of coders, one for each output.
input: A (producer index, output index) tuple representing the
ParallelInstruction operation whose output feeds into this operation.
The output index is 0 except for multi-output operations (like ParDo).
"""
WorkerMergeWindows = build_worker_instruction(
'WorkerMergeWindows',
[
'window_fn',
'combine_fn',
'phase',
'output_tags',
'input',
'coders',
'context',
'output_coders'
])
"""Worker details needed to run a MergeWindows (aka. GroupAlsoByWindows).
Attributes:
window_fn: A serialized Windowing object representing the windowing strategy.
combine_fn: A serialized CombineFn object to be used after executing the
GroupAlsoByWindows operation. May be None if not a combining operation.
phase: Possible values are 'all', 'add', 'merge', and 'extract'.
A runner optimizer may split the user combiner in 3 separate
phases (ADD, MERGE, and EXTRACT), on separate VMs, as it sees
fit. The phase attribute dictates which DoFn is actually running in
the worker. May be None if not a combining operation.
output_tags: The string tags used to identify the outputs of a ParDo
operation. The tag is present even if the ParDo has just one output
(e.g., ['out'].
output_coders: array of coders, one for each output.
input: A (producer index, output index) tuple representing the
ParallelInstruction operation whose output feeds into this operation.
The output index is 0 except for multi-output operations (like ParDo).
coders: A 2-tuple of coders (key, value) to encode shuffle entries.
context: The ExecutionContext object for the current work item.
"""
WorkerCombineFn = build_worker_instruction(
'WorkerCombineFn', ['serialized_fn', 'phase', 'input', 'output_coders'])
"""Worker details needed to run a CombineFn.
Attributes:
serialized_fn: A serialized CombineFn object to be used.
phase: Possible values are 'all', 'add', 'merge', and 'extract'.
A runner optimizer may split the user combiner in 3 separate
phases (ADD, MERGE, and EXTRACT), on separate VMs, as it sees
fit. The phase attribute dictates which DoFn is actually running in
the worker.
input: A (producer index, output index) tuple representing the
ParallelInstruction operation whose output feeds into this operation.
The output index is 0 except for multi-output operations (like ParDo).
output_coders: 1-tuple of the coder for the output.
"""
WorkerPartialGroupByKey = build_worker_instruction(
'WorkerPartialGroupByKey', ['combine_fn', 'input', 'output_coders'])
"""Worker details needed to run a partial group-by-key.
Attributes:
combine_fn: A serialized CombineFn object to be used.
input: A (producer index, output index) tuple representing the
ParallelInstruction operation whose output feeds into this operation.
The output index is 0 except for multi-output operations (like ParDo).
output_coders: 1-tuple of the coder for the output.
"""
WorkerFlatten = build_worker_instruction(
'WorkerFlatten', ['inputs', 'output_coders'])
"""Worker details needed to run a Flatten.
Attributes:
inputs: A list of tuples, each (producer index, output index), representing
the ParallelInstruction operations whose output feeds into this operation.
The output index is 0 unless the input is from a multi-output
operation (such as ParDo).
output_coders: 1-tuple of the coder for the output.
"""
def get_coder_from_spec(coder_spec):
  """Return a coder instance from a coder spec.

  Args:
    coder_spec: A dict where the value of the '@type' key is a pickled instance
      of a Coder instance.

  Returns:
    A coder instance (has encode/decode methods).
  """
  assert coder_spec is not None
  # Ignore the wrappers in these encodings.
  ignored_wrappers = (
      'com.google.cloud.dataflow.sdk.util.TimerOrElement$TimerOrElementCoder')
  if coder_spec['@type'] in ignored_wrappers:
    assert len(coder_spec['component_encodings']) == 1
    coder_spec = coder_spec['component_encodings'][0]
    return get_coder_from_spec(coder_spec)
  # Handle a few well known types of coders.
  if coder_spec['@type'] == 'kind:pair':
    assert len(coder_spec['component_encodings']) == 2
    component_coders = [
        get_coder_from_spec(c) for c in coder_spec['component_encodings']
    ]
    return coders.TupleCoder(component_coders)
  elif coder_spec['@type'] == 'kind:stream':
    assert len(coder_spec['component_encodings']) == 1
    return coders.IterableCoder(
        get_coder_from_spec(coder_spec['component_encodings'][0]))
  elif coder_spec['@type'] == 'kind:windowed_value':
    assert len(coder_spec['component_encodings']) == 2
    value_coder, window_coder = [
        get_coder_from_spec(c) for c in coder_spec['component_encodings']]
    return coders.coders.WindowedValueCoder(
        value_coder, window_coder=window_coder)
  elif coder_spec['@type'] == 'kind:interval_window':
    assert (
        'component_encodings' not in coder_spec or
        not coder_spec['component_encodings'])
    return coders.coders.IntervalWindowCoder()
  elif coder_spec['@type'] == 'kind:global_window':
    assert (
        'component_encodings' not in coder_spec or
        not coder_spec['component_encodings'])
    return coders.coders.GlobalWindowCoder()
  elif coder_spec['@type'] == 'kind:varint':
    # Fixed: the closing parenthesis previously sat inside the len() call
    # (len(x == 0)), which raised TypeError instead of checking the length.
    assert (
        'component_encodings' not in coder_spec or
        len(coder_spec['component_encodings']) == 0)
    return coders.coders.VarIntCoder()
  elif coder_spec['@type'] == 'kind:length_prefix':
    assert len(coder_spec['component_encodings']) == 1
    return coders.coders.LengthPrefixCoder(
        get_coder_from_spec(coder_spec['component_encodings'][0]))
  elif coder_spec['@type'] == 'kind:bytes':
    # Fixed: same misplaced parenthesis as in the 'kind:varint' branch above.
    assert (
        'component_encodings' not in coder_spec or
        len(coder_spec['component_encodings']) == 0)
    return coders.BytesCoder()
  # We pass coders in the form "<coder_name>$<pickled_data>" to make the job
  # description JSON more readable.
  return coders.coders.deserialize_coder(coder_spec['@type'].encode('ascii'))
class MapTask(object):
  """A map task decoded into operations and ready to be executed.

  Attributes:
    operations: A list of Worker* object created by parsing the instructions
      within the map task.
    stage_name: The name of this map task execution stage.
    system_names: The system names of the step corresponding to each map task
      operation in the execution graph.
    step_names: The user-given names of the step corresponding to each map task
      operation (e.g. Foo/Bar/ParDo).
    original_names: The internal name of a step in the original workflow graph.
    name_contexts: A common.NameContext object containing name information
      about a step.
  """

  def __init__(
      self,
      operations,
      stage_name,
      system_names=None,
      step_names=None,
      original_names=None,
      name_contexts=None):
    # TODO(BEAM-4028): Remove arguments other than name_contexts.
    self.operations = operations
    self.stage_name = stage_name
    if name_contexts:
      self.name_contexts = name_contexts
    else:
      # Legacy path: build contexts from the three parallel name lists.
      self.name_contexts = self._make_name_contexts(
          original_names, step_names, system_names)

  @staticmethod
  def _make_name_contexts(original_names, user_names, system_names):
    # TODO(BEAM-4028): Remove method once map task relies on name contexts.
    contexts = []
    for original, user, system in zip(
        original_names, user_names, system_names):
      contexts.append(common.DataflowNameContext(original, user, system))
    return contexts

  @property
  def system_names(self):
    """Returns a list containing the system names of steps.

    A System name is the name of a step in the optimized Dataflow graph.
    """
    return [context.system_name for context in self.name_contexts]

  @property
  def original_names(self):
    """Returns a list containing the original names of steps.

    An original name is the internal name of a step in the Dataflow graph
    (e.g. 's2').
    """
    return [context.step_name for context in self.name_contexts]

  @property
  def step_names(self):
    """Returns a list containing the user names of steps.

    In this context, a step name is the user-given name of a step in the
    Dataflow graph (e.g. 'Foo/Bar/ParDo').
    """
    return [context.user_name for context in self.name_contexts]

  def __str__(self):
    step_summary = '+'.join(self.step_names)
    return '<%s %s steps=%s>' % (
        self.__class__.__name__, self.stage_name, step_summary)
# pytype: skip-file
from __future__ import absolute_import

import collections
import collections.abc
import logging
import queue
import threading
import traceback

from builtins import object
from builtins import range

from apache_beam.coders import observable
from apache_beam.io import iobase
from apache_beam.runners.worker import opcounters
from apache_beam.transforms import window
from apache_beam.utils.sentinel import Sentinel
# This module is experimental. No backwards-compatibility guarantees.

# Maximum number of reader threads for reading side input sources, per side
# input.
MAX_SOURCE_READER_THREADS = 15

# Number of slots for elements in side input element queue. Note that this
# value is intentionally smaller than MAX_SOURCE_READER_THREADS so as to reduce
# memory pressure of holding potentially-large elements in memory. Note that
# the number of pending elements in memory is equal to the sum of
# MAX_SOURCE_READER_THREADS and ELEMENT_QUEUE_SIZE.
ELEMENT_QUEUE_SIZE = 10

# Special element value sentinel for signaling reader state.
READER_THREAD_IS_DONE_SENTINEL = Sentinel.sentinel

# Used to efficiently window the values of non-windowed side inputs.
# (Bound method that wraps a raw value into a globally-windowed value.)
_globally_windowed = window.GlobalWindows.windowed_value(None).with_value

# Module-level logger, named after this module.
_LOGGER = logging.getLogger(__name__)
class PrefetchingSourceSetIterable(object):
"""Value iterator that reads concurrently from a set of sources."""
def __init__(
self,
sources,
max_reader_threads=MAX_SOURCE_READER_THREADS,
read_counter=None,
element_counter=None):
self.sources = sources
self.num_reader_threads = min(max_reader_threads, len(self.sources))
# Queue for sources that are to be read.
self.sources_queue = queue.Queue()
for source in sources:
self.sources_queue.put(source)
# Queue for elements that have been read.
self.element_queue = queue.Queue(ELEMENT_QUEUE_SIZE)
# Queue for exceptions encountered in reader threads; to be rethrown.
self.reader_exceptions = queue.Queue()
# Whether we have already iterated; this iterable can only be used once.
self.already_iterated = False
# Whether an error was encountered in any source reader.
self.has_errored = False
self.read_counter = read_counter or opcounters.NoOpTransformIOCounter()
self.element_counter = element_counter
self.reader_threads = []
self._start_reader_threads()
def add_byte_counter(self, reader):
"""Adds byte counter observer to a side input reader.
Args:
reader: A reader that should inherit from ObservableMixin to have
bytes tracked.
"""
def update_bytes_read(record_size, is_record_size=False, **kwargs):
# Let the reader report block size.
if is_record_size:
self.read_counter.add_bytes_read(record_size)
if isinstance(reader, observable.ObservableMixin):
reader.register_observer(update_bytes_read)
def _start_reader_threads(self):
for _ in range(0, self.num_reader_threads):
t = threading.Thread(target=self._reader_thread)
t.daemon = True
t.start()
self.reader_threads.append(t)
def _reader_thread(self):
# pylint: disable=too-many-nested-blocks
try:
while True:
try:
source = self.sources_queue.get_nowait()
if isinstance(source, iobase.BoundedSource):
for value in source.read(source.get_range_tracker(None, None)):
if self.has_errored:
# If any reader has errored, just return.
return
if isinstance(value, window.WindowedValue):
self.element_queue.put(value)
else:
self.element_queue.put(_globally_windowed(value))
else:
# Native dataflow source.
with source.reader() as reader:
# The tracking of time spend reading and bytes read from side
# inputs is kept behind an experiment flag to test performance
# impact.
self.add_byte_counter(reader)
returns_windowed_values = reader.returns_windowed_values
for value in reader:
if self.has_errored:
# If any reader has errored, just return.
return
if returns_windowed_values:
self.element_queue.put(value)
else:
self.element_queue.put(_globally_windowed(value))
except queue.Empty:
return
except Exception as e: # pylint: disable=broad-except
_LOGGER.error(
'Encountered exception in PrefetchingSourceSetIterable '
'reader thread: %s',
traceback.format_exc())
self.reader_exceptions.put(e)
self.has_errored = True
finally:
self.element_queue.put(READER_THREAD_IS_DONE_SENTINEL)
def __iter__(self):
  """Yields prefetched elements from the reader threads; single use only.

  The invariants during execution are:
    1) A worker thread always posts the sentinel as the last thing it does
       before exiting.
    2) We always wait for all sentinels and then join all threads.
  """
  # pylint: disable=too-many-nested-blocks
  if self.already_iterated:
    raise RuntimeError(
        'Can only iterate once over PrefetchingSourceSetIterable instance.')
  self.already_iterated = True

  num_readers_finished = 0
  try:
    while True:
      try:
        # Time spent blocked on the queue counts as side-input read time.
        with self.read_counter:
          element = self.element_queue.get()
        if element is READER_THREAD_IS_DONE_SENTINEL:
          num_readers_finished += 1
          if num_readers_finished == self.num_reader_threads:
            # All workers posted their sentinel: iteration is complete.
            return
        else:
          if self.element_counter:
            self.element_counter.update_from(element)
            yield element
            self.element_counter.update_collect()
          else:
            yield element
      finally:
        # Re-raise any worker exception on the consumer thread.
        if self.has_errored:
          raise self.reader_exceptions.get()
  except GeneratorExit:
    # Consumer abandoned iteration; tell workers to stop producing.
    self.has_errored = True
    raise
  finally:
    # Drain remaining sentinels so every worker can exit, then join them.
    while num_readers_finished < self.num_reader_threads:
      element = self.element_queue.get()
      if element is READER_THREAD_IS_DONE_SENTINEL:
        num_readers_finished += 1
    for t in self.reader_threads:
      t.join()
def get_iterator_fn_for_sources(
    sources,
    max_reader_threads=MAX_SOURCE_READER_THREADS,
    read_counter=None,
    element_counter=None):
  """Returns callable that returns iterator over elements for given sources."""
  def create_iterator():
    # A fresh PrefetchingSourceSetIterable is built per call since each
    # instance may only be iterated once.
    prefetching_iterable = PrefetchingSourceSetIterable(
        sources,
        max_reader_threads=max_reader_threads,
        read_counter=read_counter,
        element_counter=element_counter)
    return iter(prefetching_iterable)

  return create_iterator
class EmulatedIterable(collections.abc.Iterable):
  """Emulates an iterable for a side input.

  Wraps a zero-argument callable so each iteration produces a fresh
  iterator, allowing the side input to be consumed multiple times.

  Note: the base class is ``collections.abc.Iterable``; the old
  ``collections.Iterable`` alias was deprecated since Python 3.3 and
  removed in Python 3.10.
  """
  def __init__(self, iterator_fn):
    # iterator_fn: callable returning a new iterator over the elements.
    self.iterator_fn = iterator_fn

  def __iter__(self):
    return self.iterator_fn()
# This module is experimental. No backwards-compatibility guarantees.
# pytype: skip-file
from __future__ import absolute_import
import contextlib
import threading
from typing import TYPE_CHECKING
from typing import Dict
from typing import NamedTuple
from typing import Optional
from typing import Union
from apache_beam.runners import common
from apache_beam.utils.counters import Counter
from apache_beam.utils.counters import CounterFactory
from apache_beam.utils.counters import CounterName
try:
from apache_beam.runners.worker import statesampler_fast as statesampler_impl # type: ignore
FAST_SAMPLER = True
except ImportError:
from apache_beam.runners.worker import statesampler_slow as statesampler_impl
FAST_SAMPLER = False
if TYPE_CHECKING:
from apache_beam.metrics.execution import MetricsContainer
# Per-thread storage for the currently active StateSampler.
_STATE_SAMPLERS = threading.local()


def set_current_tracker(tracker):
  """Installs ``tracker`` as the current thread's state sampler."""
  _STATE_SAMPLERS.tracker = tracker


def get_current_tracker():
  """Returns the current thread's state sampler, or None if unset."""
  return getattr(_STATE_SAMPLERS, 'tracker', None)


# Per-thread storage for the currently active instruction id.
_INSTRUCTION_IDS = threading.local()


def get_current_instruction_id():
  """Returns the instruction id active on this thread, or None if unset."""
  return getattr(_INSTRUCTION_IDS, 'instruction_id', None)


@contextlib.contextmanager
def instruction_id(id):
  """Context manager scoping the given instruction id to this thread."""
  try:
    _INSTRUCTION_IDS.instruction_id = id
    yield
  finally:
    # Always reset so stale ids never leak into later work on this thread.
    _INSTRUCTION_IDS.instruction_id = None
def for_test():
  """Creates, installs, and returns a fresh StateSampler for use in tests."""
  tracker = StateSampler('test', CounterFactory())
  set_current_tracker(tracker)
  return tracker
# Snapshot of a StateSampler's transition statistics:
#   state_name: counter name of the state currently being sampled.
#   transition_count: total number of state transitions observed.
#   time_since_transition: time elapsed since the last state transition.
#   tracked_thread: the thread whose execution states are being sampled.
StateSamplerInfo = NamedTuple(
    'StateSamplerInfo',
    [('state_name', CounterName), ('transition_count', int),
     ('time_since_transition', int),
     ('tracked_thread', Optional[threading.Thread])])

# Default period for sampling current state of pipeline execution.
DEFAULT_SAMPLING_PERIOD_MS = 200
class StateSampler(statesampler_impl.StateSampler):
  """Periodically samples the execution state of a worker thread.

  Delegates the sampling loop to the fast (Cython) or slow (pure Python)
  implementation selected at import time, and maps each sampled
  (step, state) pair to a '-msecs' SUM counter via the counter factory.

  Fix: removed dataset-metadata residue that was fused onto the final
  statement, which made the module syntactically invalid.
  """
  def __init__(self,
               prefix,  # type: str
               counter_factory,
               sampling_period_ms=DEFAULT_SAMPLING_PERIOD_MS):
    self._prefix = prefix
    self._counter_factory = counter_factory
    # Cache of ScopedState objects keyed by their msecs counter name so each
    # (step, state) pair maps to exactly one counter.
    self._states_by_name = {
    }  # type: Dict[CounterName, statesampler_impl.ScopedState]
    self.sampling_period_ms = sampling_period_ms
    self.tracked_thread = None  # type: Optional[threading.Thread]
    self.finished = False
    self.started = False
    super(StateSampler, self).__init__(sampling_period_ms)

  @property
  def stage_name(self):
    # type: () -> str
    """The stage name prefix this sampler was constructed with."""
    return self._prefix

  def stop(self):
    # type: () -> None
    """Stops sampling and clears this thread's current tracker."""
    set_current_tracker(None)
    super(StateSampler, self).stop()

  def stop_if_still_running(self):
    # type: () -> None
    """Stops the sampler only if it was started and has not finished."""
    if self.started and not self.finished:
      self.stop()

  def start(self):
    # type: () -> None
    """Begins sampling, publishing this sampler as the current tracker."""
    self.tracked_thread = threading.current_thread()
    set_current_tracker(self)
    super(StateSampler, self).start()
    self.started = True

  def get_info(self):
    # type: () -> StateSamplerInfo
    """Returns StateSamplerInfo with transition statistics."""
    return StateSamplerInfo(
        self.current_state().name,
        self.state_transition_count,
        self.time_since_transition,
        self.tracked_thread)

  def scoped_state(self,
                   name_context,  # type: Union[str, common.NameContext]
                   state_name,  # type: str
                   io_target=None,
                   metrics_container=None  # type: Optional[MetricsContainer]
                  ):
    # type: (...) -> statesampler_impl.ScopedState
    """Returns a ScopedState object associated to a Step and a State.

    Args:
      name_context: common.NameContext. It is the step name information.
      state_name: str. It is the state name (e.g. process / start / finish).
      io_target:
      metrics_container: MetricsContainer. The step's metrics container.

    Returns:
      A ScopedState that keeps the execution context and is able to switch it
      for the execution thread.
    """
    if not isinstance(name_context, common.NameContext):
      name_context = common.NameContext(name_context)

    counter_name = CounterName(
        state_name + '-msecs',
        stage_name=self._prefix,
        step_name=name_context.metrics_name(),
        io_target=io_target)
    if counter_name in self._states_by_name:
      return self._states_by_name[counter_name]
    else:
      output_counter = self._counter_factory.get_counter(
          counter_name, Counter.SUM)
      self._states_by_name[counter_name] = super(StateSampler,
                                                 self)._scoped_state(
                                                     counter_name,
                                                     name_context,
                                                     output_counter,
                                                     metrics_container)
      return self._states_by_name[counter_name]

  def commit_counters(self):
    # type: () -> None
    """Updates output counters with latest state statistics."""
    for state in self._states_by_name.values():
      # The sampler accumulates nanoseconds; counters report milliseconds.
      state_msecs = int(1e-6 * state.nsecs)
      # Add only the delta since the last commit so repeated commits are safe.
      state.counter.update(state_msecs - state.counter.value())
# pytype: skip-file
from __future__ import absolute_import
import logging
import threading
import time
import apache_beam as beam
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive.caching import streaming_cache
from apache_beam.runners.runner import PipelineState
_LOGGER = logging.getLogger(__name__)
class BackgroundCachingJob(object):
  """A simple abstraction that controls necessary components of a timed and
  space limited background source recording job.

  A background source recording job successfully completes source data
  recording in 2 conditions:

    #. The job is finite and runs into DONE state;
    #. The job is infinite but hits an interactive_beam.options configured
       limit and gets cancelled into CANCELLED/CANCELLING state.

  In both situations, the background source recording job should be treated as
  done successfully.
  """
  def __init__(self, pipeline_result, limiters):
    # The PipelineResult of the asynchronously running recording job.
    self._pipeline_result = pipeline_result
    # Guards cross-thread access to self._pipeline_result.
    self._result_lock = threading.RLock()
    self._condition_checker = threading.Thread(
        target=self._background_caching_job_condition_checker, daemon=True)

    # Limiters are checks s.t. if any are triggered then the background caching
    # job gets cancelled.
    self._limiters = limiters
    self._condition_checker.start()

  def _background_caching_job_condition_checker(self):
    """Daemon loop: cancels the job once any limiter triggers, and exits
    when the job reaches a terminal state."""
    while True:
      with self._result_lock:
        if PipelineState.is_terminal(self._pipeline_result.state):
          break

      if self._should_end_condition_checker():
        self.cancel()
        break
      time.sleep(0.5)

  def _should_end_condition_checker(self):
    # True when at least one recording limiter has triggered.
    return any([l.is_triggered() for l in self._limiters])

  def is_done(self):
    """Whether recording completed successfully.

    True when the job reached DONE/CANCELLED, or when a limiter triggered
    and the resulting cancellation is still in flight (CANCELLING).
    """
    with self._result_lock:
      is_terminated = self._pipeline_result.state in (
          PipelineState.DONE, PipelineState.CANCELLED)
      is_triggered = self._should_end_condition_checker()
      is_cancelling = self._pipeline_result.state is PipelineState.CANCELLING
    return is_terminated or (is_triggered and is_cancelling)

  def is_running(self):
    """Whether the recording job is currently in RUNNING state."""
    with self._result_lock:
      return self._pipeline_result.state is PipelineState.RUNNING

  def cancel(self):
    """Cancels this background source recording job.
    """
    with self._result_lock:
      if not PipelineState.is_terminal(self._pipeline_result.state):
        try:
          self._pipeline_result.cancel()
        except NotImplementedError:
          # Ignore the cancel invocation if it is never implemented by the
          # runner.
          pass

  @property
  def state(self):
    # Current PipelineState of the underlying recording job.
    with self._result_lock:
      return self._pipeline_result.state
def attempt_to_run_background_caching_job(
    runner, user_pipeline, options=None, limiters=None):
  """Attempts to run a background source recording job for a user-defined
  pipeline.

  Returns True if a job was started, False otherwise.

  The pipeline result is automatically tracked by Interactive Beam in case
  future cancellation/cleanup is needed.
  """
  if is_background_caching_job_needed(user_pipeline):
    # Cancel non-terminal jobs if there is any before starting a new one.
    attempt_to_cancel_background_caching_job(user_pipeline)
    # Cancel the gRPC server serving the test stream if there is one.
    attempt_to_stop_test_stream_service(user_pipeline)
    # TODO(BEAM-8335): refactor background source recording job logic from
    # pipeline_instrument module to this module and aggregate tests.
    from apache_beam.runners.interactive import pipeline_instrument as instr
    # Round-trip the user pipeline through its runner API proto so the
    # instrumented recording pipeline is built from an independent copy.
    runner_pipeline = beam.pipeline.Pipeline.from_runner_api(
        user_pipeline.to_runner_api(use_fake_coders=True), runner, options)
    # Build and start (asynchronously, via run()) the derived pipeline that
    # records the cacheable sources.
    background_caching_job_result = beam.pipeline.Pipeline.from_runner_api(
        instr.build_pipeline_instrument(
            runner_pipeline).background_caching_pipeline_proto(),
        runner,
        options).run()

    recording_limiters = (
        limiters
        if limiters else ie.current_env().options.capture_control.limiters())
    ie.current_env().set_background_caching_job(
        user_pipeline,
        BackgroundCachingJob(
            background_caching_job_result, limiters=recording_limiters))
    return True
  return False
def is_background_caching_job_needed(user_pipeline):
  """Determines if a background source recording job needs to be started.

  It does several state checks and recording state changes throughout the
  process. It is not idempotent to simplify the usage.
  """
  job = ie.current_env().get_background_caching_job(user_pipeline)
  # Checks if the pipeline contains any source that needs to be cached.
  need_cache = has_source_to_cache(user_pipeline)
  # If this is True, we can invalidate a previous done/running job if there is
  # one.
  cache_changed = is_source_to_cache_changed(user_pipeline)
  # When recording replay is disabled, cache is always needed for recordable
  # sources (if any): evict old captures and record fresh data every run.
  if need_cache and not ie.current_env().options.enable_recording_replay:
    from apache_beam.runners.interactive.options import capture_control
    capture_control.evict_captured_data()
    return True
  # Otherwise, a recording job is needed only when there is something to
  # cache AND at least one of the following holds.
  return (
      need_cache and
      (
          # There has never been a recording job for this pipeline; or
          not job or
          # the previous job neither completed successfully (DONE means its
          # cached events might still be valid) nor is still running; or
          not (
              job.is_done() or
              job.is_running()) or
          # the set of sources changed, invalidating the previous job.
          cache_changed))
def is_cache_complete(pipeline_id):
  # type: (str) -> bool
  """Returns True if the background cache for the given pipeline is done."""
  user_pipeline = ie.current_env().pipeline_id_to_pipeline(pipeline_id)
  recording_job = ie.current_env().get_background_caching_job(user_pipeline)
  job_is_done = bool(recording_job and recording_job.is_done())
  signature_changed = is_source_to_cache_changed(
      user_pipeline, update_cached_source_signature=False)
  # Stop reading from the cache either when the recording job finished or
  # when the underlying source signature changed, which would require a new
  # background source recording job.
  return job_is_done or signature_changed
def has_source_to_cache(user_pipeline):
  """Determines if a user-defined pipeline contains any source that need to be
  cached. If so, also immediately wrap current cache manager held by current
  interactive environment into a streaming cache if this has not been done.
  The wrapping doesn't invalidate existing cache in any way.

  This can help determining if a background source recording job is needed to
  write cache for sources and if a test stream service is needed to serve the
  cache.

  Throughout the check, if source-to-cache has changed from the last check, it
  also cleans up the invalidated cache early on.
  """
  from apache_beam.runners.interactive import pipeline_instrument as instr
  # TODO(BEAM-8335): we temporarily only cache replaceable unbounded sources.
  # Add logic for other cacheable sources here when they are available.
  has_cache = instr.has_unbounded_sources(user_pipeline)
  if has_cache:
    # Ensure the pipeline's cache manager is a StreamingCache, wrapping the
    # existing one (reusing its cache directory) if it isn't already.
    if not isinstance(ie.current_env().get_cache_manager(user_pipeline,
                                                         create_if_absent=True),
                      streaming_cache.StreamingCache):
      ie.current_env().set_cache_manager(
          streaming_cache.StreamingCache(
              ie.current_env().get_cache_manager(user_pipeline)._cache_dir,
              is_cache_complete=is_cache_complete,
              sample_resolution_sec=1.0),
          user_pipeline)

  return has_cache
def attempt_to_cancel_background_caching_job(user_pipeline):
  """Cancels the background source recording job for a user-defined pipeline.

  No-op when no background source recording job needs to be cancelled.
  """
  recording_job = ie.current_env().get_background_caching_job(user_pipeline)
  if recording_job:
    recording_job.cancel()
def attempt_to_stop_test_stream_service(user_pipeline):
  """Stops the gRPC server/service serving the test stream, if any.

  No-op when no such server has been started for the given pipeline.
  """
  if not is_a_test_stream_service_running(user_pipeline):
    return
  controller = ie.current_env().evict_test_stream_service_controller(
      user_pipeline)
  controller.stop()
def is_a_test_stream_service_running(user_pipeline):
  """Checks whether a gRPC server/service is running that serves the test
  stream to any job started from the given user_pipeline.
  """
  controller = ie.current_env().get_test_stream_service_controller(
      user_pipeline)
  return controller is not None
def is_source_to_cache_changed(
    user_pipeline, update_cached_source_signature=True):
  """Determines if there is any change in the sources that need to be cached
  used by the user-defined pipeline.

  Due to the expensiveness of computations and for the simplicity of usage,
  this function is not idempotent because Interactive Beam automatically
  discards previously tracked signature of transforms and tracks the current
  signature of transforms for the user-defined pipeline if there is any
  change.

  When it's True, there is addition/deletion/mutation of source transforms
  that requires a new background source recording job.
  """
  # By default gets empty set if the user_pipeline is first time seen because
  # we can treat it as adding transforms.
  recorded_signature = ie.current_env().get_cached_source_signature(
      user_pipeline)
  current_signature = extract_source_to_cache_signature(user_pipeline)
  is_changed = not current_signature.issubset(recorded_signature)
  # The computation of extract_unbounded_source_signature is expensive, track
  # on change by default.
  if is_changed and update_cached_source_signature:
    options = ie.current_env().options
    # No info needed when recording replay is disabled.
    if options.enable_recording_replay:
      if not recorded_signature:

        def sizeof_fmt(num, suffix='B'):
          """Formats a size using decimal (SI) prefixes, e.g. 1.5MB."""
          for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
            if abs(num) < 1000.0:
              return "%3.1f%s%s" % (num, unit, suffix)
            num /= 1000.0
          # Fix: the decimal prefix after 'Z' is 'Y' (yotta); the previous
          # fallback used the binary prefix 'Yi' despite dividing by 1000.
          return "%.1f%s%s" % (num, 'Y', suffix)

        _LOGGER.info(
            'Interactive Beam has detected unbounded sources in your pipeline. '
            'In order to have a deterministic replay, a segment of data will '
            'be recorded from all sources for %s seconds or until a total of '
            '%s have been written to disk.',
            options.recording_duration.total_seconds(),
            sizeof_fmt(options.recording_size_limit))
      else:
        _LOGGER.info(
            'Interactive Beam has detected a new streaming source was '
            'added to the pipeline. In order for the cached streaming '
            'data to start at the same time, all recorded data has been '
            'cleared and a new segment of data will be recorded.')

    # Invalidate previous recordings and remember the new signature.
    ie.current_env().cleanup(user_pipeline)
    ie.current_env().set_cached_source_signature(
        user_pipeline, current_signature)
    ie.current_env().add_user_pipeline(user_pipeline)
  return is_changed
def extract_source_to_cache_signature(user_pipeline):
  """Extracts a set of signature for sources that need to be cached in the
  user-defined pipeline.

  A signature is a str representation of urn and payload of a source.
  """
  from apache_beam.runners.interactive import pipeline_instrument as instr
  # TODO(BEAM-8335): we temporarily only cache replaceable unbounded sources.
  # Add logic for other cacheable sources here when they are available.
  applied_transforms = instr.unbounded_sources(user_pipeline)
  source_ptransforms = {applied.transform for applied in applied_transforms}
  _, context = user_pipeline.to_runner_api(
      return_context=True, use_fake_coders=True)
  return {
      str(ptransform.to_runner_api(context))
      for ptransform in source_ptransforms
  }
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import apache_beam as beam
from apache_beam import runners
from apache_beam.pipeline import PipelineVisitor
from apache_beam.runners.direct import direct_runner
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive import pipeline_instrument as inst
from apache_beam.runners.interactive import background_caching_job
from apache_beam.runners.interactive.display import pipeline_graph
from apache_beam.runners.interactive.options import capture_control
from apache_beam.runners.interactive.utils import to_element_list
from apache_beam.testing.test_stream_service import TestStreamServiceController
# size of PCollection samples cached.
SAMPLE_SIZE = 8
_LOGGER = logging.getLogger(__name__)
class InteractiveRunner(runners.PipelineRunner):
  """An interactive runner for Beam Python pipelines.

  Allows interactively building and running Beam Python pipelines.
  """
  def __init__(
      self,
      underlying_runner=None,
      render_option=None,
      skip_display=True,
      force_compute=True,
      blocking=True):
    """Constructor of InteractiveRunner.

    Args:
      underlying_runner: (runner.PipelineRunner)
      render_option: (str) this parameter decides how the pipeline graph is
          rendered. See display.pipeline_graph_renderer for available options.
      skip_display: (bool) whether to skip display operations when running the
          pipeline. Useful if running large pipelines when display is not
          needed.
      force_compute: (bool) whether sequential pipeline runs can use cached
          data of PCollections computed from the previous runs including show
          API invocation from interactive_beam module. If True, always run
          the whole pipeline and compute data for PCollections forcefully. If
          False, use available data and run minimum pipeline fragment to only
          compute data not available.
      blocking: (bool) whether the pipeline run should be blocking or not.
    """
    self._underlying_runner = (
        underlying_runner or direct_runner.DirectRunner())
    self._render_option = render_option
    self._in_session = False
    self._skip_display = skip_display
    self._force_compute = force_compute
    self._blocking = blocking

  def is_fnapi_compatible(self):
    # TODO(BEAM-8436): return self._underlying_runner.is_fnapi_compatible()
    return False

  def set_render_option(self, render_option):
    """Sets the rendering option.

    Args:
      render_option: (str) this parameter decides how the pipeline graph is
          rendered. See display.pipeline_graph_renderer for available options.
    """
    self._render_option = render_option

  def start_session(self):
    """Start the session that keeps back-end managers and workers alive.
    """
    if self._in_session:
      return

    enter = getattr(self._underlying_runner, '__enter__', None)
    if enter is not None:
      _LOGGER.info('Starting session.')
      self._in_session = True
      enter()
    else:
      _LOGGER.error('Keep alive not supported.')

  def end_session(self):
    """End the session that keeps backend managers and workers alive.
    """
    if not self._in_session:
      return

    exit = getattr(self._underlying_runner, '__exit__', None)
    if exit is not None:
      self._in_session = False
      _LOGGER.info('Ending session.')
      exit(None, None, None)

  def apply(self, transform, pvalueish, options):
    # TODO(qinyeli, BEAM-646): Remove runner interception of apply.
    return self._underlying_runner.apply(transform, pvalueish, options)

  def run_pipeline(self, pipeline, options):
    """Instruments and runs the pipeline, managing source recording,
    caching, and the test stream service as needed."""
    if not ie.current_env().options.enable_recording_replay:
      capture_control.evict_captured_data()
    if self._force_compute:
      ie.current_env().evict_computed_pcollections()

    # Make sure that sources without a user reference are still cached.
    inst.watch_sources(pipeline)

    user_pipeline = ie.current_env().user_pipeline(pipeline)
    pipeline_instrument = inst.build_pipeline_instrument(pipeline, options)

    # The user_pipeline analyzed might be None if the pipeline given has
    # nothing to be cached and tracing back to the user defined pipeline is
    # impossible. When it's None, there is no need to cache including the
    # background caching job and no result to track since no background
    # caching job is started at all.
    if user_pipeline:
      # Should use the underlying runner and run asynchronously.
      background_caching_job.attempt_to_run_background_caching_job(
          self._underlying_runner, user_pipeline, options)
      if (background_caching_job.has_source_to_cache(user_pipeline) and
          not background_caching_job.is_a_test_stream_service_running(
              user_pipeline)):
        streaming_cache_manager = ie.current_env().get_cache_manager(
            user_pipeline)

        # Only make the server if it doesn't exist already.
        if (streaming_cache_manager and
            not ie.current_env().get_test_stream_service_controller(
                user_pipeline)):

          def exception_handler(e):
            # Log and swallow exceptions from the test stream service.
            _LOGGER.error(str(e))
            return True

          test_stream_service = TestStreamServiceController(
              streaming_cache_manager, exception_handler=exception_handler)
          test_stream_service.start()
          ie.current_env().set_test_stream_service_controller(
              user_pipeline, test_stream_service)

    pipeline_to_execute = beam.pipeline.Pipeline.from_runner_api(
        pipeline_instrument.instrumented_pipeline_proto(),
        self._underlying_runner,
        options)

    if ie.current_env().get_test_stream_service_controller(user_pipeline):
      endpoint = ie.current_env().get_test_stream_service_controller(
          user_pipeline).endpoint

      # TODO: make the StreamingCacheManager and TestStreamServiceController
      # constructed when the InteractiveEnvironment is imported.
      # Point every event-less TestStream at the running service endpoint.
      class TestStreamVisitor(PipelineVisitor):
        def visit_transform(self, transform_node):
          from apache_beam.testing.test_stream import TestStream
          if (isinstance(transform_node.transform, TestStream) and
              not transform_node.transform._events):
            transform_node.transform._endpoint = endpoint

      pipeline_to_execute.visit(TestStreamVisitor())

    if not self._skip_display:
      a_pipeline_graph = pipeline_graph.PipelineGraph(
          pipeline_instrument.original_pipeline,
          render_option=self._render_option)
      a_pipeline_graph.display_graph()

    main_job_result = PipelineResult(
        pipeline_to_execute.run(), pipeline_instrument)
    # In addition to this pipeline result setting, redundant result setting
    # from outer scopes are also recommended since the user_pipeline might
    # not be available from within this scope.
    if user_pipeline:
      ie.current_env().set_pipeline_result(user_pipeline, main_job_result)

    if self._blocking:
      main_job_result.wait_until_finish()

    if main_job_result.state is beam.runners.runner.PipelineState.DONE:
      # pylint: disable=dict-values-not-iterating
      ie.current_env().mark_pcollection_computed(
          pipeline_instrument.cached_pcolls)

    return main_job_result
class PipelineResult(beam.runners.runner.PipelineResult):
  """Provides access to information about a pipeline."""
  def __init__(self, underlying_result, pipeline_instrument):
    """Constructor of PipelineResult.

    Args:
      underlying_result: (PipelineResult) the result returned by the
          underlying runner running the pipeline.
      pipeline_instrument: (PipelineInstrument) pipeline instrument describing
          the pipeline being executed with interactivity applied and related
          metadata including where the interactivity-backing cache lies.
    """
    super(PipelineResult, self).__init__(underlying_result.state)
    self._underlying_result = underlying_result
    self._pipeline_instrument = pipeline_instrument

  @property
  def state(self):
    """Current state of the underlying pipeline run."""
    return self._underlying_result.state

  def wait_until_finish(self):
    """Blocks until the underlying pipeline run terminates."""
    self._underlying_result.wait_until_finish()

  def get(self, pcoll, include_window_info=False):
    """Materializes the PCollection into a list.

    If include_window_info is True, then returns the elements as
    WindowedValues. Otherwise, return the element as itself.
    """
    return list(self.read(pcoll, include_window_info))

  def read(self, pcoll, include_window_info=False):
    """Reads the PCollection one element at a time from cache.

    If include_window_info is True, then returns the elements as
    WindowedValues. Otherwise, return the element as itself.
    """
    cache_key = self._pipeline_instrument.cache_key(pcoll)
    cache_manager = ie.current_env().get_cache_manager(
        self._pipeline_instrument.user_pipeline)
    if not cache_manager.exists('full', cache_key):
      raise ValueError('PCollection not available, please run the pipeline.')
    pcoder = cache_manager.load_pcoder('full', cache_key)
    reader, _ = cache_manager.read('full', cache_key)
    return to_element_list(reader, pcoder, include_window_info)

  def cancel(self):
    """Cancels the underlying pipeline run."""
    self._underlying_result.cancel()
from typing import Iterator
from typing import Optional
import apache_beam as beam # type: ignore
class UserPipelineTracker:
  """Tracks user pipelines from derived pipelines.

  This data structure is similar to a disjoint set data structure. A derived
  pipeline can only have one parent user pipeline. A user pipeline can have
  many derived pipelines.

  Fixes: the mapping annotations were malformed single-parameter ``dict``
  generics; they now carry both key and value types. Beam-typed annotations
  are quoted forward references so they are not evaluated at definition
  time. The private memoization helper's misspelled name was corrected.
  """
  def __init__(self):
    # Maps each user pipeline to the list of pipelines derived from it.
    self._user_pipelines: 'dict[beam.Pipeline, list[beam.Pipeline]]' = {}
    # Maps each derived pipeline to its unique parent user pipeline.
    self._derived_pipelines: 'dict[beam.Pipeline, beam.Pipeline]' = {}
    # Maps a pipeline id (see _key) to the pipeline object itself.
    self._pid_to_pipelines: 'dict[str, beam.Pipeline]' = {}

  def __iter__(self) -> 'Iterator[beam.Pipeline]':
    """Iterates through all the user pipelines."""
    for p in self._user_pipelines:
      yield p

  def _key(self, pipeline: 'beam.Pipeline') -> str:
    """Returns the id string used to memoize pipeline objects."""
    return str(id(pipeline))

  def evict(self, pipeline: 'beam.Pipeline') -> None:
    """Evicts the pipeline.

    Removes the given pipeline and derived pipelines if a user pipeline.
    Otherwise, removes the given derived pipeline.
    """
    user_pipeline = self.get_user_pipeline(pipeline)
    if user_pipeline:
      # Evicting a user pipeline also evicts everything derived from it.
      for derived in self._user_pipelines[user_pipeline]:
        del self._derived_pipelines[derived]
      del self._user_pipelines[user_pipeline]
    elif pipeline in self._derived_pipelines:
      del self._derived_pipelines[pipeline]

  def clear(self) -> None:
    """Clears the tracker of all user and derived pipelines."""
    self._user_pipelines.clear()
    self._derived_pipelines.clear()
    self._pid_to_pipelines.clear()

  def get_pipeline(self, pid: str) -> 'Optional[beam.Pipeline]':
    """Returns the pipeline corresponding to the given pipeline id."""
    return self._pid_to_pipelines.get(pid, None)

  def add_user_pipeline(self, p: 'beam.Pipeline') -> 'beam.Pipeline':
    """Adds a user pipeline with an empty set of derived pipelines."""
    self._memoize_pipeline(p)

    # Create a new node for the user pipeline if it doesn't exist already.
    user_pipeline = self.get_user_pipeline(p)
    if not user_pipeline:
      user_pipeline = p
      self._user_pipelines[p] = []

    return user_pipeline

  def _memoize_pipeline(self, p: 'beam.Pipeline') -> None:
    """Memoizes the pid of the pipeline to the pipeline object."""
    pid = self._key(p)
    if pid not in self._pid_to_pipelines:
      self._pid_to_pipelines[pid] = p

  def add_derived_pipeline(
      self, maybe_user_pipeline: 'beam.Pipeline',
      derived_pipeline: 'beam.Pipeline') -> None:
    """Adds a derived pipeline with the user pipeline.

    If the `maybe_user_pipeline` is a user pipeline, then the derived
    pipeline will be added to its set. Otherwise, the derived pipeline will
    be added to the user pipeline of the `maybe_user_pipeline`.

    By doing the above one can do:
    p = beam.Pipeline()

    derived1 = beam.Pipeline()
    derived2 = beam.Pipeline()

    ut = UserPipelineTracker()
    ut.add_derived_pipeline(p, derived1)
    ut.add_derived_pipeline(derived1, derived2)

    # Returns p.
    ut.get_user_pipeline(derived2)
    """
    self._memoize_pipeline(maybe_user_pipeline)
    self._memoize_pipeline(derived_pipeline)

    # Cannot add a derived pipeline twice.
    assert derived_pipeline not in self._derived_pipelines

    # Get the "true" user pipeline. This allows for the user to derive a
    # pipeline from another derived pipeline, use both as arguments, and this
    # method will still get the correct user pipeline.
    user = self.add_user_pipeline(maybe_user_pipeline)

    # Map the derived pipeline to the user pipeline.
    self._derived_pipelines[derived_pipeline] = user
    self._user_pipelines[user].append(derived_pipeline)

  def get_user_pipeline(self, p: 'beam.Pipeline') -> 'Optional[beam.Pipeline]':
    """Returns the user pipeline of the given pipeline.

    If the given pipeline has no user pipeline, i.e. not added to this
    tracker, then this returns None. If the given pipeline is a user pipeline
    then this returns the same pipeline. If the given pipeline is a derived
    pipeline then this returns the user pipeline.
    """
    # If `p` is a user pipeline then return it.
    if p in self._user_pipelines:
      return p
    # If `p` exists then return its user pipeline.
    if p in self._derived_pipelines:
      return self._derived_pipelines[p]
    # Otherwise, `p` is not in this tracker.
    return None
from __future__ import absolute_import
import hashlib
import json
import logging
import pandas as pd
from apache_beam.portability.api.beam_runner_api_pb2 import TestStreamPayload
from apache_beam.testing.test_stream import WindowedValueHolder
def to_element_list(
    reader,  # type: Generator[Union[TestStreamPayload.Event, WindowedValueHolder]]
    coder,  # type: Coder
    include_window_info,  # type: bool
    n=None,  # type: int
    include_time_events=False, # type: bool
):
  # type: (...) -> Iterator[WindowedValue]
  """Returns an iterator that properly decodes the elements from the reader.

  Args:
    reader: yields TestStreamPayload.Event protos, WindowedValueHolder
      wrappers, or raw elements.
    coder: decodes encoded elements carried inside element events.
    include_window_info: when True, yield full WindowedValues; otherwise
      yield just the bare values.
    n: optional cap on the number of data elements yielded; watermark and
      processing-time events do not count toward the cap.
    include_time_events: when True, watermark/processing-time events are
      yielded as-is instead of being dropped.
  """

  # Defining a generator like this makes it easier to limit the count of
  # elements read. Otherwise, the count limit would need to be duplicated.
  def elements():
    for e in reader:
      if isinstance(e, TestStreamPayload.Event):
        if (e.HasField('watermark_event') or
            e.HasField('processing_time_event')):
          if include_time_events:
            yield e
        else:
          # A single element event may carry multiple encoded elements.
          for tv in e.element_event.elements:
            decoded = coder.decode(tv.encoded_element)
            yield (
                decoded.windowed_value
                if include_window_info else decoded.windowed_value.value)
      elif isinstance(e, WindowedValueHolder):
        yield (
            e.windowed_value if include_window_info else e.windowed_value.value)
      else:
        yield e

  # Because we can yield multiple elements from a single TestStreamFileRecord,
  # we have to limit the count here to ensure that `n` is fulfilled.
  count = 0
  for e in elements():
    if n and count >= n:
      break
    yield e

    if not isinstance(e, TestStreamPayload.Event):
      count += 1
def elements_to_df(elements, include_window_info=False):
  # type: (List[WindowedValue], bool) -> DataFrame

  """Parses the given elements into a Dataframe.

  Builds a DataFrame out of the elements' values. When include_window_info is
  True, the windowing metadata (event time, windows, pane info) is concatenated
  onto the values DataFrame as extra columns.
  """
  values = [e.value for e in elements]
  frame = pd.DataFrame(values)
  if include_window_info:
    window_rows = [[e.timestamp.micros, e.windows, e.pane_info]
                   for e in elements]
    window_frame = pd.DataFrame(
        window_rows, columns=['event_time', 'windows', 'pane_info'])
    frame = pd.concat([frame, window_frame], axis=1)
  return frame
def register_ipython_log_handler():
  # type: () -> None

  """Adds the IPython handler to a dummy parent logger (named
  'apache_beam.runners.interactive') of all interactive modules' loggers so that
  if is_in_notebook, logging displays the logs as HTML in frontends.
  """
  # 'apache_beam.runners.interactive' is not a real module; this logger only
  # exists to host the IPython log handler. Child loggers propagate to it by
  # default (with level NOTSET), so the handler fires at this logger's own
  # level unless a child logger sets an explicit level of its own.
  interactive_root_logger = logging.getLogger('apache_beam.runners.interactive')
  # Install the handler at most once, no matter how often this is called.
  for existing_handler in interactive_root_logger.handlers:
    if isinstance(existing_handler, IPythonLogHandler):
      return
  interactive_root_logger.setLevel(logging.INFO)
  interactive_root_logger.addHandler(IPythonLogHandler())
  # Stop propagation so interactive logs are handled only by handlers defined
  # within the interactive packages.
  interactive_root_logger.propagate = False
class IPythonLogHandler(logging.Handler):
"""A logging handler to display logs as HTML in IPython backed frontends."""
# TODO(BEAM-7923): Switch to Google hosted CDN once
# https://code.google.com/archive/p/google-ajax-apis/issues/637 is resolved.
log_template = """
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css" integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh" crossorigin="anonymous">
<div class="alert alert-{level}">{msg}</div>"""
logging_to_alert_level_map = {
logging.CRITICAL: 'danger',
logging.ERROR: 'danger',
logging.WARNING: 'warning',
logging.INFO: 'info',
logging.DEBUG: 'dark',
logging.NOTSET: 'light'
}
def emit(self, record):
try:
from html import escape
from IPython.core.display import HTML
from IPython.core.display import display
display(
HTML(
self.log_template.format(
level=self.logging_to_alert_level_map[record.levelno],
msg=escape(record.msg % record.args))))
except ImportError:
pass # NOOP when dependencies are not available.
def obfuscate(*inputs):
  # type: (*Any) -> str

  """Obfuscates any inputs into a hexadecimal string.

  Joins the str() of each input with '_' and returns the md5 hex digest of
  the result. md5 is fine here: this is identifier obfuscation, not security.
  """
  # Renamed the loop variable: the original shadowed the builtin `input`.
  merged_inputs = '_'.join(str(value) for value in inputs)
  return hashlib.md5(merged_inputs.encode('utf-8')).hexdigest()
class ProgressIndicator(object):
  """An indicator visualizing code execution in progress."""
  # TODO(BEAM-7923): Switch to Google hosted CDN once
  # https://code.google.com/archive/p/google-ajax-apis/issues/637 is resolved.
  spinner_template = """
            <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css" integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh" crossorigin="anonymous">
            <div id="{id}" class="spinner-border text-info" role="status">
            </div>"""
  spinner_removal_template = """
            $("#{id}").remove();"""

  def __init__(self, enter_text, exit_text):
    # type: (str, str) -> None
    # Derive the DOM id from this instance's memory address so that multiple
    # concurrent indicators never collide.
    self._id = 'progress_indicator_{}'.format(obfuscate(id(self)))
    self._enter_text = enter_text
    self._exit_text = exit_text

  def __enter__(self):
    """Shows the spinner (notebook) or the enter text (plain IPython)."""
    try:
      from IPython.core.display import HTML
      from IPython.core.display import display
      from apache_beam.runners.interactive import interactive_environment as ie
    except ImportError:
      return  # NOOP when dependencies are not available.
    if ie.current_env().is_in_notebook:
      display(HTML(self.spinner_template.format(id=self._id)))
    else:
      display(self._enter_text)

  def __exit__(self, exc_type, exc_value, traceback):
    """Removes the spinner (notebook) or shows the exit text."""
    try:
      from IPython.core.display import Javascript
      from IPython.core.display import display
      from IPython.core.display import display_javascript
      from apache_beam.runners.interactive import interactive_environment as ie
    except ImportError:
      return  # NOOP when dependencies are not available.
    if ie.current_env().is_in_notebook:
      removal_script = self.spinner_removal_template.format(id=self._id)
      display_javascript(
          Javascript(
              ie._JQUERY_WITH_DATATABLE_TEMPLATE.format(
                  customized_script=removal_script)))
    else:
      display(self._exit_text)
def progress_indicated(func):
  # type: (Callable[..., Any]) -> Callable[..., Any]

  """A decorator using a unique progress indicator as a context manager to
  execute the given function within."""
  import functools  # Local import keeps this utility module's deps unchanged.

  # functools.wraps preserves the wrapped function's name/docstring so the
  # decorated callable remains introspectable (the original did not).
  @functools.wraps(func)
  def run_within_progress_indicator(*args, **kwargs):
    with ProgressIndicator('Processing...', 'Done.'):
      return func(*args, **kwargs)

  return run_within_progress_indicator
def as_json(func):
  # type: (Callable[..., Any]) -> Callable[..., str]

  """A decorator convert python objects returned by callables to json
  string.

  The decorated function should always return an object parsable by
  json.dumps. If the object is not parsable, the str() of the original object
  is returned instead.
  """
  def return_as_json(*args, **kwargs):
    # Call func outside the try block: the original caught TypeError around
    # the call too, which raised UnboundLocalError on the `str(return_value)`
    # fallback whenever func itself raised TypeError. Errors from func now
    # propagate to the caller unchanged.
    return_value = func(*args, **kwargs)
    try:
      return json.dumps(return_value)
    except TypeError:
      # Not JSON-serializable; fall back to the plain string representation.
      return str(return_value)

  return return_as_json
from __future__ import absolute_import
import apache_beam as beam
from apache_beam.pipeline import PipelineVisitor
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.testing.test_stream import TestStream
class PipelineFragment(object):
  """A fragment of a pipeline definition.

  A pipeline fragment is built from the original pipeline definition to include
  only PTransforms that are necessary to produce the given PCollections.
  """
  def __init__(self, pcolls, options=None):
    """Constructor of PipelineFragment.

    Args:
      pcolls: (List[PCollection]) a list of PCollections to build pipeline
          fragment for.
      options: (PipelineOptions) the pipeline options for the implicit
          pipeline run.
    """
    assert len(pcolls) > 0, (
        'Need at least 1 PCollection as the target data to build a pipeline '
        'fragment that produces it.')
    for pcoll in pcolls:
      assert isinstance(pcoll, beam.pvalue.PCollection), (
          '{} is not an apache_beam.pvalue.PCollection.'.format(pcoll))
    # No modification to self._user_pipeline is allowed.
    self._user_pipeline = pcolls[0].pipeline
    # These are user PCollections. Do not use them to deduce anything that
    # will be executed by any runner. Instead, use
    # `self._runner_pcolls_to_user_pcolls.keys()` to get copied PCollections.
    self._pcolls = set(pcolls)
    for pcoll in self._pcolls:
      assert pcoll.pipeline is self._user_pipeline, (
          '{} belongs to a different user pipeline than other PCollections '
          'given and cannot be used to build a pipeline fragment that produces '
          'the given PCollections.'.format(pcoll))
    self._options = options

    # A copied pipeline instance for modification without changing the user
    # pipeline instance held by the end user. This instance can be processed
    # into a pipeline fragment that later run by the underlying runner.
    self._runner_pipeline = self._build_runner_pipeline()
    _, self._context = self._runner_pipeline.to_runner_api(
        return_context=True, use_fake_coders=True)
    from apache_beam.runners.interactive import pipeline_instrument as instr
    self._runner_pcoll_to_id = instr.pcolls_to_pcoll_id(
        self._runner_pipeline, self._context)
    # Correlate components in the runner pipeline to components in the user
    # pipeline. The target pcolls are the pcolls given and defined in the user
    # pipeline.
    self._id_to_target_pcoll = self._calculate_target_pcoll_ids()
    self._label_to_user_transform = self._calculate_user_transform_labels()
    # Below will give us the 1:1 correlation between
    # PCollections/AppliedPTransforms from the copied runner pipeline and
    # PCollections/AppliedPTransforms from the user pipeline.
    # (Dict[PCollection, PCollection])
    (
        self._runner_pcolls_to_user_pcolls,
        # (Dict[AppliedPTransform, AppliedPTransform])
        self._runner_transforms_to_user_transforms
    ) = self._build_correlation_between_pipelines(
        self._runner_pcoll_to_id,
        self._id_to_target_pcoll,
        self._label_to_user_transform)

    # Below are operated on the runner pipeline.
    (self._necessary_transforms,
     self._necessary_pcollections) = self._mark_necessary_transforms_and_pcolls(
         self._runner_pcolls_to_user_pcolls)
    self._runner_pipeline = self._prune_runner_pipeline_to_fragment(
        self._runner_pipeline, self._necessary_transforms)

  def deduce_fragment(self):
    """Deduce the pipeline fragment as an apache_beam.Pipeline instance."""
    fragment = beam.pipeline.Pipeline.from_runner_api(
        self._runner_pipeline.to_runner_api(use_fake_coders=True),
        self._runner_pipeline.runner,
        self._options)
    ie.current_env().add_derived_pipeline(self._runner_pipeline, fragment)
    return fragment

  def run(self, display_pipeline_graph=False, use_cache=True, blocking=False):
    """Shorthand to run the pipeline fragment."""
    try:
      # Preserve the runner's flags so they can be restored after the run.
      preserved_skip_display = self._runner_pipeline.runner._skip_display
      preserved_force_compute = self._runner_pipeline.runner._force_compute
      preserved_blocking = self._runner_pipeline.runner._blocking
      self._runner_pipeline.runner._skip_display = not display_pipeline_graph
      self._runner_pipeline.runner._force_compute = not use_cache
      self._runner_pipeline.runner._blocking = blocking
      return self.deduce_fragment().run()
    finally:
      self._runner_pipeline.runner._skip_display = preserved_skip_display
      self._runner_pipeline.runner._force_compute = preserved_force_compute
      self._runner_pipeline.runner._blocking = preserved_blocking

  def _build_runner_pipeline(self):
    """Copies the user pipeline into a runner pipeline safe to mutate."""
    runner_pipeline = beam.pipeline.Pipeline.from_runner_api(
        self._user_pipeline.to_runner_api(use_fake_coders=True),
        self._user_pipeline.runner,
        self._options)
    ie.current_env().add_derived_pipeline(self._user_pipeline, runner_pipeline)
    return runner_pipeline

  def _calculate_target_pcoll_ids(self):
    """Maps the PCollection ID of each target pcoll to the user pcoll."""
    pcoll_id_to_target_pcoll = {}
    for pcoll in self._pcolls:
      pcoll_id_to_target_pcoll[self._runner_pcoll_to_id.get(str(pcoll),
                                                            '')] = pcoll
    return pcoll_id_to_target_pcoll

  def _calculate_user_transform_labels(self):
    """Maps each full transform label in the runner pipeline to its node."""
    label_to_user_transform = {}

    class UserTransformVisitor(PipelineVisitor):
      def enter_composite_transform(self, transform_node):
        self.visit_transform(transform_node)

      def visit_transform(self, transform_node):
        if transform_node is not None:
          label_to_user_transform[transform_node.full_label] = transform_node

    v = UserTransformVisitor()
    self._runner_pipeline.visit(v)
    return label_to_user_transform

  def _build_correlation_between_pipelines(
      self, runner_pcoll_to_id, id_to_target_pcoll, label_to_user_transform):
    """Builds 1:1 mappings from runner pipeline components to user pipeline
    components, keyed by PCollection ID and transform full label."""
    runner_pcolls_to_user_pcolls = {}
    runner_transforms_to_user_transforms = {}

    class CorrelationVisitor(PipelineVisitor):
      def enter_composite_transform(self, transform_node):
        self.visit_transform(transform_node)

      def visit_transform(self, transform_node):
        self._process_transform(transform_node)
        for in_pcoll in transform_node.inputs:
          self._process_pcoll(in_pcoll)
        for out_pcoll in transform_node.outputs.values():
          self._process_pcoll(out_pcoll)

      def _process_pcoll(self, pcoll):
        pcoll_id = runner_pcoll_to_id.get(str(pcoll), '')
        if pcoll_id in id_to_target_pcoll:
          runner_pcolls_to_user_pcolls[pcoll] = (id_to_target_pcoll[pcoll_id])

      def _process_transform(self, transform_node):
        if transform_node.full_label in label_to_user_transform:
          runner_transforms_to_user_transforms[transform_node] = (
              label_to_user_transform[transform_node.full_label])

    v = CorrelationVisitor()
    self._runner_pipeline.visit(v)
    return runner_pcolls_to_user_pcolls, runner_transforms_to_user_transforms

  def _mark_necessary_transforms_and_pcolls(self, runner_pcolls_to_user_pcolls):
    """Walks producers backwards from the target PCollections and returns the
    transforms and PCollections needed to produce them."""
    necessary_transforms = set()
    all_inputs = set()
    updated_all_inputs = set(runner_pcolls_to_user_pcolls.keys())
    # Do this until no more new PCollection is recorded.
    while len(updated_all_inputs) != len(all_inputs):
      all_inputs = set(updated_all_inputs)
      for pcoll in all_inputs:
        producer = pcoll.producer
        while producer:
          if producer in necessary_transforms:
            break
          # Mark the AppliedPTransform as necessary.
          necessary_transforms.add(producer)
          # Record all necessary input and side input PCollections.
          updated_all_inputs.update(producer.inputs)
          # pylint: disable=map-builtin-not-iterating
          side_input_pvalues = set(
              map(lambda side_input: side_input.pvalue, producer.side_inputs))
          updated_all_inputs.update(side_input_pvalues)
          # Go to its parent AppliedPTransform.
          producer = producer.parent
    return necessary_transforms, all_inputs

  def _prune_runner_pipeline_to_fragment(
      self, runner_pipeline, necessary_transforms):
    """Detaches every transform not in necessary_transforms from the runner
    pipeline; TestStream composites are left untouched."""
    class PruneVisitor(PipelineVisitor):
      def enter_composite_transform(self, transform_node):
        if isinstance(transform_node.transform, TestStream):
          return

        pruned_parts = list(transform_node.parts)
        for part in transform_node.parts:
          if part not in necessary_transforms:
            pruned_parts.remove(part)
        transform_node.parts = tuple(pruned_parts)
        self.visit_transform(transform_node)

      def visit_transform(self, transform_node):
        if transform_node not in necessary_transforms:
          transform_node.parent = None

    v = PruneVisitor()
    runner_pipeline.visit(v)
    return runner_pipeline
# pytype: skip-file
from __future__ import absolute_import
import apache_beam as beam
from apache_beam.runners.interactive.utils import as_json
from apache_beam.runners.interactive.utils import obfuscate
class InteractiveEnvironmentInspector(object):
  """Inspector that converts information of the current interactive environment
  including pipelines and pcollections into JSON data suitable for messaging
  with applications within/outside the Python kernel.

  The usage is always that the application side reads the inspectables or
  list_inspectables first then communicates back to the kernel and get_val for
  usage on the kernel side.
  """
  def __init__(self):
    # Maps obfuscated identifiers to {'metadata': ..., 'value': ...} dicts for
    # watched pipelines and pcollections; refreshed by the `inspectables`
    # property.
    self._inspectables = {}
    # Same shape, but for pipelines not assigned to any variable (synthesized
    # names); populated by list_inspectables.
    self._anonymous = {}
    # str(id(pipeline)) -> pipeline, for every pipeline still in scope.
    self._inspectable_pipelines = set()

  @property
  def inspectables(self):
    """Lists pipelines and pcollections assigned to variables as inspectables.
    """
    # Re-inspect on every access so the listing reflects the current watched
    # variables.
    self._inspectables = inspect()
    return self._inspectables

  @property
  def inspectable_pipelines(self):
    """Returns a dictionary of all inspectable pipelines. The keys are
    stringified id of pipeline instances.

    This includes user defined pipeline assigned to variables and anonymous
    pipelines with inspectable PCollections.

    If a user defined pipeline is not within the returned dict, it can be
    considered out of scope, and all resources and memory states related to it
    should be released.
    """
    # list_inspectables refreshes self._inspectable_pipelines as a side effect.
    _ = self.list_inspectables()
    return self._inspectable_pipelines

  @as_json
  def list_inspectables(self):
    """Lists inspectables in JSON format.

    When listing, pcollections are organized by the pipeline they belong to.
    If a pipeline is no longer assigned to a variable but its pcollections
    assigned to variables are still in scope, the pipeline will be given a name
    as 'anonymous_pipeline[id:$inMemoryId]'.

    The listing doesn't contain object values of the pipelines or pcollections.
    The obfuscated identifier can be used to trace back to those values in the
    kernel.

    The listing includes anonymous pipelines that are not assigned to variables
    but still containing inspectable PCollections.
    """
    listing = {}
    pipelines = inspect_pipelines()
    for pipeline, name in pipelines.items():
      metadata = meta(name, pipeline)
      listing[obfuscate(metadata)] = {'metadata': metadata, 'pcolls': {}}
    for identifier, inspectable in self.inspectables.items():
      if inspectable['metadata']['type'] == 'pcollection':
        pipeline = inspectable['value'].pipeline
        # A pcoll whose pipeline is not assigned to any variable: synthesize
        # an anonymous pipeline entry to hang the pcoll under.
        if pipeline not in list(pipelines.keys()):
          pipeline_name = synthesize_pipeline_name(pipeline)
          pipelines[pipeline] = pipeline_name
          pipeline_metadata = meta(pipeline_name, pipeline)
          pipeline_identifier = obfuscate(pipeline_metadata)
          self._anonymous[pipeline_identifier] = {
              'metadata': pipeline_metadata, 'value': pipeline
          }
          listing[pipeline_identifier] = {
              'metadata': pipeline_metadata,
              'pcolls': {
                  identifier: inspectable['metadata']
              }
          }
        else:
          pipeline_identifier = obfuscate(meta(pipelines[pipeline], pipeline))
          listing[pipeline_identifier]['pcolls'][identifier] = inspectable[
              'metadata']
    self._inspectable_pipelines = dict(
        (str(id(pipeline)), pipeline) for pipeline in pipelines)
    return listing

  def get_val(self, identifier):
    """Retrieves the in memory object itself by identifier.

    The retrieved object could be a pipeline or a pcollection. If the
    identifier is not recognized, return None.
    The identifier can refer to an anonymous pipeline and the object will still
    be retrieved.
    """
    inspectable = self._inspectables.get(identifier, None)
    if inspectable:
      return inspectable['value']
    inspectable = self._anonymous.get(identifier, None)
    if inspectable:
      return inspectable['value']
    return None

  def get_pcoll_data(self, identifier, include_window_info=False):
    """Retrieves the json formatted PCollection data.

    If no PCollection value can be retrieved from the given identifier, an
    empty json string will be returned.
    """
    value = self.get_val(identifier)
    if isinstance(value, beam.pvalue.PCollection):
      from apache_beam.runners.interactive import interactive_beam as ib
      dataframe = ib.collect(value, include_window_info=include_window_info)
      return dataframe.to_json(orient='table')
    # NOTE(review): despite the docstring, this returns an empty dict rather
    # than an empty JSON string when the value is not a PCollection — confirm
    # which one callers actually expect.
    return {}
def inspect():
  """Inspects current interactive environment to track metadata and values of
  pipelines and pcollections.

  Each pipeline and pcollection tracked is given a unique identifier.
  """
  # Note: intentionally keeps the public name `inspect` (shadows the stdlib
  # module of the same name) — renaming would break callers.
  from apache_beam.runners.interactive import interactive_environment as ie
  inspectables = {}
  for watching in ie.current_env().watching():
    for name, value in watching:
      # Ignore synthetic vars created by Interactive Beam itself.
      if name.startswith('synthetic_var_'):
        continue
      # Only pipelines and pcollections are inspectable. Check the type first
      # so metadata hashing isn't wasted on values that get discarded.
      if isinstance(value, (beam.pipeline.Pipeline, beam.pvalue.PCollection)):
        metadata = meta(name, value)
        inspectables[obfuscate(metadata)] = {
            'metadata': metadata, 'value': value
        }
  return inspectables
def inspect_pipelines():
  """Inspects current interactive environment to track all pipelines assigned
  to variables. The keys are pipeline objects and values are pipeline names.
  """
  from apache_beam.runners.interactive import interactive_environment as ie
  pipelines = {}
  for watching in ie.current_env().watching():
    # Later watchings overwrite earlier entries for the same pipeline object.
    pipelines.update(
        (value, name) for name, value in watching
        if isinstance(value, beam.pipeline.Pipeline))
  return pipelines
def meta(name, val):
  """Generates meta data for the given name and value."""
  type_name = type(val).__name__.lower()
  return {'name': name, 'inMemoryId': id(val), 'type': type_name}
def synthesize_pipeline_name(val):
  """Synthesizes a display name for a pipeline not assigned to any variable.

  The in-memory id disambiguates distinct anonymous pipelines.
  """
  return 'anonymous_pipeline[id:{}]'.format(id(val))
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import os
import subprocess
from typing import TYPE_CHECKING
from typing import Optional
from typing import Type
from future.utils import with_metaclass
from apache_beam.utils.plugin import BeamPlugin
if TYPE_CHECKING:
from apache_beam.runners.interactive.display.pipeline_graph import PipelineGraph
class PipelineGraphRenderer(with_metaclass(abc.ABCMeta, BeamPlugin)):  # type: ignore[misc]
  """Abstract class for renderers, who decide how pipeline graphs are rendered.

  Subclasses register themselves through the BeamPlugin machinery; see
  get_renderer below, which selects one by its option() string.
  """
  @classmethod
  @abc.abstractmethod
  def option(cls):
    # type: () -> str

    """The corresponding rendering option for the renderer.
    """
    raise NotImplementedError

  @abc.abstractmethod
  def render_pipeline_graph(self, pipeline_graph):
    # type: (PipelineGraph) -> str

    """Renders the pipeline graph in HTML-compatible format.

    Args:
      pipeline_graph: (pipeline_graph.PipelineGraph) the graph to be rendered.

    Returns:
      unicode, str or bytes that can be expressed as HTML.
    """
    raise NotImplementedError
class MuteRenderer(PipelineGraphRenderer):
  """Renderer that suppresses pipeline graph display entirely."""
  @classmethod
  def option(cls):
    # type: () -> str
    return 'mute'

  def render_pipeline_graph(self, pipeline_graph):
    # type: (PipelineGraph) -> str
    # An empty string renders as nothing in HTML frontends.
    return ''
class TextRenderer(PipelineGraphRenderer):
  """Renderer that returns the raw dot representation as plain text."""
  @classmethod
  def option(cls):
    # type: () -> str
    return 'text'

  def render_pipeline_graph(self, pipeline_graph):
    # type: (PipelineGraph) -> str
    # No Graphviz needed; the dot source itself is the output.
    return pipeline_graph.get_dot()
class PydotRenderer(PipelineGraphRenderer):
  """Renderer that produces an SVG via pydot.

  It depends on
    1. The software Graphviz: https://www.graphviz.org/
    2. The python module pydot: https://pypi.org/project/pydot/
  """
  @classmethod
  def option(cls):
    # type: () -> str
    return 'graph'

  def render_pipeline_graph(self, pipeline_graph):
    # type: (PipelineGraph) -> str
    dot_graph = pipeline_graph._get_graph()  # pylint: disable=protected-access
    return dot_graph.create_svg().decode('utf-8')
def get_renderer(option=None):
  # type: (Optional[str]) -> Type[PipelineGraphRenderer]

  """Get an instance of PipelineGraphRenderer given rendering option.

  Args:
    option: (str) the rendering option; defaults to 'graph' when the Graphviz
        `dot` executable is on PATH, 'text' otherwise.

  Returns:
    (PipelineGraphRenderer) an instance of the matching renderer.

  Raises:
    ValueError: if no renderer, or more than one renderer, matches the option.
  """
  if option is None:
    # Probe for Graphviz's `dot`; suppress the probe's console output.
    if os.name == 'nt':
      probe = ['where', 'dot.exe']
    else:
      probe = ['which', 'dot']
    exists = subprocess.call(
        probe, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) == 0
    option = 'graph' if exists else 'text'
  renderers = [
      r for r in PipelineGraphRenderer.get_all_subclasses()
      if option == r.option()
  ]
  if len(renderers) == 0:
    # The original raised a bare ValueError() with no context.
    raise ValueError('No renderer found for option: %s' % option)
  elif len(renderers) == 1:
    return renderers[0]()
  else:
    # The original passed `option` as a second exception argument instead of
    # formatting it into the message.
    raise ValueError('Found more than one renderer for option: %s' % option)
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from apache_beam.runners.interactive.display import pipeline_graph
def nice_str(o, max_len=35):
  """Returns a sanitized, possibly truncated repr of an object.

  Double quotes become single quotes, backslashes become '|' and non-printable
  ASCII becomes spaces so the result is safe inside dot labels.

  Args:
    o: any object to stringify.
    max_len: maximum length before the string is truncated with '...'
        (generalized from the previously hard-coded 35).
  """
  s = repr(o)
  s = s.replace('"', "'")
  s = s.replace('\\', '|')
  s = re.sub(r'[^\x20-\x7F]', ' ', s)
  # Downstream label construction relies on the absence of double quotes.
  assert '"' not in s
  if len(s) > max_len:
    s = s[:max_len] + '...'
  return s
def format_sample(contents, count=1000):
  """Formats up to `count` elements of `contents` as a braced sample string,
  appending ', ...' when elements were elided."""
  sample = list(contents)
  elems = ', '.join(nice_str(item) for item in sample[:count])
  if len(sample) > count:
    elems += ', ...'
  # nice_str strips double quotes, so the joined string must be free of them.
  assert '"' not in elems
  return '{%s}' % elems
class InteractivePipelineGraph(pipeline_graph.PipelineGraph):
  """Creates the DOT representation of an interactive pipeline. Thread-safe."""
  def __init__(
      self,
      pipeline,
      required_transforms=None,
      referenced_pcollections=None,
      cached_pcollections=None):
    """Constructor of PipelineGraph.

    Args:
      pipeline: (Pipeline proto) or (Pipeline) pipeline to be rendered.
      required_transforms: (list/set of str) ID of top level PTransforms that
          lead to visible results.
      referenced_pcollections: (list/set of str) ID of PCollections that are
          referenced by top level PTransforms executed (i.e.
          required_transforms)
      cached_pcollections: (set of str) a set of PCollection IDs of those whose
          cached results are used in the execution.
    """
    self._required_transforms = required_transforms or set()
    self._referenced_pcollections = referenced_pcollections or set()
    self._cached_pcollections = cached_pcollections or set()

    # Everything starts gray; required/cached/referenced components are
    # re-colored by the update below.
    super(InteractivePipelineGraph, self).__init__(
        pipeline=pipeline,
        default_vertex_attrs={
            'color': 'gray', 'fontcolor': 'gray'
        },
        default_edge_attrs={'color': 'gray'})

    transform_updates, pcollection_updates = self._generate_graph_update_dicts()
    self._update_graph(transform_updates, pcollection_updates)

  def update_pcollection_stats(self, pcollection_stats):
    """Updates PCollection stats.

    Args:
      pcollection_stats: (dict of dict) maps PCollection IDs to information
          dicts. In particular, we only care about the field 'sample' which
          should be the PCollection result as a list.
    """
    edge_dict = {}
    for pcoll_id, stats in pcollection_stats.items():
      attrs = {}
      pcoll_list = stats['sample']
      if pcoll_list:
        # One sample element inline; a ten-element sample as the tooltip.
        attrs['label'] = format_sample(pcoll_list, 1)
        attrs['labeltooltip'] = format_sample(pcoll_list, 10)
      else:
        attrs['label'] = '?'
      edge_dict[pcoll_id] = attrs
    self._update_graph(edge_dict=edge_dict)

  def _generate_graph_update_dicts(self):
    """Generate updates specific to interactive pipeline.

    Returns:
      vertex_dict: (Dict[str, Dict[str, str]]) maps PTransform names to DOT
          vertex attributes.
      edge_dict: (Dict[str, Dict[str, str]]) maps PCollection IDs to DOT edge
          attributes.
    """
    transform_dict = {}  # maps PTransform IDs to properties
    pcoll_dict = {}  # maps PCollection IDs to properties

    for transform_id, transform_proto in self._top_level_transforms():
      transform_dict[transform_proto.unique_name] = {
          'required': transform_id in self._required_transforms
      }

      for pcoll_id in transform_proto.outputs.values():
        pcoll_dict[pcoll_id] = {
            'cached': pcoll_id in self._cached_pcollections,
            'referenced': pcoll_id in self._referenced_pcollections
        }

    def vertex_properties_to_attributes(vertex):
      """Converts PTransform properties to DOT vertex attributes."""
      attrs = {}
      if 'leaf' in vertex:
        attrs['style'] = 'invis'
      elif vertex.get('required'):
        attrs['color'] = 'blue'
        attrs['fontcolor'] = 'blue'
      else:
        attrs['color'] = 'grey'
      return attrs

    def edge_properties_to_attributes(edge):
      """Converts PCollection properties to DOT edge attributes."""
      attrs = {}
      if edge.get('cached'):
        attrs['color'] = 'red'
      elif edge.get('referenced'):
        attrs['color'] = 'black'
      else:
        attrs['color'] = 'grey'
      return attrs

    vertex_dict = {}  # maps vertex names to attributes
    edge_dict = {}  # maps PCollection IDs to attributes

    for transform_name, transform_properties in transform_dict.items():
      vertex_dict[transform_name] = vertex_properties_to_attributes(
          transform_properties)

    for pcoll_id, pcoll_properties in pcoll_dict.items():
      edge_dict[pcoll_id] = edge_properties_to_attributes(pcoll_properties)

    return vertex_dict, edge_dict
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import logging
import threading
from typing import DefaultDict
from typing import Dict
from typing import Iterator
from typing import List
from typing import Tuple
from typing import Union
import pydot
import apache_beam as beam
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive import pipeline_instrument as inst
from apache_beam.runners.interactive.display import pipeline_graph_renderer
# pylint does not understand context
# pylint:disable=dangerous-default-value
class PipelineGraph(object):
"""Creates a DOT representing the pipeline. Thread-safe. Runner agnostic."""
def __init__(
self,
pipeline, # type: Union[beam_runner_api_pb2.Pipeline, beam.Pipeline]
default_vertex_attrs={'shape': 'box'},
default_edge_attrs=None,
render_option=None):
"""Constructor of PipelineGraph.
Examples:
graph = pipeline_graph.PipelineGraph(pipeline_proto)
graph.get_dot()
or
graph = pipeline_graph.PipelineGraph(pipeline)
graph.get_dot()
Args:
pipeline: (Pipeline proto) or (Pipeline) pipeline to be rendered.
default_vertex_attrs: (Dict[str, str]) a dict of default vertex attributes
default_edge_attrs: (Dict[str, str]) a dict of default edge attributes
render_option: (str) this parameter decides how the pipeline graph is
rendered. See display.pipeline_graph_renderer for available options.
"""
self._lock = threading.Lock()
self._graph = None # type: pydot.Dot
self._pipeline_instrument = None
if isinstance(pipeline, beam.Pipeline):
self._pipeline_instrument = inst.PipelineInstrument(
pipeline, pipeline._options)
# The pre-process links user pipeline to runner pipeline through analysis
# but without mutating runner pipeline.
self._pipeline_instrument.preprocess()
if isinstance(pipeline, beam_runner_api_pb2.Pipeline):
self._pipeline_proto = pipeline
elif isinstance(pipeline, beam.Pipeline):
self._pipeline_proto = pipeline.to_runner_api()
else:
raise TypeError(
'pipeline should either be a %s or %s, while %s is given' %
(beam_runner_api_pb2.Pipeline, beam.Pipeline, type(pipeline)))
# A dict from PCollection ID to a list of its consuming Transform IDs
self._consumers = collections.defaultdict(
list) # type: DefaultDict[str, List[str]]
# A dict from PCollection ID to its producing Transform ID
self._producers = {} # type: Dict[str, str]
for transform_id, transform_proto in self._top_level_transforms():
for pcoll_id in transform_proto.inputs.values():
self._consumers[pcoll_id].append(transform_id)
for pcoll_id in transform_proto.outputs.values():
self._producers[pcoll_id] = transform_id
default_vertex_attrs = default_vertex_attrs or {'shape': 'box'}
if 'color' not in default_vertex_attrs:
default_vertex_attrs['color'] = 'blue'
if 'fontcolor' not in default_vertex_attrs:
default_vertex_attrs['fontcolor'] = 'blue'
vertex_dict, edge_dict = self._generate_graph_dicts()
self._construct_graph(
vertex_dict, edge_dict, default_vertex_attrs, default_edge_attrs)
self._renderer = pipeline_graph_renderer.get_renderer(render_option)
def get_dot(self):
# type: () -> str
return self._get_graph().to_string()
def display_graph(self):
"""Displays the graph generated."""
rendered_graph = self._renderer.render_pipeline_graph(self)
if ie.current_env().is_in_notebook:
try:
from IPython.core import display
display.display(display.HTML(rendered_graph))
except ImportError: # Unlikely to happen when is_in_notebook.
logging.warning(
'Failed to import IPython display module when current '
'environment is in a notebook. Cannot display the '
'pipeline graph.')
def _top_level_transforms(self):
# type: () -> Iterator[Tuple[str, beam_runner_api_pb2.PTransform]]
"""Yields all top level PTransforms (subtransforms of the root PTransform).
Yields: (str, PTransform proto) ID, proto pair of top level PTransforms.
"""
transforms = self._pipeline_proto.components.transforms
for root_transform_id in self._pipeline_proto.root_transform_ids:
root_transform_proto = transforms[root_transform_id]
for top_level_transform_id in root_transform_proto.subtransforms:
top_level_transform_proto = transforms[top_level_transform_id]
yield top_level_transform_id, top_level_transform_proto
def _decorate(self, value):
"""Decorates label-ish values used for rendering in dot language.
Escapes special characters in the given str value for dot language. All
PTransform unique names are escaped implicitly in this module when building
dot representation. Otherwise, special characters will break the graph
rendered or cause runtime errors.
"""
# Replace py str literal `\\` which is `\` in dot with py str literal
# `\\\\` which is `\\` in dot so that dot `\\` can be rendered as `\`. Then
# replace `"` with `\\"` so that the dot generated will be `\"` and be
# rendered as `"`.
return '"{}"'.format(value.replace('\\', '\\\\').replace('"', '\\"'))
def _generate_graph_dicts(self):
  """From pipeline_proto and other info, generate the graph.

  Walks every top level PTransform and its output PCollections, producing one
  vertex per transform and one (circle-shaped) vertex per PCollection, plus
  producer->pcoll and pcoll->consumer edges. Also populates
  self._edge_to_vertex_pairs so edges can later be addressed by PCollection ID.

  Returns:
    vertex_dict: (Dict[str, Dict[str, str]]) vertex mapped to attributes.
    edge_dict: (Dict[(str, str), Dict[str, str]]) vertex pair mapped to the
      edge's attribute.
  """
  transforms = self._pipeline_proto.components.transforms
  # A dict from vertex name (i.e. PCollection ID) to its attributes.
  vertex_dict = collections.defaultdict(dict)
  # A dict from vertex name pairs defining the edge (i.e. a pair of PTransform
  # IDs defining the PCollection) to its attributes.
  edge_dict = collections.defaultdict(dict)
  self._edge_to_vertex_pairs = collections.defaultdict(list)
  for _, transform in self._top_level_transforms():
    vertex_dict[self._decorate(transform.unique_name)] = {}
    for pcoll_id in transform.outputs.values():
      pcoll_node = None
      if self._pipeline_instrument:
        pcoll_node = self._pipeline_instrument.cacheable_var_by_pcoll_id(
            pcoll_id)
      # If no PipelineInstrument is available or the PCollection is not
      # watched.
      if not pcoll_node:
        # NOTE(review): hash() is salted per interpreter run
        # (PYTHONHASHSEED), so these fallback node names are not stable
        # across processes — confirm that is acceptable for rendering.
        pcoll_node = 'pcoll%s' % (hash(pcoll_id) % 10000)
        vertex_dict[pcoll_node] = {
            'shape': 'circle',
            'label': '',  # The pcoll node has no name.
        }
      # There is PipelineInstrument and the PCollection is watched with an
      # assigned variable.
      else:
        vertex_dict[pcoll_node] = {'shape': 'circle'}
      if pcoll_id not in self._consumers:
        # Terminal PCollection: a single producer -> pcoll edge.
        self._edge_to_vertex_pairs[pcoll_id].append(
            (self._decorate(transform.unique_name), pcoll_node))
        edge_dict[(self._decorate(transform.unique_name), pcoll_node)] = {}
      else:
        # Consumed PCollection: producer -> pcoll and pcoll -> consumer edges.
        for consumer in self._consumers[pcoll_id]:
          producer_name = self._decorate(transform.unique_name)
          consumer_name = self._decorate(transforms[consumer].unique_name)
          self._edge_to_vertex_pairs[pcoll_id].append(
              (producer_name, pcoll_node))
          edge_dict[(producer_name, pcoll_node)] = {}
          self._edge_to_vertex_pairs[pcoll_id].append(
              (pcoll_node, consumer_name))
          edge_dict[(pcoll_node, consumer_name)] = {}
  return vertex_dict, edge_dict
def _get_graph(self):
  """Returns pydot.Dot object for the pipeline graph.

  The purpose of this method is to avoid accessing the graph while it is
  updated. No one except for this method should be accessing _graph directly.

  Returns:
    (pydot.Dot)
  """
  # The lock serializes access against _construct_graph/_update_graph.
  with self._lock:
    return self._graph
def _construct_graph(
    self, vertex_dict, edge_dict, default_vertex_attrs, default_edge_attrs):
  """Constructs the pydot.Dot object for the pipeline graph.

  Args:
    vertex_dict: (Dict[str, Dict[str, str]]) maps vertex names to attributes
    edge_dict: (Dict[(str, str), Dict[str, str]]) maps vertex name pairs to
      attributes
    default_vertex_attrs: (Dict[str, str]) a dict of attributes
    default_edge_attrs: (Dict[str, str]) a dict of attributes
  """
  with self._lock:
    self._graph = pydot.Dot()
    if default_vertex_attrs:
      self._graph.set_node_defaults(**default_vertex_attrs)
    if default_edge_attrs:
      self._graph.set_edge_defaults(**default_edge_attrs)

    # Keep references to every node/edge so _update_graph can mutate
    # attributes in place later.
    self._vertex_refs = {}  # Maps vertex name to pydot.Node
    self._edge_refs = {}  # Maps vertex name pairs to pydot.Edge

    for name, attrs in vertex_dict.items():
      node = pydot.Node(name, **attrs)
      self._vertex_refs[name] = node
      self._graph.add_node(node)

    for (src_name, dst_name), attrs in edge_dict.items():
      edge = pydot.Edge(
          self._vertex_refs[src_name], self._vertex_refs[dst_name], **attrs)
      self._edge_refs[(src_name, dst_name)] = edge
      self._graph.add_edge(edge)
def _update_graph(self, vertex_dict=None, edge_dict=None):
  """Updates the pydot.Dot object with the given attribute update

  Args:
    vertex_dict: (Dict[str, Dict[str, str]]) maps vertex names to attributes
    edge_dict: This should be
      Either (Dict[str, Dict[str, str]]) which maps edge names to attributes
      Or (Dict[(str, str), Dict[str, str]]) which maps vertex pairs to edge
      attributes
  """
  def apply_attrs(ref, attrs):
    # Set each attribute on the pydot Node/Edge in place.
    for attr_name, attr_val in attrs.items():
      ref.set(attr_name, attr_val)

  with self._lock:
    for vertex, attrs in (vertex_dict or {}).items():
      apply_attrs(self._vertex_refs[vertex], attrs)
    for edge, attrs in (edge_dict or {}).items():
      if isinstance(edge, tuple):
        # Addressed directly by a (src, dst) vertex pair.
        apply_attrs(self._edge_refs[edge], attrs)
      else:
        # Addressed by PCollection ID: update every edge it maps to.
        for pair in self._edge_to_vertex_pairs[edge]:
          apply_attrs(self._edge_refs[pair], attrs)
# pytype: skip-file
from __future__ import absolute_import
import argparse
import logging
import numbers
import sys
from collections import defaultdict
from future.utils import iteritems
from apache_beam.metrics.cells import DistributionData
from apache_beam.metrics.cells import DistributionResult
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricResult
from apache_beam.metrics.metric import MetricResults
from apache_beam.metrics.metricbase import MetricName
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
_LOGGER = logging.getLogger(__name__)
def _get_match(proto, filter_fn):
"""Finds and returns the first element that matches a query.
If no element matches the query, it throws ValueError.
If more than one element matches the query, it returns only the first.
"""
query = [elm for elm in proto if filter_fn(elm)]
if len(query) == 0:
raise ValueError('Could not find element')
elif len(query) > 1:
raise ValueError('Too many matches')
return query[0]
# V1b3 MetricStructuredName keys to accept and copy to the MetricKey labels.
# Context key whose value holds the (internal) step name of a metric.
STEP_LABEL = 'step'
# Context keys copied verbatim into MetricKey.labels by _get_metric_key.
STRUCTURED_NAME_LABELS = set(
    ['execution_step', 'original_name', 'output_user_name'])
class DataflowMetrics(MetricResults):
  """Implementation of MetricResults class for the Dataflow runner."""
  def __init__(self, dataflow_client=None, job_result=None, job_graph=None):
    """Initialize the Dataflow metrics object.

    Args:
      dataflow_client: apiclient.DataflowApplicationClient to interact with the
        dataflow service.
      job_result: DataflowPipelineResult with the state and id information of
        the job.
      job_graph: apiclient.Job instance to be able to translate between internal
        step names (e.g. "s2"), and user step names (e.g. "split").
    """
    super(DataflowMetrics, self).__init__()
    self._dataflow_client = dataflow_client
    self.job_result = job_result
    # NOTE(review): _queried_after_termination is never read in this class —
    # confirm whether it can be removed.
    self._queried_after_termination = False
    # Metrics response cached once the job reaches a terminal state.
    self._cached_metrics = None
    self._job_graph = job_graph
  @staticmethod
  def _is_counter(metric_result):
    # Counters report plain numbers in the `attempted` slot.
    return isinstance(metric_result.attempted, numbers.Number)
  @staticmethod
  def _is_distribution(metric_result):
    return isinstance(metric_result.attempted, DistributionResult)
  def _translate_step_name(self, internal_name):
    """Translate between internal step names (e.g. "s1") and user step names.

    Raises:
      ValueError: if no job graph is available or the name cannot be resolved.
    """
    if not self._job_graph:
      raise ValueError(
          'Could not translate the internal step name %r since job graph is '
          'not available.' % internal_name)
    user_step_name = None
    if (self._job_graph and internal_name in
        self._job_graph.proto_pipeline.components.transforms.keys()):
      # Dataflow Runner v2 with portable job submission uses proto transform map
      # IDs for step names. Also PTransform.unique_name maps to user step names.
      # Hence we lookup user step names based on the proto.
      user_step_name = self._job_graph.proto_pipeline.components.transforms[
          internal_name].unique_name
    else:
      # Legacy path: look the step up in the v1b3 job proto and read its
      # 'user_name' property.
      try:
        step = _get_match(
            self._job_graph.proto.steps, lambda x: x.name == internal_name)
        user_step_name = _get_match(
            step.properties.additionalProperties,
            lambda x: x.key == 'user_name').value.string_value
      except ValueError:
        pass  # Exception is handled below.
    if not user_step_name:
      raise ValueError(
          'Could not translate the internal step name %r.' % internal_name)
    return user_step_name
  def _get_metric_key(self, metric):
    """Populate the MetricKey object for a queried metric result."""
    step = ""
    name = metric.name.name  # Always extract a name
    labels = dict()
    try:  # Try to extract the user step name.
      # If ValueError is thrown within this try-block, it is because of
      # one of the following:
      # 1. Unable to translate the step name. Only happening with improperly
      #   formatted job graph (unlikely), or step name not being the internal
      #   step name (only happens for unstructured-named metrics).
      # 2. Unable to unpack [step] or [namespace]; which should only happen
      #   for unstructured names.
      step = _get_match(
          metric.name.context.additionalProperties,
          lambda x: x.key == STEP_LABEL).value
      step = self._translate_step_name(step)
    except ValueError:
      pass
    namespace = "dataflow/v1b3"  # Try to extract namespace or add a default.
    try:
      namespace = _get_match(
          metric.name.context.additionalProperties,
          lambda x: x.key == 'namespace').value
    except ValueError:
      pass
    # Copy the recognized structured-name context entries into labels.
    for kv in metric.name.context.additionalProperties:
      if kv.key in STRUCTURED_NAME_LABELS:
        labels[kv.key] = kv.value
    # Package everything besides namespace and name the labels as well,
    # including unmodified step names to assist in integration the exact
    # unmodified values which come from dataflow.
    return MetricKey(step, MetricName(namespace, name), labels=labels)
  def _populate_metrics(self, response, result, user_metrics=False):
    """Move metrics from response to results as MetricResults.

    Args:
      response: a JobMetrics response from the Dataflow service.
      result: list to which constructed MetricResult objects are appended.
      user_metrics: if True, keep only user-defined metrics; otherwise keep
        only system ('dataflow/v1b3') metrics.
    """
    if user_metrics:
      metrics = [
          metric for metric in response.metrics if metric.name.origin == 'user'
      ]
    else:
      metrics = [
          metric for metric in response.metrics
          if metric.name.origin == 'dataflow/v1b3'
      ]
    # Get the tentative/committed versions of every metric together.
    metrics_by_name = defaultdict(lambda: {})
    for metric in metrics:
      if (metric.name.name.endswith('_MIN') or
          metric.name.name.endswith('_MAX') or
          metric.name.name.endswith('_MEAN') or
          metric.name.name.endswith('_COUNT')):
        # The Dataflow Service presents distribution metrics in two ways:
        # One way is as a single distribution object with all its fields, and
        # another way is as four different scalar metrics labeled as _MIN,
        # _MAX, _COUNT_, _MEAN.
        # TODO(pabloem) remove these when distributions are not being broken up
        #  in the service.
        # The second way is only useful for the UI, and should be ignored.
        continue
      is_tentative = [
          prop for prop in metric.name.context.additionalProperties
          if prop.key == 'tentative' and prop.value == 'true'
      ]
      tentative_or_committed = 'tentative' if is_tentative else 'committed'
      metric_key = self._get_metric_key(metric)
      if metric_key is None:
        continue
      metrics_by_name[metric_key][tentative_or_committed] = metric
    # Now we create the MetricResult elements.
    # NOTE(review): this assumes every metric has both a tentative and a
    # committed update; a missing one raises KeyError — confirm the service
    # always reports both.
    for metric_key, metric in iteritems(metrics_by_name):
      attempted = self._get_metric_value(metric['tentative'])
      committed = self._get_metric_value(metric['committed'])
      result.append(
          MetricResult(metric_key, attempted=attempted, committed=committed))
  def _get_metric_value(self, metric):
    """Get a metric result object from a MetricUpdate from Dataflow API.

    Returns an int for scalar (counter) updates, a DistributionResult for
    distribution updates, and None otherwise.
    """
    if metric is None:
      return None
    if metric.scalar is not None:
      return metric.scalar.integer_value
    elif metric.distribution is not None:
      dist_count = _get_match(
          metric.distribution.object_value.properties,
          lambda x: x.key == 'count').value.integer_value
      dist_min = _get_match(
          metric.distribution.object_value.properties,
          lambda x: x.key == 'min').value.integer_value
      dist_max = _get_match(
          metric.distribution.object_value.properties,
          lambda x: x.key == 'max').value.integer_value
      dist_sum = _get_match(
          metric.distribution.object_value.properties,
          lambda x: x.key == 'sum').value.integer_value
      if not dist_sum:
        # distribution metric is not meant to use on large values, but in case
        # it is, the value can overflow and become double_value, the correctness
        # of the value may not be guaranteed.
        _LOGGER.info(
            "Distribution metric sum value seems to have "
            "overflowed integer_value range, the correctness of sum or mean "
            "value may not be guaranteed: %s" % metric.distribution)
        dist_sum = int(
            _get_match(
                metric.distribution.object_value.properties,
                lambda x: x.key == 'sum').value.double_value)
      return DistributionResult(
          DistributionData(dist_sum, dist_count, dist_min, dist_max))
    else:
      return None
  def _get_metrics_from_dataflow(self, job_id=None):
    """Return cached metrics or query the dataflow service."""
    if not job_id:
      try:
        job_id = self.job_result.job_id()
      except AttributeError:
        job_id = None
    if not job_id:
      raise ValueError('Can not query metrics. Job id is unknown.')
    if self._cached_metrics:
      return self._cached_metrics
    job_metrics = self._dataflow_client.get_job_metrics(job_id)
    # Once the job is in a terminal state the metrics can no longer change,
    # so the response can be cached and reused by later queries.
    if self.job_result and self.job_result.is_in_terminal_state():
      self._cached_metrics = job_metrics
    return job_metrics
  def all_metrics(self, job_id=None):
    """Return all user and system metrics from the dataflow service."""
    metric_results = []
    response = self._get_metrics_from_dataflow(job_id=job_id)
    self._populate_metrics(response, metric_results, user_metrics=True)
    self._populate_metrics(response, metric_results, user_metrics=False)
    return metric_results
  def query(self, filter=None):
    """Returns user metrics matching ``filter``, grouped by metric type."""
    metric_results = []
    response = self._get_metrics_from_dataflow()
    self._populate_metrics(response, metric_results, user_metrics=True)
    return {
        self.COUNTERS: [
            elm for elm in metric_results if self.matches(filter, elm.key) and
            DataflowMetrics._is_counter(elm)
        ],
        self.DISTRIBUTIONS: [
            elm for elm in metric_results if self.matches(filter, elm.key) and
            DataflowMetrics._is_distribution(elm)
        ],
        self.GAUGES: []
    }  # TODO(pabloem): Add Gauge support for dataflow.
def main(argv):
  """Print the metric results for the dataflow --job_id and --project.

  Instead of running an entire pipeline which takes several minutes, use this
  main method to display MetricResults for a specific --job_id and --project
  which takes only a few seconds.
  """
  # TODO(BEAM-6833): The MetricResults do not show translated step names as the
  # job_graph is not provided to DataflowMetrics.
  # Import here to avoid adding the dependency for local running scenarios.
  try:
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam.runners.dataflow.internal import apiclient
  except ImportError:
    raise ImportError(
        'Google Cloud Dataflow runner not available, '
        'please install apache_beam[gcp]')
  # Drop the script name when argv comes straight from sys.argv.
  if argv[0] == __file__:
    argv = argv[1:]
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-j', '--job_id', type=str, help='The job id to query metrics for.')
  parser.add_argument(
      '-p',
      '--project',
      type=str,
      help='The project name to query metrics for.')
  flags = parser.parse_args(argv)

  # Get a Dataflow API client and set its project and job_id in the options.
  options = PipelineOptions()
  gcloud_options = options.view_as(GoogleCloudOptions)
  gcloud_options.project = flags.project
  dataflow_client = apiclient.DataflowApplicationClient(options)
  df_metrics = DataflowMetrics(dataflow_client)
  all_metrics = df_metrics.all_metrics(job_id=flags.job_id)
  _LOGGER.info(
      'Printing all MetricResults for %s in %s', flags.job_id, flags.project)
  for metric_result in all_metrics:
    _LOGGER.info(metric_result)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
main(sys.argv) | /rflow-apache-beam-2.28.0.tar.gz/rflow-apache-beam-2.28.0/apache_beam/runners/dataflow/dataflow_metrics.py | 0.676727 | 0.287906 | dataflow_metrics.py | pypi |
# pytype: skip-file
from __future__ import absolute_import
import argparse
import logging
import time
import apache_beam as beam
from apache_beam.metrics import Metrics
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
# Seconds slept in start_bundle/finish_bundle of StreamingUserMetricsDoFn.
SLEEP_TIME_SECS = 1
# Module-level logger for this pipeline.
_LOGGER = logging.getLogger(__name__)
class StreamingUserMetricsDoFn(beam.DoFn):
  """Generates user metrics and outputs same element."""

  def __init__(self):
    # Counter incremented twice per element processed.
    self._double_counter = Metrics.counter(
        self.__class__, 'double_msg_counter_name')
    # Distribution over the stripped length of each element.
    self._len_distribution = Metrics.distribution(
        self.__class__, 'msg_len_dist_metric_name')

  def start_bundle(self):
    time.sleep(SLEEP_TIME_SECS)

  def process(self, element):
    """Returns the processed element and increments the metrics."""
    stripped = element.strip()
    self._double_counter.inc()
    self._double_counter.inc()
    self._len_distribution.update(len(stripped))
    _LOGGER.debug("Done processing returning element array: '%s'", element)
    return [element]

  def finish_bundle(self):
    time.sleep(SLEEP_TIME_SECS)
def run(argv=None):
  """Given an initialized Pipeline applies transforms and runs it.

  Args:
    argv: command line arguments; parses --output_topic and exactly one of
      --input_topic / --input_subscription, forwarding the rest to
      PipelineOptions.

  Returns:
    The PipelineResult after wait_until_finish().
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--output_topic',
      required=True,
      help=(
          'Output PubSub topic of the form '
          '"projects/<PROJECT>/topics/<TOPIC>".'))
  group = parser.add_mutually_exclusive_group(required=True)
  group.add_argument(
      '--input_topic',
      help=(
          'Input PubSub topic of the form '
          '"projects/<PROJECT>/topics/<TOPIC>".'))
  group.add_argument(
      '--input_subscription',
      help=(
          'Input PubSub subscription of the form '
          '"projects/<PROJECT>/subscriptions/<SUBSCRIPTION>".'))
  known_args, pipeline_args = parser.parse_known_args(argv)

  pipeline_options = PipelineOptions(pipeline_args)
  pipeline_options.view_as(SetupOptions).save_main_session = True
  pipeline_options.view_as(StandardOptions).streaming = True
  pipeline = beam.Pipeline(options=pipeline_options)

  # BUG FIX: previously only --input_subscription was passed to
  # ReadFromPubSub, so a --input_topic argument was accepted by the parser
  # but silently ignored. Exactly one of the two is non-None here thanks to
  # the mutually exclusive, required argument group.
  _ = (
      pipeline
      | beam.io.ReadFromPubSub(
          topic=known_args.input_topic,
          subscription=known_args.input_subscription)
      | 'generate_metrics' >> (beam.ParDo(StreamingUserMetricsDoFn()))
      | 'dump_to_pub' >> beam.io.WriteToPubSub(known_args.output_topic))

  result = pipeline.run()
  result.wait_until_finish()
  return result
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run() | /rflow-apache-beam-2.28.0.tar.gz/rflow-apache-beam-2.28.0/apache_beam/runners/dataflow/dataflow_exercise_streaming_metrics_pipeline.py | 0.747432 | 0.151247 | dataflow_exercise_streaming_metrics_pipeline.py | pypi |
# pytype: skip-file
from __future__ import absolute_import
import time
from hamcrest.library.number.ordering_comparison import greater_than
import apache_beam as beam
from apache_beam.metrics import Metrics
from apache_beam.testing.metric_result_matchers import DistributionMatcher
from apache_beam.testing.metric_result_matchers import MetricResultMatcher
# Seconds slept in each DoFn lifecycle method (see UserMetricsDoFn).
SLEEP_TIME_SECS = 1
# Pipeline input; metric_matchers() derives expected sums/min/max from this.
INPUT = [0, 0, 0, 100]
# Namespace under which UserMetricsDoFn's metrics are reported.
METRIC_NAMESPACE = (
    'apache_beam.runners.dataflow.'
    'dataflow_exercise_metrics_pipeline.UserMetricsDoFn')
def metric_matchers():
  """MetricResult matchers common to all tests."""
  # TODO(ajamato): Matcher for the 'metrics' step's ElementCount.
  # TODO(ajamato): Matcher for the 'metrics' step's MeanByteCount.
  # TODO(ajamato): Matcher for the start and finish exec times.
  # TODO(ajamato): Matcher for a gauge metric once implemented in dataflow.
  input_sum = sum(INPUT)

  def distribution_matcher():
    # Fresh matcher instance for each attempted/committed slot.
    return DistributionMatcher(
        sum_value=input_sum,
        count_value=len(INPUT),
        min_value=min(INPUT),
        max_value=max(INPUT))

  def count_matchers(user_name):
    # ElementCount + MeanByteCount matcher pair for one output PCollection.
    return [
        MetricResultMatcher(
            name='ElementCount',
            labels={
                'output_user_name': user_name,
                'original_name': '%s-ElementCount' % user_name
            },
            attempted=greater_than(0),
            committed=greater_than(0)),
        MetricResultMatcher(
            name='MeanByteCount',
            labels={
                'output_user_name': user_name,
                'original_name': '%s-MeanByteCount' % user_name
            },
            attempted=greater_than(0),
            committed=greater_than(0)),
    ]

  # User Counter Metrics.
  matchers = [
      MetricResultMatcher(
          name='total_values',
          namespace=METRIC_NAMESPACE,
          step='metrics',
          attempted=input_sum,
          committed=input_sum),
  ]
  # Execution-time counters reported by the 'metrics' step.
  for phase in ('StartBundle', 'ProcessElement', 'FinishBundle'):
    matchers.append(
        MetricResultMatcher(
            name='ExecutionTime_' + phase,
            step='metrics',
            attempted=greater_than(0),
            committed=greater_than(0)))
  matchers.append(
      MetricResultMatcher(
          name='distribution_values',
          namespace=METRIC_NAMESPACE,
          step='metrics',
          attempted=distribution_matcher(),
          committed=distribution_matcher(),
      ))
  # Element count and MeanByteCount for a User ParDo.
  matchers.extend(count_matchers('metrics-out0'))
  pcoll_names = [
      'GroupByKey/Reify-out0',
      'GroupByKey/Read-out0',
      'map_to_common_key-out0',
      'GroupByKey/GroupByWindow-out0',
      'GroupByKey/Read-out0',
      'GroupByKey/Reify-out0'
  ]
  for name in pcoll_names:
    matchers.extend(count_matchers(name))
  return matchers
class UserMetricsDoFn(beam.DoFn):
  """Parse each line of input text into words."""

  def __init__(self):
    # Running sum of all processed values.
    self.total_metric = Metrics.counter(self.__class__, 'total_values')
    # Distribution over every processed value.
    self.dist_metric = Metrics.distribution(
        self.__class__, 'distribution_values')
    # TODO(ajamato): Add a verifier for gauge once it is supported by the SDKs
    # and runners.
    self.latest_metric = Metrics.gauge(self.__class__, 'latest_value')

  def start_bundle(self):
    time.sleep(SLEEP_TIME_SECS)

  def process(self, element):
    """Returns the processed element and increments the metrics."""
    value = int(element)
    self.total_metric.inc(value)
    self.dist_metric.update(value)
    self.latest_metric.set(value)
    time.sleep(SLEEP_TIME_SECS)
    return [value]

  def finish_bundle(self):
    time.sleep(SLEEP_TIME_SECS)
def apply_and_run(pipeline):
  """Given an initialized Pipeline applies transforms and runs it.

  Args:
    pipeline: a beam.Pipeline the metric-exercising transforms are applied to.

  Returns:
    The PipelineResult after wait_until_finish().
  """
  _ = (
      pipeline
      | beam.Create(INPUT)
      | 'metrics' >> (beam.ParDo(UserMetricsDoFn()))
      | 'map_to_common_key' >> beam.Map(lambda x: ('key', x))
      | beam.GroupByKey()
      # Emits five constants plus tagged outputs ('once' once, 'twice' twice)
      # per grouped element, exercising multi-output element counts.
      | 'm_out' >> beam.FlatMap(
          lambda x: [
              1,
              2,
              3,
              4,
              5,
              beam.pvalue.TaggedOutput('once', x),
              beam.pvalue.TaggedOutput('twice', x),
              beam.pvalue.TaggedOutput('twice', x)
          ]))

  result = pipeline.run()
  result.wait_until_finish()
  return result
# All constants are for internal use only; no backwards-compatibility
# guarantees.
# pytype: skip-file
from __future__ import absolute_import
# Standard file names used for staging files.
from builtins import object
# Referenced by Dataflow legacy worker.
from apache_beam.runners.internal.names import PICKLED_MAIN_SESSION_FILE # pylint: disable=unused-import
# String constants related to sources framework
SOURCE_FORMAT = 'custom_source'
SOURCE_TYPE = 'CustomSourcesType'
SERIALIZED_SOURCE_KEY = 'serialized_source'

# In a released SDK, container tags are selected based on the SDK version.
# Unreleased versions use container versions based on values of
# BEAM_CONTAINER_VERSION and BEAM_FNAPI_CONTAINER_VERSION (see below).

# Update this version to the next version whenever there is a change that will
# require changes to legacy Dataflow worker execution environment.
BEAM_CONTAINER_VERSION = 'beam-master-20201214'
# Update this version to the next version whenever there is a change that
# requires changes to SDK harness container or SDK harness launcher.
BEAM_FNAPI_CONTAINER_VERSION = 'beam-master-20201214'

# GCR repository that hosts the Dataflow worker container images.
DATAFLOW_CONTAINER_IMAGE_REPOSITORY = 'gcr.io/cloud-dataflow/v1beta3'
class TransformNames(object):
  """For internal use only; no backwards-compatibility guarantees.

  Transform strings as they are expected in the CloudWorkflow protos.
  The values must match the Dataflow service's expected step kinds exactly.
  """
  COLLECTION_TO_SINGLETON = 'CollectionToSingleton'
  COMBINE = 'CombineValues'
  CREATE_PCOLLECTION = 'CreateCollection'
  DO = 'ParallelDo'
  FLATTEN = 'Flatten'
  GROUP = 'GroupByKey'
  READ = 'ParallelRead'
  WRITE = 'ParallelWrite'
class PropertyNames(object):
  """For internal use only; no backwards-compatibility guarantees.

  Property strings as they are expected in the CloudWorkflow protos.
  The values must match the Dataflow service's expected property keys exactly.
  """
  # If uses_keyed_state, whether the state can be sharded.
  ALLOWS_SHARDABLE_STATE = 'allows_shardable_state'
  # BigQuery source/sink properties.
  BIGQUERY_CREATE_DISPOSITION = 'create_disposition'
  BIGQUERY_DATASET = 'dataset'
  BIGQUERY_EXPORT_FORMAT = 'bigquery_export_format'
  BIGQUERY_FLATTEN_RESULTS = 'bigquery_flatten_results'
  BIGQUERY_KMS_KEY = 'bigquery_kms_key'
  BIGQUERY_PROJECT = 'project'
  BIGQUERY_QUERY = 'bigquery_query'
  BIGQUERY_SCHEMA = 'schema'
  BIGQUERY_TABLE = 'table'
  BIGQUERY_USE_LEGACY_SQL = 'bigquery_use_legacy_sql'
  BIGQUERY_WRITE_DISPOSITION = 'write_disposition'
  DISPLAY_DATA = 'display_data'
  ELEMENT = 'element'
  ELEMENTS = 'elements'
  ENCODING = 'encoding'
  # File-based source/sink properties.
  FILE_PATTERN = 'filepattern'
  FILE_NAME_PREFIX = 'filename_prefix'
  FILE_NAME_SUFFIX = 'filename_suffix'
  FORMAT = 'format'
  INPUTS = 'inputs'
  IMPULSE_ELEMENT = 'impulse_element'
  NON_PARALLEL_INPUTS = 'non_parallel_inputs'
  NUM_SHARDS = 'num_shards'
  OUT = 'out'
  OUTPUT = 'output'
  OUTPUT_INFO = 'output_info'
  OUTPUT_NAME = 'output_name'
  PARALLEL_INPUT = 'parallel_input'
  PIPELINE_PROTO_TRANSFORM_ID = 'pipeline_proto_transform_id'
  # If the input element is a key/value pair, then the output element(s) all
  # have the same key as the input.
  PRESERVES_KEYS = 'preserves_keys'
  # Pub/Sub source/sink properties.
  PUBSUB_ID_LABEL = 'pubsub_id_label'
  PUBSUB_SERIALIZED_ATTRIBUTES_FN = 'pubsub_serialized_attributes_fn'
  PUBSUB_SUBSCRIPTION = 'pubsub_subscription'
  PUBSUB_TIMESTAMP_ATTRIBUTE = 'pubsub_timestamp_label'
  PUBSUB_TOPIC = 'pubsub_topic'
  RESTRICTION_ENCODING = 'restriction_encoding'
  SERIALIZED_FN = 'serialized_fn'
  SHARD_NAME_TEMPLATE = 'shard_template'
  SOURCE_STEP_INPUT = 'custom_source_step_input'
  SERIALIZED_TEST_STREAM = 'serialized_test_stream'
  STEP_NAME = 'step_name'
  USE_INDEXED_FORMAT = 'use_indexed_format'
  USER_FN = 'user_fn'
  USER_NAME = 'user_name'
  USES_KEYED_STATE = 'uses_keyed_state'
  VALIDATE_SINK = 'validate_sink'
  VALIDATE_SOURCE = 'validate_source'
  VALUE = 'value'
  WINDOWING_STRATEGY = 'windowing_strategy'
# pytype: skip-file
from __future__ import absolute_import
import logging
from builtins import object
from typing import TYPE_CHECKING
from typing import Optional
from apache_beam import pvalue
from apache_beam.io import iobase
from apache_beam.transforms import ptransform
from apache_beam.transforms.display import HasDisplayData
if TYPE_CHECKING:
from apache_beam import coders
_LOGGER = logging.getLogger(__name__)
def _dict_printable_fields(dict_object, skip_fields):
"""Returns a list of strings for the interesting fields of a dict."""
return [
'%s=%r' % (name, value) for name,
value in dict_object.items()
# want to output value 0 but not None nor []
if (value or value == 0) and name not in skip_fields
]
# Fields omitted from NativeSource/NativeSink reprs (via
# _dict_printable_fields) because they are noisy or uninteresting for display.
_minor_fields = [
    'coder',
    'key_coder',
    'value_coder',
    'config_bytes',
    'elements',
    'append_trailing_newlines',
    'strip_trailing_newlines',
    'compression_type'
]
class NativeSource(iobase.SourceBase):
  """A source implemented by Dataflow service.

  This class is to be only inherited by sources natively implemented by Cloud
  Dataflow service, hence should not be sub-classed by users.

  This class is deprecated and should not be used to define new sources.
  """
  # Coder for the elements this source produces, if any.
  coder = None  # type: Optional[coders.Coder]

  def reader(self):
    """Returns a NativeSourceReader instance associated with this source."""
    raise NotImplementedError

  def is_bounded(self):
    # Native sources are always reported as bounded.
    return True

  def __repr__(self):
    # Hide noisy fields (see _minor_fields) from the repr.
    return '<{name} {vals}>'.format(
        name=self.__class__.__name__,
        vals=', '.join(_dict_printable_fields(self.__dict__, _minor_fields)))
class NativeSourceReader(object):
  """A reader for a source implemented by Dataflow service."""
  def __enter__(self):
    """Opens everything necessary for a reader to function properly."""
    raise NotImplementedError

  def __exit__(self, exception_type, exception_value, traceback):
    """Cleans up after a reader executed."""
    raise NotImplementedError

  def __iter__(self):
    """Returns an iterator over all the records of the source."""
    raise NotImplementedError

  @property
  def returns_windowed_values(self):
    """Returns whether this reader returns windowed values."""
    return False

  def get_progress(self):
    """Returns a representation of how far the reader has read.

    Returns:
      A SourceReaderProgress object that gives the current progress of the
      reader.
    """
    # NOTE: this base implementation implicitly returns None; readers that
    # support progress reporting override it.

  def request_dynamic_split(self, dynamic_split_request):
    """Attempts to split the input in two parts.

    The two parts are named the "primary" part and the "residual" part. The
    current 'NativeSourceReader' keeps processing the primary part, while the
    residual part will be processed elsewhere (e.g. perhaps on a different
    worker).

    The primary and residual parts, if concatenated, must represent the
    same input as the current input of this 'NativeSourceReader' before this
    call.

    The boundary between the primary part and the residual part is
    specified in a framework-specific way using 'DynamicSplitRequest' e.g.,
    if the framework supports the notion of positions, it might be a
    position at which the input is asked to split itself (which is not
    necessarily the same position at which it *will* split itself); it
    might be an approximate fraction of input, or something else.

    This function returns a 'DynamicSplitResult', which encodes, in a
    framework-specific way, the information sufficient to construct a
    description of the resulting primary and residual inputs. For example, it
    might, again, be a position demarcating these parts, or it might be a pair
    of fully-specified input descriptions, or something else.

    After a successful call to 'request_dynamic_split()', subsequent calls
    should be interpreted relative to the new primary.

    Args:
      dynamic_split_request: A 'DynamicSplitRequest' describing the split
        request.

    Returns:
      'None' if the 'DynamicSplitRequest' cannot be honored (in that
      case the input represented by this 'NativeSourceReader' stays the same),
      or a 'DynamicSplitResult' describing how the input was split into a
      primary and residual part.
    """
    # Base implementation does not support splitting: log and return None.
    _LOGGER.debug(
        'SourceReader %r does not support dynamic splitting. Ignoring dynamic '
        'split request: %r',
        self,
        dynamic_split_request)
class ReaderProgress(object):
  """A representation of how far a NativeSourceReader has read."""

  def __init__(
      self,
      position=None,
      percent_complete=None,
      remaining_time=None,
      consumed_split_points=None,
      remaining_split_points=None):
    # Validate and normalize percent_complete before storing anything.
    if percent_complete is not None:
      percent_complete = float(percent_complete)
      if percent_complete < 0 or percent_complete > 1:
        raise ValueError(
            'The percent_complete argument was %f. Must be in range [0, 1].' %
            percent_complete)
    self._position = position
    self._percent_complete = percent_complete
    self._remaining_time = remaining_time
    self._consumed_split_points = consumed_split_points
    self._remaining_split_points = remaining_split_points

  @property
  def position(self):
    """Returns progress, represented as a ReaderPosition object."""
    return self._position

  @property
  def percent_complete(self):
    """Returns progress, represented as a percentage of total work.

    Progress range from 0.0 (beginning, nothing complete) to 1.0 (end of the
    work range, entire WorkItem complete).

    Returns:
      Progress represented as a percentage of total work.
    """
    return self._percent_complete

  @property
  def remaining_time(self):
    """Returns progress, represented as an estimated time remaining."""
    return self._remaining_time

  @property
  def consumed_split_points(self):
    """Number of split points already consumed, if known."""
    return self._consumed_split_points

  @property
  def remaining_split_points(self):
    """Number of split points still remaining, if known."""
    return self._remaining_split_points
class ReaderPosition(object):
  """A representation of position in an iteration of a 'NativeSourceReader'."""

  def __init__(
      self,
      end=None,
      key=None,
      byte_offset=None,
      record_index=None,
      shuffle_position=None,
      concat_position=None):
    """Initializes ReaderPosition.

    A ReaderPosition may get instantiated for one of these position types. Only
    one of these should be specified.

    Args:
      end: position is past all other positions. For example, this may be used
        to represent the end position of an unbounded range.
      key: position is a string key.
      byte_offset: position is a byte offset.
      record_index: position is a record index
      shuffle_position: position is a base64 encoded shuffle position.
      concat_position: position is a 'ConcatPosition'.
    """
    # Only concat_position carries a type constraint.
    if concat_position is not None:
      assert isinstance(concat_position, ConcatPosition)
    self.end = end
    self.key = key
    self.byte_offset = byte_offset
    self.record_index = record_index
    self.shuffle_position = shuffle_position
    self.concat_position = concat_position
class ConcatPosition(object):
  """A position that encapsulate an inner position and an index.
  This is used to represent the position of a source that encapsulate several
  other sources.
  """
  def __init__(self, index, position):
    """Initializes ConcatPosition.

    Args:
      index: index of the source currently being read.
      position: inner position within the source currently being read.
    """
    if position is not None:
      # The inner position must itself be a ReaderPosition.
      assert isinstance(position, ReaderPosition)
    self.index = index
    self.position = position
class DynamicSplitRequest(object):
  """Specifies how 'NativeSourceReader.request_dynamic_split' should split.
  """
  def __init__(self, progress):
    # A split request is always phrased in terms of reader progress.
    assert isinstance(progress, ReaderProgress)
    self.progress = progress
class DynamicSplitResult(object):
  """Base marker type for the result of a dynamic split request."""
  pass
class DynamicSplitResultWithPosition(DynamicSplitResult):
  """A dynamic split result expressed as a stop position for the reader."""
  def __init__(self, stop_position):
    assert isinstance(stop_position, ReaderPosition)
    self.stop_position = stop_position
class NativeSink(HasDisplayData):
  """A sink implemented by Dataflow service.
  This class is to be only inherited by sinks natively implemented by Cloud
  Dataflow service, hence should not be sub-classed by users.
  """
  def writer(self):
    """Returns a NativeSinkWriter for this sink."""
    raise NotImplementedError
  def __repr__(self):
    # _dict_printable_fields/_minor_fields are module-level helpers defined
    # elsewhere in this file; they keep the repr limited to readable fields.
    return '<{name} {vals}>'.format(
        name=self.__class__.__name__,
        vals=_dict_printable_fields(self.__dict__, _minor_fields))
class NativeSinkWriter(object):
  """Writes records to a sink that is implemented by the Dataflow service."""
  def __enter__(self):
    """Opens whatever the writer needs in order to start writing."""
    raise NotImplementedError
  def __exit__(self, exception_type, exception_value, traceback):
    """Releases the writer's resources once writing has finished."""
    raise NotImplementedError
  @property
  def takes_windowed_values(self):
    """Whether records passed to Write() should be windowed values."""
    return False
  def Write(self, o):  # pylint: disable=invalid-name
    """Writes a single record to the sink backing this writer."""
    raise NotImplementedError
class _NativeWrite(ptransform.PTransform):
  """A PTransform for writing to a Dataflow native sink.
  These are sinks that are implemented natively by the Dataflow service
  and hence should not be updated by users. These sinks are processed
  using a Dataflow native write transform.
  Applying this transform results in a ``pvalue.PDone``.
  """
  def __init__(self, sink):
    """Initializes a Write transform.
    Args:
      sink: Sink to use for the write
    """
    super(_NativeWrite, self).__init__()
    self.sink = sink
  def expand(self, pcoll):
    # Writing consumes the PCollection and produces no further output, so the
    # result is a PDone marker for the pipeline.
    self._check_pcollection(pcoll)
    return pvalue.PDone(pcoll.pipeline)
# pytype: skip-file
from __future__ import absolute_import
import threading
from builtins import object
from typing import TYPE_CHECKING
from typing import Dict
from typing import Iterable
from typing import List
from typing import Set
from typing import Tuple
from apache_beam import pipeline
from apache_beam import pvalue
from apache_beam.runners.direct.util import TimerFiring
from apache_beam.utils.timestamp import MAX_TIMESTAMP
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.timestamp import TIME_GRANULARITY
if TYPE_CHECKING:
from apache_beam.pipeline import AppliedPTransform
from apache_beam.runners.direct.bundle_factory import _Bundle
from apache_beam.utils.timestamp import Timestamp
class WatermarkManager(object):
  """For internal use only; no backwards-compatibility guarantees.
  Tracks and updates watermarks for all AppliedPTransforms."""
  WATERMARK_POS_INF = MAX_TIMESTAMP
  WATERMARK_NEG_INF = MIN_TIMESTAMP
  def __init__(
      self, clock, root_transforms, value_to_consumers, transform_keyed_states):
    """Initializes the manager.

    Args:
      clock: source of processing time, used for real-time timers.
      root_transforms: the root AppliedPTransforms of the pipeline.
      value_to_consumers: map of PCollection to its consuming transforms.
      transform_keyed_states: map of AppliedPTransform to its keyed states.
    """
    self._clock = clock
    self._root_transforms = root_transforms
    self._value_to_consumers = value_to_consumers
    self._transform_keyed_states = transform_keyed_states
    # AppliedPTransform -> TransformWatermarks
    self._transform_to_watermarks = {
    } # type: Dict[AppliedPTransform, _TransformWatermarks]
    for root_transform in root_transforms:
      self._transform_to_watermarks[root_transform] = _TransformWatermarks(
          self._clock, transform_keyed_states[root_transform], root_transform)
    for consumers in value_to_consumers.values():
      for consumer in consumers:
        self._transform_to_watermarks[consumer] = _TransformWatermarks(
            self._clock, transform_keyed_states[consumer], consumer)
    # Second pass: wiring input watermarks requires that every consumer's
    # _TransformWatermarks already exists (created in the passes above).
    for consumers in value_to_consumers.values():
      for consumer in consumers:
        self._update_input_transform_watermarks(consumer)
  def _update_input_transform_watermarks(self, applied_ptransform):
    # type: (AppliedPTransform) -> None
    """Links a transform's watermarks to those of its input producers."""
    assert isinstance(applied_ptransform, pipeline.AppliedPTransform)
    input_transform_watermarks = []
    for input_pvalue in applied_ptransform.inputs:
      # Only a PBegin input may lack a producer.
      assert input_pvalue.producer or isinstance(input_pvalue, pvalue.PBegin)
      if input_pvalue.producer:
        input_transform_watermarks.append(
            self.get_watermarks(input_pvalue.producer))
    self._transform_to_watermarks[
        applied_ptransform].update_input_transform_watermarks(
            input_transform_watermarks)
  def get_watermarks(self, applied_ptransform):
    # type: (AppliedPTransform) -> _TransformWatermarks
    """Gets the input and output watermarks for an AppliedPTransform.
    If the applied_ptransform has not processed any elements, return a
    watermark with minimum value.
    Args:
      applied_ptransform: AppliedPTransform to get the watermarks for.
    Returns:
      A snapshot (TransformWatermarks) of the input watermark and output
      watermark for the provided transform.
    """
    # TODO(altay): Composite transforms should have a composite watermark. Until
    # then they are represented by their last transform.
    while applied_ptransform.parts:
      applied_ptransform = applied_ptransform.parts[-1]
    return self._transform_to_watermarks[applied_ptransform]
  def update_watermarks(self,
                        completed_committed_bundle, # type: _Bundle
                        applied_ptransform, # type: AppliedPTransform
                        completed_timers,
                        outputs,
                        unprocessed_bundles,
                        keyed_earliest_holds,
                        side_inputs_container
                        ):
    """Updates watermarks after a bundle of a transform has been processed.

    Updates the pending-bundle bookkeeping, applies per-key watermark holds,
    and then refreshes watermarks for this transform and everything
    downstream of it.

    Returns:
      Tasks unblocked as a result of the watermark refresh.
    """
    assert isinstance(applied_ptransform, pipeline.AppliedPTransform)
    self._update_pending(
        completed_committed_bundle,
        applied_ptransform,
        completed_timers,
        outputs,
        unprocessed_bundles)
    tw = self.get_watermarks(applied_ptransform)
    tw.hold(keyed_earliest_holds)
    return self._refresh_watermarks(applied_ptransform, side_inputs_container)
  def _update_pending(self,
                      input_committed_bundle,
                      applied_ptransform, # type: AppliedPTransform
                      completed_timers,
                      output_committed_bundles, # type: Iterable[_Bundle]
                      unprocessed_bundles # type: Iterable[_Bundle]
                      ):
    """Updated list of pending bundles for the given AppliedPTransform."""
    # Update pending elements. Filter out empty bundles. They do not impact
    # watermarks and should not trigger downstream execution.
    for output in output_committed_bundles:
      if output.has_elements():
        if output.pcollection in self._value_to_consumers:
          consumers = self._value_to_consumers[output.pcollection]
          for consumer in consumers:
            consumer_tw = self._transform_to_watermarks[consumer]
            consumer_tw.add_pending(output)
    completed_tw = self._transform_to_watermarks[applied_ptransform]
    completed_tw.update_timers(completed_timers)
    # Bundles the transform did not fully process go back on its own pending
    # list so they continue to hold back its watermark until re-executed.
    for unprocessed_bundle in unprocessed_bundles:
      completed_tw.add_pending(unprocessed_bundle)
    # Only root transforms may legitimately have no input bundle.
    assert input_committed_bundle or applied_ptransform in self._root_transforms
    if input_committed_bundle and input_committed_bundle.has_elements():
      completed_tw.remove_pending(input_committed_bundle)
  def _refresh_watermarks(self, applied_ptransform, side_inputs_container):
    """Recursively refreshes watermarks downstream of applied_ptransform.

    Returns:
      Tasks unblocked by the watermark advancing (including side-input
      waiters notified via side_inputs_container).
    """
    assert isinstance(applied_ptransform, pipeline.AppliedPTransform)
    unblocked_tasks = []
    tw = self.get_watermarks(applied_ptransform)
    if tw.refresh():
      # The watermark advanced: propagate the refresh to every downstream
      # consumer of this transform's outputs.
      for pval in applied_ptransform.outputs.values():
        if isinstance(pval, pvalue.DoOutputsTuple):
          pvals = (v for v in pval)
        else:
          pvals = (pval, )
        for v in pvals:
          if v in self._value_to_consumers: # If there are downstream consumers
            consumers = self._value_to_consumers[v]
            for consumer in consumers:
              unblocked_tasks.extend(
                  self._refresh_watermarks(consumer, side_inputs_container))
      # Notify the side_inputs_container.
      unblocked_tasks.extend(
          side_inputs_container.
          update_watermarks_for_transform_and_unblock_tasks(
              applied_ptransform, tw))
    return unblocked_tasks
  def extract_all_timers(self):
    # type: () -> Tuple[List[Tuple[AppliedPTransform, List[TimerFiring]]], bool]
    """Extracts fired timers for all transforms
    and reports if there are any timers set."""
    all_timers = [] # type: List[Tuple[AppliedPTransform, List[TimerFiring]]]
    has_realtime_timer = False
    for applied_ptransform, tw in self._transform_to_watermarks.items():
      fired_timers, had_realtime_timer = tw.extract_transform_timers()
      if fired_timers:
        # We should sort the timer firings, so they are fired in order.
        fired_timers.sort(key=lambda ft: ft.timestamp)
        all_timers.append((applied_ptransform, fired_timers))
      # A realtime timer only counts while the transform can still make
      # progress (output watermark has not reached +inf).
      if (had_realtime_timer and
          tw.output_watermark < WatermarkManager.WATERMARK_POS_INF):
        has_realtime_timer = True
    return all_timers, has_realtime_timer
class _TransformWatermarks(object):
  """Tracks input and output watermarks for an AppliedPTransform."""
  def __init__(self, clock, keyed_states, transform):
    self._clock = clock
    self._keyed_states = keyed_states
    self._input_transform_watermarks = [] # type: List[_TransformWatermarks]
    self._input_watermark = WatermarkManager.WATERMARK_NEG_INF
    self._output_watermark = WatermarkManager.WATERMARK_NEG_INF
    # key -> earliest watermark hold requested for that key.
    self._keyed_earliest_holds = {}
    # Scheduled bundles targeted for this transform.
    self._pending = set() # type: Set[_Bundle]
    self._fired_timers = set()
    # Guards all of the mutable state above; this object is accessed from
    # multiple executor threads.
    self._lock = threading.Lock()
    self._label = str(transform)
  def update_input_transform_watermarks(self, input_transform_watermarks):
    # type: (List[_TransformWatermarks]) -> None
    """Sets the upstream watermark trackers feeding this transform."""
    with self._lock:
      self._input_transform_watermarks = input_transform_watermarks
  def update_timers(self, completed_timers):
    """Removes completed timer firings from the in-flight set."""
    with self._lock:
      for timer_firing in completed_timers:
        self._fired_timers.remove(timer_firing)
  @property
  def input_watermark(self):
    # type: () -> Timestamp
    with self._lock:
      return self._input_watermark
  @property
  def output_watermark(self):
    # type: () -> Timestamp
    with self._lock:
      return self._output_watermark
  def hold(self, keyed_earliest_holds):
    """Applies per-key watermark holds; None or +inf releases the key."""
    with self._lock:
      for key, hold_value in keyed_earliest_holds.items():
        self._keyed_earliest_holds[key] = hold_value
        if (hold_value is None or
            hold_value == WatermarkManager.WATERMARK_POS_INF):
          del self._keyed_earliest_holds[key]
  def add_pending(self, pending):
    # type: (_Bundle) -> None
    with self._lock:
      self._pending.add(pending)
  def remove_pending(self, completed):
    # type: (_Bundle) -> None
    with self._lock:
      # Ignore repeated removes. This will happen if a transform has a repeated
      # input.
      if completed in self._pending:
        self._pending.remove(completed)
  def refresh(self):
    # type: () -> bool
    """Refresh the watermark for a given transform.
    This method looks at the watermark coming from all input PTransforms, and
    the timestamp of the minimum element, as well as any watermark holds.
    Returns:
      True if the watermark has advanced, and False if it has not.
    """
    with self._lock:
      min_pending_timestamp = WatermarkManager.WATERMARK_POS_INF
      has_pending_elements = False
      for input_bundle in self._pending:
        # TODO(ccy): we can have the Bundle class keep track of the minimum
        # timestamp so we don't have to do an iteration here.
        for wv in input_bundle.get_elements_iterable():
          has_pending_elements = True
          if wv.timestamp < min_pending_timestamp:
            min_pending_timestamp = wv.timestamp
      # If there is a pending element with a certain timestamp, we can at most
      # advance our watermark to the maximum timestamp less than that
      # timestamp.
      pending_holder = WatermarkManager.WATERMARK_POS_INF
      if has_pending_elements:
        pending_holder = min_pending_timestamp - TIME_GRANULARITY
      input_watermarks = [
          tw.output_watermark for tw in self._input_transform_watermarks
      ]
      # The +inf sentinel makes min() well-defined for root transforms that
      # have no inputs.
      input_watermarks.append(WatermarkManager.WATERMARK_POS_INF)
      producer_watermark = min(input_watermarks)
      # The input watermark is monotonic: take the max with its old value.
      self._input_watermark = max(
          self._input_watermark, min(pending_holder, producer_watermark))
      earliest_hold = WatermarkManager.WATERMARK_POS_INF
      for hold in self._keyed_earliest_holds.values():
        if hold < earliest_hold:
          earliest_hold = hold
      new_output_watermark = min(self._input_watermark, earliest_hold)
      advanced = new_output_watermark > self._output_watermark
      self._output_watermark = new_output_watermark
      return advanced
  @property
  def synchronized_processing_output_time(self):
    """Current processing time according to the clock."""
    return self._clock.time()
  def extract_transform_timers(self):
    # type: () -> Tuple[List[TimerFiring], bool]
    """Extracts fired timers and reports of any timers set per transform."""
    with self._lock:
      fired_timers = []
      has_realtime_timer = False
      for encoded_key, state in self._keyed_states.items():
        timers, had_realtime_timer = state.get_timers(
            watermark=self._input_watermark,
            processing_time=self._clock.time())
        if had_realtime_timer:
          has_realtime_timer = True
        for expired in timers:
          window, (name, time_domain, timestamp, dynamic_timer_tag) = expired
          fired_timers.append(
              TimerFiring(
                  encoded_key,
                  window,
                  name,
                  time_domain,
                  timestamp,
                  dynamic_timer_tag=dynamic_timer_tag))
      # Track the firings until update_timers() confirms their completion.
      self._fired_timers.update(fired_timers)
      return fired_timers, has_realtime_timer
# pytype: skip-file
from __future__ import absolute_import
import threading
from builtins import object
from collections import defaultdict
from apache_beam.metrics.cells import CounterAggregator
from apache_beam.metrics.cells import DistributionAggregator
from apache_beam.metrics.cells import GaugeAggregator
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricResult
from apache_beam.metrics.metric import MetricResults
class DirectMetrics(MetricResults):
  """In-memory metric results for the DirectRunner.

  Keeps one DirectMetric per metric key, separately for counters,
  distributions and gauges.
  """
  def __init__(self):
    # Each metric key lazily maps to a DirectMetric wrapping the matching
    # aggregator type.
    self._counters = defaultdict(lambda: DirectMetric(CounterAggregator()))
    self._distributions = defaultdict(
        lambda: DirectMetric(DistributionAggregator()))
    self._gauges = defaultdict(lambda: DirectMetric(GaugeAggregator()))
  def _apply_operation(self, bundle, updates, op):
    """Applies op(metric, bundle, update) to every update in `updates`."""
    for k, v in updates.counters.items():
      op(self._counters[k], bundle, v)
    for k, v in updates.distributions.items():
      op(self._distributions[k], bundle, v)
    for k, v in updates.gauges.items():
      op(self._gauges[k], bundle, v)
  def commit_logical(self, bundle, updates):
    """Commits `updates` of a successfully completed bundle (committed)."""
    op = lambda obj, bundle, update: obj.commit_logical(bundle, update)
    self._apply_operation(bundle, updates, op)
  def commit_physical(self, bundle, updates):
    """Commits `updates` of a finished bundle attempt (attempted)."""
    op = lambda obj, bundle, update: obj.commit_physical(bundle, update)
    self._apply_operation(bundle, updates, op)
  def update_physical(self, bundle, updates):
    """Records in-flight `updates` of a still-running bundle attempt."""
    op = lambda obj, bundle, update: obj.update_physical(bundle, update)
    self._apply_operation(bundle, updates, op)
  def _results_for(self, metrics, result_filter):
    """Builds a MetricResult for every metric in `metrics` matching the filter."""
    return [
        MetricResult(
            MetricKey(k.step, k.metric),
            v.extract_committed(),
            v.extract_latest_attempted()) for k,
        v in metrics.items() if self.matches(result_filter, k)
    ]
  def query(self, filter=None):
    """Returns committed and attempted results for all matching metrics.

    Args:
      filter: optional MetricsFilter; None matches every metric.

    Returns:
      Dict with COUNTERS, DISTRIBUTIONS and GAUGES keys, each mapping to a
      list of MetricResult objects.
    """
    return {
        self.COUNTERS: self._results_for(self._counters, filter),
        self.DISTRIBUTIONS: self._results_for(self._distributions, filter),
        self.GAUGES: self._results_for(self._gauges, filter)
    }
class DirectMetric(object):
  """ Keeps a consistent state for a single metric.
  It keeps track of the metric's physical and logical updates.
  It's thread safe.
  """
  def __init__(self, aggregator):
    # Aggregator provides identity_element/combine/result for this metric type.
    self.aggregator = aggregator
    self._attempted_lock = threading.Lock()
    # Folded result of all finished (physical) bundle attempts.
    self.finished_attempted = aggregator.identity_element()
    # bundle -> most recent in-flight update for that bundle attempt.
    self.inflight_attempted = {}
    self._committed_lock = threading.Lock()
    # Folded result of all logically committed bundles.
    self.finished_committed = aggregator.identity_element()
  def commit_logical(self, bundle, update):
    """Folds `update` into the committed result."""
    with self._committed_lock:
      self.finished_committed = self.aggregator.combine(
          update, self.finished_committed)
  def commit_physical(self, bundle, update):
    """Folds `update` into the attempted result and clears the in-flight entry."""
    with self._attempted_lock:
      self.inflight_attempted[bundle] = update
      self.finished_attempted = self.aggregator.combine(
          update, self.finished_attempted)
      del self.inflight_attempted[bundle]
  def update_physical(self, bundle, update):
    """Records the latest in-flight update for a still-running bundle."""
    # Take the lock: commit_physical and extract_latest_attempted touch the
    # same dict from other threads, and the class promises thread safety.
    with self._attempted_lock:
      self.inflight_attempted[bundle] = update
  def extract_committed(self):
    """Returns the committed (logical) metric result."""
    return self.aggregator.result(self.finished_committed)
  def extract_latest_attempted(self):
    """Returns the attempted result including in-flight bundle updates."""
    # Snapshot under the lock so the dict cannot change size while the
    # in-flight updates are being folded in.
    with self._attempted_lock:
      res = self.finished_attempted
      inflight = list(self.inflight_attempted.values())
    for u in inflight:
      res = self.aggregator.combine(res, u)
    return self.aggregator.result(res)
"""Support for user state in the BundleBasedDirectRunner."""
# pytype: skip-file
from __future__ import absolute_import
import copy
import itertools
from apache_beam.transforms import userstate
from apache_beam.transforms.trigger import _ListStateTag
from apache_beam.transforms.trigger import _ReadModifyWriteStateTag
from apache_beam.transforms.trigger import _SetStateTag
class DirectRuntimeState(userstate.RuntimeState):
  """Base for the direct runner's per-(key, window) user state cells."""
  def __init__(self, state_spec, state_tag, current_value_accessor):
    self._state_spec = state_spec
    self._state_tag = state_tag
    self._current_value_accessor = current_value_accessor
  @staticmethod
  def for_spec(state_spec, state_tag, current_value_accessor):
    """Creates the concrete runtime-state wrapper matching `state_spec`."""
    # Checked in the same order as the original isinstance chain; the target
    # classes are referenced lazily since they are defined later in the file.
    dispatch = (
        (userstate.ReadModifyWriteStateSpec, ReadModifyWriteRuntimeState),
        (userstate.BagStateSpec, BagRuntimeState),
        (userstate.CombiningValueStateSpec, CombiningValueRuntimeState),
        (userstate.SetStateSpec, SetRuntimeState),
    )
    for spec_type, runtime_type in dispatch:
      if isinstance(state_spec, spec_type):
        return runtime_type(state_spec, state_tag, current_value_accessor)
    raise ValueError('Invalid state spec: %s' % state_spec)
  def _encode(self, value):
    # Encode using the coder declared on the state spec.
    return self._state_spec.coder.encode(value)
  def _decode(self, value):
    return self._state_spec.coder.decode(value)
# Sentinel designating an unread value. Identity comparison
# ("is UNREAD_VALUE") distinguishes "not yet fetched from the underlying
# state" from legitimate falsy values such as None or [].
UNREAD_VALUE = object()
class ReadModifyWriteRuntimeState(DirectRuntimeState,
                                  userstate.ReadModifyWriteRuntimeState):
  """Single-value (read-modify-write) state cell for the direct runner."""
  def __init__(self, state_spec, state_tag, current_value_accessor):
    super(ReadModifyWriteRuntimeState,
          self).__init__(state_spec, state_tag, current_value_accessor)
    # Fetched lazily from the accessor on first read.
    self._value = UNREAD_VALUE
    self._cleared = False
    self._modified = False
  def read(self):
    """Returns the decoded value, or None if the cell is empty or cleared."""
    if self._cleared:
      return None
    if self._value is UNREAD_VALUE:
      self._value = self._current_value_accessor()
    return self._decode(self._value[0]) if self._value else None
  def write(self, value):
    self._cleared = False
    self._modified = True
    self._value = [self._encode(value)]
  def clear(self):
    self._cleared = True
    self._modified = False
    self._value = []
  def is_cleared(self):
    return self._cleared
  def is_modified(self):
    return self._modified
class BagRuntimeState(DirectRuntimeState, userstate.BagRuntimeState):
  """Bag (append-only collection) state cell for the direct runner."""
  def __init__(self, state_spec, state_tag, current_value_accessor):
    super(BagRuntimeState,
          self).__init__(state_spec, state_tag, current_value_accessor)
    self._cached_value = UNREAD_VALUE
    self._cleared = False
    self._new_values = []
  def read(self):
    """Returns a generator over every decoded element currently in the bag."""
    if self._cached_value is UNREAD_VALUE:
      self._cached_value = self._current_value_accessor()
    if self._cleared:
      # Everything persisted before the clear is gone; only new adds remain.
      encoded = self._new_values
    else:
      encoded = itertools.chain(self._cached_value, self._new_values)
    return (self._decode(e) for e in encoded)
  def add(self, value):
    self._new_values.append(self._encode(value))
  def clear(self):
    self._cleared = True
    self._cached_value = []
    self._new_values = []
class SetRuntimeState(DirectRuntimeState, userstate.SetRuntimeState):
  """Set state cell for the direct runner."""
  def __init__(self, state_spec, state_tag, current_value_accessor):
    super(SetRuntimeState,
          self).__init__(state_spec, state_tag, current_value_accessor)
    self._current_accumulator = UNREAD_VALUE
    self._modified = False
  def _read_initial_value(self):
    # Populate the accumulator from persisted state exactly once.
    if self._current_accumulator is UNREAD_VALUE:
      decoded = (self._decode(a) for a in self._current_value_accessor())
      self._current_accumulator = set(decoded)
  def read(self):
    self._read_initial_value()
    return self._current_accumulator
  def add(self, value):
    self._read_initial_value()
    self._modified = True
    self._current_accumulator.add(value)
  def clear(self):
    self._current_accumulator = set()
    self._modified = True
  def is_modified(self):
    return self._modified
class CombiningValueRuntimeState(DirectRuntimeState,
                                 userstate.CombiningValueRuntimeState):
  """Combining value state interface object passed to user code."""
  def __init__(self, state_spec, state_tag, current_value_accessor):
    super(CombiningValueRuntimeState,
          self).__init__(state_spec, state_tag, current_value_accessor)
    self._current_accumulator = UNREAD_VALUE
    self._modified = False
    # Deep-copy so this cell's combine_fn lifecycle (setup/teardown) is
    # independent of the shared instance on the spec.
    self._combine_fn = copy.deepcopy(state_spec.combine_fn)
    self._combine_fn.setup()
    self._finalized = False
  def _read_initial_value(self):
    if self._current_accumulator is not UNREAD_VALUE:
      return
    existing = [self._decode(a) for a in self._current_value_accessor()]
    if existing:
      self._current_accumulator = self._combine_fn.merge_accumulators(existing)
    else:
      self._current_accumulator = self._combine_fn.create_accumulator()
  def read(self):
    self._read_initial_value()
    return self._combine_fn.extract_output(self._current_accumulator)
  def add(self, value):
    self._read_initial_value()
    self._modified = True
    self._current_accumulator = self._combine_fn.add_input(
        self._current_accumulator, value)
  def clear(self):
    self._modified = True
    self._current_accumulator = self._combine_fn.create_accumulator()
  def finalize(self):
    # Tear down the combine_fn at most once.
    if not self._finalized:
      self._combine_fn.teardown()
      self._finalized = True
class DirectUserStateContext(userstate.UserStateContext):
  """userstate.UserStateContext for the BundleBasedDirectRunner.
  The DirectUserStateContext buffers up updates that are to be committed
  by the TransformEvaluator after running a DoFn.
  """
  def __init__(self, step_context, dofn, key_coder):
    self.step_context = step_context
    self.dofn = dofn
    self.key_coder = key_coder
    self.all_state_specs, self.all_timer_specs = userstate.get_dofn_specs(dofn)
    # Pre-compute the trigger-layer state tag for each user state spec.
    self.state_tags = {}
    for state_spec in self.all_state_specs:
      state_key = 'user/%s' % state_spec.name
      if isinstance(state_spec, userstate.ReadModifyWriteStateSpec):
        state_tag = _ReadModifyWriteStateTag(state_key)
      elif isinstance(state_spec, userstate.BagStateSpec):
        state_tag = _ListStateTag(state_key)
      elif isinstance(state_spec, userstate.CombiningValueStateSpec):
        # Combining state persists its accumulators as a list of encoded
        # values, hence a list tag.
        state_tag = _ListStateTag(state_key)
      elif isinstance(state_spec, userstate.SetStateSpec):
        state_tag = _SetStateTag(state_key)
      else:
        raise ValueError('Invalid state spec: %s' % state_spec)
      self.state_tags[state_spec] = state_tag
    # (encoded_key, window, spec) -> buffered runtime state / timer objects.
    self.cached_states = {}
    self.cached_timers = {}
  def get_timer(
      self, timer_spec: userstate.TimerSpec, key, window, timestamp,
      pane) -> userstate.RuntimeTimer:
    """Returns the cached runtime timer for (key, window, timer_spec)."""
    assert timer_spec in self.all_timer_specs
    encoded_key = self.key_coder.encode(key)
    cache_key = (encoded_key, window, timer_spec)
    if cache_key not in self.cached_timers:
      self.cached_timers[cache_key] = userstate.RuntimeTimer()
    return self.cached_timers[cache_key]
  def get_state(self, state_spec, key, window):
    """Returns the cached runtime state cell for (key, window, state_spec)."""
    assert state_spec in self.all_state_specs
    encoded_key = self.key_coder.encode(key)
    cache_key = (encoded_key, window, state_spec)
    if cache_key not in self.cached_states:
      state_tag = self.state_tags[state_spec]
      value_accessor = (
          lambda: self._get_underlying_state(state_spec, key, window))
      self.cached_states[cache_key] = DirectRuntimeState.for_spec(
          state_spec, state_tag, value_accessor)
    return self.cached_states[cache_key]
  def _get_underlying_state(self, state_spec, key, window):
    """Reads the persisted (encoded) values backing a state cell."""
    state_tag = self.state_tags[state_spec]
    encoded_key = self.key_coder.encode(key)
    return (
        self.step_context.get_keyed_state(encoded_key).get_state(
            window, state_tag))
  def commit(self):
    """Flushes buffered state modifications and timers to the step context."""
    # Commit state modifications.
    for cache_key, runtime_state in self.cached_states.items():
      encoded_key, window, state_spec = cache_key
      state = self.step_context.get_keyed_state(encoded_key)
      state_tag = self.state_tags[state_spec]
      if isinstance(state_spec, userstate.BagStateSpec):
        if runtime_state._cleared:
          state.clear_state(window, state_tag)
        for new_value in runtime_state._new_values:
          state.add_state(window, state_tag, new_value)
      elif isinstance(state_spec, userstate.CombiningValueStateSpec):
        if runtime_state._modified:
          # Replace whatever accumulators were persisted with the single
          # merged accumulator held by the runtime state.
          state.clear_state(window, state_tag)
          state.add_state(
              window,
              state_tag,
              state_spec.coder.encode(runtime_state._current_accumulator))
      elif isinstance(state_spec, userstate.SetStateSpec):
        if runtime_state.is_modified():
          state.clear_state(window, state_tag)
          for new_value in runtime_state._current_accumulator:
            state.add_state(
                window, state_tag, state_spec.coder.encode(new_value))
      elif isinstance(state_spec, userstate.ReadModifyWriteStateSpec):
        if runtime_state.is_cleared():
          state.clear_state(window, state_tag)
        if runtime_state.is_modified():
          state.clear_state(window, state_tag)
          state.add_state(window, state_tag, runtime_state._value)
      else:
        raise ValueError('Invalid state spec: %s' % state_spec)
    # Commit new timers.
    for cache_key, runtime_timer in self.cached_timers.items():
      encoded_key, window, timer_spec = cache_key
      state = self.step_context.get_keyed_state(encoded_key)
      timer_name = 'user/%s' % timer_spec.name
      for dynamic_timer_tag, timer in runtime_timer._timer_recordings.items():
        if timer.cleared:
          state.clear_timer(
              window,
              timer_name,
              timer_spec.time_domain,
              dynamic_timer_tag=dynamic_timer_tag)
        if timer.timestamp:
          # TODO(ccy): add corresponding watermark holds after the DirectRunner
          # allows for keyed watermark holds.
          state.set_timer(
              window,
              timer_name,
              timer_spec.time_domain,
              timer.timestamp,
              dynamic_timer_tag=dynamic_timer_tag)
  def reset(self):
    """Finalizes cached state cells and clears all buffered caches."""
    for state in self.cached_states.values():
      state.finalize()
    self.cached_states = {}
    self.cached_timers = {}
# pytype: skip-file
from __future__ import absolute_import
import collections
import threading
from builtins import object
from typing import TYPE_CHECKING
from typing import Any
from typing import DefaultDict
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from apache_beam.runners.direct.direct_metrics import DirectMetrics
from apache_beam.runners.direct.executor import TransformExecutor
from apache_beam.runners.direct.watermark_manager import WatermarkManager
from apache_beam.transforms import sideinputs
from apache_beam.transforms.trigger import InMemoryUnmergedState
from apache_beam.utils import counters
if TYPE_CHECKING:
from apache_beam import pvalue
from apache_beam.pipeline import AppliedPTransform
from apache_beam.runners.direct.bundle_factory import BundleFactory, _Bundle
from apache_beam.runners.direct.util import TimerFiring
from apache_beam.runners.direct.util import TransformResult
from apache_beam.runners.direct.watermark_manager import _TransformWatermarks
from apache_beam.utils.timestamp import Timestamp
class _ExecutionContext(object):
"""Contains the context for the execution of a single PTransform.
It holds the watermarks for that transform, as well as keyed states.
"""
def __init__(
self,
watermarks, # type: _TransformWatermarks
keyed_states):
self.watermarks = watermarks
self.keyed_states = keyed_states
self._step_context = None
def get_step_context(self):
if not self._step_context:
self._step_context = DirectStepContext(self.keyed_states)
return self._step_context
def reset(self):
self._step_context = None
class _SideInputView(object):
def __init__(self, view):
self._view = view
self.blocked_tasks = collections.deque()
self.elements = []
self.value = None
self.watermark = None
def __repr__(self):
elements_string = (
', '.join(str(elm) for elm in self.elements) if self.elements else '[]')
return '_SideInputView(elements=%s)' % elements_string
class _SideInputsContainer(object):
  """An in-process container for side inputs.
  It provides methods for blocking until a side-input is available and writing
  to a side input.
  """
  def __init__(self, side_inputs):
    # type: (Iterable[pvalue.AsSideInput]) -> None
    # Guards _views and each view's blocked_tasks; accessed from multiple
    # executor threads.
    self._lock = threading.Lock()
    self._views = {} # type: Dict[pvalue.AsSideInput, _SideInputView]
    self._transform_to_side_inputs = collections.defaultdict(
        list
    ) # type: DefaultDict[Optional[AppliedPTransform], List[pvalue.AsSideInput]]
    # this appears unused:
    self._side_input_to_blocked_tasks = collections.defaultdict(list) # type: ignore
    for side in side_inputs:
      self._views[side] = _SideInputView(side)
      # Index side inputs by the transform producing their PCollection, so a
      # watermark update on that transform can unblock waiting consumers.
      self._transform_to_side_inputs[side.pvalue.producer].append(side)
  def __repr__(self):
    views_string = (
        ', '.join(str(elm)
                  for elm in self._views.values()) if self._views else '[]')
    return '_SideInputsContainer(_views=%s)' % views_string
  def get_value_or_block_until_ready(self,
                                     side_input,
                                     task, # type: TransformExecutor
                                     block_until # type: Timestamp
                                     ):
    # type: (...) -> Any
    """Returns the value of a view whose task is unblocked or blocks its task.
    It gets the value of a view whose watermark has been updated and
    surpasses a given value.
    Args:
      side_input: ``_UnpickledSideInput`` value.
      task: ``TransformExecutor`` task waiting on a side input.
      block_until: Timestamp after which the task gets unblocked.
    Returns:
      The ``SideInputMap`` value of a view when the tasks it blocks are
      unblocked. Otherwise, None.
    """
    with self._lock:
      view = self._views[side_input]
      if view.watermark and view.watermark.output_watermark >= block_until:
        view.value = self._pvalue_to_value(side_input, view.elements)
        return view.value
      else:
        # Not ready yet: register the waiter and mark the task blocked; the
        # implicit None return tells the caller to retry once unblocked.
        view.blocked_tasks.append((task, block_until))
        task.blocked = True
  def add_values(self, side_input, values):
    """Appends produced elements to the side input's buffered view."""
    with self._lock:
      view = self._views[side_input]
      view.elements.extend(values)
  def update_watermarks_for_transform_and_unblock_tasks(
      self, ptransform, watermark):
    # type: (...) -> List[Tuple[TransformExecutor, Timestamp]]
    """Updates _SideInputsContainer after a watermark update and unblocks tasks.
    It traverses the list of side inputs per PTransform and calls
    _update_watermarks_for_side_input_and_unblock_tasks to unblock tasks.
    Args:
      ptransform: Value of a PTransform.
      watermark: Value of the watermark after an update for a PTransform.
    Returns:
      Tasks that get unblocked as a result of the watermark advancing.
    """
    unblocked_tasks = []
    for side in self._transform_to_side_inputs[ptransform]:
      unblocked_tasks.extend(
          self._update_watermarks_for_side_input_and_unblock_tasks(
              side, watermark))
    return unblocked_tasks
  def _update_watermarks_for_side_input_and_unblock_tasks(
      self, side_input, watermark):
    # type: (...) -> List[Tuple[TransformExecutor, Timestamp]]
    """Helps update _SideInputsContainer after a watermark update.
    For each view of the side input, it updates the value of the watermark
    recorded when the watermark moved and unblocks tasks accordingly.
    Args:
      side_input: ``_UnpickledSideInput`` value.
      watermark: Value of the watermark after an update for a PTransform.
    Returns:
      Tasks that get unblocked as a result of the watermark advancing.
    """
    with self._lock:
      view = self._views[side_input]
      view.watermark = watermark
      unblocked_tasks = []
      tasks_just_unblocked = []
      for task, block_until in view.blocked_tasks:
        if watermark.output_watermark >= block_until:
          # The side input is now complete enough for this waiter: compute
          # the value and release the task.
          view.value = self._pvalue_to_value(side_input, view.elements)
          unblocked_tasks.append(task)
          tasks_just_unblocked.append((task, block_until))
          task.blocked = False
      # Remove outside the iteration to avoid mutating the deque mid-loop.
      for task in tasks_just_unblocked:
        view.blocked_tasks.remove(task)
      return unblocked_tasks
  def _pvalue_to_value(self, side_input, values):
    """Given a side input, returns the associated value in its requested form.
    Args:
      side_input: _UnpickledSideInput object.
      values: Iterable values associated with the side input.
    Returns:
      The side input in its requested form.
    Raises:
      ValueError: If values cannot be converted into the requested form.
    """
    return sideinputs.SideInputMap(
        type(side_input), side_input._view_options(), values)
class EvaluationContext(object):
"""Evaluation context with the global state information of the pipeline.
The evaluation context for a specific pipeline being executed by the
DirectRunner. Contains state shared within the execution across all
transforms.
EvaluationContext contains shared state for an execution of the
DirectRunner that can be used while evaluating a PTransform. This
consists of views into underlying state and watermark implementations, access
to read and write side inputs, and constructing counter sets and
execution contexts. This includes executing callbacks asynchronously when
state changes to the appropriate point (e.g. when a side input is
requested and known to be empty).
EvaluationContext also handles results by committing finalizing
bundles based on the current global state and updating the global state
appropriately. This includes updating the per-(step,key) state, updating
global watermarks, and executing any callbacks that can be executed.
"""
def __init__(self,
pipeline_options,
bundle_factory, # type: BundleFactory
root_transforms,
value_to_consumers,
step_names,
views, # type: Iterable[pvalue.AsSideInput]
clock
):
self.pipeline_options = pipeline_options
self._bundle_factory = bundle_factory
self._root_transforms = root_transforms
self._value_to_consumers = value_to_consumers
self._step_names = step_names
self.views = views
self._pcollection_to_views = collections.defaultdict(
list) # type: DefaultDict[pvalue.PValue, List[pvalue.AsSideInput]]
for view in views:
self._pcollection_to_views[view.pvalue].append(view)
self._transform_keyed_states = self._initialize_keyed_states(
root_transforms, value_to_consumers)
self._side_inputs_container = _SideInputsContainer(views)
self._watermark_manager = WatermarkManager(
clock,
root_transforms,
value_to_consumers,
self._transform_keyed_states)
self._pending_unblocked_tasks = [
] # type: List[Tuple[TransformExecutor, Timestamp]]
self._counter_factory = counters.CounterFactory()
self._metrics = DirectMetrics()
self._lock = threading.Lock()
self.shutdown_requested = False
def _initialize_keyed_states(self, root_transforms, value_to_consumers):
"""Initialize user state dicts.
These dicts track user state per-key, per-transform and per-window.
"""
transform_keyed_states = {}
for transform in root_transforms:
transform_keyed_states[transform] = {}
for consumers in value_to_consumers.values():
for consumer in consumers:
transform_keyed_states[consumer] = {}
return transform_keyed_states
def metrics(self):
# TODO. Should this be made a @property?
return self._metrics
def is_root_transform(self, applied_ptransform):
# type: (AppliedPTransform) -> bool
return applied_ptransform in self._root_transforms
def handle_result(self,
completed_bundle, # type: _Bundle
completed_timers,
result # type: TransformResult
):
"""Handle the provided result produced after evaluating the input bundle.
Handle the provided TransformResult, produced after evaluating
the provided committed bundle (potentially None, if the result of a root
PTransform).
The result is the output of running the transform contained in the
TransformResult on the contents of the provided bundle.
Args:
completed_bundle: the bundle that was processed to produce the result.
completed_timers: the timers that were delivered to produce the
completed_bundle.
result: the ``TransformResult`` of evaluating the input bundle
Returns:
the committed bundles contained within the handled result.
"""
with self._lock:
committed_bundles, unprocessed_bundles = self._commit_bundles(
result.uncommitted_output_bundles,
result.unprocessed_bundles)
self._metrics.commit_logical(
completed_bundle, result.logical_metric_updates)
# If the result is for a view, update side inputs container.
self._update_side_inputs_container(committed_bundles, result)
# Tasks generated from unblocked side inputs as the watermark progresses.
tasks = self._watermark_manager.update_watermarks(
completed_bundle,
result.transform,
completed_timers,
committed_bundles,
unprocessed_bundles,
result.keyed_watermark_holds,
self._side_inputs_container)
self._pending_unblocked_tasks.extend(tasks)
if result.counters:
for counter in result.counters:
merged_counter = self._counter_factory.get_counter(
counter.name, counter.combine_fn)
merged_counter.accumulator.merge([counter.accumulator])
# Commit partial GBK states
existing_keyed_state = self._transform_keyed_states[result.transform]
for k, v in result.partial_keyed_state.items():
existing_keyed_state[k] = v
return committed_bundles
def _update_side_inputs_container(self,
committed_bundles, # type: Iterable[_Bundle]
result # type: TransformResult
):
"""Update the side inputs container if we are outputting into a side input.
Look at the result, and if it's outputing into a PCollection that we have
registered as a PCollectionView, we add the result to the PCollectionView.
"""
if (result.uncommitted_output_bundles and
result.uncommitted_output_bundles[0].pcollection in
self._pcollection_to_views):
for view in self._pcollection_to_views[
result.uncommitted_output_bundles[0].pcollection]:
for committed_bundle in committed_bundles:
# side_input must be materialized.
self._side_inputs_container.add_values(
view, committed_bundle.get_elements_iterable(make_copy=True))
def get_aggregator_values(self, aggregator_or_name):
return self._counter_factory.get_aggregator_values(aggregator_or_name)
def schedule_pending_unblocked_tasks(self, executor_service):
if self._pending_unblocked_tasks:
with self._lock:
for task in self._pending_unblocked_tasks:
executor_service.submit(task)
self._pending_unblocked_tasks = []
def _commit_bundles(self,
uncommitted_bundles, # type: Iterable[_Bundle]
unprocessed_bundles # type: Iterable[_Bundle]
):
# type: (...) -> Tuple[Tuple[_Bundle, ...], Tuple[_Bundle, ...]]
"""Commits bundles and returns a immutable set of committed bundles."""
for in_progress_bundle in uncommitted_bundles:
producing_applied_ptransform = in_progress_bundle.pcollection.producer
watermarks = self._watermark_manager.get_watermarks(
producing_applied_ptransform)
in_progress_bundle.commit(watermarks.synchronized_processing_output_time)
for unprocessed_bundle in unprocessed_bundles:
unprocessed_bundle.commit(None)
return tuple(uncommitted_bundles), tuple(unprocessed_bundles)
def get_execution_context(self, applied_ptransform):
# type: (AppliedPTransform) -> _ExecutionContext
return _ExecutionContext(
self._watermark_manager.get_watermarks(applied_ptransform),
self._transform_keyed_states[applied_ptransform])
def create_bundle(self, output_pcollection):
# type: (Union[pvalue.PBegin, pvalue.PCollection]) -> _Bundle
"""Create an uncommitted bundle for the specified PCollection."""
return self._bundle_factory.create_bundle(output_pcollection)
def create_empty_committed_bundle(self, output_pcollection):
# type: (pvalue.PCollection) -> _Bundle
"""Create empty bundle useful for triggering evaluation."""
return self._bundle_factory.create_empty_committed_bundle(
output_pcollection)
def extract_all_timers(self):
# type: () -> Tuple[List[Tuple[AppliedPTransform, List[TimerFiring]]], bool]
return self._watermark_manager.extract_all_timers()
def is_done(self, transform=None):
# type: (Optional[AppliedPTransform]) -> bool
"""Checks completion of a step or the pipeline.
Args:
transform: AppliedPTransform to check for completion.
Returns:
True if the step will not produce additional output. If transform is None
returns true if all steps are done.
"""
if transform:
return self._is_transform_done(transform)
for applied_ptransform in self._step_names:
if not self._is_transform_done(applied_ptransform):
return False
return True
def _is_transform_done(self, transform):
# type: (AppliedPTransform) -> bool
tw = self._watermark_manager.get_watermarks(transform)
return tw.output_watermark == WatermarkManager.WATERMARK_POS_INF
def get_value_or_block_until_ready(self, side_input, task, block_until):
assert isinstance(task, TransformExecutor)
return self._side_inputs_container.get_value_or_block_until_ready(
side_input, task, block_until)
def shutdown(self):
self.shutdown_requested = True
class DirectUnmergedState(InMemoryUnmergedState):
"""UnmergedState implementation for the DirectRunner."""
def __init__(self):
super(DirectUnmergedState, self).__init__(defensive_copy=False)
class DirectStepContext(object):
"""Context for the currently-executing step."""
def __init__(self, existing_keyed_state):
self.existing_keyed_state = existing_keyed_state
# In order to avoid partial writes of a bundle, every time
# existing_keyed_state is accessed, a copy of the state is made
# to be transferred to the bundle state once the bundle is committed.
self.partial_keyed_state = {}
def get_keyed_state(self, key):
if not self.existing_keyed_state.get(key):
self.existing_keyed_state[key] = DirectUnmergedState()
if not self.partial_keyed_state.get(key):
self.partial_keyed_state[key] = self.existing_keyed_state[key].copy()
return self.partial_keyed_state[key] | /rflow-apache-beam-2.28.0.tar.gz/rflow-apache-beam-2.28.0/apache_beam/runners/direct/evaluation_context.py | 0.883015 | 0.429728 | evaluation_context.py | pypi |
# pytype: skip-file
from __future__ import absolute_import
import collections
import itertools
import logging
import sys
import threading
import traceback
from builtins import object
from builtins import range
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import FrozenSet
from typing import Optional
from typing import Set
from weakref import WeakValueDictionary
from future.moves import queue
from future.utils import raise_
from apache_beam.metrics.execution import MetricsContainer
from apache_beam.runners.worker import statesampler
from apache_beam.transforms import sideinputs
from apache_beam.utils import counters
if TYPE_CHECKING:
from apache_beam import pvalue
from apache_beam.runners.direct.bundle_factory import _Bundle
from apache_beam.runners.direct.evaluation_context import EvaluationContext
from apache_beam.runners.direct.transform_evaluator import TransformEvaluatorRegistry
_LOGGER = logging.getLogger(__name__)
class _ExecutorService(object):
  """Thread pool for executing tasks in parallel."""
  class CallableTask(object):
    """Interface for units of work submitted to the executor service."""
    def call(self, state_sampler):
      """Execute the task; subclasses override. No-op by default."""
      pass
    @property
    def name(self):
      """Optional display name used for worker-thread naming; None by default."""
      return None
  class _ExecutorServiceWorker(threading.Thread):
    """Worker thread for executing a single task at a time."""
    # Amount to block waiting for getting an item from the queue in seconds.
    TIMEOUT = 5
    def __init__(
        self,
        queue,  # type: queue.Queue[_ExecutorService.CallableTask]
        index):
      super(_ExecutorService._ExecutorServiceWorker, self).__init__()
      self.queue = queue
      self._index = index
      self._default_name = 'ExecutorServiceWorker-' + str(index)
      self._update_name()
      self.shutdown_requested = False
      # Stop worker thread when main thread exits.
      self.daemon = True
      self.start()
    def _update_name(self, task=None):
      # Reflect the currently-executing task (if any) in the thread name so
      # thread dumps show what each worker is doing.
      if task and task.name:
        name = task.name
      else:
        name = self._default_name
      self.name = 'Thread: %d, %s (%s)' % (
          self._index, name, 'executing' if task else 'idle')
    def _get_task_or_none(self):
      # type: () -> Optional[_ExecutorService.CallableTask]
      try:
        # Do not block indefinitely, otherwise we may not act for a requested
        # shutdown.
        return self.queue.get(
            timeout=_ExecutorService._ExecutorServiceWorker.TIMEOUT)
      except queue.Empty:
        return None
    def run(self):
      # Each worker owns its own StateSampler/tracker so metrics are
      # attributed to the thread doing the work.
      state_sampler = statesampler.StateSampler('', counters.CounterFactory())
      statesampler.set_current_tracker(state_sampler)
      while not self.shutdown_requested:
        task = self._get_task_or_none()
        if task:
          try:
            if not self.shutdown_requested:
              self._update_name(task)
              task.call(state_sampler)
              self._update_name()
          finally:
            # Always mark the task done, even if task.call raised, so queue
            # accounting (task_done/join) stays balanced.
            self.queue.task_done()
    def shutdown(self):
      # Request-only: the run loop observes the flag on its next iteration.
      self.shutdown_requested = True
  def __init__(self, num_workers):
    self.queue = queue.Queue()  # type: queue.Queue[_ExecutorService.CallableTask]
    self.workers = [
        _ExecutorService._ExecutorServiceWorker(self.queue, i)
        for i in range(num_workers)
    ]
    self.shutdown_requested = False
  def submit(self, task):
    # type: (_ExecutorService.CallableTask) -> None
    assert isinstance(task, _ExecutorService.CallableTask)
    # Tasks submitted after shutdown are silently dropped.
    if not self.shutdown_requested:
      self.queue.put(task)
  def await_completion(self):
    # Block until every worker thread has exited.
    for worker in self.workers:
      worker.join()
  def shutdown(self):
    self.shutdown_requested = True
    for worker in self.workers:
      worker.shutdown()
    # Consume all the remaining items in the queue
    while not self.queue.empty():
      try:
        self.queue.get_nowait()
        self.queue.task_done()
      except queue.Empty:
        continue
    # All existing threads will eventually terminate (after they complete their
    # last task).
class _TransformEvaluationState(object):
  """Base class tracking TransformExecutors scheduled on an executor service.

  Subclasses control how much parallelism is allowed for the executors of a
  given applied transform. The ``scheduled`` set is shared across states so
  the overall executor can observe all in-flight work.
  """
  def __init__(
      self,
      executor_service,
      scheduled  # type: Set[TransformExecutor]
  ):
    self.executor_service = executor_service
    self.scheduled = scheduled
  def schedule(self, work):
    # Record the work as in-flight *before* submitting; the worker thread may
    # start (and complete) it immediately after submit.
    self.scheduled.add(work)
    self.executor_service.submit(work)
  def complete(self, completed_work):
    # Raises KeyError if the work was never scheduled here.
    self.scheduled.remove(completed_work)
class _ParallelEvaluationState(_TransformEvaluationState):
  """Evaluation state that imposes no concurrency limit.

  Every TransformExecutor handed to ``schedule`` is forwarded straight to the
  underlying ExecutorService. This suits evaluators whose output bundles can
  be produced from the input bundle alone (e.g. ParDo).
  """
  pass
class _SerialEvaluationState(_TransformEvaluationState):
  """A TransformEvaluationState with a single work queue.

  Any TransformExecutor scheduled will be placed on the work queue. Only one
  item of work will be submitted to the ExecutorService at any time.

  A principal use of this is for evaluators that keeps a global state such as
  _GroupByKeyOnly.
  """
  def __init__(self, executor_service, scheduled):
    super(_SerialEvaluationState, self).__init__(executor_service, scheduled)
    # Work items waiting for the single execution slot.
    self.serial_queue = collections.deque()
    # The one executor currently submitted to the service, or None.
    self.currently_evaluating = None
    self._lock = threading.Lock()
  def complete(self, completed_work):
    # Free the slot (possibly dispatching the next item) before removing the
    # finished work from the shared scheduled set.
    self._update_currently_evaluating(None, completed_work)
    super(_SerialEvaluationState, self).complete(completed_work)
  def schedule(self, new_work):
    # Only enqueues; actual submission happens inside
    # _update_currently_evaluating once the slot is free.
    self._update_currently_evaluating(new_work, None)
  def _update_currently_evaluating(self, new_work, completed_work):
    # Callers pass at most one of new_work/completed_work.
    with self._lock:
      if new_work:
        self.serial_queue.append(new_work)
      if completed_work:
        assert self.currently_evaluating == completed_work
        self.currently_evaluating = None
      if self.serial_queue and not self.currently_evaluating:
        # pop() takes from the right end of the deque, so pending work is
        # dispatched in LIFO order.
        next_work = self.serial_queue.pop()
        self.currently_evaluating = next_work
        super(_SerialEvaluationState, self).schedule(next_work)
class _TransformExecutorServices(object):
  """Schedules and completes TransformExecutors.

  Hands out the evaluation state (unlimited-parallelism or serialized) that
  is appropriate for the applied transform an executor belongs to. A single
  scheduled-set is shared by every state so all in-flight executors are
  visible through ``executors``.
  """
  def __init__(self, executor_service):
    # type: (_ExecutorService) -> None
    self._executor_service = executor_service
    self._scheduled = set()  # type: Set[TransformExecutor]
    # One parallel state serves every non-serial transform.
    self._parallel = _ParallelEvaluationState(
        executor_service, self._scheduled)
    # Serial states are cached per step; weak values let entries be
    # collected once nothing references them anymore.
    self._serial_cache = WeakValueDictionary(
    )  # type: WeakValueDictionary[Any, _SerialEvaluationState]
  def parallel(self):
    # type: () -> _ParallelEvaluationState
    """Returns the shared unlimited-parallelism state."""
    return self._parallel
  def serial(self, step):
    # type: (Any) -> _SerialEvaluationState
    """Returns (creating if needed) the serialized state for ``step``."""
    state = self._serial_cache.get(step)
    if state is None:
      state = _SerialEvaluationState(self._executor_service, self._scheduled)
      self._serial_cache[step] = state
    return state
  @property
  def executors(self):
    # type: () -> FrozenSet[TransformExecutor]
    """Snapshot of every currently scheduled TransformExecutor."""
    return frozenset(self._scheduled)
class _CompletionCallback(object):
  """The default completion callback.

  Completes transform evaluations that were triggered by elements arriving
  from an upstream transform or by a source transform, publishing the
  resulting bundles (or failure) to the executor's update queue.
  """
  def __init__(self,
               evaluation_context,  # type: EvaluationContext
               all_updates,
               timer_firings=None
              ):
    self._evaluation_context = evaluation_context
    self._all_updates = all_updates
    self._timer_firings = timer_firings or []
  def handle_result(
      self, transform_executor, input_committed_bundle, transform_result):
    """Commits the result and emits one update per produced bundle."""
    committed = self._evaluation_context.handle_result(
        input_committed_bundle, self._timer_firings, transform_result)
    for bundle in committed:
      self._all_updates.offer(
          _ExecutorServiceParallelExecutor._ExecutorUpdate(
              transform_executor, committed_bundle=bundle))
    for bundle in transform_result.unprocessed_bundles:
      self._all_updates.offer(
          _ExecutorServiceParallelExecutor._ExecutorUpdate(
              transform_executor, unprocessed_bundle=bundle))
    return committed
  def handle_exception(self, transform_executor, exception):
    """Publishes a failure update for the given executor."""
    self._all_updates.offer(
        _ExecutorServiceParallelExecutor._ExecutorUpdate(
            transform_executor, exception=exception))
class TransformExecutor(_ExecutorService.CallableTask):
  """For internal use only; no backwards-compatibility guarantees.

  TransformExecutor will evaluate a bundle using an applied ptransform.

  A CallableTask responsible for constructing a TransformEvaluator and
  evaluating it on some bundle of input, and registering the result using the
  completion callback.
  """
  _MAX_RETRY_PER_BUNDLE = 4
  def __init__(self,
               transform_evaluator_registry,  # type: TransformEvaluatorRegistry
               evaluation_context,  # type: EvaluationContext
               input_bundle,  # type: _Bundle
               fired_timers,
               applied_ptransform,
               completion_callback,
               transform_evaluation_state  # type: _TransformEvaluationState
              ):
    self._transform_evaluator_registry = transform_evaluator_registry
    self._evaluation_context = evaluation_context
    self._input_bundle = input_bundle
    # For non-empty bundles, store the window of the max EOW.
    # TODO(mariagh): Move to class _Bundle's inner _StackedWindowedValues
    self._latest_main_input_window = None
    if input_bundle.has_elements():
      # Seed with the first element's first window, then keep whichever
      # window has the greatest end timestamp across all elements.
      self._latest_main_input_window = input_bundle._elements[0].windows[0]
      for elem in input_bundle.get_elements_iterable():
        if elem.windows[0].end > self._latest_main_input_window.end:
          self._latest_main_input_window = elem.windows[0]
    self._fired_timers = fired_timers
    self._applied_ptransform = applied_ptransform
    self._completion_callback = completion_callback
    self._transform_evaluation_state = transform_evaluation_state
    # Side-input values already resolved for this executor (persists across
    # re-submissions while blocked on other side inputs).
    self._side_input_values = {}  # type: Dict[pvalue.AsSideInput, Any]
    self.blocked = False
    self._call_count = 0
    self._retry_count = 0
    self._max_retries_per_bundle = TransformExecutor._MAX_RETRY_PER_BUNDLE
  def call(self, state_sampler):
    self._call_count += 1
    # The executor may be re-submitted at most once per side input it blocks
    # on, plus the final successful call.
    assert self._call_count <= (1 + len(self._applied_ptransform.side_inputs))
    metrics_container = MetricsContainer(self._applied_ptransform.full_label)
    start_state = state_sampler.scoped_state(
        self._applied_ptransform.full_label,
        'start',
        metrics_container=metrics_container)
    process_state = state_sampler.scoped_state(
        self._applied_ptransform.full_label,
        'process',
        metrics_container=metrics_container)
    finish_state = state_sampler.scoped_state(
        self._applied_ptransform.full_label,
        'finish',
        metrics_container=metrics_container)
    with start_state:
      # Side input initialization should be accounted for in start_state.
      for side_input in self._applied_ptransform.side_inputs:
        # Find the projection of main's window onto the side input's window.
        window_mapping_fn = side_input._view_options().get(
            'window_mapping_fn', sideinputs._global_window_mapping_fn)
        main_onto_side_window = window_mapping_fn(
            self._latest_main_input_window)
        block_until = main_onto_side_window.end
        if side_input not in self._side_input_values:
          value = self._evaluation_context.get_value_or_block_until_ready(
              side_input, self, block_until)
          if not value:
            # Monitor task will reschedule this executor once the side input is
            # available.
            return
          self._side_input_values[side_input] = value
      side_input_values = [
          self._side_input_values[side_input]
          for side_input in self._applied_ptransform.side_inputs
      ]
    # Retry the whole bundle up to _MAX_RETRY_PER_BUNDLE times; on the final
    # failure, surface the exception through the completion callback.
    while self._retry_count < self._max_retries_per_bundle:
      try:
        self.attempt_call(
            metrics_container,
            side_input_values,
            start_state,
            process_state,
            finish_state)
        break
      except Exception as e:
        self._retry_count += 1
        _LOGGER.error(
            'Exception at bundle %r, due to an exception.\n %s',
            self._input_bundle,
            traceback.format_exc())
        if self._retry_count == self._max_retries_per_bundle:
          _LOGGER.error(
              'Giving up after %s attempts.', self._max_retries_per_bundle)
          self._completion_callback.handle_exception(self, e)
    # Commit physical metrics and release the evaluation slot regardless of
    # success or failure.
    self._evaluation_context.metrics().commit_physical(
        self._input_bundle, metrics_container.get_cumulative())
    self._transform_evaluation_state.complete(self)
  def attempt_call(
      self,
      metrics_container,
      side_input_values,
      start_state,
      process_state,
      finish_state):
    """Attempts to run a bundle."""
    evaluator = self._transform_evaluator_registry.get_evaluator(
        self._applied_ptransform, self._input_bundle, side_input_values)
    with start_state:
      evaluator.start_bundle()
    with process_state:
      # Deliver fired timers before processing the bundle's elements.
      if self._fired_timers:
        for timer_firing in self._fired_timers:
          evaluator.process_timer_wrapper(timer_firing)
      if self._input_bundle:
        for value in self._input_bundle.get_elements_iterable():
          evaluator.process_element(value)
    with finish_state:
      result = evaluator.finish_bundle()
      result.logical_metric_updates = metrics_container.get_cumulative()
    self._completion_callback.handle_result(self, self._input_bundle, result)
    return result
class Executor(object):
  """For internal use only; no backwards-compatibility guarantees.

  Thin public facade that delegates the executor lifecycle (start, wait,
  shutdown) to an _ExecutorServiceParallelExecutor instance.
  """
  def __init__(self, *args, **kwargs):
    self._delegate = _ExecutorServiceParallelExecutor(*args, **kwargs)
  def start(self, roots):
    self._delegate.start(roots)
  def await_completion(self):
    self._delegate.await_completion()
  def shutdown(self):
    self._delegate.request_shutdown()
class _ExecutorServiceParallelExecutor(object):
  """An internal implementation for Executor."""
  # Number of worker threads in the shared _ExecutorService; the monitor task
  # shares this pool with transform executors.
  NUM_WORKERS = 1
  def __init__(
      self,
      value_to_consumers,
      transform_evaluator_registry,
      evaluation_context  # type: EvaluationContext
  ):
    self.executor_service = _ExecutorService(
        _ExecutorServiceParallelExecutor.NUM_WORKERS)
    self.transform_executor_services = _TransformExecutorServices(
        self.executor_service)
    self.value_to_consumers = value_to_consumers
    self.transform_evaluator_registry = transform_evaluator_registry
    self.evaluation_context = evaluation_context
    # Internal updates (produced bundles / failures) consumed by MonitorTask.
    self.all_updates = _ExecutorServiceParallelExecutor._TypedUpdateQueue(
        _ExecutorServiceParallelExecutor._ExecutorUpdate)
    # User-visible updates consumed by await_completion.
    self.visible_updates = _ExecutorServiceParallelExecutor._TypedUpdateQueue(
        _ExecutorServiceParallelExecutor._VisibleExecutorUpdate)
    self.default_completion_callback = _CompletionCallback(
        evaluation_context, self.all_updates)
  def start(self, roots):
    self.root_nodes = frozenset(roots)
    # All transforms: the roots plus every consumer of any produced value.
    self.all_nodes = frozenset(
        itertools.chain(
            roots, *itertools.chain(self.value_to_consumers.values())))
    self.node_to_pending_bundles = {}
    for root_node in self.root_nodes:
      provider = (
          self.transform_evaluator_registry.get_root_bundle_provider(root_node))
      self.node_to_pending_bundles[root_node] = provider.get_root_bundles()
    # Kick off the monitor task; it re-submits itself until shutdown.
    self.executor_service.submit(
        _ExecutorServiceParallelExecutor._MonitorTask(self))
  def await_completion(self):
    # Blocks until a visible update arrives (normal completion or failure).
    update = self.visible_updates.take()
    try:
      if update.exception:
        t, v, tb = update.exc_info
        raise_(t, v, tb)
    finally:
      self.executor_service.shutdown()
      self.executor_service.await_completion()
  def request_shutdown(self):
    self.executor_service.shutdown()
    self.executor_service.await_completion()
    self.evaluation_context.shutdown()
  def schedule_consumers(self, committed_bundle):
    # type: (_Bundle) -> None
    # Schedule every registered consumer of the bundle's PCollection.
    if committed_bundle.pcollection in self.value_to_consumers:
      consumers = self.value_to_consumers[committed_bundle.pcollection]
      for applied_ptransform in consumers:
        self.schedule_consumption(
            applied_ptransform,
            committed_bundle, [],
            self.default_completion_callback)
  def schedule_unprocessed_bundle(self, applied_ptransform, unprocessed_bundle):
    # Re-queue a bundle the transform could not process yet; picked up later
    # by the monitor's _add_work_if_necessary.
    self.node_to_pending_bundles[applied_ptransform].append(unprocessed_bundle)
  def schedule_consumption(self,
                           consumer_applied_ptransform,
                           committed_bundle,  # type: _Bundle
                           fired_timers,
                           on_complete
                          ):
    """Schedules evaluation of the given bundle with the transform."""
    assert consumer_applied_ptransform
    assert committed_bundle
    assert on_complete
    # Serialize evaluation for transforms the registry marks as serial
    # (e.g. stateful grouping); everything else runs fully parallel.
    if self.transform_evaluator_registry.should_execute_serially(
        consumer_applied_ptransform):
      transform_executor_service = self.transform_executor_services.serial(
          consumer_applied_ptransform)  # type: _TransformEvaluationState
    else:
      transform_executor_service = self.transform_executor_services.parallel()
    transform_executor = TransformExecutor(
        self.transform_evaluator_registry,
        self.evaluation_context,
        committed_bundle,
        fired_timers,
        consumer_applied_ptransform,
        on_complete,
        transform_executor_service)
    transform_executor_service.schedule(transform_executor)
  class _TypedUpdateQueue(object):
    """Type checking update queue with blocking and non-blocking operations."""
    def __init__(self, item_type):
      self._item_type = item_type
      self._queue = queue.Queue()
    def poll(self):
      # Non-blocking; returns None when the queue is empty.
      try:
        item = self._queue.get_nowait()
        self._queue.task_done()
        return item
      except queue.Empty:
        return None
    def take(self):
      # The implementation of Queue.Queue.get() does not propagate
      # KeyboardInterrupts when a timeout is not used. We therefore use a
      # one-second timeout in the following loop to allow KeyboardInterrupts
      # to be correctly propagated.
      while True:
        try:
          item = self._queue.get(timeout=1)
          self._queue.task_done()
          return item
        except queue.Empty:
          pass
    def offer(self, item):
      assert isinstance(item, self._item_type)
      self._queue.put_nowait(item)
  class _ExecutorUpdate(object):
    """An internal status update on the state of the executor."""
    def __init__(
        self,
        transform_executor,
        committed_bundle=None,
        unprocessed_bundle=None,
        exception=None):
      self.transform_executor = transform_executor
      # Exactly one of them should be not-None
      assert sum(
          [bool(committed_bundle), bool(unprocessed_bundle),
           bool(exception)]) == 1
      self.committed_bundle = committed_bundle
      self.unprocessed_bundle = unprocessed_bundle
      self.exception = exception
      # Capture the active traceback if this update is being created inside
      # the corresponding except block; otherwise fall back to the bare
      # exception with no traceback.
      self.exc_info = sys.exc_info()
      if self.exc_info[1] is not exception:
        # Not the right exception.
        self.exc_info = (exception, None, None)
  class _VisibleExecutorUpdate(object):
    """An update of interest to the user.

    Used for awaiting the completion to decide whether to return normally or
    raise an exception.
    """
    def __init__(self, exc_info=(None, None, None)):
      # 'finished' is True iff an exception type was supplied; a normal
      # completion is signaled by an update constructed with no exc_info.
      self.finished = exc_info[0] is not None
      self.exception = exc_info[1] or exc_info[0]
      self.exc_info = exc_info
  class _MonitorTask(_ExecutorService.CallableTask):
    """MonitorTask continuously runs to ensure that pipeline makes progress."""
    def __init__(self, executor):
      # type: (_ExecutorServiceParallelExecutor) -> None
      self._executor = executor
    @property
    def name(self):
      return 'monitor'
    def call(self, state_sampler):
      try:
        # Drain all pending internal updates, dispatching consumers for each
        # committed bundle and surfacing failures to the visible queue.
        update = self._executor.all_updates.poll()
        while update:
          if update.committed_bundle:
            self._executor.schedule_consumers(update.committed_bundle)
          elif update.unprocessed_bundle:
            self._executor.schedule_unprocessed_bundle(
                update.transform_executor._applied_ptransform,
                update.unprocessed_bundle)
          else:
            assert update.exception
            _LOGGER.warning(
                'A task failed with exception: %s', update.exception)
            self._executor.visible_updates.offer(
                _ExecutorServiceParallelExecutor._VisibleExecutorUpdate(
                    update.exc_info))
          update = self._executor.all_updates.poll()
        self._executor.evaluation_context.schedule_pending_unblocked_tasks(
            self._executor.executor_service)
        self._add_work_if_necessary(self._fire_timers())
      except Exception as e:  # pylint: disable=broad-except
        _LOGGER.error('Monitor task died due to exception.\n %s', e)
        self._executor.visible_updates.offer(
            _ExecutorServiceParallelExecutor._VisibleExecutorUpdate(
                sys.exc_info()))
      finally:
        # Keep the monitor alive by re-submitting itself until the pipeline
        # reaches a terminal state.
        if not self._should_shutdown():
          self._executor.executor_service.submit(self)
    def _should_shutdown(self):
      # type: () -> bool
      """Checks whether the pipeline is completed and should be shut down.

      If there is anything in the queue of tasks to do or
      if there are any realtime timers set, do not shut down.

      Otherwise, check if all the transforms' watermarks are complete.
      If they are not, the pipeline is not progressing (stall detected).
      Whether the pipeline has stalled or not, the executor should shut
      down the pipeline.

      Returns:
        True only if the pipeline has reached a terminal state and should
        be shut down.
      """
      if self._is_executing():
        # There are some bundles still in progress.
        return False
      watermark_manager = self._executor.evaluation_context._watermark_manager
      _, any_unfired_realtime_timers = watermark_manager.extract_all_timers()
      if any_unfired_realtime_timers:
        return False
      else:
        if self._executor.evaluation_context.is_done():
          # Normal completion: offer an empty visible update.
          self._executor.visible_updates.offer(
              _ExecutorServiceParallelExecutor._VisibleExecutorUpdate())
        else:
          # Nothing is scheduled for execution, but watermarks incomplete.
          self._executor.visible_updates.offer(
              _ExecutorServiceParallelExecutor._VisibleExecutorUpdate((
                  Exception('Monitor task detected a pipeline stall.'),
                  None,
                  None)))
        self._executor.executor_service.shutdown()
        return True
    def _fire_timers(self):
      """Schedules triggered consumers if any timers fired.

      Returns:
        True if timers fired.
      """
      transform_fired_timers, _ = (
          self._executor.evaluation_context.extract_all_timers())
      for applied_ptransform, fired_timers in transform_fired_timers:
        # Use an empty committed bundle. just to trigger.
        empty_bundle = (
            self._executor.evaluation_context.create_empty_committed_bundle(
                applied_ptransform.inputs[0]))
        timer_completion_callback = _CompletionCallback(
            self._executor.evaluation_context,
            self._executor.all_updates,
            timer_firings=fired_timers)
        self._executor.schedule_consumption(
            applied_ptransform,
            empty_bundle,
            fired_timers,
            timer_completion_callback)
      return bool(transform_fired_timers)
    def _is_executing(self):
      # type: () -> bool
      """Checks whether the job is still executing.

      Returns:
        True if there is at least one non-blocked TransformExecutor active."""
      executors = self._executor.transform_executor_services.executors
      if not executors:
        # Nothing is executing.
        return False
      # Ensure that at least one of those executors is not blocked.
      for transform_executor in executors:
        if not transform_executor.blocked:
          return True
      return False
    def _add_work_if_necessary(self, timers_fired):
      """Adds more work from the roots if pipeline requires more input.

      If all active TransformExecutors are in a blocked state, add more work
      from root nodes that may have additional work. This ensures that if a
      pipeline has elements available from the root nodes it will add those
      elements when necessary.

      Args:
        timers_fired: True if any timers fired prior to this call.
      """
      # If any timers have fired, they will add more work; No need to add more.
      if timers_fired:
        return
      if self._is_executing():
        # We have at least one executor that can proceed without adding
        # additional work.
        return
      # All current TransformExecutors are blocked; add more work from any
      # pending bundles.
      for applied_ptransform in self._executor.all_nodes:
        if not self._executor.evaluation_context.is_done(applied_ptransform):
          pending_bundles = self._executor.node_to_pending_bundles.get(
              applied_ptransform, [])
          for bundle in pending_bundles:
            self._executor.schedule_consumption(
                applied_ptransform,
                bundle, [],
                self._executor.default_completion_callback)
          self._executor.node_to_pending_bundles[applied_ptransform] = []
# pytype: skip-file
from __future__ import absolute_import
import collections
import itertools
import typing
import apache_beam as beam
from apache_beam import typehints
from apache_beam.internal.util import ArgumentPlaceholder
from apache_beam.transforms.combiners import _CurriedFn
from apache_beam.utils.windowed_value import WindowedValue
class LiftedCombinePerKey(beam.PTransform):
  """CombinePerKey variant that pre-combines values on the mapper side."""
  def __init__(self, combine_fn, args, kwargs):
    # Deferred (side-input) combine-fn arguments are unsupported; reject
    # them whether they appear directly or inside a curried combine_fn.
    deferred_candidates = list(args) + list(kwargs.values())
    if isinstance(combine_fn, _CurriedFn):
      deferred_candidates += list(combine_fn.args)
      deferred_candidates += list(combine_fn.kwargs.values())
    if any(isinstance(arg, ArgumentPlaceholder)
           for arg in deferred_candidates):
      # This isn't implemented in dataflow either...
      raise NotImplementedError('Deferred CombineFn side inputs.')
    self._combine_fn = beam.transforms.combiners.curry_combine_fn(
        combine_fn, args, kwargs)

  def expand(self, pcoll):
    # Pre-combine per bundle, shuffle the partial accumulators, then merge
    # and extract the final outputs.
    pre_combine = beam.ParDo(
        PartialGroupByKeyCombiningValues(self._combine_fn))
    post_combine = beam.ParDo(FinishCombine(self._combine_fn))
    return pcoll | pre_combine | beam.GroupByKey() | post_combine
class PartialGroupByKeyCombiningValues(beam.DoFn):
  """Aggregates values into a per-key-window cache.

  As bundles are in-memory-sized, we don't bother flushing until the very end.
  """
  def __init__(self, combine_fn):
    self._combine_fn = combine_fn

  def setup(self):
    self._combine_fn.setup()

  def start_bundle(self):
    # One accumulator per (key, window) pair, created lazily on first input.
    self._cache = collections.defaultdict(self._combine_fn.create_accumulator)

  def process(self, element, window=beam.DoFn.WindowParam):
    # Fold each value into the accumulator for its (key, window) pair.
    k, vi = element
    self._cache[k, window] = self._combine_fn.add_input(
        self._cache[k, window], vi)

  def finish_bundle(self):
    for (k, w), va in self._cache.items():
      # We compact the accumulator since a GBK (which necessitates encoding)
      # will follow.
      # NOTE(review): the output is timestamped at the window end — confirm
      # this matches downstream timestamp expectations.
      yield WindowedValue((k, self._combine_fn.compact(va)), w.end, (w, ))

  def teardown(self):
    self._combine_fn.teardown()

  def default_type_hints(self):
    # Wrap the combine_fn's element type hints into (key, value) tuple
    # hints, since this DoFn consumes and produces keyed elements.
    hints = self._combine_fn.get_type_hints()
    K = typehints.TypeVariable('K')
    if hints.input_types:
      args, kwargs = hints.input_types
      args = (typehints.Tuple[K, args[0]], ) + args[1:]
      hints = hints.with_input_types(*args, **kwargs)
    else:
      hints = hints.with_input_types(typehints.Tuple[K, typing.Any])
    # Outputs are (key, accumulator); the accumulator type is unknown here.
    hints = hints.with_output_types(typehints.Tuple[K, typing.Any])
    return hints
class FinishCombine(beam.DoFn):
  """Merges partially combined results."""
  def __init__(self, combine_fn):
    self._combine_fn = combine_fn

  def setup(self):
    self._combine_fn.setup()

  def process(self, element):
    # Merge the shuffled partial accumulators for this key, then extract
    # the final combined value.
    k, vs = element
    merged = self._combine_fn.merge_accumulators(vs)
    return [(k, self._combine_fn.extract_output(merged))]

  def teardown(self):
    self._combine_fn.teardown()

  def default_type_hints(self):
    # Inputs are (key, iterable-of-accumulators); accumulator type is
    # opaque here, so the input value type stays Any.
    hints = self._combine_fn.get_type_hints()
    K = typehints.TypeVariable('K')
    hints = hints.with_input_types(typehints.Tuple[K, typing.Any])
    if hints.output_types:
      main_output_type = hints.simple_output_type('')
      hints = hints.with_output_types(typehints.Tuple[K, main_output_type])
    return hints
# pytype: skip-file
"""Starts a service for running portable beam pipelines.
The basic usage is simply
python -m apache_beam.runners.portability.local_job_service_main
Many other options are also supported, such as starting in the background or
passing in a lockfile to ensure that only one copy of the service is running
at a time. Pass --help to see them all.
"""
from __future__ import absolute_import
from __future__ import print_function
import argparse
import logging
import os
import pathlib
import signal
import subprocess
import sys
import time
from apache_beam.runners.portability import local_job_service
_LOGGER = logging.getLogger(__name__)
def run(argv):
  """Parses flags and starts, stops, or serves the local job service.

  Depending on the flags this either: kills an existing server recorded in
  --pid_file (--stop), re-launches itself as a background subprocess
  (--background), or runs the gRPC job service in the foreground.
  """
  # When invoked as `python <this file> ...`, drop the script path so only
  # the real flags are parsed.
  if argv[0] == __file__:
    argv = argv[1:]
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-p',
      '--port',
      '--job_port',
      type=int,
      default=0,
      help='port on which to serve the job api')
  parser.add_argument('--staging_dir')
  parser.add_argument(
      '--pid_file', help='File in which to store the process id of the server.')
  parser.add_argument(
      '--port_file', help='File in which to store the port of the server.')
  parser.add_argument(
      '--background',
      action='store_true',
      help='Start the server up as a background process.'
      ' Will fail if pid_file already exists, unless --stop is also specified.')
  parser.add_argument(
      '--stderr_file',
      help='Where to write stderr (if not specified, merged with stdout).')
  parser.add_argument(
      '--stdout_file', help='Where to write stdout for background job service.')
  parser.add_argument(
      '--stop',
      action='store_true',
      help='Stop the existing process, if any, specified in pid_file.'
      ' Will not start up a new service unless --background is specified.')
  options = parser.parse_args(argv)

  if options.stop:
    # Kill the previously recorded server process, then fall through to a
    # restart only when --background was also given.
    if not options.pid_file:
      raise RuntimeError('--pid_file must be specified with --stop')
    if os.path.exists(options.pid_file):
      with open(options.pid_file) as fin:
        pid = int(fin.read())
      print('Killing process at', pid)
      try:
        os.kill(pid, signal.SIGTERM)
      except Exception:
        # Process already gone; still remove the stale pid file below.
        print('Process', pid, 'already killed.')
      os.unlink(options.pid_file)
    else:
      print('Process id file', options.pid_file, 'already removed.')
    if not options.background:
      return

  if options.background:
    # Re-invoke this module as a detached child (without --stop/--background)
    # and wait for it to publish its port via --port_file.
    if not options.pid_file:
      raise RuntimeError('--pid_file must be specified with --start')
    if options.stop:
      argv.remove('--stop')
    argv.remove('--background')
    if not options.port_file:
      # Derive a port file next to the pid file and forward it to the child.
      options.port_file = os.path.splitext(options.pid_file)[0] + '.port'
      argv.append('--port_file')
      argv.append(options.port_file)

    if not options.stdout_file:
      raise RuntimeError('--stdout_file must be specified with --background')
    os.makedirs(pathlib.PurePath(options.stdout_file).parent, exist_ok=True)
    stdout_dest = open(options.stdout_file, mode='w')

    if options.stderr_file:
      os.makedirs(pathlib.PurePath(options.stderr_file).parent, exist_ok=True)
      stderr_dest = open(options.stderr_file, mode='w')
    else:
      # No separate stderr file: merge the child's stderr into its stdout.
      stderr_dest = subprocess.STDOUT

    subprocess.Popen([
        sys.executable,
        '-m',
        'apache_beam.runners.portability.local_job_service_main'
    ] + argv,
                     stderr=stderr_dest,
                     stdout=stdout_dest)
    print('Waiting for server to start up...')
    while not os.path.exists(options.port_file):
      time.sleep(.1)
    with open(options.port_file) as fin:
      port = fin.read()
    print('Server started at port', port)
    return

  if options.pid_file:
    print('Writing process id to', options.pid_file)
    os.makedirs(pathlib.PurePath(options.pid_file).parent, exist_ok=True)
    # O_EXCL makes startup fail if a pid file already exists (server running).
    fd = os.open(options.pid_file, os.O_CREAT | os.O_EXCL | os.O_RDWR)
    with os.fdopen(fd, 'w') as fout:
      fout.write(str(os.getpid()))
  try:
    job_servicer = local_job_service.LocalJobServicer(options.staging_dir)
    port = job_servicer.start_grpc_server(options.port)
    try:
      if options.port_file:
        print('Writing port to', options.port_file)
        os.makedirs(pathlib.PurePath(options.port_file).parent, exist_ok=True)
        # Write to a temp file and rename so readers never observe a
        # partially written port file.
        with open(options.port_file + '.tmp', 'w') as fout:
          fout.write(str(port))
        os.rename(options.port_file + '.tmp', options.port_file)
      serve("Listening for beam jobs on port %d." % port, job_servicer)
    finally:
      job_servicer.stop()
  finally:
    # Clean up the pid/port files on any exit path (including SIGTERM,
    # which the main guard converts into SystemExit).
    if options.pid_file and os.path.exists(options.pid_file):
      os.unlink(options.pid_file)
    if options.port_file and os.path.exists(options.port_file):
      os.unlink(options.port_file)
def serve(msg, job_servicer):
  """Blocks forever, logging msg at a geometrically growing interval."""
  delay_seconds = 30
  while True:
    _LOGGER.info(msg)
    time.sleep(delay_seconds)
    # Back off so long-running services log progressively less often.
    delay_seconds *= 1.25
if __name__ == '__main__':
  # Turn SIGTERM into a normal SystemExit so run()'s finally blocks execute
  # (pid/port file cleanup).
  signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
  logging.basicConfig()
  logging.getLogger().setLevel(logging.INFO)
  run(sys.argv)
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import re
import sys
import urllib
from apache_beam.options import pipeline_options
from apache_beam.runners.portability import flink_uber_jar_job_server
from apache_beam.runners.portability import job_server
from apache_beam.runners.portability import portable_runner
MAGIC_HOST_NAMES = ['[local]', '[auto]']
_LOGGER = logging.getLogger(__name__)
class FlinkRunner(portable_runner.PortableRunner):
  """Portable runner that submits pipelines to an Apache Flink cluster."""
  def run_pipeline(self, pipeline, options):
    portable_options = options.view_as(pipeline_options.PortableOptions)
    flink_options = options.view_as(pipeline_options.FlinkRunnerOptions)
    # For an embedded master ([local]/[auto]), when the user picked neither
    # an environment nor an executable artifact, default to LOOPBACK so
    # user code executes in this process.
    if (flink_options.flink_master in MAGIC_HOST_NAMES and
        not portable_options.environment_type and
        not portable_options.output_executable_path):
      portable_options.environment_type = 'LOOPBACK'
    return super(FlinkRunner, self).run_pipeline(pipeline, options)

  def default_job_server(self, options):
    """Returns the uber-jar job server for remote masters, else a local one."""
    flink_options = options.view_as(pipeline_options.FlinkRunnerOptions)
    flink_master = self.add_http_scheme(flink_options.flink_master)
    flink_options.flink_master = flink_master
    if (flink_options.flink_submit_uber_jar and
        flink_master not in MAGIC_HOST_NAMES):
      if sys.version_info < (3, 6):
        raise ValueError(
            'flink_submit_uber_jar requires Python 3.6+, current version %s' %
            sys.version)
      # This has to be changed to [auto], otherwise we will attempt to
      # submit the pipeline remotely on the Flink JobMaster, which will
      # _fail_.
      # DO NOT CHANGE the following line, unless you have tested this.
      flink_options.flink_master = '[auto]'
      return flink_uber_jar_job_server.FlinkUberJarJobServer(
          flink_master, options)
    else:
      return job_server.StopOnExitJobServer(FlinkJarJobServer(options))

  def create_job_service_handle(self, job_service, options):
    # Unknown pipeline options are retained only for uber-jar submission,
    # where the Flink job server itself may understand them.
    return portable_runner.JobServiceHandle(
        job_service,
        options,
        retain_unknown_options=options.view_as(
            pipeline_options.FlinkRunnerOptions).flink_submit_uber_jar)

  @staticmethod
  def add_http_scheme(flink_master):
    """Adds a http protocol scheme if none provided."""
    flink_master = flink_master.strip()
    if (flink_master not in MAGIC_HOST_NAMES and
        not re.search(r'^https?://', flink_master)):
      _LOGGER.info(
          'Adding HTTP protocol scheme to flink_master parameter: '
          'http://%s',
          flink_master)
      flink_master = 'http://' + flink_master
    return flink_master
class FlinkJarJobServer(job_server.JavaJarJobServer):
  """Runs the Flink job-server shadow jar locally as a subprocess."""
  def __init__(self, options):
    super(FlinkJarJobServer, self).__init__(options)
    options = options.view_as(pipeline_options.FlinkRunnerOptions)
    self._jar = options.flink_job_server_jar
    self._master_url = options.flink_master
    self._flink_version = options.flink_version

  def path_to_jar(self):
    """Returns a path/URL to the job server jar, preferring a user-supplied one.

    Raises:
      ValueError: if the supplied jar is neither an existing local file nor
        a URL with an explicit scheme.
    """
    if self._jar:
      if not os.path.exists(self._jar):
        url = urllib.parse.urlparse(self._jar)
        if not url.scheme:
          raise ValueError(
              'Unable to parse jar URL "%s". If using a full URL, make sure '
              'the scheme is specified. If using a local file path, make sure '
              'the file exists; you may have to first build the job server '
              'using `./gradlew runners:flink:%s:job-server:shadowJar`.' %
              (self._jar, self._flink_version))
      return self._jar
    else:
      # No jar supplied: resolve the version-matched Beam release artifact.
      return self.path_to_beam_jar(
          ':runners:flink:%s:job-server:shadowJar' % self._flink_version)

  def java_arguments(
      self, job_port, artifact_port, expansion_port, artifacts_dir):
    # Command-line flags passed to the Java job server process.
    return [
        '--flink-master',
        self._master_url,
        '--artifacts-dir',
        artifacts_dir,
        '--job-port',
        job_port,
        '--artifact-port',
        artifact_port,
        '--expansion-port',
        expansion_port
    ]
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
import itertools
import logging
import os
import tempfile
import time
import urllib
import zipfile
import requests
from apache_beam.options import pipeline_options
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.runners.portability import abstract_job_service
from apache_beam.runners.portability import job_server
from apache_beam.utils.timestamp import Timestamp
_LOGGER = logging.getLogger(__name__)
class SparkUberJarJobServer(abstract_job_service.AbstractJobServiceServicer):
  """A Job server which submits a self-contained Jar to a Spark cluster.

  The jar contains the Beam pipeline definition, dependencies, and
  the pipeline artifacts.
  """
  def __init__(self, rest_url, options):
    super(SparkUberJarJobServer, self).__init__()
    self._rest_url = rest_url
    spark_options = options.view_as(pipeline_options.SparkRunnerOptions)
    self._executable_jar = spark_options.spark_job_server_jar
    job_server_options = options.view_as(pipeline_options.JobServerOptions)
    self._artifact_port = job_server_options.artifact_port
    self._temp_dir = tempfile.mkdtemp(prefix='apache-beam-spark')

  def start(self):
    return self

  def stop(self):
    pass

  def executable_jar(self):
    """Returns a local path to the job server jar, downloading if needed."""
    jar = self._executable_jar
    if jar:
      # A user-supplied jar must be an existing file or a scheme-ful URL.
      if not os.path.exists(jar) and not urllib.parse.urlparse(jar).scheme:
        raise ValueError(
            'Unable to parse jar URL "%s". If using a full URL, make sure '
            'the scheme is specified. If using a local file path, make sure '
            'the file exists; you may have to first build the job server '
            'using `./gradlew runners:spark:job-server:shadowJar`.' % jar)
      url = jar
    else:
      # Fall back to the released Beam uber jar for this version.
      url = job_server.JavaJarJobServer.path_to_beam_jar(
          ':runners:spark:job-server:shadowJar')
    return job_server.JavaJarJobServer.local_jar(url)

  def create_beam_job(self, job_id, job_name, pipeline, options):
    return SparkBeamJob(
        self._rest_url,
        self.executable_jar(),
        job_id,
        job_name,
        pipeline,
        options,
        artifact_port=self._artifact_port)
class SparkBeamJob(abstract_job_service.UberJarBeamJob):
  """Runs a single Beam job on Spark by staging all contents into a Jar
  and uploading it via the Spark Rest API.

  Note that the Spark Rest API is not enabled by default. It must be enabled by
  setting the configuration property spark.master.rest.enabled to true."""
  def __init__(
      self,
      rest_url,
      executable_jar,
      job_id,
      job_name,
      pipeline,
      options,
      artifact_port=0):
    super(SparkBeamJob, self).__init__(
        executable_jar,
        job_id,
        job_name,
        pipeline,
        options,
        artifact_port=artifact_port)
    self._rest_url = rest_url
    # Message history is a superset of state history.
    self._message_history = self._state_history[:]

  def request(self, method, path, expected_status=200, **kwargs):
    """Issues an HTTP request against the Spark REST API.

    Raises:
      RuntimeError: if the response status differs from expected_status.
    """
    url = '%s/%s' % (self._rest_url, path)
    response = method(url, **kwargs)
    if response.status_code != expected_status:
      raise RuntimeError(
          "Request to %s failed with status %d: %s" %
          (url, response.status_code, response.text))
    if response.text:
      return response.json()

  def get(self, path, **kwargs):
    return self.request(requests.get, path, **kwargs)

  def post(self, path, **kwargs):
    return self.request(requests.post, path, **kwargs)

  def delete(self, path, **kwargs):
    return self.request(requests.delete, path, **kwargs)

  def _get_server_spark_version(self):
    # Spark REST API doesn't seem to offer a dedicated endpoint for getting the
    # version, but it does include the version in all responses, even errors.
    return self.get('', expected_status=400)['serverSparkVersion']

  def _get_client_spark_version_from_properties(self, jar):
    """Parse Spark version from spark-version-info.properties file in the jar.

    https://github.com/apache/spark/blob/dddfeca175bdce5294debe00d4a993daef92ca60/build/spark-build-info#L30
    """
    # Open read-only: the previous 'a' (append) mode required write access
    # to the jar just to read one entry and risked touching the archive.
    with zipfile.ZipFile(jar, 'r') as z:
      with z.open('spark-version-info.properties') as fin:
        for line in fin.read().decode('utf-8').splitlines():
          split = [s.strip() for s in line.split('=')]
          if len(split) == 2 and split[0] == 'version' and split[1] != '':
            return split[1]
    raise ValueError(
        'Property "version" not found in spark-version-info.properties.')

  def _get_client_spark_version(self, jar):
    """Best-effort client Spark version; falls back to the server's version."""
    try:
      return self._get_client_spark_version_from_properties(jar)
    except Exception as e:
      _LOGGER.debug(e)
      server_version = self._get_server_spark_version()
      _LOGGER.warning(
          'Unable to parse Spark version from '
          'spark-version-info.properties. Defaulting to %s' % server_version)
      return server_version

  def _create_submission_request(self, jar, job_name):
    # Payload for the Spark standalone REST CreateSubmissionRequest.
    jar_url = "file:%s" % jar
    return {
        "action": "CreateSubmissionRequest",
        "appArgs": [],
        "appResource": jar_url,
        "clientSparkVersion": self._get_client_spark_version(jar),
        "environmentVariables": {},
        "mainClass": "org.apache.beam.runners.spark.SparkPipelineRunner",
        "sparkProperties": {
            "spark.jars": jar_url,
            "spark.app.name": job_name,
            "spark.submit.deployMode": "cluster",
        }
    }

  def run(self):
    self._stop_artifact_service()
    # Upload the jar and start the job.
    self._spark_submission_id = self.post(
        'v1/submissions/create',
        json=self._create_submission_request(self._jar,
                                             self._job_name))['submissionId']
    _LOGGER.info('Submitted Spark job with ID %s' % self._spark_submission_id)

  def cancel(self):
    self.post('v1/submissions/kill/%s' % self._spark_submission_id)

  @staticmethod
  def _get_beam_state(spark_response):
    # Map Spark driver states onto Beam job states; anything unrecognized
    # becomes UNSPECIFIED.
    return {
        'SUBMITTED': beam_job_api_pb2.JobState.STARTING,
        'RUNNING': beam_job_api_pb2.JobState.RUNNING,
        'FINISHED': beam_job_api_pb2.JobState.DONE,
        'RELAUNCHING': beam_job_api_pb2.JobState.RUNNING,
        'UNKNOWN': beam_job_api_pb2.JobState.UNSPECIFIED,
        'KILLED': beam_job_api_pb2.JobState.CANCELLED,
        'FAILED': beam_job_api_pb2.JobState.FAILED,
        'ERROR': beam_job_api_pb2.JobState.FAILED,
    }.get(spark_response['driverState'], beam_job_api_pb2.JobState.UNSPECIFIED)

  def _get_spark_status(self):
    return self.get('v1/submissions/status/%s' % self._spark_submission_id)

  def get_state(self):
    response = self._get_spark_status()
    state = self._get_beam_state(response)
    timestamp = self.set_state(state)
    if timestamp is None:
      # State has not changed since last check. Use previous timestamp.
      return super(SparkBeamJob, self).get_state()
    else:
      return state, timestamp

  def _with_message_history(self, message_stream):
    return itertools.chain(self._message_history[:], message_stream)

  def _get_message_iter(self):
    """Returns an iterator of messages from the Spark server.

    Note that while message history is de-duped, this function's returned
    iterator may contain duplicate values."""
    sleep_secs = 1.0
    message_ix = 0
    while True:
      response = self._get_spark_status()
      state = self._get_beam_state(response)
      timestamp = Timestamp.now()
      message = None
      if 'message' in response:
        importance = (
            beam_job_api_pb2.JobMessage.MessageImportance.JOB_MESSAGE_ERROR
            if state == beam_job_api_pb2.JobState.FAILED else
            beam_job_api_pb2.JobMessage.MessageImportance.JOB_MESSAGE_BASIC)
        message = beam_job_api_pb2.JobMessage(
            message_id='message%d' % message_ix,
            time=str(int(timestamp)),
            importance=importance,
            message_text=response['message'])
        yield message
        message_ix += 1
      # TODO(BEAM-8983) In the event of a failure, query
      # additional info from Spark master and/or workers.
      check_timestamp = self.set_state(state)
      if check_timestamp is not None:
        # State changed: record the message (if any) and the state change
        # so late subscribers can replay them.
        if message:
          self._message_history.append(message)
        self._message_history.append((state, check_timestamp))
      yield state, timestamp
      # Poll with capped exponential backoff.
      sleep_secs = min(60, sleep_secs * 1.2)
      time.sleep(sleep_secs)

  def get_state_stream(self):
    for msg in self._with_message_history(self._get_message_iter()):
      if isinstance(msg, tuple):
        state, timestamp = msg
        yield state, timestamp
        if self.is_terminal_state(state):
          break

  def get_message_stream(self):
    for msg in self._with_message_history(self._get_message_iter()):
      yield msg
      if isinstance(msg, tuple):
        state, _ = msg
        if self.is_terminal_state(state):
          break
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
import traceback
from apache_beam import pipeline as beam_pipeline
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_expansion_api_pb2
from apache_beam.portability.api import beam_expansion_api_pb2_grpc
from apache_beam.runners import pipeline_context
from apache_beam.runners.portability import portable_runner
from apache_beam.transforms import external
from apache_beam.transforms import ptransform
class ExpansionServiceServicer(
    beam_expansion_api_pb2_grpc.ExpansionServiceServicer):
  """gRPC servicer that expands external transforms into pipeline protos."""
  def __init__(self, options=None):
    # Default to the embedded Python environment when no options are given.
    self._options = options or beam_pipeline.PipelineOptions(
        environment_type=python_urns.EMBEDDED_PYTHON, sdk_location='container')

  def Expand(self, request, context=None):
    """Expands the requested transform and returns the resulting proto.

    On any failure, returns an ExpansionResponse carrying the traceback
    instead of raising.
    """
    try:
      pipeline = beam_pipeline.Pipeline(options=self._options)

      def with_pipeline(component, pcoll_id=None):
        # Utility to attach the fake pipeline (and, for PCollections, their
        # original producer/tag) to deserialized components.
        component.pipeline = pipeline
        if pcoll_id:
          component.producer, component.tag = producers[pcoll_id]
          # We need the lookup to resolve back to this id.
          context.pcollections._obj_to_id[component] = pcoll_id
        return component

      # NOTE(review): this local rebinds the gRPC `context` parameter with a
      # PipelineContext; the closure above uses the new binding — confirm the
      # gRPC context is intentionally unused past this point.
      context = pipeline_context.PipelineContext(
          request.components,
          default_environment=portable_runner.PortableRunner.
          _create_environment(self._options),
          namespace=request.namespace)
      # Map each PCollection id to its (producer transform, output tag).
      producers = {
          pcoll_id: (context.transforms.get_by_id(t_id), pcoll_tag)
          for t_id,
          t_proto in request.components.transforms.items() for pcoll_tag,
          pcoll_id in t_proto.outputs.items()
      }
      transform = with_pipeline(
          ptransform.PTransform.from_runner_api(request.transform, context))
      inputs = transform._pvaluish_from_dict({
          tag:
          with_pipeline(context.pcollections.get_by_id(pcoll_id), pcoll_id)
          for tag,
          pcoll_id in request.transform.inputs.items()
      })
      if not inputs:
        # A transform with no inputs is applied to the pipeline itself.
        inputs = pipeline
      with external.ExternalTransform.outer_namespace(request.namespace):
        result = pipeline.apply(
            transform, inputs, request.transform.unique_name)
      expanded_transform = pipeline._root_transform().parts[-1]
      # TODO(BEAM-1833): Use named outputs internally.
      if isinstance(result, dict):
        expanded_transform.outputs = result
      pipeline_proto = pipeline.to_runner_api(context=context)
      # TODO(BEAM-1833): Use named inputs internally.
      expanded_transform_id = context.transforms.get_id(expanded_transform)
      expanded_transform_proto = pipeline_proto.components.transforms.pop(
          expanded_transform_id)
      # Replace the internally derived inputs with the caller's named inputs.
      expanded_transform_proto.inputs.clear()
      expanded_transform_proto.inputs.update(request.transform.inputs)
      # The root transforms are scaffolding from the fake pipeline; callers
      # only need the expanded transform and its components.
      for transform_id in pipeline_proto.root_transform_ids:
        del pipeline_proto.components.transforms[transform_id]
      return beam_expansion_api_pb2.ExpansionResponse(
          components=pipeline_proto.components,
          transform=expanded_transform_proto,
          requirements=pipeline_proto.requirements)

    except Exception:  # pylint: disable=broad-except
      return beam_expansion_api_pb2.ExpansionResponse(
          error=traceback.format_exc())
# pytype: skip-file
from __future__ import absolute_import
import argparse
import logging
from past.builtins import unicode
import apache_beam as beam
import apache_beam.transforms.window as window
from apache_beam.examples.wordcount_with_metrics import WordExtractingDoFn
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
def run(argv=None, save_main_session=True):
  """Build and run the pipeline.

  Reads lines from a Pub/Sub topic or subscription, counts words per
  15-second fixed window, and writes formatted counts to a Pub/Sub topic.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--output_topic',
      required=True,
      help=(
          'Output PubSub topic of the form '
          '"projects/<PROJECT>/topics/<TOPIC>".'))
  # Exactly one of topic or subscription must be provided as the source.
  group = parser.add_mutually_exclusive_group(required=True)
  group.add_argument(
      '--input_topic',
      help=(
          'Input PubSub topic of the form '
          '"projects/<PROJECT>/topics/<TOPIC>".'))
  group.add_argument(
      '--input_subscription',
      help=(
          'Input PubSub subscription of the form '
          '"projects/<PROJECT>/subscriptions/<SUBSCRIPTION>."'))
  known_args, pipeline_args = parser.parse_known_args(argv)

  # We use the save_main_session option because one or more DoFn's in this
  # workflow rely on global context (e.g., a module imported at module level).
  pipeline_options = PipelineOptions(pipeline_args)
  pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
  # Pub/Sub yields an unbounded PCollection, so run in streaming mode.
  pipeline_options.view_as(StandardOptions).streaming = True
  with beam.Pipeline(options=pipeline_options) as p:

    # Read from PubSub into a PCollection.
    if known_args.input_subscription:
      messages = (
          p
          | beam.io.ReadFromPubSub(subscription=known_args.input_subscription).
          with_output_types(bytes))
    else:
      messages = (
          p
          | beam.io.ReadFromPubSub(
              topic=known_args.input_topic).with_output_types(bytes))

    lines = messages | 'decode' >> beam.Map(lambda x: x.decode('utf-8'))

    # Count the occurrences of each word.
    def count_ones(word_ones):
      # (word, [1, 1, ...]) -> (word, total)
      (word, ones) = word_ones
      return (word, sum(ones))

    counts = (
        lines
        | 'split' >>
        (beam.ParDo(WordExtractingDoFn()).with_output_types(unicode))
        | 'pair_with_one' >> beam.Map(lambda x: (x, 1))
        # 15-second fixed windows (offset 0) bound the unbounded stream so
        # GroupByKey can emit per-window results.
        | beam.WindowInto(window.FixedWindows(15, 0))
        | 'group' >> beam.GroupByKey()
        | 'count' >> beam.Map(count_ones))

    # Format the counts into a PCollection of strings.
    def format_result(word_count):
      (word, count) = word_count
      return '%s: %d' % (word, count)

    output = (
        counts
        | 'format' >> beam.Map(format_result)
        | 'encode' >>
        beam.Map(lambda x: x.encode('utf-8')).with_output_types(bytes))

    # Write to PubSub.
    # pylint: disable=expression-not-assigned
    output | beam.io.WriteToPubSub(known_args.output_topic)
if __name__ == '__main__':
  # INFO-level logging so pipeline progress is visible when run directly.
  logging.getLogger().setLevel(logging.INFO)
  run()
# pytype: skip-file
from __future__ import absolute_import
import argparse
import logging
import re
from past.builtins import unicode
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
class WordExtractingDoFn(beam.DoFn):
  """Parse each line of input text into words."""
  def process(self, element):
    """Returns the words found in this element.

    Args:
      element: a single line of input text.

    Returns:
      A list of the words in the line (runs of word characters and
      apostrophes); empty for a blank line.
    """
    return re.findall(r'[\w\']+', element, re.UNICODE)
def run(argv=None, save_main_session=True):
  """Main entry point; defines and runs the wordcount pipeline.

  Reads the input text, counts the occurrences of each word, and writes
  the formatted counts to the output location.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--input',
      dest='input',
      default='gs://dataflow-samples/shakespeare/kinglear.txt',
      help='Input file to process.')
  parser.add_argument(
      '--output',
      dest='output',
      required=True,
      help='Output file to write results to.')
  known_args, pipeline_args = parser.parse_known_args(argv)

  # We use the save_main_session option because one or more DoFn's in this
  # workflow rely on global context (e.g., a module imported at module level).
  pipeline_options = PipelineOptions(pipeline_args)
  pipeline_options.view_as(SetupOptions).save_main_session = save_main_session

  # The pipeline will be run on exiting the with block.
  with beam.Pipeline(options=pipeline_options) as p:

    # Read the text file[pattern] into a PCollection.
    lines = p | 'Read' >> ReadFromText(known_args.input)

    counts = (
        lines
        | 'Split' >>
        (beam.ParDo(WordExtractingDoFn()).with_output_types(unicode))
        # Label fixed: was the misspelled 'PairWIthOne'.
        | 'PairWithOne' >> beam.Map(lambda x: (x, 1))
        | 'GroupAndSum' >> beam.CombinePerKey(sum))

    # Format the counts into a PCollection of strings.
    def format_result(word, count):
      return '%s: %d' % (word, count)

    output = counts | 'Format' >> beam.MapTuple(format_result)

    # Write the output using a "Write" transform that has side effects.
    # pylint: disable=expression-not-assigned
    output | 'Write' >> WriteToText(known_args.output)
if __name__ == '__main__':
  # INFO-level logging so pipeline progress is visible when run directly.
  logging.getLogger().setLevel(logging.INFO)
  run()
# pytype: skip-file
from __future__ import absolute_import
import argparse
import logging
import re
import subprocess
import grpc
from past.builtins import unicode
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
# avoid possible conflict with job-server embedded expansion service at 8097
EXPANSION_SERVICE_PORT = '8096'
EXPANSION_SERVICE_ADDR = 'localhost:%s' % EXPANSION_SERVICE_PORT
class WordExtractingDoFn(beam.DoFn):
  """Parse each line of input text into words."""
  def process(self, element):
    """Returns the words found in this element.

    Args:
      element: a single line of input text.

    Returns:
      A list of the words in the stripped line (runs of word characters
      and apostrophes); empty for a blank line.
    """
    text_line = element.strip()
    return re.findall(r'[\w\']+', text_line)
def build_pipeline(p, input_file, output_file):
  """Assembles the cross-language wordcount pipeline on p.

  The per-word counting step is delegated to an external (Java) transform
  served by the expansion service at EXPANSION_SERVICE_ADDR.
  """
  # Read the text file[pattern] into a PCollection.
  lines = p | 'read' >> ReadFromText(input_file)

  counts = (
      lines
      | 'split' >>
      (beam.ParDo(WordExtractingDoFn()).with_output_types(unicode))
      # Cross-language count: expanded remotely via the expansion service.
      | 'count' >> beam.ExternalTransform(
          'beam:transforms:xlang:count', None, EXPANSION_SERVICE_ADDR))

  # Format the counts into a PCollection of strings.
  def format_result(word_count):
    (word, count) = word_count
    return '%s: %d' % (word, count)

  output = counts | 'format' >> beam.Map(format_result)

  # Write the output using a "Write" transform that has side effects.
  # pylint: disable=expression-not-assigned
  output | 'write' >> WriteToText(output_file)
def main():
  """Parses flags, starts the Java expansion service, and runs the pipeline."""
  logging.getLogger().setLevel(logging.INFO)

  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--input',
      dest='input',
      default='gs://dataflow-samples/shakespeare/kinglear.txt',
      help='Input file to process.')
  parser.add_argument(
      '--output',
      dest='output',
      required=True,
      help='Output file to write results to.')
  parser.add_argument(
      '--expansion_service_jar',
      dest='expansion_service_jar',
      required=True,
      help='Jar file for expansion service')

  known_args, pipeline_args = parser.parse_known_args()

  pipeline_options = PipelineOptions(pipeline_args)
  # We use the save_main_session option because one or more DoFn's in this
  # workflow rely on global context (e.g., a module imported at module level).
  pipeline_options.view_as(SetupOptions).save_main_session = True

  # Start the expansion service BEFORE entering the try block: if Popen
  # itself fails there is no process to kill, and the previous placement
  # made the finally clause raise NameError on the unbound `server`,
  # masking the original error.
  server = subprocess.Popen([
      'java',
      '-jar',
      known_args.expansion_service_jar,
      EXPANSION_SERVICE_PORT
  ])
  try:
    # Block until the expansion service is reachable before building the
    # pipeline, which needs it for the external transform.
    with grpc.insecure_channel(EXPANSION_SERVICE_ADDR) as channel:
      grpc.channel_ready_future(channel).result()
      with beam.Pipeline(options=pipeline_options) as p:
        # Preemptively start due to BEAM-6666.
        p.runner.create_job_service(pipeline_options)
        build_pipeline(p, known_args.input, known_args.output)
  finally:
    server.kill()
if __name__ == '__main__':
  main()
# pytype: skip-file
from __future__ import absolute_import
import json
import logging
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.transforms.sql import SqlTransform
def run(output_topic, pipeline_args):
  """Builds and runs the streaming taxi-ride SQL aggregation pipeline.

  Reads taxi ride events from the public Pub/Sub topic, aggregates pick-ups
  and drop-offs per 15-second window with SqlTransform, and publishes the
  JSON-encoded results to output_topic.
  """
  pipeline_options = PipelineOptions(
      pipeline_args, save_main_session=True, streaming=True)
  with beam.Pipeline(options=pipeline_options) as pipeline:
    _ = (
        pipeline
        | beam.io.ReadFromPubSub(
            topic='projects/pubsub-public-data/topics/taxirides-realtime',
            timestamp_attribute="ts").with_output_types(bytes)
        | "Parse JSON payload" >> beam.Map(json.loads)
        # Use beam.Row to create a schema-aware PCollection
        | "Create beam Row" >> beam.Map(
            lambda x: beam.Row(
                ride_status=str(x['ride_status']),
                passenger_count=int(x['passenger_count'])))
        # SqlTransform computes its result within each existing window
        | "15s fixed windows" >> beam.WindowInto(beam.window.FixedWindows(15))
        # Aggregate drop-offs and pick-ups that occur within each 15s window
        | SqlTransform(
            """
             SELECT
               ride_status,
               COUNT(*) AS num_rides,
               SUM(passenger_count) AS total_passengers
             FROM PCOLLECTION
             WHERE NOT ride_status = 'enroute'
             GROUP BY ride_status""")
        # SqlTransform yields python objects with attributes corresponding to
        # the outputs of the query.
        # Collect those attributes, as well as window information, into a dict
        | "Assemble Dictionary" >> beam.Map(
            lambda row,
            window=beam.DoFn.WindowParam: {
                "ride_status": row.ride_status,
                "num_rides": row.num_rides,
                "total_passengers": row.total_passengers,
                "window_start": window.start.to_rfc3339(),
                "window_end": window.end.to_rfc3339()
            })
        | "Convert to JSON" >> beam.Map(json.dumps)
        | "UTF-8 encode" >> beam.Map(lambda s: s.encode("utf-8"))
        | beam.io.WriteToPubSub(topic=output_topic))
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  # argparse is only needed for direct invocation, so it is imported here.
  import argparse
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--output_topic',
      dest='output_topic',
      required=True,
      help=(
          'Cloud PubSub topic to write to (e.g. '
          'projects/my-project/topics/my-topic), must be created prior to '
          'running the pipeline.'))
  known_args, pipeline_args = parser.parse_known_args()
  run(known_args.output_topic, pipeline_args)
# pytype: skip-file
from __future__ import absolute_import
import argparse
import logging
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
# pylint doesn't understand our pipeline syntax:
# pylint:disable=expression-not-assigned
class Count1(beam.PTransform):
  """Count as a subclass of PTransform, with an expand method.

  Pairs every element with 1 and sums per key, yielding (element, count).
  """
  def expand(self, pcoll):
    return (
        pcoll
        # Fix: label was misspelled 'ParWithOne'; renamed for consistency with
        # the identical step in Count2/Count3.
        | 'PairWithOne' >> beam.Map(lambda v: (v, 1))
        | beam.CombinePerKey(sum))
def run_count1(known_args, options):
  """Run the first example pipeline (Count as a PTransform subclass)."""
  logging.info('Running first pipeline')
  with beam.Pipeline(options=options) as pipeline:
    lines = pipeline | beam.io.ReadFromText(known_args.input)
    counts = lines | Count1()
    counts | beam.io.WriteToText(known_args.output)
@beam.ptransform_fn
def Count2(pcoll):  # pylint: disable=invalid-name
  """Count occurrences of each unique element (decorated-function form)."""
  ones = pcoll | 'PairWithOne' >> beam.Map(lambda v: (v, 1))
  return ones | beam.CombinePerKey(sum)
def run_count2(known_args, options):
  """Run the second example pipeline (Count as a decorated function)."""
  logging.info('Running second pipeline')
  with beam.Pipeline(options=options) as pipeline:
    lines = pipeline | ReadFromText(known_args.input)
    counts = lines | Count2()  # pylint: disable=no-value-for-parameter
    counts | WriteToText(known_args.output)
@beam.ptransform_fn
def Count3(pcoll, factor=1):  # pylint: disable=invalid-name
  """Count as a decorated function with a side input.

  Args:
    pcoll: the PCollection passed in from the previous transform
    factor: the amount by which to count

  Returns:
    A PCollection counting the number of times each unique element occurs.
  """
  weighted = pcoll | 'PairWithOne' >> beam.Map(lambda v: (v, factor))
  return weighted | beam.CombinePerKey(sum)
def run_count3(known_args, options):
  """Run the third example pipeline (Count with a count factor of 2)."""
  logging.info('Running third pipeline')
  with beam.Pipeline(options=options) as pipeline:
    lines = pipeline | ReadFromText(known_args.input)
    counts = lines | Count3(2)  # pylint: disable=no-value-for-parameter
    counts | WriteToText(known_args.output)
def get_args(argv):
  """Split *argv* into recognized options and leftover arguments.

  Args:
    argv: all arguments.

  Returns:
    A (known_args, remaining_args) pair as produced by argparse.
  """
  parser = argparse.ArgumentParser()
  for flag, description in (
      ('--input', 'Input file to process.'),
      ('--output', 'Output file to write results to.')):
    parser.add_argument(flag, required=True, help=description)
  return parser.parse_known_args(argv)
def run(argv=None):
  """Parse arguments once and run the three example pipelines in sequence."""
  known_args, pipeline_args = get_args(argv)
  options = PipelineOptions(pipeline_args)
  for pipeline_fn in (run_count1, run_count2, run_count3):
    pipeline_fn(known_args, options)
# Script entry point.
# Fix: dataset-extraction metadata fused onto the run() line has been
# stripped; it was not Python and broke the file's syntax.
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  run()
# pytype: skip-file
from __future__ import absolute_import
import argparse
import json
import logging
from builtins import object
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
class JsonCoder(object):
  """A coder that treats every record as one JSON-encoded text line."""
  def encode(self, x):
    """Serialize *x* to its JSON string representation."""
    serialized = json.dumps(x)
    return serialized

  def decode(self, x):
    """Parse the JSON string *x* back into a Python value."""
    parsed = json.loads(x)
    return parsed
def compute_points(record):
  """Yield (team_name, points) pairs for one match record.

  Awards 3 points for a win, 1 point for a draw, and 0 points for a loss
  (see http://en.wikipedia.org/wiki/Three_points_for_a_win). The host team's
  pair is always yielded before the guest team's.
  """
  host_name, host_goals = record['host']
  guest_name, guest_goals = record['guest']
  if host_goals > guest_goals:
    host_points, guest_points = 3, 0
  elif host_goals < guest_goals:
    host_points, guest_points = 0, 3
  else:
    host_points, guest_points = 1, 1
  yield host_name, host_points
  yield guest_name, guest_points
def run(argv=None):
  """Runs the workflow computing total points from a collection of matches.

  Args:
    argv: command-line arguments; --input (JSON-lines match records) and
      --output (destination for per-team totals) are required.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--input', required=True, help='Input file to process.')
  parser.add_argument(
      '--output', required=True, help='Output file to write results to.')
  known_args, pipeline_args = parser.parse_known_args(argv)
  # We use the save_main_session option because one or more DoFn's in this
  # workflow rely on global context (e.g., a module imported at module level).
  pipeline_options = PipelineOptions(pipeline_args)
  pipeline_options.view_as(SetupOptions).save_main_session = True
  with beam.Pipeline(options=pipeline_options) as p:
    # Each input line is decoded/encoded as JSON by JsonCoder.
    ( # pylint: disable=expression-not-assigned
        p
        | 'read' >> ReadFromText(known_args.input, coder=JsonCoder())
        | 'points' >> beam.FlatMap(compute_points)
        | beam.CombinePerKey(sum)
        | 'write' >> WriteToText(known_args.output, coder=JsonCoder()))
# Script entry point.
# Fix: dataset-extraction metadata fused onto the run() line has been
# stripped; it was not Python and broke the file's syntax.
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  run()
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
import argparse
import logging
import re
import sys
from typing import Iterable
from typing import Optional
from typing import Text
import uuid
from builtins import object
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io.gcp.datastore.v1new.datastoreio import ReadFromDatastore
from apache_beam.io.gcp.datastore.v1new.datastoreio import WriteToDatastore
from apache_beam.io.gcp.datastore.v1new.types import Entity
from apache_beam.io.gcp.datastore.v1new.types import Key
from apache_beam.io.gcp.datastore.v1new.types import Query
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
@beam.typehints.with_input_types(Entity)
@beam.typehints.with_output_types(Text)
class WordExtractingDoFn(beam.DoFn):
  """Parse each line of input text into words, updating word metrics."""
  def __init__(self):
    # Counters and a distribution aggregated by the runner under 'main'.
    self.empty_line_counter = Metrics.counter('main', 'empty_lines')
    self.word_length_counter = Metrics.counter('main', 'word_lengths')
    self.word_counter = Metrics.counter('main', 'total_words')
    self.word_lengths_dist = Metrics.distribution('main', 'word_len_dist')

  def process(self, element):
    # type: (Entity) -> Optional[Iterable[Text]]
    """Extract words from the 'content' property of a Datastore entity.

    Blank content lines are counted via the empty-lines metric and produce
    no output words.

    Args:
      element: the input entity to be processed

    Returns:
      A list of words found, or None for blank content.
    """
    text_line = element.properties.get('content', '')
    if not text_line:
      self.empty_line_counter.inc()
      return None
    extracted = []
    for word in re.findall(r'[A-Za-z\']+', text_line):
      self.word_length_counter.inc(len(word))
      self.word_lengths_dist.update(len(word))
      self.word_counter.inc()
      extracted.append(word)
    return extracted
class EntityWrapper(object):
  """Create a Cloud Datastore entity from the given string."""
  def __init__(self, project, namespace, kind, ancestor):
    self._project = project
    self._namespace = namespace
    self._kind = kind
    self._ancestor = ancestor

  def make_entity(self, content):
    """Build an Entity holding *content*, keyed under the shared ancestor."""
    parent = Key([self._kind, self._ancestor],
                 self._namespace,
                 self._project)
    # Namespace and project are inherited from the parent key; each child
    # gets a fresh random UUID name.
    child = Key([self._kind, str(uuid.uuid4())], parent=parent)
    entity = Entity(child)
    entity.set_properties({'content': content})
    return entity
def write_to_datastore(project, user_options, pipeline_options):
  """Creates a pipeline that writes entities to Cloud Datastore."""
  entity_maker = EntityWrapper(
      project,
      user_options.namespace,
      user_options.kind,
      user_options.ancestor)
  with beam.Pipeline(options=pipeline_options) as p:
    lines = p | 'read' >> ReadFromText(user_options.input)
    entities = lines | 'create entity' >> beam.Map(entity_maker.make_entity)
    _ = entities | 'write to datastore' >> WriteToDatastore(project)
def make_ancestor_query(project, kind, namespace, ancestor):
  """Creates a Cloud Datastore ancestor query.

  The returned query will fetch all the entities that have the parent key
  name set to the given `ancestor`.
  """
  root_key = Key([kind, ancestor], project=project, namespace=namespace)
  query = Query(kind, project, namespace, root_key)
  return query
def read_from_datastore(project, user_options, pipeline_options):
  """Creates a pipeline that reads entities from Cloud Datastore.

  Args:
    project: GCP project that owns the Datastore entities.
    user_options: parsed example options (kind, namespace, ancestor, output,
      num_shards).
    pipeline_options: PipelineOptions for the Beam pipeline.

  Returns:
    The PipelineResult after the run has finished, so the caller can query
    metrics.
  """
  p = beam.Pipeline(options=pipeline_options)
  # Create a query to read entities from datastore.
  query = make_ancestor_query(
      project, user_options.kind, user_options.namespace, user_options.ancestor)
  # Read entities from Cloud Datastore into a PCollection.
  lines = p | 'read from datastore' >> ReadFromDatastore(query)
  # Count the occurrences of each word.
  def count_ones(word_ones):
    (word, ones) = word_ones
    return word, sum(ones)
  counts = (
      lines
      | 'split' >> beam.ParDo(WordExtractingDoFn())
      | 'pair_with_one' >> beam.Map(lambda x: (x, 1))
      | 'group' >> beam.GroupByKey()
      | 'count' >> beam.Map(count_ones))
  # Format the counts into a PCollection of strings.
  def format_result(word_count):
    (word, count) = word_count
    return '%s: %s' % (word, count)
  output = counts | 'format' >> beam.Map(format_result)
  # Write the output using a "Write" transform that has side effects.
  # pylint: disable=expression-not-assigned
  output | 'write' >> beam.io.WriteToText(
      file_path_prefix=user_options.output, num_shards=user_options.num_shards)
  result = p.run()
  # Wait until completion, main thread would access post-completion job results.
  result.wait_until_finish()
  return result
def run(argv=None):
  """Main entry point; defines and runs the wordcount pipeline.

  Optionally writes the input text into Datastore first (unless --read_only),
  then reads it back, counts words, and logs metrics gathered during the run.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--input',
      dest='input',
      default='gs://dataflow-samples/shakespeare/kinglear.txt',
      help='Input file to process.')
  parser.add_argument(
      '--kind', dest='kind', required=True, help='Datastore Kind')
  parser.add_argument(
      '--namespace', dest='namespace', help='Datastore Namespace')
  parser.add_argument(
      '--ancestor',
      dest='ancestor',
      default='root',
      help='The ancestor key name for all entities.')
  parser.add_argument(
      '--output',
      dest='output',
      required=True,
      help='Output file to write results to.')
  parser.add_argument(
      '--read_only',
      action='store_true',
      help='Read an existing dataset, do not write first')
  parser.add_argument(
      '--num_shards',
      dest='num_shards',
      type=int,
      # If the system should choose automatically.
      default=0,
      help='Number of output shards')
  known_args, pipeline_args = parser.parse_known_args(argv)
  # We use the save_main_session option because one or more DoFn's in this
  # workflow rely on global context (e.g., a module imported at module level).
  pipeline_options = PipelineOptions(pipeline_args)
  project = pipeline_options.view_as(GoogleCloudOptions).project
  if project is None:
    parser.print_usage()
    print(sys.argv[0] + ': error: argument --project is required')
    sys.exit(1)
  # Write to Datastore if `read_only` options is not specified.
  if not known_args.read_only:
    write_to_datastore(project, known_args, pipeline_options)
  # Read entities from Datastore.
  result = read_from_datastore(project, known_args, pipeline_options)
  # Report the metrics collected by WordExtractingDoFn during the run.
  empty_lines_filter = MetricsFilter().with_name('empty_lines')
  query_result = result.metrics().query(empty_lines_filter)
  if query_result['counters']:
    empty_lines_counter = query_result['counters'][0]
    logging.info('number of empty lines: %d', empty_lines_counter.committed)
  else:
    logging.warning('unable to retrieve counter metrics from runner')
  word_lengths_filter = MetricsFilter().with_name('word_len_dist')
  query_result = result.metrics().query(word_lengths_filter)
  if query_result['distributions']:
    word_lengths_dist = query_result['distributions'][0]
    logging.info('average word length: %d', word_lengths_dist.committed.mean)
  else:
    logging.warning('unable to retrieve distribution metrics from runner')
# Script entry point.
# Fix: dataset-extraction metadata fused onto the run() line has been
# stripped; it was not Python and broke the file's syntax.
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  run()
# pytype: skip-file
from __future__ import absolute_import
import argparse
import logging
import re
from builtins import next
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
def run(argv=None, assert_results=None, save_main_session=True):
  """Join email, phone, and snail-mail databases keyed by person name.

  Produces a tab-delimited contact listing plus counts of people missing an
  email address, a phone number, or a postal address.

  Args:
    argv: command-line arguments (see the --input_* / --output_* flags below).
    assert_results: optional (luddites, writers, nomads) expected counts; when
      given, assert_that checks are attached to the pipeline for testing.
    save_main_session: whether to pickle the main session for remote workers.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--input_email',
      required=True,
      help='Email database, with each line formatted as "name<TAB>email".')
  parser.add_argument(
      '--input_phone',
      required=True,
      help='Phonebook, with each line formatted as "name<TAB>phone number".')
  parser.add_argument(
      '--input_snailmail',
      required=True,
      help='Address database, with each line formatted as "name<TAB>address".')
  parser.add_argument(
      '--output_tsv', required=True, help='Tab-delimited output file.')
  parser.add_argument(
      '--output_stats',
      required=True,
      help='Output file for statistics about the input.')
  known_args, pipeline_args = parser.parse_known_args(argv)
  # We use the save_main_session option because one or more DoFn's in this
  # workflow rely on global context (e.g., a module imported at module level).
  pipeline_options = PipelineOptions(pipeline_args)
  pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
  with beam.Pipeline(options=pipeline_options) as p:
    # Helper: read a tab-separated key-value mapping from a text file,
    # escape all quotes/backslashes, and convert it to a PCollection of
    # (key, value) pairs.
    def read_kv_textfile(label, textfile):
      return (
          p
          | 'Read: %s' % label >> ReadFromText(textfile)
          | 'Backslash: %s' % label >>
          beam.Map(lambda x: re.sub(r'\\', r'\\\\', x))
          | 'EscapeQuotes: %s' % label >>
          beam.Map(lambda x: re.sub(r'"', r'\"', x))
          | 'Split: %s' % label >> beam.Map(lambda x: re.split(r'\t+', x, 1)))
    # Read input databases.
    email = read_kv_textfile('email', known_args.input_email)
    phone = read_kv_textfile('phone', known_args.input_phone)
    snailmail = read_kv_textfile('snailmail', known_args.input_snailmail)
    # Group together all entries under the same name.
    grouped = (email, phone, snailmail) | 'group_by_name' >> beam.CoGroupByKey()
    # Prepare tab-delimited output; something like this:
    # "name"<TAB>"email_1,email_2"<TAB>"phone"<TAB>"first_snailmail_only"
    def format_as_tsv(name_email_phone_snailmail):
      (name, (email, phone, snailmail)) = name_email_phone_snailmail
      return '\t'.join([
          '"%s"' % name,
          '"%s"' % ','.join(email),
          '"%s"' % ','.join(phone),
          '"%s"' % next(iter(snailmail), '')
      ])
    tsv_lines = grouped | beam.Map(format_as_tsv)
    # Compute some stats about our database of people.
    def without_email(name_email_phone_snailmail):
      (_, (email, _, _)) = name_email_phone_snailmail
      return not next(iter(email), None)
    def without_phones(name_email_phone_snailmail):
      (_, (_, phone, _)) = name_email_phone_snailmail
      return not next(iter(phone), None)
    def without_address(name_email_phone_snailmail):
      (_, (_, _, snailmail)) = name_email_phone_snailmail
      return not next(iter(snailmail), None)
    luddites = grouped | beam.Filter(without_email) # People without email.
    writers = grouped | beam.Filter(without_phones) # People without phones.
    nomads = grouped | beam.Filter(without_address) # People without addresses.
    num_luddites = luddites | 'Luddites' >> beam.combiners.Count.Globally()
    num_writers = writers | 'Writers' >> beam.combiners.Count.Globally()
    num_nomads = nomads | 'Nomads' >> beam.combiners.Count.Globally()
    # Write tab-delimited output.
    # pylint: disable=expression-not-assigned
    tsv_lines | 'WriteTsv' >> WriteToText(known_args.output_tsv)
    # TODO(silviuc): Move the assert_results logic to the unit test.
    if assert_results is not None:
      expected_luddites, expected_writers, expected_nomads = assert_results
      assert_that(
          num_luddites, equal_to([expected_luddites]), label='assert:luddites')
      assert_that(
          num_writers, equal_to([expected_writers]), label='assert:writers')
      assert_that(
          num_nomads, equal_to([expected_nomads]), label='assert:nomads')
# Script entry point.
# Fix: dataset-extraction metadata fused onto the run() line has been
# stripped; it was not Python and broke the file's syntax.
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  run()
# pytype: skip-file
from __future__ import absolute_import
import argparse
import logging
from builtins import range
from random import randrange
import apache_beam as beam
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.pvalue import AsList
from apache_beam.pvalue import AsSingleton
def create_groups(group_ids, corpus, word, ignore_corpus, ignore_word):
  """Generate groups given the input PCollections.

  Each group id is paired with a randomly chosen corpus name (skipping
  `ignore_corpus`) and then a randomly chosen word (skipping `ignore_word`),
  both drawn from side inputs.

  Args:
    group_ids: PCollection of group id strings.
    corpus: PCollection of single-entry dicts holding corpus names.
    word: PCollection of single-entry dicts holding words.
    ignore_corpus: singleton PCollection with a corpus name to exclude.
    ignore_word: singleton PCollection with a word to exclude.

  Returns:
    A PCollection of (group_id, corpus, word) tuples.
  """
  def attach_corpus_fn(group, corpus, ignore):
    selected = None
    len_corpus = len(corpus)
    while not selected:
      # Retry until a candidate different from `ignore` is drawn.
      c = list(corpus[randrange(0, len_corpus)].values())[0]
      if c != ignore:
        selected = c
    yield (group, selected)
  def attach_word_fn(group, words, ignore):
    selected = None
    len_words = len(words)
    while not selected:
      c = list(words[randrange(0, len_words)].values())[0]
      if c != ignore:
        selected = c
    yield group + (selected, )
  return (
      group_ids
      | 'attach corpus' >> beam.FlatMap(
          attach_corpus_fn, AsList(corpus), AsSingleton(ignore_corpus))
      | 'attach word' >> beam.FlatMap(
          attach_word_fn, AsList(word), AsSingleton(ignore_word)))
def run(argv=None):
  """Run the workflow.

  Builds group ids locally, reads corpus names and words from the public
  Shakespeare BigQuery samples, and attaches a random (corpus, word) pair to
  each group id via side inputs.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--output')
  parser.add_argument('--ignore_corpus', default='')
  parser.add_argument('--ignore_word', default='')
  parser.add_argument('--num_groups')
  known_args, pipeline_args = parser.parse_known_args(argv)
  # We use the save_main_session option because one or more DoFn's in this
  # workflow rely on global context (e.g., a module imported at module level).
  pipeline_options = PipelineOptions(pipeline_args)
  pipeline_options.view_as(SetupOptions).save_main_session = True
  with beam.Pipeline(options=pipeline_options) as p:
    group_ids = []
    for i in range(0, int(known_args.num_groups)):
      group_ids.append('id' + str(i))
    query_corpus = 'select UNIQUE(corpus) from publicdata:samples.shakespeare'
    query_word = 'select UNIQUE(word) from publicdata:samples.shakespeare'
    ignore_corpus = known_args.ignore_corpus
    ignore_word = known_args.ignore_word
    pcoll_corpus = p | 'read corpus' >> beam.io.ReadFromBigQuery(
        query=query_corpus)
    pcoll_word = p | 'read_words' >> beam.io.ReadFromBigQuery(query=query_word)
    pcoll_ignore_corpus = p | 'create_ignore_corpus' >> beam.Create(
        [ignore_corpus])
    pcoll_ignore_word = p | 'create_ignore_word' >> beam.Create([ignore_word])
    pcoll_group_ids = p | 'create groups' >> beam.Create(group_ids)
    pcoll_groups = create_groups(
        pcoll_group_ids,
        pcoll_corpus,
        pcoll_word,
        pcoll_ignore_corpus,
        pcoll_ignore_word)
    # pylint:disable=expression-not-assigned
    pcoll_groups | WriteToText(known_args.output)
# Script entry point.
# Fix: dataset-extraction metadata fused onto the run() line has been
# stripped; it was not Python and broke the file's syntax.
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  run()
# pytype: skip-file
from __future__ import absolute_import
import argparse
import logging
import apache_beam as beam
def count_tornadoes(input_data):
  """Workflow computing the number of tornadoes for each month that had one.

  Args:
    input_data: a PCollection of dictionaries representing table rows. Each
      dictionary will have a 'month' and a 'tornado' key as described in the
      module comment.

  Returns:
    A PCollection of dictionaries containing 'month' and 'tornado_count'
    keys. Months without tornadoes are skipped.
  """
  def to_month_hits(row):
    # Emit (month, 1) only for rows that recorded a tornado.
    if row['tornado']:
      return [(int(row['month']), 1)]
    return []

  def to_output_row(month_count):
    month, tornado_count = month_count
    return {'month': month, 'tornado_count': tornado_count}

  return (
      input_data
      | 'months with tornadoes' >> beam.FlatMap(to_month_hits)
      | 'monthly count' >> beam.CombinePerKey(sum)
      | 'format' >> beam.Map(to_output_row))
def run(argv=None):
  """Read weather rows from BigQuery and write per-month tornado counts back.

  Args:
    argv: command-line arguments; --output is required, --input defaults to
      the public weather_stations sample table.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--input',
      default='clouddataflow-readonly:samples.weather_stations',
      help=(
          'Input BigQuery table to process specified as: '
          'PROJECT:DATASET.TABLE or DATASET.TABLE.'))
  parser.add_argument(
      '--output',
      required=True,
      help=(
          'Output BigQuery table for results specified as: '
          'PROJECT:DATASET.TABLE or DATASET.TABLE.'))
  parser.add_argument(
      '--gcs_location',
      required=False,
      help=('GCS Location to store files to load '
            'data into Bigquery'))
  known_args, pipeline_args = parser.parse_known_args(argv)
  with beam.Pipeline(argv=pipeline_args) as p:
    # Read the table rows into a PCollection.
    rows = p | 'read' >> beam.io.ReadFromBigQuery(table=known_args.input)
    counts = count_tornadoes(rows)
    # Write the output using a "Write" transform that has side effects.
    # pylint: disable=expression-not-assigned
    counts | 'Write' >> beam.io.WriteToBigQuery(
        known_args.output,
        schema='month:INTEGER, tornado_count:INTEGER',
        create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
        write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)
  # Run the pipeline (all operations are deferred until run() is called).
# Script entry point.
# Fix: dataset-extraction metadata fused onto the run() line has been
# stripped; it was not Python and broke the file's syntax.
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  run()
# pytype: skip-file
from __future__ import absolute_import
import argparse
import logging
import sys
import apache_beam as beam
import apache_beam.transforms.window as window
from apache_beam.io.flink.flink_streaming_impulse_source import FlinkStreamingImpulseSource
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.transforms.trigger import AccumulationMode
from apache_beam.transforms.trigger import AfterProcessingTime
from apache_beam.transforms.trigger import Repeatedly
def split(s):
  """Split a dash-delimited 'key-count' string into a (key, int) pair.

  Only the first two dash-separated fields are used.
  """
  parts = s.split("-")
  return parts[0], int(parts[1])
def count(x):
  """Sum the grouped values of one (key, iterable-of-counts) pair."""
  key, values = x
  return key, sum(values)
def apply_timestamp(element):
  """Wrap `element` in a TimestampedValue stamped with current wall time."""
  # NOTE(review): imported locally — presumably to keep the function
  # self-contained when shipped to workers; confirm.
  import time
  yield window.TimestampedValue(element, time.time())
def run(argv=None):
  """Build and run the pipeline.

  Consumes the Flink streaming-impulse source and logs a per-trigger count
  of impulses received, using a processing-time trigger on a global window.

  Args:
    argv: extra command-line arguments appended after the hard-coded
      PortableRunner/job-endpoint settings below.
  """
  args = [
      "--runner=PortableRunner", "--job_endpoint=localhost:8099", "--streaming"
  ]
  if argv:
    args.extend(argv)
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--count',
      dest='count',
      default=0,
      help='Number of triggers to generate '
      '(0 means emit forever).')
  parser.add_argument(
      '--interval_ms',
      dest='interval_ms',
      default=500,
      help='Interval between records per parallel '
      'Flink subtask.')
  known_args, pipeline_args = parser.parse_known_args(args)
  pipeline_options = PipelineOptions(pipeline_args)
  with beam.Pipeline(options=pipeline_options) as p:
    messages = (
        p | FlinkStreamingImpulseSource().set_message_count(
            known_args.count).set_interval_ms(known_args.interval_ms))
    # All impulses share one key so GroupByKey yields a single running group;
    # the trigger fires a count every 5 seconds of processing time.
    _ = (
        messages | 'decode' >> beam.Map(lambda x: ('', 1))
        | 'window' >> beam.WindowInto(
            window.GlobalWindows(),
            trigger=Repeatedly(AfterProcessingTime(5 * 1000)),
            accumulation_mode=AccumulationMode.DISCARDING)
        | 'group' >> beam.GroupByKey()
        | 'count' >> beam.Map(count)
        | 'log' >> beam.Map(lambda x: logging.info("%d" % x[1])))
# Script entry point: forward CLI args (minus the program name) to run().
# Fix: dataset-extraction metadata fused onto the run(...) line has been
# stripped; it was not Python and broke the file's syntax.
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  run(sys.argv[1:])
# pytype: skip-file
from __future__ import absolute_import
import logging
import typing
import apache_beam as beam
from apache_beam.io.kafka import ReadFromKafka
from apache_beam.io.kafka import WriteToKafka
from apache_beam.options.pipeline_options import PipelineOptions
def run(bootstrap_servers, topic, pipeline_args):
  """Round-trip taxi ride events through Kafka and log what is read back.

  One pipeline branch writes the public Pub/Sub taxi rides to `topic` on the
  given Kafka cluster; a second branch reads the same topic back and logs
  each ride.

  Args:
    bootstrap_servers: Kafka bootstrap server address(es).
    topic: Kafka topic to write to and read from.
    pipeline_args: remaining command-line arguments used as pipeline options.
  """
  # bootstrap_servers = '123.45.67.89:123:9092'
  # topic = 'kafka_taxirides_realtime'
  # pipeline_args = ['--project', 'my-project',
  #                  '--runner', 'DataflowRunner',
  #                  '--temp_location', 'my-temp-location',
  #                  '--region', 'my-region',
  #                  '--num_workers', 'my-num-workers',
  #                  '--experiments', 'use_runner_v2']
  pipeline_options = PipelineOptions(
      pipeline_args, save_main_session=True, streaming=True)
  window_size = 15  # size of the Window in seconds.
  def log_ride(ride_bytes):
    # Converting bytes record from Kafka to a dictionary.
    import ast
    ride = ast.literal_eval(ride_bytes.decode("UTF-8"))
    logging.info(
        'Found ride at latitude %r and longitude %r with %r '
        'passengers',
        ride['latitude'],
        ride['longitude'],
        ride['passenger_count'])
  with beam.Pipeline(options=pipeline_options) as pipeline:
    _ = (
        pipeline
        | beam.io.ReadFromPubSub(
            topic='projects/pubsub-public-data/topics/taxirides-realtime').
        with_output_types(bytes)
        | beam.Map(lambda x: (b'', x)).with_output_types(
            typing.Tuple[bytes, bytes])  # Kafka write transforms expects KVs.
        | beam.WindowInto(beam.window.FixedWindows(window_size))
        | WriteToKafka(
            producer_config={'bootstrap.servers': bootstrap_servers},
            topic=topic))
    _ = (
        pipeline
        | ReadFromKafka(
            consumer_config={'bootstrap.servers': bootstrap_servers},
            topics=[topic])
        | beam.FlatMap(lambda kv: log_ride(kv[1])))
# Script entry point: parse Kafka connection flags, forward the rest to Beam.
# Fix: dataset-extraction metadata fused onto the final run(...) line has
# been stripped; it was not Python and broke the file's syntax.
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  import argparse
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--bootstrap_servers',
      dest='bootstrap_servers',
      required=True,
      help='Bootstrap servers for the Kafka cluster. Should be accessible by '
      'the runner')
  parser.add_argument(
      '--topic',
      dest='topic',
      default='kafka_taxirides_realtime',
      help='Kafka topic to write to and read from')
  known_args, pipeline_args = parser.parse_known_args()
  run(known_args.bootstrap_servers, known_args.topic, pipeline_args)
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
def pardo_dofn(test=None):
  """Doc snippet: ParDo with a DoFn subclass splitting lines into words."""
  # [START pardo_dofn]
  import apache_beam as beam

  class SplitWords(beam.DoFn):
    def __init__(self, delimiter=','):
      self.delimiter = delimiter

    def process(self, text):
      for word in text.split(self.delimiter):
        yield word

  with beam.Pipeline() as pipeline:
    plants = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            '🍓Strawberry,🥕Carrot,🍆Eggplant',
            '🍅Tomato,🥔Potato',
        ])
        | 'Split words' >> beam.ParDo(SplitWords(','))
        | beam.Map(print))
    # [END pardo_dofn]
    if test:
      test(plants)
def pardo_dofn_params(test=None):
  """Doc snippet: DoFn.process receiving timestamp and window parameters."""
  # pylint: disable=line-too-long
  # [START pardo_dofn_params]
  import apache_beam as beam

  class AnalyzeElement(beam.DoFn):
    def process(
        self,
        elem,
        timestamp=beam.DoFn.TimestampParam,
        window=beam.DoFn.WindowParam):
      yield '\n'.join([
          '# timestamp',
          'type(timestamp) -> ' + repr(type(timestamp)),
          'timestamp.micros -> ' + repr(timestamp.micros),
          'timestamp.to_rfc3339() -> ' + repr(timestamp.to_rfc3339()),
          'timestamp.to_utc_datetime() -> ' + repr(timestamp.to_utc_datetime()),
          '',
          '# window',
          'type(window) -> ' + repr(type(window)),
          'window.start -> {} ({})'.format(
              window.start, window.start.to_utc_datetime()),
          'window.end -> {} ({})'.format(
              window.end, window.end.to_utc_datetime()),
          'window.max_timestamp() -> {} ({})'.format(
              window.max_timestamp(), window.max_timestamp().to_utc_datetime()),
      ])

  with beam.Pipeline() as pipeline:
    dofn_params = (
        pipeline
        | 'Create a single test element' >> beam.Create([':)'])
        | 'Add timestamp (Spring equinox 2020)' >>
        beam.Map(lambda elem: beam.window.TimestampedValue(elem, 1584675660))
        |
        'Fixed 30sec windows' >> beam.WindowInto(beam.window.FixedWindows(30))
        | 'Analyze element' >> beam.ParDo(AnalyzeElement())
        | beam.Map(print))
    # [END pardo_dofn_params]
    # pylint: enable=line-too-long
    if test:
      test(dofn_params)
def pardo_dofn_methods(test=None):
  """Doc snippet: a DoFn overriding all lifecycle methods, tracing each call.

  Fix: dataset-extraction metadata fused onto the final return line has been
  stripped; it was not Python and broke the file's syntax.
  """
  # [START pardo_dofn_methods]
  import apache_beam as beam

  class DoFnMethods(beam.DoFn):
    def __init__(self):
      print('__init__')
      self.window = beam.window.GlobalWindow()

    def setup(self):
      print('setup')

    def start_bundle(self):
      print('start_bundle')

    def process(self, element, window=beam.DoFn.WindowParam):
      self.window = window
      yield '* process: ' + element

    def finish_bundle(self):
      yield beam.utils.windowed_value.WindowedValue(
          value='* finish_bundle: 🌱🌳🌍',
          timestamp=0,
          windows=[self.window],
      )

    def teardown(self):
      print('teardown')

  with beam.Pipeline() as pipeline:
    results = (
        pipeline
        | 'Create inputs' >> beam.Create(['🍓', '🥕', '🍆', '🍅', '🥔'])
        | 'DoFn methods' >> beam.ParDo(DoFnMethods())
        | beam.Map(print))
  # [END pardo_dofn_methods]
  if test:
    return test(results)
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
def map_simple(test=None):
  """Doc snippet: beam.Map applying the builtin str.strip to each element."""
  # [START map_simple]
  import apache_beam as beam

  with beam.Pipeline() as pipeline:
    plants = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            '   🍓Strawberry   \n',
            '   🥕Carrot   \n',
            '   🍆Eggplant   \n',
            '   🍅Tomato   \n',
            '   🥔Potato   \n',
        ])
        | 'Strip' >> beam.Map(str.strip)
        | beam.Map(print))
    # [END map_simple]
    if test:
      test(plants)
def map_function(test=None):
  """Doc snippet: beam.Map with a named function as the mapping callable."""
  # [START map_function]
  import apache_beam as beam

  def strip_header_and_newline(text):
    return text.strip('# \n')

  with beam.Pipeline() as pipeline:
    plants = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            '# 🍓Strawberry\n',
            '# 🥕Carrot\n',
            '# 🍆Eggplant\n',
            '# 🍅Tomato\n',
            '# 🥔Potato\n',
        ])
        | 'Strip header' >> beam.Map(strip_header_and_newline)
        | beam.Map(print))
    # [END map_function]
    if test:
      test(plants)
def map_lambda(test=None):
  """Doc snippet: beam.Map with a lambda as the mapping callable."""
  # [START map_lambda]
  import apache_beam as beam

  with beam.Pipeline() as pipeline:
    plants = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            '# 🍓Strawberry\n',
            '# 🥕Carrot\n',
            '# 🍆Eggplant\n',
            '# 🍅Tomato\n',
            '# 🥔Potato\n',
        ])
        | 'Strip header' >> beam.Map(lambda text: text.strip('# \n'))
        | beam.Map(print))
    # [END map_lambda]
    if test:
      test(plants)
def map_multiple_arguments(test=None):
  """Doc snippet: beam.Map forwarding extra keyword args to the callable."""
  # [START map_multiple_arguments]
  import apache_beam as beam

  def strip(text, chars=None):
    return text.strip(chars)

  with beam.Pipeline() as pipeline:
    plants = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            '# 🍓Strawberry\n',
            '# 🥕Carrot\n',
            '# 🍆Eggplant\n',
            '# 🍅Tomato\n',
            '# 🥔Potato\n',
        ])
        | 'Strip header' >> beam.Map(strip, chars='# \n')
        | beam.Map(print))
    # [END map_multiple_arguments]
    if test:
      test(plants)
def map_tuple(test=None):
  """Doc snippet: beam.MapTuple unpacking each (icon, plant) pair."""
  # [START map_tuple]
  import apache_beam as beam

  with beam.Pipeline() as pipeline:
    plants = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            ('🍓', 'Strawberry'),
            ('🥕', 'Carrot'),
            ('🍆', 'Eggplant'),
            ('🍅', 'Tomato'),
            ('🥔', 'Potato'),
        ])
        | 'Format' >>
        beam.MapTuple(lambda icon, plant: '{}{}'.format(icon, plant))
        | beam.Map(print))
    # [END map_tuple]
    if test:
      test(plants)
def map_side_inputs_singleton(test=None):
  """Doc snippet: beam.Map with a singleton side input."""
  # [START map_side_inputs_singleton]
  import apache_beam as beam

  with beam.Pipeline() as pipeline:
    chars = pipeline | 'Create chars' >> beam.Create(['# \n'])
    plants = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            '# 🍓Strawberry\n',
            '# 🥕Carrot\n',
            '# 🍆Eggplant\n',
            '# 🍅Tomato\n',
            '# 🥔Potato\n',
        ])
        | 'Strip header' >> beam.Map(
            lambda text,
            chars: text.strip(chars),
            chars=beam.pvalue.AsSingleton(chars),
        )
        | beam.Map(print))
    # [END map_side_inputs_singleton]
    if test:
      test(plants)
def map_side_inputs_iter(test=None):
  """Doc snippet: beam.Map with an iterable side input."""
  # [START map_side_inputs_iter]
  import apache_beam as beam

  with beam.Pipeline() as pipeline:
    chars = pipeline | 'Create chars' >> beam.Create(['#', ' ', '\n'])
    plants = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            '# 🍓Strawberry\n',
            '# 🥕Carrot\n',
            '# 🍆Eggplant\n',
            '# 🍅Tomato\n',
            '# 🥔Potato\n',
        ])
        | 'Strip header' >> beam.Map(
            lambda text,
            chars: text.strip(''.join(chars)),
            chars=beam.pvalue.AsIter(chars),
        )
        | beam.Map(print))
    # [END map_side_inputs_iter]
    if test:
      test(plants)
def map_side_inputs_dict(test=None):
  """Beam snippet: use a dict side input with ``beam.Map``.

  ``AsDict(durations)`` turns the ``(key, value)`` pairs into a dict that
  is passed as the ``durations`` argument; each plant's integer duration
  code is replaced with its name.

  Args:
    test: Optional callback invoked with the output PCollection.
  """
  # [START map_side_inputs_dict]
  import apache_beam as beam
  def replace_duration(plant, durations):
    plant['duration'] = durations[plant['duration']]
    return plant
  with beam.Pipeline() as pipeline:
    durations = pipeline | 'Durations' >> beam.Create([
        (0, 'annual'),
        (1, 'biennial'),
        (2, 'perennial'),
    ])
    plant_details = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            {
                'icon': '🍓', 'name': 'Strawberry', 'duration': 2
            },
            {
                'icon': '🥕', 'name': 'Carrot', 'duration': 1
            },
            {
                'icon': '🍆', 'name': 'Eggplant', 'duration': 2
            },
            {
                'icon': '🍅', 'name': 'Tomato', 'duration': 0
            },
            {
                'icon': '🥔', 'name': 'Potato', 'duration': 2
            },
        ])
        | 'Replace duration' >> beam.Map(
            replace_duration,
            durations=beam.pvalue.AsDict(durations),
        )
        | beam.Map(print))
  # [END map_side_inputs_dict]
  if test:
    test(plant_details)
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
def partition_function(test=None):
  """Beam snippet: split a PCollection with ``beam.Partition`` and a function.

  ``by_duration`` returns the partition index (position of the plant's
  duration in ``durations``); the result unpacks into one PCollection per
  duration.

  Args:
    test: Optional callback invoked with the three output PCollections.
  """
  # pylint: disable=line-too-long, expression-not-assigned
  # [START partition_function]
  import apache_beam as beam
  durations = ['annual', 'biennial', 'perennial']
  def by_duration(plant, num_partitions):
    return durations.index(plant['duration'])
  with beam.Pipeline() as pipeline:
    annuals, biennials, perennials = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            {'icon': '🍓', 'name': 'Strawberry', 'duration': 'perennial'},
            {'icon': '🥕', 'name': 'Carrot', 'duration': 'biennial'},
            {'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'},
            {'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'},
            {'icon': '🥔', 'name': 'Potato', 'duration': 'perennial'},
        ])
        | 'Partition' >> beam.Partition(by_duration, len(durations))
    )
    annuals | 'Annuals' >> beam.Map(lambda x: print('annual: {}'.format(x)))
    biennials | 'Biennials' >> beam.Map(
        lambda x: print('biennial: {}'.format(x)))
    perennials | 'Perennials' >> beam.Map(
        lambda x: print('perennial: {}'.format(x)))
  # [END partition_function]
  # pylint: enable=line-too-long, expression-not-assigned
  if test:
    test(annuals, biennials, perennials)
def partition_lambda(test=None):
  """Beam snippet: split a PCollection with ``beam.Partition`` and a lambda.

  Same behavior as ``partition_function`` but the partition-index function
  is written inline.

  Args:
    test: Optional callback invoked with the three output PCollections.
  """
  # pylint: disable=line-too-long, expression-not-assigned
  # [START partition_lambda]
  import apache_beam as beam
  durations = ['annual', 'biennial', 'perennial']
  with beam.Pipeline() as pipeline:
    annuals, biennials, perennials = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            {'icon': '🍓', 'name': 'Strawberry', 'duration': 'perennial'},
            {'icon': '🥕', 'name': 'Carrot', 'duration': 'biennial'},
            {'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'},
            {'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'},
            {'icon': '🥔', 'name': 'Potato', 'duration': 'perennial'},
        ])
        | 'Partition' >> beam.Partition(
            lambda plant, num_partitions: durations.index(plant['duration']),
            len(durations),
        )
    )
    annuals | 'Annuals' >> beam.Map(lambda x: print('annual: {}'.format(x)))
    biennials | 'Biennials' >> beam.Map(
        lambda x: print('biennial: {}'.format(x)))
    perennials | 'Perennials' >> beam.Map(
        lambda x: print('perennial: {}'.format(x)))
  # [END partition_lambda]
  # pylint: enable=line-too-long, expression-not-assigned
  if test:
    test(annuals, biennials, perennials)
def partition_multiple_arguments(test=None):
  """Beam snippet: pass extra arguments to a ``beam.Partition`` function.

  ``split_dataset`` deterministically hashes each plant's JSON encoding
  into a bucket in ``range(sum(ratio))`` and maps it to a partition index,
  yielding an approximate 8/2 train/test split.

  Args:
    test: Optional callback invoked with the two output PCollections.
  """
  # pylint: disable=expression-not-assigned
  # [START partition_multiple_arguments]
  import apache_beam as beam
  import json
  def split_dataset(plant, num_partitions, ratio):
    assert num_partitions == len(ratio)
    bucket = sum(map(ord, json.dumps(plant))) % sum(ratio)
    total = 0
    for i, part in enumerate(ratio):
      total += part
      if bucket < total:
        return i
    return len(ratio) - 1
  with beam.Pipeline() as pipeline:
    train_dataset, test_dataset = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            {'icon': '🍓', 'name': 'Strawberry', 'duration': 'perennial'},
            {'icon': '🥕', 'name': 'Carrot', 'duration': 'biennial'},
            {'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'},
            {'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'},
            {'icon': '🥔', 'name': 'Potato', 'duration': 'perennial'},
        ])
        | 'Partition' >> beam.Partition(split_dataset, 2, ratio=[8, 2])
    )
    train_dataset | 'Train' >> beam.Map(lambda x: print('train: {}'.format(x)))
    test_dataset | 'Test' >> beam.Map(lambda x: print('test: {}'.format(x)))
  # [END partition_multiple_arguments]
  # pylint: enable=expression-not-assigned
  if test:
    test(train_dataset, test_dataset)
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
def withtimestamps_event_time(test=None):
  """Beam snippet: attach event-time timestamps with ``TimestampedValue``.

  Each plant's ``'season'`` field (Unix seconds) becomes the element's
  timestamp; ``GetTimestamp`` reads it back via ``DoFn.TimestampParam``.

  Args:
    test: Optional callback invoked with the output PCollection.
  """
  # [START withtimestamps_event_time]
  import apache_beam as beam
  class GetTimestamp(beam.DoFn):
    def process(self, plant, timestamp=beam.DoFn.TimestampParam):
      yield '{} - {}'.format(timestamp.to_utc_datetime(), plant['name'])
  with beam.Pipeline() as pipeline:
    plant_timestamps = (
        pipeline
        | 'Garden plants' >> beam.Create([
            {'name': 'Strawberry', 'season': 1585699200},  # April, 2020
            {'name': 'Carrot', 'season': 1590969600},  # June, 2020
            {'name': 'Artichoke', 'season': 1583020800},  # March, 2020
            {'name': 'Tomato', 'season': 1588291200},  # May, 2020
            {'name': 'Potato', 'season': 1598918400},  # September, 2020
        ])
        | 'With timestamps' >> beam.Map(
            lambda plant: beam.window.TimestampedValue(plant, plant['season']))
        | 'Get timestamp' >> beam.ParDo(GetTimestamp())
        | beam.Map(print)
    )
  # [END withtimestamps_event_time]
  if test:
    test(plant_timestamps)
def withtimestamps_logical_clock(test=None):
  """Beam snippet: use logical event IDs as timestamps.

  Each plant's ``'event_id'`` is stored as the element timestamp (in
  seconds) and recovered as an integer from ``timestamp.micros``.

  Args:
    test: Optional callback invoked with the output PCollection.
  """
  # [START withtimestamps_logical_clock]
  import apache_beam as beam
  class GetTimestamp(beam.DoFn):
    def process(self, plant, timestamp=beam.DoFn.TimestampParam):
      event_id = int(timestamp.micros / 1e6)  # equivalent to seconds
      yield '{} - {}'.format(event_id, plant['name'])
  with beam.Pipeline() as pipeline:
    plant_events = (
        pipeline
        | 'Garden plants' >> beam.Create([
            {'name': 'Strawberry', 'event_id': 1},
            {'name': 'Carrot', 'event_id': 4},
            {'name': 'Artichoke', 'event_id': 2},
            {'name': 'Tomato', 'event_id': 3},
            {'name': 'Potato', 'event_id': 5},
        ])
        | 'With timestamps' >> beam.Map(lambda plant: \
            beam.window.TimestampedValue(plant, plant['event_id']))
        | 'Get timestamp' >> beam.ParDo(GetTimestamp())
        | beam.Map(print)
    )
  # [END withtimestamps_logical_clock]
  if test:
    test(plant_events)
def withtimestamps_processing_time(test=None):
  """Beam snippet: attach processing-time timestamps.

  ``time.time()`` at element-processing time becomes each element's
  timestamp, so the output is wall-clock dependent (not deterministic).

  Args:
    test: Optional callback invoked with the output PCollection.
  """
  # [START withtimestamps_processing_time]
  import apache_beam as beam
  import time
  class GetTimestamp(beam.DoFn):
    def process(self, plant, timestamp=beam.DoFn.TimestampParam):
      yield '{} - {}'.format(timestamp.to_utc_datetime(), plant['name'])
  with beam.Pipeline() as pipeline:
    plant_processing_times = (
        pipeline
        | 'Garden plants' >> beam.Create([
            {'name': 'Strawberry'},
            {'name': 'Carrot'},
            {'name': 'Artichoke'},
            {'name': 'Tomato'},
            {'name': 'Potato'},
        ])
        | 'With timestamps' >> beam.Map(lambda plant: \
            beam.window.TimestampedValue(plant, time.time()))
        | 'Get timestamp' >> beam.ParDo(GetTimestamp())
        | beam.Map(print)
    )
  # [END withtimestamps_processing_time]
  if test:
    test(plant_processing_times)
def time_tuple2unix_time():
  """Snippet: convert a ``time.struct_time`` to Unix seconds.

  Parses a fixed date string and converts it with ``time.mktime``, which
  interprets the tuple in the local timezone.

  Returns:
    Unix timestamp (float seconds) for '2020-03-19 20:50:00' local time.
  """
  # [START time_tuple2unix_time]
  import time
  time_tuple = time.strptime('2020-03-19 20:50:00', '%Y-%m-%d %H:%M:%S')
  unix_time = time.mktime(time_tuple)
  # [END time_tuple2unix_time]
  return unix_time
def datetime2unix_time():
  """Snippet: convert a ``datetime.datetime`` to Unix seconds.

  Takes the current local time, converts it to a ``time.struct_time`` via
  ``timetuple()`` (this drops sub-second precision), and converts that to
  Unix seconds with ``time.mktime`` (local-timezone interpretation).

  Returns:
    The current time as a float Unix timestamp, truncated to whole seconds.
  """
  # [START datetime2unix_time]
  import time
  import datetime
  now = datetime.datetime.now()
  time_tuple = now.timetuple()
  unix_time = time.mktime(time_tuple)
  # [END datetime2unix_time]
  return unix_time
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
def filter_function(test=None):
  """Beam snippet: keep elements with ``beam.Filter`` and a predicate function.

  Keeps only plants whose ``'duration'`` equals ``'perennial'``.

  Args:
    test: Optional callback invoked with the output PCollection.
  """
  # [START filter_function]
  import apache_beam as beam
  def is_perennial(plant):
    return plant['duration'] == 'perennial'
  with beam.Pipeline() as pipeline:
    perennials = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            {
                'icon': '🍓', 'name': 'Strawberry', 'duration': 'perennial'
            },
            {
                'icon': '🥕', 'name': 'Carrot', 'duration': 'biennial'
            },
            {
                'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'
            },
            {
                'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'
            },
            {
                'icon': '🥔', 'name': 'Potato', 'duration': 'perennial'
            },
        ])
        | 'Filter perennials' >> beam.Filter(is_perennial)
        | beam.Map(print))
  # [END filter_function]
  if test:
    test(perennials)
def filter_lambda(test=None):
  """Beam snippet: keep elements with ``beam.Filter`` and an inline lambda.

  Same behavior as ``filter_function`` with the predicate written inline.

  Args:
    test: Optional callback invoked with the output PCollection.
  """
  # [START filter_lambda]
  import apache_beam as beam
  with beam.Pipeline() as pipeline:
    perennials = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            {
                'icon': '🍓', 'name': 'Strawberry', 'duration': 'perennial'
            },
            {
                'icon': '🥕', 'name': 'Carrot', 'duration': 'biennial'
            },
            {
                'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'
            },
            {
                'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'
            },
            {
                'icon': '🥔', 'name': 'Potato', 'duration': 'perennial'
            },
        ])
        | 'Filter perennials' >>
        beam.Filter(lambda plant: plant['duration'] == 'perennial')
        | beam.Map(print))
  # [END filter_lambda]
  if test:
    test(perennials)
def filter_multiple_arguments(test=None):
  """Beam snippet: pass extra positional arguments to ``beam.Filter``.

  The extra ``'perennial'`` argument is forwarded to ``has_duration``
  for every element.

  Args:
    test: Optional callback invoked with the output PCollection.
  """
  # [START filter_multiple_arguments]
  import apache_beam as beam
  def has_duration(plant, duration):
    return plant['duration'] == duration
  with beam.Pipeline() as pipeline:
    perennials = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            {
                'icon': '🍓', 'name': 'Strawberry', 'duration': 'perennial'
            },
            {
                'icon': '🥕', 'name': 'Carrot', 'duration': 'biennial'
            },
            {
                'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'
            },
            {
                'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'
            },
            {
                'icon': '🥔', 'name': 'Potato', 'duration': 'perennial'
            },
        ])
        | 'Filter perennials' >> beam.Filter(has_duration, 'perennial')
        | beam.Map(print))
  # [END filter_multiple_arguments]
  if test:
    test(perennials)
def filter_side_inputs_singleton(test=None):
  """Beam snippet: use a singleton side input with ``beam.Filter``.

  ``AsSingleton(perennial)`` supplies the single value ``'perennial'`` as
  the ``duration`` argument of the predicate for every element.

  Args:
    test: Optional callback invoked with the output PCollection.
  """
  # [START filter_side_inputs_singleton]
  import apache_beam as beam
  with beam.Pipeline() as pipeline:
    perennial = pipeline | 'Perennial' >> beam.Create(['perennial'])
    perennials = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            {
                'icon': '🍓', 'name': 'Strawberry', 'duration': 'perennial'
            },
            {
                'icon': '🥕', 'name': 'Carrot', 'duration': 'biennial'
            },
            {
                'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'
            },
            {
                'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'
            },
            {
                'icon': '🥔', 'name': 'Potato', 'duration': 'perennial'
            },
        ])
        | 'Filter perennials' >> beam.Filter(
            lambda plant,
            duration: plant['duration'] == duration,
            duration=beam.pvalue.AsSingleton(perennial),
        )
        | beam.Map(print))
  # [END filter_side_inputs_singleton]
  if test:
    test(perennials)
def filter_side_inputs_iter(test=None):
  """Beam snippet: use an iterable side input with ``beam.Filter``.

  Keeps plants whose duration is in ``valid_durations``. Note the Potato
  entry uses uppercase ``'PERENNIAL'``, which does not match and is
  intentionally filtered out.

  Args:
    test: Optional callback invoked with the output PCollection.
  """
  # [START filter_side_inputs_iter]
  import apache_beam as beam
  with beam.Pipeline() as pipeline:
    valid_durations = pipeline | 'Valid durations' >> beam.Create([
        'annual',
        'biennial',
        'perennial',
    ])
    valid_plants = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            {
                'icon': '🍓', 'name': 'Strawberry', 'duration': 'perennial'
            },
            {
                'icon': '🥕', 'name': 'Carrot', 'duration': 'biennial'
            },
            {
                'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'
            },
            {
                'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'
            },
            {
                'icon': '🥔', 'name': 'Potato', 'duration': 'PERENNIAL'
            },
        ])
        | 'Filter valid plants' >> beam.Filter(
            lambda plant,
            valid_durations: plant['duration'] in valid_durations,
            valid_durations=beam.pvalue.AsIter(valid_durations),
        )
        | beam.Map(print))
  # [END filter_side_inputs_iter]
  if test:
    test(valid_plants)
def filter_side_inputs_dict(test=None):
  """Beam snippet: use a dict side input with ``beam.Filter``.

  ``AsDict(keep_duration)`` maps each duration to a boolean; only plants
  whose duration maps to ``True`` (perennial) are kept.

  Args:
    test: Optional callback invoked with the output PCollection.
  """
  # [START filter_side_inputs_dict]
  import apache_beam as beam
  with beam.Pipeline() as pipeline:
    keep_duration = pipeline | 'Duration filters' >> beam.Create([
        ('annual', False),
        ('biennial', False),
        ('perennial', True),
    ])
    perennials = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            {
                'icon': '🍓', 'name': 'Strawberry', 'duration': 'perennial'
            },
            {
                'icon': '🥕', 'name': 'Carrot', 'duration': 'biennial'
            },
            {
                'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'
            },
            {
                'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'
            },
            {
                'icon': '🥔', 'name': 'Potato', 'duration': 'perennial'
            },
        ])
        | 'Filter plants by duration' >> beam.Filter(
            lambda plant,
            keep_duration: keep_duration[plant['duration']],
            keep_duration=beam.pvalue.AsDict(keep_duration),
        )
        | beam.Map(print))
  # [END filter_side_inputs_dict]
  if test:
    test(perennials)
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
def flatmap_simple(test=None):
  """Beam snippet: flatten with ``beam.FlatMap`` and an unbound method.

  ``str.split`` yields one output element per whitespace-separated word.

  Args:
    test: Optional callback invoked with the output PCollection.
  """
  # [START flatmap_simple]
  import apache_beam as beam
  with beam.Pipeline() as pipeline:
    plants = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            '🍓Strawberry 🥕Carrot 🍆Eggplant',
            '🍅Tomato 🥔Potato',
        ])
        | 'Split words' >> beam.FlatMap(str.split)
        | beam.Map(print))
  # [END flatmap_simple]
  if test:
    test(plants)
def flatmap_function(test=None):
  """Beam snippet: flatten with ``beam.FlatMap`` and a function.

  ``split_words`` returns a list; each list item becomes its own element.

  Args:
    test: Optional callback invoked with the output PCollection.
  """
  # [START flatmap_function]
  import apache_beam as beam
  def split_words(text):
    return text.split(',')
  with beam.Pipeline() as pipeline:
    plants = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            '🍓Strawberry,🥕Carrot,🍆Eggplant',
            '🍅Tomato,🥔Potato',
        ])
        | 'Split words' >> beam.FlatMap(split_words)
        | beam.Map(print))
  # [END flatmap_function]
  if test:
    test(plants)
def flatmap_lambda(test=None):
  """Beam snippet: flatten lists of elements with ``beam.FlatMap`` + lambda.

  The identity lambda returns each input list; FlatMap unnests it so each
  list item becomes its own element.

  Args:
    test: Optional callback invoked with the output PCollection.
  """
  # [START flatmap_lambda]
  import apache_beam as beam
  with beam.Pipeline() as pipeline:
    plants = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            ['🍓Strawberry', '🥕Carrot', '🍆Eggplant'],
            ['🍅Tomato', '🥔Potato'],
        ])
        | 'Flatten lists' >> beam.FlatMap(lambda elements: elements)
        | beam.Map(print))
  # [END flatmap_lambda]
  if test:
    test(plants)
def flatmap_generator(test=None):
  """Beam snippet: flatten with ``beam.FlatMap`` and a generator function.

  ``generate_elements`` yields each item of the input list; the yielded
  values become individual elements.

  Args:
    test: Optional callback invoked with the output PCollection.
  """
  # [START flatmap_generator]
  import apache_beam as beam
  def generate_elements(elements):
    for element in elements:
      yield element
  with beam.Pipeline() as pipeline:
    plants = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            ['🍓Strawberry', '🥕Carrot', '🍆Eggplant'],
            ['🍅Tomato', '🥔Potato'],
        ])
        | 'Flatten lists' >> beam.FlatMap(generate_elements)
        | beam.Map(print))
  # [END flatmap_generator]
  if test:
    test(plants)
def flatmap_multiple_arguments(test=None):
  """Beam snippet: pass keyword arguments to ``beam.FlatMap``.

  The ``delimiter=','`` keyword is forwarded to ``split_words`` for every
  element.

  Args:
    test: Optional callback invoked with the output PCollection.
  """
  # [START flatmap_multiple_arguments]
  import apache_beam as beam
  def split_words(text, delimiter=None):
    return text.split(delimiter)
  with beam.Pipeline() as pipeline:
    plants = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            '🍓Strawberry,🥕Carrot,🍆Eggplant',
            '🍅Tomato,🥔Potato',
        ])
        | 'Split words' >> beam.FlatMap(split_words, delimiter=',')
        | beam.Map(print))
  # [END flatmap_multiple_arguments]
  if test:
    test(plants)
def flatmap_tuple(test=None):
  """Beam snippet: unpack tuples with ``beam.FlatMapTuple``.

  Each ``(icon, plant)`` tuple is unpacked into ``format_plant``'s
  parameters; tuples with a falsy icon (the ``None`` entry) yield nothing
  and are dropped.

  Args:
    test: Optional callback invoked with the output PCollection.
  """
  # [START flatmap_tuple]
  import apache_beam as beam
  def format_plant(icon, plant):
    if icon:
      yield '{}{}'.format(icon, plant)
  with beam.Pipeline() as pipeline:
    plants = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            ('🍓', 'Strawberry'),
            ('🥕', 'Carrot'),
            ('🍆', 'Eggplant'),
            ('🍅', 'Tomato'),
            ('🥔', 'Potato'),
            (None, 'Invalid'),
        ])
        | 'Format' >> beam.FlatMapTuple(format_plant)
        | beam.Map(print))
  # [END flatmap_tuple]
  if test:
    test(plants)
def flatmap_side_inputs_singleton(test=None):
  """Beam snippet: use a singleton side input with ``beam.FlatMap``.

  ``AsSingleton(delimiter)`` supplies the single value ``','`` as the
  ``delimiter`` argument of the split lambda for every element.

  Args:
    test: Optional callback invoked with the output PCollection.
  """
  # [START flatmap_side_inputs_singleton]
  import apache_beam as beam
  with beam.Pipeline() as pipeline:
    delimiter = pipeline | 'Create delimiter' >> beam.Create([','])
    plants = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            '🍓Strawberry,🥕Carrot,🍆Eggplant',
            '🍅Tomato,🥔Potato',
        ])
        | 'Split words' >> beam.FlatMap(
            lambda text,
            delimiter: text.split(delimiter),
            delimiter=beam.pvalue.AsSingleton(delimiter),
        )
        | beam.Map(print))
  # [END flatmap_side_inputs_singleton]
  if test:
    test(plants)
def flatmap_side_inputs_iter(test=None):
  """Beam snippet: use an iterable side input with ``beam.FlatMap``.

  Lowercases each plant's duration in place and yields the plant only if
  the normalized duration appears in ``valid_durations`` (the ``'unknown'``
  Potato entry is dropped).

  Args:
    test: Optional callback invoked with the output PCollection.
  """
  # [START flatmap_side_inputs_iter]
  import apache_beam as beam
  def normalize_and_validate_durations(plant, valid_durations):
    plant['duration'] = plant['duration'].lower()
    if plant['duration'] in valid_durations:
      yield plant
  with beam.Pipeline() as pipeline:
    valid_durations = pipeline | 'Valid durations' >> beam.Create([
        'annual',
        'biennial',
        'perennial',
    ])
    valid_plants = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            {
                'icon': '🍓', 'name': 'Strawberry', 'duration': 'Perennial'
            },
            {
                'icon': '🥕', 'name': 'Carrot', 'duration': 'BIENNIAL'
            },
            {
                'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'
            },
            {
                'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'
            },
            {
                'icon': '🥔', 'name': 'Potato', 'duration': 'unknown'
            },
        ])
        | 'Normalize and validate durations' >> beam.FlatMap(
            normalize_and_validate_durations,
            valid_durations=beam.pvalue.AsIter(valid_durations),
        )
        | beam.Map(print))
  # [END flatmap_side_inputs_iter]
  if test:
    test(valid_plants)
def flatmap_side_inputs_dict(test=None):
  """Beam snippet: use a dict side input with ``beam.FlatMap``.

  ``AsDict(durations)`` maps integer duration codes to names; plants with
  a known code get the name substituted and are yielded, while unknown
  codes (the ``-1`` Potato entry) are dropped.

  Args:
    test: Optional callback invoked with the output PCollection.
  """
  # [START flatmap_side_inputs_dict]
  import apache_beam as beam
  def replace_duration_if_valid(plant, durations):
    if plant['duration'] in durations:
      plant['duration'] = durations[plant['duration']]
      yield plant
  with beam.Pipeline() as pipeline:
    durations = pipeline | 'Durations dict' >> beam.Create([
        (0, 'annual'),
        (1, 'biennial'),
        (2, 'perennial'),
    ])
    valid_plants = (
        pipeline
        | 'Gardening plants' >> beam.Create([
            {
                'icon': '🍓', 'name': 'Strawberry', 'duration': 2
            },
            {
                'icon': '🥕', 'name': 'Carrot', 'duration': 1
            },
            {
                'icon': '🍆', 'name': 'Eggplant', 'duration': 2
            },
            {
                'icon': '🍅', 'name': 'Tomato', 'duration': 0
            },
            {
                'icon': '🥔', 'name': 'Potato', 'duration': -1
            },
        ])
        | 'Replace duration if valid' >> beam.FlatMap(
            replace_duration_if_valid,
            durations=beam.pvalue.AsDict(durations),
        )
        | beam.Map(print))
  # [END flatmap_side_inputs_dict]
  if test:
    test(valid_plants)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.