idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
232,500
def calc_log_size(request, calc_id):
    """
    Get the current number of lines in the log of the given calculation,
    returned as a JSON response (404 when the calculation is unknown).
    """
    try:
        size = logs.dbcmd('get_log_size', calc_id)
    except dbapi.NotFound:
        return HttpResponseNotFound()
    body = json.dumps(size)
    return HttpResponse(content=body, content_type=JSON)
Get the current number of lines in the log
75
9
232,501
def submit_job(job_ini, username, hazard_job_id=None):
    """
    Create a job object from the given job.ini file in the job directory
    and run it in a new, fully detached process.

    :param job_ini: path to the job.ini configuration file
    :param username: name of the user submitting the job
    :param hazard_job_id: ID of a previous hazard calculation, if any
    :returns: the pair (job ID, PID of the spawned process)
    """
    job_id = logs.init('job')
    oq = engine.job_from_file(job_ini, job_id, username,
                              hazard_calculation_id=hazard_job_id)
    pik = pickle.dumps(oq, protocol=0)  # human readable protocol
    # build a small runner script from the RUNCALC template and store it
    # in a temporary .py file to be executed by a child interpreter
    code = RUNCALC % dict(job_id=job_id, hazard_job_id=hazard_job_id,
                          pik=pik, username=username)
    tmp_py = gettemp(code, suffix='.py')
    # print(code, tmp_py)  # useful when debugging
    devnull = subprocess.DEVNULL
    # detach the calculation: all standard streams go to /dev/null
    popen = subprocess.Popen([sys.executable, tmp_py],
                             stdin=devnull, stdout=devnull, stderr=devnull)
    # reap the child in the background so it does not become a zombie
    threading.Thread(target=popen.wait).start()
    logs.dbcmd('update_job', job_id, {'pid': popen.pid})
    return job_id, popen.pid
Create a job object from the given job . ini file in the job directory and run it in a new process . Returns the job ID and PID .
251
31
232,502
def calc_result(request, result_id):
    """
    Download a specific result, by ``result_id``.

    Exports the result from the datastore into a temporary directory
    (zipping when the export produces multiple files) and streams it
    back as an attachment; the temporary directory is removed when the
    stream is closed.
    """
    # If the result for the requested ID doesn't exist, OR
    # the job which it is related too is not complete,
    # throw back a 404.
    try:
        job_id, job_status, job_user, datadir, ds_key = logs.dbcmd(
            'get_result', result_id)
        if not utils.user_has_permission(request, job_user):
            return HttpResponseForbidden()
    except dbapi.NotFound:
        return HttpResponseNotFound()
    etype = request.GET.get('export_type')
    export_type = etype or DEFAULT_EXPORT_TYPE
    tmpdir = tempfile.mkdtemp()
    try:
        exported = core.export_from_db(
            (ds_key, export_type), job_id, datadir, tmpdir)
    except DataStoreExportError as exc:
        # TODO: there should be a better error page
        return HttpResponse(content='%s: %s' % (exc.__class__.__name__, exc),
                            content_type='text/plain', status=500)
    if not exported:
        # Throw back a 404 if the exact export parameters are not supported
        return HttpResponseNotFound(
            'Nothing to export for export_type=%s, %s' % (export_type, ds_key))
    elif len(exported) > 1:
        # Building an archive so that there can be a single file download
        archname = ds_key + '-' + export_type + '.zip'
        zipfiles(exported, os.path.join(tmpdir, archname))
        exported = os.path.join(tmpdir, archname)
    else:  # single file
        exported = exported[0]
    content_type = EXPORT_CONTENT_TYPE_MAP.get(export_type,
                                               DEFAULT_CONTENT_TYPE)
    fname = 'output-%s-%s' % (result_id, os.path.basename(exported))
    stream = FileWrapper(open(exported, 'rb'))  # 'b' is needed on Windows
    # override close() so the temporary directory is removed only after
    # the download has completed
    stream.close = lambda: (FileWrapper.close(stream), shutil.rmtree(tmpdir))
    response = FileResponse(stream, content_type=content_type)
    response['Content-Disposition'] = (
        'attachment; filename=%s' % os.path.basename(fname))
    response['Content-Length'] = str(os.path.getsize(exported))
    return response
Download a specific result by result_id .
573
9
232,503
def extract(request, calc_id, what):
    """
    Wrapper over the oq extract command. If settings.LOCKDOWN is true
    only calculations owned by the current user can be retrieved.

    Extracts ``what`` from the calculation datastore, saves it into a
    temporary .npz file and streams it back; the file is removed when
    the stream is closed.
    """
    job = logs.dbcmd('get_job', int(calc_id))
    if job is None:
        return HttpResponseNotFound()
    if not utils.user_has_permission(request, job.user_name):
        return HttpResponseForbidden()
    try:
        # read the data and save them on a temporary .npz file
        with datastore.read(job.ds_calc_dir + '.hdf5') as ds:
            fd, fname = tempfile.mkstemp(
                prefix=what.replace('/', '-'), suffix='.npz')
            os.close(fd)
            # everything after the path is the (url-encoded) query string
            n = len(request.path_info)
            query_string = unquote_plus(request.get_full_path()[n:])
            aw = _extract(ds, what + query_string)
            a = {}
            for key, val in vars(aw).items():
                key = str(key)  # can be a numpy.bytes_
                if isinstance(val, str):
                    # without this oq extract would fail
                    a[key] = numpy.array(val.encode('utf-8'))
                elif isinstance(val, dict):
                    # this is hack: we are losing the values
                    a[key] = list(val)
                else:
                    a[key] = val
            numpy.savez_compressed(fname, **a)
    except Exception as exc:
        tb = ''.join(traceback.format_tb(exc.__traceback__))
        return HttpResponse(
            content='%s: %s\n%s' % (exc.__class__.__name__, exc, tb),
            content_type='text/plain', status=500)
    # stream the data back
    stream = FileWrapper(open(fname, 'rb'))
    # remove the temporary file only once the download has completed
    stream.close = lambda: (FileWrapper.close(stream), os.remove(fname))
    response = FileResponse(stream, content_type='application/octet-stream')
    response['Content-Disposition'] = (
        'attachment; filename=%s' % os.path.basename(fname))
    response['Content-Length'] = str(os.path.getsize(fname))
    return response
Wrapper over the oq extract command . If setting . LOCKDOWN is true only calculations owned by the current user can be retrieved .
530
28
232,504
def calc_datastore(request, job_id):
    """
    Download the full HDF5 datastore of the given job as an attachment
    (404 when the job is unknown, 403 when the user lacks permission).
    """
    job = logs.dbcmd('get_job', int(job_id))
    if job is None:
        return HttpResponseNotFound()
    if not utils.user_has_permission(request, job.user_name):
        return HttpResponseForbidden()
    hdf5path = job.ds_calc_dir + '.hdf5'
    wrapper = FileWrapper(open(hdf5path, 'rb'))
    response = FileResponse(wrapper, content_type=HDF5)
    disposition = 'attachment; filename=%s' % os.path.basename(hdf5path)
    response['Content-Disposition'] = disposition
    response['Content-Length'] = str(os.path.getsize(hdf5path))
    return response
Download a full datastore file .
179
8
232,505
def calc_oqparam(request, job_id):
    """
    Return the calculation parameters of the given job as a JSON
    response (404 when the job is unknown, 403 when not permitted).
    """
    job = logs.dbcmd('get_job', int(job_id))
    if job is None:
        return HttpResponseNotFound()
    if not utils.user_has_permission(request, job.user_name):
        return HttpResponseForbidden()
    hdf5path = job.ds_calc_dir + '.hdf5'
    with datastore.read(hdf5path) as dstore:
        params = vars(dstore['oqparam'])
        return HttpResponse(content=json.dumps(params), content_type=JSON)
Return the calculation parameters as a JSON
141
7
232,506
def on_same_fs(request):
    """
    Accept a POST request with `filename` and `checksum` fields and
    answer whether the server can see the same file as the client
    (i.e. they share a filesystem).

    The check reads the first 32 bytes of the file and compares their
    adler32 checksum with the one sent by the client; any I/O or
    conversion error simply yields ``{"success": false}``.

    Fix: the file is now opened with a context manager, so the file
    descriptor is no longer leaked.
    """
    filename = request.POST['filename']
    checksum_in = request.POST['checksum']
    checksum = 0
    try:
        with open(filename, 'rb') as f:
            data = f.read(32)
        checksum = zlib.adler32(data, checksum) & 0xffffffff
        if checksum == int(checksum_in):
            return HttpResponse(content=json.dumps({'success': True}),
                                content_type=JSON, status=200)
    except (IOError, ValueError):
        pass
    return HttpResponse(content=json.dumps({'success': False}),
                        content_type=JSON, status=200)
Accept a POST request to check access to a FS available by a client .
157
15
232,507
def classical_damage(riskinputs, riskmodel, param, monitor):
    """
    Core function for a classical damage computation.

    Accumulate, for each pair (loss type index, realization index), a
    dictionary mapping asset ordinals to the corresponding output.
    """
    acc = AccumDict(accum=AccumDict())
    for riskinput in riskinputs:
        for output in riskmodel.gen_outputs(riskinput, monitor):
            for lti, loss_type in enumerate(riskmodel.loss_types):
                ordinals = riskinput.assets['ordinal']
                acc[lti, output.rlzi] += dict(
                    zip(ordinals, output[loss_type]))
    return acc
Core function for a classical damage computation .
117
8
232,508
def cmp_mat ( a , b ) : c = 0 for x , y in zip ( a . flat , b . flat ) : c = cmp ( abs ( x ) , abs ( y ) ) if c != 0 : return c return c
Compares two matrices element-wise, returning a negative, zero or positive value
54
11
232,509
def _get_centroid_time(self, time_diff):
    """
    Shifts ``self.date``/``self.time`` by the given centroid time
    difference (in seconds), storing the adjusted values back on self.

    NOTE(review): ``microsecond_diff`` multiplies the fractional part
    by 1000 (i.e. milliseconds), yet is passed as ``microseconds`` —
    looks like a factor-1000 bug, TODO confirm. Also the fractional
    part is computed as ``time_diff - floor(abs(time_diff))``, which is
    questionable when ``time_diff`` is negative — verify.
    """
    source_time = datetime.datetime.combine(self.date, self.time)
    second_diff = floor(fabs(time_diff))
    microsecond_diff = int(1000. * (time_diff - second_diff))
    if time_diff < 0.:
        source_time = source_time - datetime.timedelta(
            seconds=int(second_diff), microseconds=microsecond_diff)
    else:
        source_time = source_time + datetime.timedelta(
            seconds=int(second_diff), microseconds=microsecond_diff)
    self.time = source_time.time()
    self.date = source_time.date()
Calculates the time difference between the date - time classes
164
12
232,510
def _to_ned ( self ) : if self . ref_frame is 'USE' : # Rotate return utils . use_to_ned ( self . tensor ) , utils . use_to_ned ( self . tensor_sigma ) elif self . ref_frame is 'NED' : # Alreadt NED return self . tensor , self . tensor_sigma else : raise ValueError ( 'Reference frame %s not recognised - cannot ' 'transform to NED!' % self . ref_frame )
Switches the reference frame to NED
117
8
232,511
def _to_use ( self ) : if self . ref_frame is 'NED' : # Rotate return utils . ned_to_use ( self . tensor ) , utils . ned_to_use ( self . tensor_sigma ) elif self . ref_frame is 'USE' : # Already USE return self . tensor , self . tensor_sigma else : raise ValueError ( 'Reference frame %s not recognised - cannot ' 'transform to USE!' % self . ref_frame )
Returns a tensor in the USE reference frame
115
9
232,512
def get_nodal_planes(self):
    """
    Returns the nodal planes (strike, dip and rake of both planes) by
    eigendecomposition of the moment tensor.

    Fix: the ``cmp`` keyword of ``sorted`` was removed in Python 3;
    the same comparator is now adapted with ``functools.cmp_to_key``.
    """
    from functools import cmp_to_key
    # Convert reference frame to NED
    self.tensor, self.tensor_sigma = self._to_ned()
    self.ref_frame = 'NED'
    # Eigenvalue decomposition of the tensor
    _, evect = utils.eigendecompose(self.tensor)
    # Rotation matrix
    _, rot_vec = utils.eigendecompose(np.matrix([[0., 0., -1],
                                                 [0., 0., 0.],
                                                 [-1., 0., 0.]]))
    rotation_matrix = (np.matrix(evect * rot_vec.T)).T
    if np.linalg.det(rotation_matrix) < 0.:
        rotation_matrix *= -1.
    flip_dc = np.matrix([[0., 0., -1.],
                         [0., -1., 0.],
                         [-1., 0., 0.]])
    rotation_matrices = sorted([rotation_matrix, flip_dc * rotation_matrix],
                               key=cmp_to_key(cmp_mat))
    nodal_planes = GCMTNodalPlanes()
    # 1st Nodal Plane
    dip, strike, rake = [(180. / pi) * angle for angle in
                         utils.matrix_to_euler(rotation_matrices[0])]
    nodal_planes.nodal_plane_1 = {'strike': strike % 360,
                                  'dip': dip,
                                  'rake': -rake}
    # 2nd Nodal Plane
    dip, strike, rake = [(180. / pi) * angle for angle in
                         utils.matrix_to_euler(rotation_matrices[1])]
    nodal_planes.nodal_plane_2 = {'strike': strike % 360.,
                                  'dip': dip,
                                  'rake': -rake}
    return nodal_planes
Returns the nodal planes by eigendecomposition of the moment tensor
444
17
232,513
def get_principal_axes(self):
    """
    Uses the eigendecomposition to extract the principal axes from the
    moment tensor, returning an instance of the GCMTPrincipalAxes class
    whose p/b/t axes each hold eigenvalue, azimuth and plunge.
    """
    # Perform eigendecomposition - returns in order P, B, T
    _ = self.eigendecompose(normalise=True)
    principal_axes = GCMTPrincipalAxes()
    # the eigenvector columns are ordered P (0), B (1), T (2)
    for index, attr in enumerate(('p_axis', 'b_axis', 't_axis')):
        azimuth, plunge = utils.get_azimuth_plunge(
            self.eigenvectors[:, index], True)
        setattr(principal_axes, attr,
                {'eigenvalue': self.eigenvalues[index],
                 'azimuth': azimuth,
                 'plunge': plunge})
    return principal_axes
Uses the eigendecomposition to extract the principal axes from the moment tensor - returning an instance of the GCMTPrincipalAxes class
363
32
232,514
def select_catalogue_events(self, id0):
    """
    Orders the events in the catalogue according to the indexing
    vector ``id0``, filtering every data column and the GCMT list.

    :param id0: index vector (integer positions, usable both for numpy
        fancy indexing and for list indexing) selecting the events
    """
    for key in self.data.keys():
        if isinstance(self.data[key], np.ndarray) and \
                len(self.data[key]) > 0:
            # Dictionary element is numpy array - use logical indexing
            self.data[key] = self.data[key][id0]
        elif isinstance(self.data[key], list) and \
                len(self.data[key]) > 0:
            # Dictionary element is list
            self.data[key] = [self.data[key][iloc] for iloc in id0]
        else:
            continue
    if len(self.gcmts) > 0:
        self.gcmts = [self.gcmts[iloc] for iloc in id0]
        self.number_gcmts = self.get_number_tensors()
Orders the events in the catalogue according to an indexing vector
198
13
232,515
def _get_edge_set(self, tol=0.1):
    """
    Retrieves the set of top edges from all of the individual surfaces,
    downsampling the upper edge based on the specified tolerance.

    :param tol: downsampling tolerance passed to ``downsample_trace``
    :raises ValueError: for surface types that are not recognised
    """
    edges = []
    for surface in self.surfaces:
        if isinstance(surface, GriddedSurface):
            # NOTE(review): list.append returns None, so this branch
            # returns None instead of a list of edges - looks like a
            # bug; confirm the intended behaviour for gridded surfaces
            return edges.append(surface.mesh)
        elif isinstance(surface, PlanarSurface):
            # Top edge determined from two end points
            edge = []
            for pnt in [surface.top_left, surface.top_right]:
                edge.append([pnt.longitude, pnt.latitude, pnt.depth])
            edges.append(numpy.array(edge))
        elif isinstance(surface, (ComplexFaultSurface, SimpleFaultSurface)):
            # Rectangular meshes are downsampled to reduce their
            # overall size
            edges.append(downsample_trace(surface.mesh, tol))
        else:
            raise ValueError("Surface %s not recognised" % str(surface))
    return edges
Retrieve set of top edges from all of the individual surfaces downsampling the upper edge based on the specified tolerance
203
23
232,516
def get_min_distance(self, mesh):
    """
    For each point in ``mesh`` compute the minimum distance to every
    surface element and return the smallest value per point.
    """
    per_surface = numpy.array(
        [surface.get_min_distance(mesh) for surface in self.surfaces])
    return per_surface.min(axis=0)
For each point in mesh compute the minimum distance to each surface element and return the smallest value .
46
19
232,517
def get_closest_points(self, mesh):
    """
    For each point in ``mesh`` find the closest surface element and
    return a Mesh built from the corresponding closest points.
    """
    # first, for each point in mesh compute minimum distance to each
    # surface. The distance matrix is flattend, because mesh can be of
    # an arbitrary shape. By flattening we obtain a ``distances`` matrix
    # for which the first dimension represents the different surfaces
    # and the second dimension the mesh points.
    dists = numpy.array([surf.get_min_distance(mesh).flatten()
                         for surf in self.surfaces])
    # find for each point in mesh the index of closest surface
    idx = dists == numpy.min(dists, axis=0)
    # loop again over surfaces. For each surface compute the closest
    # points, and associate them to the mesh points for which the surface
    # is the closest. Note that if a surface is not the closest to any of
    # the mesh points then the calculation is skipped
    lons = numpy.empty_like(mesh.lons.flatten())
    lats = numpy.empty_like(mesh.lats.flatten())
    depths = None if mesh.depths is None else \
        numpy.empty_like(mesh.depths.flatten())
    for i, surf in enumerate(self.surfaces):
        if not idx[i, :].any():
            continue
        cps = surf.get_closest_points(mesh)
        lons[idx[i, :]] = cps.lons.flatten()[idx[i, :]]
        lats[idx[i, :]] = cps.lats.flatten()[idx[i, :]]
        if depths is not None:
            depths[idx[i, :]] = cps.depths.flatten()[idx[i, :]]
    # restore the original mesh shape before building the result
    lons = lons.reshape(mesh.lons.shape)
    lats = lats.reshape(mesh.lats.shape)
    if depths is not None:
        depths = depths.reshape(mesh.depths.shape)
    return Mesh(lons, lats, depths)
For each point in mesh find the closest surface element and return the corresponding closest point .
459
17
232,518
def get_bounding_box(self):
    """
    Compute the bounding box of every surface element, then return the
    spherical bounding box enclosing all of them.
    """
    all_lons, all_lats = [], []
    for surface in self.surfaces:
        west, east, north, south = surface.get_bounding_box()
        all_lons += [west, east]
        all_lats += [north, south]
    return utils.get_spherical_bounding_box(all_lons, all_lats)
Compute bounding box for each surface element and then return the bounding box of all surface elements bounding boxes .
88
24
232,519
def _get_areas ( self ) : if self . areas is None : self . areas = [ ] for surf in self . surfaces : self . areas . append ( surf . get_area ( ) ) self . areas = numpy . array ( self . areas ) return self . areas
Return surface elements area values in a numpy array .
61
11
232,520
def _get_cartesian_edge_set(self):
    """
    For the GC2 calculations a set of cartesian representations of the
    fault edges are needed. In this present case we use a common
    cartesian framework for all edges, as opposed to defining a
    separate orthographic projection per edge.

    Populates the cartesian edge/endpoint/length caches on ``self`` and
    returns the stacked edge set.
    """
    # Get projection space for cartesian projection
    edge_sets = numpy.vstack(self.edge_set)
    west, east, north, south = utils.get_spherical_bounding_box(
        edge_sets[:, 0], edge_sets[:, 1])
    self.proj = utils.OrthographicProjection(west, east, north, south)
    for edges in self.edge_set:
        # Project edges into cartesian space
        px, py = self.proj(edges[:, 0], edges[:, 1])
        # Store the two end-points of the trace
        self.cartesian_endpoints.append(
            numpy.array([[px[0], py[0], edges[0, 2]],
                         [px[-1], py[-1], edges[-1, 2]]]))
        self.cartesian_edges.append(
            numpy.column_stack([px, py, edges[:, 2]]))
        # Get surface length vector for the trace - easier in cartesian
        lengths = numpy.sqrt((px[:-1] - px[1:]) ** 2. +
                             (py[:-1] - py[1:]) ** 2.)
        self.length_set.append(lengths)
        # Get cumulative surface length vector
        self.cum_length_set.append(
            numpy.hstack([0., numpy.cumsum(lengths)]))
    return edge_sets
For the GC2 calculations a set of cartesian representations of the fault edges are needed . In this present case we use a common cartesian framework for all edges as opposed to defining a separate orthographic projection per edge
352
43
232,521
def _get_gc2_coordinates_for_rupture ( self , edge_sets ) : # Establish GC2 length - for use with Ry0 rup_gc2t , rup_gc2u = self . get_generalised_coordinates ( edge_sets [ : , 0 ] , edge_sets [ : , 1 ] ) # GC2 length should be the largest positive GC2 value of the edges self . gc_length = numpy . max ( rup_gc2u )
Calculates the GC2 coordinates for the nodes of the upper edge of the fault
110
17
232,522
def _get_ut_i ( self , seg , sx , sy ) : p0x , p0y , p1x , p1y = seg [ 0 , 0 ] , seg [ 0 , 1 ] , seg [ 1 , 0 ] , seg [ 1 , 1 ] # Unit vector normal to strike t_i_vec = [ p1y - p0y , - ( p1x - p0x ) , 0.0 ] t_i_hat = t_i_vec / numpy . linalg . norm ( t_i_vec ) # Unit vector along strike u_i_vec = [ p1x - p0x , p1y - p0y , 0.0 ] u_i_hat = u_i_vec / numpy . linalg . norm ( u_i_vec ) # Vectors from P0 to sites rsite = numpy . column_stack ( [ sx - p0x , sy - p0y ] ) return numpy . sum ( u_i_hat [ : - 1 ] * rsite , axis = 1 ) , numpy . sum ( t_i_hat [ : - 1 ] * rsite , axis = 1 )
Returns the U and T coordinate for a specific trace segment
270
11
232,523
def get_rx_distance(self, mesh):
    """
    For each point determine the corresponding Rx distance using the
    GC2 configuration (Rx is the GC2-T coordinate).

    NOTE(review): the guard below recomputes the GC2 coordinates when
    ``self.tmp_mesh`` is unset OR equal to ``mesh``; the accompanying
    comment suggests the intent was to *reuse* the cached values for an
    identical mesh, so the condition may be inverted - confirm against
    the callers before changing.
    """
    # If the GC2 calculations have already been computed (by invoking Ry0
    # first) and the mesh is identical then class has GC2 attributes
    # already pre-calculated
    if not self.tmp_mesh or (self.tmp_mesh == mesh):
        self.gc2t, self.gc2u = self.get_generalised_coordinates(
            mesh.lons, mesh.lats)
        # Update mesh
        self.tmp_mesh = deepcopy(mesh)
    # Rx coordinate is taken directly from gc2t
    return self.gc2t
For each point determine the corresponding rx distance using the GC2 configuration .
134
15
232,524
def get_ry0_distance(self, mesh):
    """
    For each point determine the corresponding Ry0 distance using the
    GC2 configuration: 0 within the fault length, otherwise the GC2-U
    distance beyond either end of the fault.

    NOTE(review): the cache guard recomputes the GC2 coordinates when
    ``self.tmp_mesh`` is unset OR equal to ``mesh``, which contradicts
    the comment below - confirm the intended condition.
    """
    # If the GC2 calculations have already been computed (by invoking Ry0
    # first) and the mesh is identical then class has GC2 attributes
    # already pre-calculated
    if not self.tmp_mesh or (self.tmp_mesh == mesh):
        # If that's not the case, or the mesh is different then
        # re-compute GC2 configuration
        self.gc2t, self.gc2u = self.get_generalised_coordinates(
            mesh.lons, mesh.lats)
        # Update mesh
        self.tmp_mesh = deepcopy(mesh)
    # Default value ry0 (for sites within fault length) is 0.0
    ry0 = numpy.zeros_like(self.gc2u, dtype=float)
    # For sites with negative gc2u (off the initial point of the fault)
    # take the absolute value of gc2u
    neg_gc2u = self.gc2u < 0.0
    ry0[neg_gc2u] = numpy.fabs(self.gc2u[neg_gc2u])
    # Sites off the end of the fault have values shifted by the
    # GC2 length of the fault
    pos_gc2u = self.gc2u >= self.gc_length
    ry0[pos_gc2u] = self.gc2u[pos_gc2u] - self.gc_length
    return ry0
For each point determine the corresponding Ry0 distance using the GC2 configuration .
329
15
232,525
def from_model_files(cls, limits, input_model, investigation_time=1.0,
                     simple_mesh_spacing=1.0, complex_mesh_spacing=5.0,
                     mfd_width=0.1, area_discretisation=10.0):
    """
    Reads the hazard model from a source model file and builds an
    instance of the class from the flattened list of sources.
    """
    converter = SourceConverter(investigation_time, simple_mesh_spacing,
                                complex_mesh_spacing, mfd_width,
                                area_discretisation)
    groups = nrml.to_python(input_model, converter)
    sources = [src for grp in groups for src in grp.sources]
    return cls(limits, sources, area_discretisation)
Reads the hazard model from a file
152
8
232,526
def get_rates(self, mmin, mmax=np.inf):
    """
    Returns the cumulative rates greater than ``mmin`` (and below
    ``mmax``) by dispatching every source in the model to the
    appropriate rate calculator.
    """
    nsrcs = self.number_sources()
    fault_types = (CharacteristicFaultSource, ComplexFaultSource,
                   SimpleFaultSource)
    for iloc, source in enumerate(self.source_model):
        print("Source Number %s of %s, Name = %s, Typology = %s" %
              (iloc + 1, nsrcs, source.name, source.__class__.__name__))
        if isinstance(source, fault_types):
            self._get_fault_rates(source, mmin, mmax)
        elif isinstance(source, AreaSource):
            self._get_area_rates(source, mmin, mmax)
        elif isinstance(source, PointSource):
            self._get_point_rates(source, mmin, mmax)
        else:
            print("Source type %s not recognised - skipping!" % source)
Returns the cumulative rates greater than Mmin
261
8
232,527
def _get_point_location ( self , location ) : if ( location . longitude < self . xlim [ 0 ] ) or ( location . longitude > self . xlim [ - 1 ] ) : return None , None xloc = int ( ( ( location . longitude - self . xlim [ 0 ] ) / self . xspc ) + 1E-7 ) if ( location . latitude < self . ylim [ 0 ] ) or ( location . latitude > self . ylim [ - 1 ] ) : return None , None yloc = int ( ( ( location . latitude - self . ylim [ 0 ] ) / self . yspc ) + 1E-7 ) return xloc , yloc
Returns the location in the output grid corresponding to the cell in which the epicentre lays
154
18
232,528
def _get_area_rates ( self , source , mmin , mmax = np . inf ) : points = list ( source ) for point in points : self . _get_point_rates ( point , mmin , mmax )
Adds the rates from the area source by discretising the source to a set of point sources
51
19
232,529
def _get_distance_scaling ( self , C , mag , rhypo ) : return ( C [ "a3" ] * np . log ( rhypo ) ) + ( C [ "a4" ] + C [ "a5" ] * mag ) * rhypo
Returns the distance scaling term
63
6
232,530
def _get_distance_scaling_term ( self , C , rjb , mag ) : # Depth adjusted distance, equation 11 (Page 63) rval = np . sqrt ( rjb ** 2.0 + C [ "c11" ] ** 2.0 ) f_0 , f_1 , f_2 = self . _get_distance_segment_coefficients ( rval ) return ( ( C [ "c4" ] + C [ "c5" ] * mag ) * f_0 + ( C [ "c6" ] + C [ "c7" ] * mag ) * f_1 + ( C [ "c8" ] + C [ "c9" ] * mag ) * f_2 + ( C [ "c10" ] * rval ) )
Returns the distance scaling component of the model Equation 10 Page 63
176
13
232,531
def _get_distance_segment_coefficients ( self , rval ) : # Get distance segment ends nsites = len ( rval ) # Equation 12a f_0 = np . log10 ( self . CONSTS [ "r0" ] / rval ) f_0 [ rval > self . CONSTS [ "r0" ] ] = 0.0 # Equation 12b f_1 = np . log10 ( rval ) f_1 [ rval > self . CONSTS [ "r1" ] ] = np . log10 ( self . CONSTS [ "r1" ] ) # Equation 12c f_2 = np . log10 ( rval / self . CONSTS [ "r2" ] ) f_2 [ rval <= self . CONSTS [ "r2" ] ] = 0.0 return f_0 , f_1 , f_2
Returns the coefficients describing the distance attenuation shape for three different distance bins equations 12a - 12c
201
20
232,532
def collect_files(dirpath, cond=lambda fullname: True):
    """
    Recursively collect the files contained inside ``dirpath`` whose
    full name satisfies the predicate ``cond``.

    Fix: the recursive call previously dropped ``cond``, so the filter
    was only applied at the top level; it is now propagated into
    subdirectories.
    """
    files = []
    for fname in os.listdir(dirpath):
        fullname = os.path.join(dirpath, fname)
        if os.path.isdir(fullname):  # navigate inside
            files.extend(collect_files(fullname, cond))
        else:  # collect files
            if cond(fullname):
                files.append(fullname)
    return files
Recursively collect the files contained inside dirpath .
97
11
232,533
def extract_from_zip(path, candidates):
    """
    Unzip the archive ``path`` into a temporary directory and return
    the full paths of the extracted files whose basename is in
    ``candidates``.
    """
    temp_dir = tempfile.mkdtemp()
    with zipfile.ZipFile(path) as archive:
        archive.extractall(temp_dir)
    found = []
    for fullpath in collect_files(temp_dir):
        if os.path.basename(fullpath) in candidates:
            found.append(fullpath)
    return found
Given a zip archive and a function to detect the presence of a given filename unzip the archive into a temporary directory and return the full path of the file . Raise an IOError if the file cannot be found within the archive .
72
46
232,534
def get_params(job_inis, **kw):
    """
    Parse one or more INI-style config files (or a single .zip archive
    containing them) into a dictionary of parameters; ``kw`` entries
    override the values read from the files.
    """
    input_zip = None
    if len(job_inis) == 1 and job_inis[0].endswith('.zip'):
        # a zipped input: extract the recognised job files from it
        input_zip = job_inis[0]
        job_inis = extract_from_zip(
            job_inis[0],
            ['job_hazard.ini', 'job_haz.ini', 'job.ini', 'job_risk.ini'])
    not_found = [ini for ini in job_inis if not os.path.exists(ini)]
    if not_found:  # something was not found
        raise IOError('File not found: %s' % not_found[0])
    cp = configparser.ConfigParser()
    cp.read(job_inis)
    # directory containing the config files we're parsing
    job_ini = os.path.abspath(job_inis[0])
    base_path = decode(os.path.dirname(job_ini))
    params = dict(base_path=base_path, inputs={'job_ini': job_ini})
    if input_zip:
        params['inputs']['input_zip'] = os.path.abspath(input_zip)
    for sect in cp.sections():
        _update(params, cp.items(sect), base_path)
    _update(params, kw.items(), base_path)  # override on demand
    if params['inputs'].get('reqv'):
        # using pointsource_distance=0 because of the reqv approximation
        params['pointsource_distance'] = '0'
    return params
Parse one or more INI - style config files .
372
12
232,535
def get_oqparam(job_ini, pkg=None, calculators=None, hc_id=None, validate=1,
                **kw):
    """
    Parse a dictionary of parameters from an INI-style config file
    (``job_ini`` may also be an already-parsed dict) and return a
    validated OqParam instance; ``kw`` entries override the file.
    """
    # UGLY: this is here to avoid circular imports
    from openquake.calculators import base
    # restrict the accepted calculation modes to the known calculators
    OqParam.calculation_mode.validator.choices = tuple(
        calculators or base.calculators)
    if not isinstance(job_ini, dict):
        basedir = os.path.dirname(pkg.__file__) if pkg else ''
        job_ini = get_params([os.path.join(basedir, job_ini)])
    if hc_id:
        job_ini.update(hazard_calculation_id=str(hc_id))
    job_ini.update(kw)
    oqparam = OqParam(**job_ini)
    if validate:
        oqparam.validate()
    return oqparam
Parse a dictionary of parameters from an INI - style config file .
204
15
232,536
def get_site_model(oqparam):
    """
    Convert the site model file(s) (CSV or NRML) into an array of site
    parameters, validating that all the required site parameters are
    present (with defaults for vs30measured and backarc).
    """
    req_site_params = get_gsim_lt(oqparam).req_site_params
    arrays = []
    for fname in oqparam.inputs['site_model']:
        if isinstance(fname, str) and fname.endswith('.csv'):
            sm = read_csv(fname)
            if 'site_id' in sm.dtype.names:
                raise InvalidFile('%s: you passed a sites.csv file instead of '
                                  'a site_model.csv file!' % fname)
            z = numpy.zeros(len(sm), sorted(sm.dtype.descr))
            for name in z.dtype.names:  # reorder the fields
                z[name] = sm[name]
            arrays.append(z)
            continue
        nodes = nrml.read(fname).siteModel
        params = [valid.site_param(node.attrib) for node in nodes]
        missing = req_site_params - set(params[0])
        if 'vs30measured' in missing:  # use a default of False
            missing -= {'vs30measured'}
            for param in params:
                param['vs30measured'] = False
        if 'backarc' in missing:  # use a default of False
            missing -= {'backarc'}
            for param in params:
                param['backarc'] = False
        if missing:
            raise InvalidFile('%s: missing parameter %s' %
                              (oqparam.inputs['site_model'],
                               ', '.join(missing)))
        # NB: the sorted in sorted(params[0]) is essential, otherwise there is
        # an heisenbug in scenario/test_case_4
        site_model_dt = numpy.dtype([(p, site.site_param_dt[p])
                                     for p in sorted(params[0])])
        sm = numpy.array([tuple(param[name] for name in site_model_dt.names)
                          for param in params], site_model_dt)
        arrays.append(sm)
    return numpy.concatenate(arrays)
Convert the NRML file into an array of site parameters .
476
13
232,537
def get_site_collection(oqparam):
    """
    Returns a SiteCollection instance by looking at the points and the
    site model defined by the configuration parameters (or None when
    only the ruptures are being computed).
    """
    mesh = get_mesh(oqparam)
    req_site_params = get_gsim_lt(oqparam).req_site_params
    if oqparam.inputs.get('site_model'):
        sm = get_site_model(oqparam)
        try:  # in the future we could have elevation in the site model
            depth = sm['depth']
        except ValueError:  # this is the normal case
            depth = None
        sitecol = site.SiteCollection.from_points(
            sm['lon'], sm['lat'], depth, sm, req_site_params)
        if oqparam.region_grid_spacing:
            logging.info('Reducing the grid sites to the site '
                         'parameters within the grid spacing')
            sitecol, params, _ = geo.utils.assoc(
                sm, sitecol, oqparam.region_grid_spacing * 1.414, 'filter')
            sitecol.make_complete()
        else:
            params = sm
        for name in req_site_params:
            if name in ('vs30measured', 'backarc') and \
                    name not in params.dtype.names:
                sitecol._set(name, 0)  # the default
            else:
                sitecol._set(name, params[name])
    elif mesh is None and oqparam.ground_motion_fields:
        raise InvalidFile('You are missing sites.csv or site_model.csv in %s'
                          % oqparam.inputs['job_ini'])
    elif mesh is None:
        # a None sitecol is okay when computing the ruptures only
        return
    else:  # use the default site params
        sitecol = site.SiteCollection.from_points(
            mesh.lons, mesh.lats, mesh.depths, oqparam, req_site_params)
    ss = os.environ.get('OQ_SAMPLE_SITES')
    if ss:
        # debugging tip to reduce the size of a calculation
        # OQ_SAMPLE_SITES=.1 oq engine --run job.ini
        # will run a computation with 10 times less sites
        sitecol.array = numpy.array(random_filter(sitecol.array, float(ss)))
        sitecol.make_complete()
    return sitecol
Returns a SiteCollection instance by looking at the points and the site model defined by the configuration parameters .
512
20
232,538
def get_rupture(oqparam):
    """
    Read the rupture_model file and build the corresponding rupture
    object, with a wildcard tectonic region type and the serial taken
    from the random seed.
    """
    [rup_node] = nrml.read(oqparam.inputs['rupture_model'])
    converter = sourceconverter.RuptureConverter(
        oqparam.rupture_mesh_spacing, oqparam.complex_fault_mesh_spacing)
    rup = converter.convert_node(rup_node)
    # there is not TRT for scenario ruptures
    rup.tectonic_region_type = '*'
    rup.serial = oqparam.random_seed
    return rup
Read the rupture_model file and build the corresponding rupture object
141
12
232,539
def get_composite_source_model ( oqparam , monitor = None , in_memory = True , srcfilter = SourceFilter ( None , { } ) ) : ucerf = oqparam . calculation_mode . startswith ( 'ucerf' ) source_model_lt = get_source_model_lt ( oqparam , validate = not ucerf ) trts = source_model_lt . tectonic_region_types trts_lower = { trt . lower ( ) for trt in trts } reqv = oqparam . inputs . get ( 'reqv' , { } ) for trt in reqv : # these are lowercase because they come from the job.ini if trt not in trts_lower : raise ValueError ( 'Unknown TRT=%s in %s [reqv]' % ( trt , oqparam . inputs [ 'job_ini' ] ) ) gsim_lt = get_gsim_lt ( oqparam , trts or [ '*' ] ) p = source_model_lt . num_paths * gsim_lt . get_num_paths ( ) if oqparam . number_of_logic_tree_samples : logging . info ( 'Considering {:,d} logic tree paths out of {:,d}' . format ( oqparam . number_of_logic_tree_samples , p ) ) else : # full enumeration if oqparam . is_event_based ( ) and p > oqparam . max_potential_paths : raise ValueError ( 'There are too many potential logic tree paths (%d) ' 'use sampling instead of full enumeration' % p ) logging . info ( 'Potential number of logic tree paths = {:,d}' . format ( p ) ) if source_model_lt . on_each_source : logging . info ( 'There is a logic tree on each source' ) if monitor is None : monitor = performance . Monitor ( ) smodels = [ ] for source_model in get_source_models ( oqparam , gsim_lt , source_model_lt , monitor , in_memory , srcfilter ) : for src_group in source_model . src_groups : src_group . sources = sorted ( src_group , key = getid ) for src in src_group : # there are two cases depending on the flag in_memory: # 1) src is a hazardlib source and has a src_group_id # attribute; in that case the source has to be numbered # 2) src is a Node object, then nothing must be done if isinstance ( src , Node ) : continue smodels . append ( source_model ) csm = source . CompositeSourceModel ( gsim_lt , source_model_lt , smodels , oqparam . 
optimize_same_id_sources ) for sm in csm . source_models : counter = collections . Counter ( ) for sg in sm . src_groups : for srcid in map ( getid , sg ) : counter [ srcid ] += 1 dupl = [ srcid for srcid in counter if counter [ srcid ] > 1 ] if dupl : raise nrml . DuplicatedID ( 'Found duplicated source IDs in %s: %s' % ( sm , dupl ) ) if not in_memory : return csm if oqparam . is_event_based ( ) : # initialize the rupture serial numbers before splitting/filtering; in # this way the serials are independent from the site collection csm . init_serials ( oqparam . ses_seed ) if oqparam . disagg_by_src : csm = csm . grp_by_src ( ) # one group per source csm . info . gsim_lt . check_imts ( oqparam . imtls ) parallel . Starmap . shutdown ( ) # save memory return csm
Parse the XML and build a complete composite source model in memory .
864
14
232,540
def get_mesh_hcurves ( oqparam ) : imtls = oqparam . imtls lon_lats = set ( ) data = AccumDict ( ) # imt -> list of arrays ncols = len ( imtls ) + 1 # lon_lat + curve_per_imt ... csvfile = oqparam . inputs [ 'hazard_curves' ] for line , row in enumerate ( csv . reader ( csvfile ) , 1 ) : try : if len ( row ) != ncols : raise ValueError ( 'Expected %d columns, found %d' % ncols , len ( row ) ) x , y = row [ 0 ] . split ( ) lon_lat = valid . longitude ( x ) , valid . latitude ( y ) if lon_lat in lon_lats : raise DuplicatedPoint ( lon_lat ) lon_lats . add ( lon_lat ) for i , imt_ in enumerate ( imtls , 1 ) : values = valid . decreasing_probabilities ( row [ i ] ) if len ( values ) != len ( imtls [ imt_ ] ) : raise ValueError ( 'Found %d values, expected %d' % ( len ( values ) , len ( imtls ( [ imt_ ] ) ) ) ) data += { imt_ : [ numpy . array ( values ) ] } except ( ValueError , DuplicatedPoint ) as err : raise err . __class__ ( '%s: file %s, line %d' % ( err , csvfile , line ) ) lons , lats = zip ( * sorted ( lon_lats ) ) mesh = geo . Mesh ( numpy . array ( lons ) , numpy . array ( lats ) ) return mesh , { imt : numpy . array ( lst ) for imt , lst in data . items ( ) }
Read CSV data in the format lon lat v1 - vN w1 - wN ... .
431
21
232,541
def reduce_source_model ( smlt_file , source_ids , remove = True ) : found = 0 to_remove = [ ] for paths in logictree . collect_info ( smlt_file ) . smpaths . values ( ) : for path in paths : logging . info ( 'Reading %s' , path ) root = nrml . read ( path ) model = Node ( 'sourceModel' , root [ 0 ] . attrib ) origmodel = root [ 0 ] if root [ 'xmlns' ] == 'http://openquake.org/xmlns/nrml/0.4' : for src_node in origmodel : if src_node [ 'id' ] in source_ids : model . nodes . append ( src_node ) else : # nrml/0.5 for src_group in origmodel : sg = copy . copy ( src_group ) sg . nodes = [ ] weights = src_group . get ( 'srcs_weights' ) if weights : assert len ( weights ) == len ( src_group . nodes ) else : weights = [ 1 ] * len ( src_group . nodes ) src_group [ 'srcs_weights' ] = reduced_weigths = [ ] for src_node , weight in zip ( src_group , weights ) : if src_node [ 'id' ] in source_ids : found += 1 sg . nodes . append ( src_node ) reduced_weigths . append ( weight ) if sg . nodes : model . nodes . append ( sg ) shutil . copy ( path , path + '.bak' ) if model : with open ( path , 'wb' ) as f : nrml . write ( [ model ] , f , xmlns = root [ 'xmlns' ] ) elif remove : # remove the files completely reduced to_remove . append ( path ) if found : for path in to_remove : os . remove ( path )
Extract sources from the composite source model
426
8
232,542
def get_checksum32 ( oqparam , hazard = False ) : # NB: using adler32 & 0xffffffff is the documented way to get a checksum # which is the same between Python 2 and Python 3 checksum = 0 for fname in get_input_files ( oqparam , hazard ) : checksum = _checksum ( fname , checksum ) if hazard : hazard_params = [ ] for key , val in vars ( oqparam ) . items ( ) : if key in ( 'rupture_mesh_spacing' , 'complex_fault_mesh_spacing' , 'width_of_mfd_bin' , 'area_source_discretization' , 'random_seed' , 'ses_seed' , 'truncation_level' , 'maximum_distance' , 'investigation_time' , 'number_of_logic_tree_samples' , 'imtls' , 'ses_per_logic_tree_path' , 'minimum_magnitude' , 'prefilter_sources' , 'sites' , 'pointsource_distance' , 'filter_distance' ) : hazard_params . append ( '%s = %s' % ( key , val ) ) data = '\n' . join ( hazard_params ) . encode ( 'utf8' ) checksum = zlib . adler32 ( data , checksum ) & 0xffffffff return checksum
Build an unsigned 32 bit integer from the input files of a calculation .
321
14
232,543
def smart_save ( dbpath , archive , calc_id ) : tmpdir = tempfile . mkdtemp ( ) newdb = os . path . join ( tmpdir , os . path . basename ( dbpath ) ) shutil . copy ( dbpath , newdb ) try : with sqlite3 . connect ( newdb ) as conn : conn . execute ( 'DELETE FROM job WHERE status != "complete"' ) if calc_id : conn . execute ( 'DELETE FROM job WHERE id != %d' % calc_id ) except : safeprint ( 'Please check the copy of the db in %s' % newdb ) raise zipfiles ( [ newdb ] , archive , 'a' , safeprint ) shutil . rmtree ( tmpdir )
Make a copy of the db remove the incomplete jobs and add the copy to the archive
169
17
232,544
def dump ( archive , calc_id = 0 , user = None ) : t0 = time . time ( ) assert archive . endswith ( '.zip' ) , archive getfnames = 'select ds_calc_dir || ".hdf5" from job where ?A' param = dict ( status = 'complete' ) if calc_id : param [ 'id' ] = calc_id if user : param [ 'user_name' ] = user fnames = [ f for f , in db ( getfnames , param ) if os . path . exists ( f ) ] zipfiles ( fnames , archive , 'w' , safeprint ) pending_jobs = db ( 'select id, status, description from job ' 'where status="executing"' ) if pending_jobs : safeprint ( 'WARNING: there were calculations executing during the dump,' ' they have been not copied' ) for job_id , status , descr in pending_jobs : safeprint ( '%d %s %s' % ( job_id , status , descr ) ) # this also checks that the copied db is not corrupted smart_save ( db . path , archive , calc_id ) dt = time . time ( ) - t0 safeprint ( 'Archived %d calculations into %s in %d seconds' % ( len ( fnames ) , archive , dt ) )
Dump the openquake database and all the complete calculations into a zip file . In a multiuser installation must be run as administrator .
301
29
232,545
def load_version ( ) : filename = os . path . abspath ( os . path . join ( os . path . dirname ( os . path . abspath ( __file__ ) ) , "cpt" , "__init__.py" ) ) with open ( filename , "rt" ) as version_file : conan_init = version_file . read ( ) version = re . search ( "__version__ = '([0-9a-z.-]+)'" , conan_init ) . group ( 1 ) return version
Loads a file content
118
5
232,546
def builds ( self , confs ) : self . _named_builds = { } self . _builds = [ ] for values in confs : if len ( values ) == 2 : self . _builds . append ( BuildConf ( values [ 0 ] , values [ 1 ] , { } , { } , self . reference ) ) elif len ( values ) == 4 : self . _builds . append ( BuildConf ( values [ 0 ] , values [ 1 ] , values [ 2 ] , values [ 3 ] , self . reference ) ) elif len ( values ) != 5 : raise Exception ( "Invalid build configuration, has to be a tuple of " "(settings, options, env_vars, build_requires, reference)" ) else : self . _builds . append ( BuildConf ( * values ) )
For retro compatibility directly assigning builds
176
6
232,547
def patch_default_base_profile ( conan_api , profile_abs_path ) : text = tools . load ( profile_abs_path ) if "include(default)" in text : # User didn't specified a custom profile if Version ( conan_version ) < Version ( "1.12.0" ) : cache = conan_api . _client_cache else : cache = conan_api . _cache default_profile_name = os . path . basename ( cache . default_profile_path ) if not os . path . exists ( cache . default_profile_path ) : conan_api . create_profile ( default_profile_name , detect = True ) if default_profile_name != "default" : # User have a different default profile name # https://github.com/conan-io/conan-package-tools/issues/121 text = text . replace ( "include(default)" , "include(%s)" % default_profile_name ) tools . save ( profile_abs_path , text )
If we have a profile including default but the users default in config is that the default is other we have to change the include
227
25
232,548
def get_user_if_exists ( strategy , details , user = None , * args , * * kwargs ) : if user : return { 'is_new' : False } try : username = details . get ( 'username' ) # Return the user if it exists return { 'is_new' : False , 'user' : User . objects . get ( username = username ) } except User . DoesNotExist : # Fall to the default return value pass # Nothing to return since we don't have a user return { }
Return a User with the given username iff the User exists .
115
13
232,549
def update_email ( strategy , details , user = None , * args , * * kwargs ) : if user : email = details . get ( 'email' ) if email and user . email != email : user . email = email strategy . storage . user . changed ( user )
Update the user s email address using data from provider .
60
11
232,550
def get_user_claims ( self , access_token , claims = None , token_type = 'Bearer' ) : data = self . get_json ( self . USER_INFO_URL , headers = { 'Authorization' : '{token_type} {token}' . format ( token_type = token_type , token = access_token ) } ) if claims : claims_names = set ( claims ) data = { k : v for ( k , v ) in six . iteritems ( data ) if k in claims_names } return data
Returns a dictionary with the values for each claim requested .
122
11
232,551
def _setup_ipc ( self ) : log . debug ( 'Setting up the server IPC puller to receive from the listener' ) self . ctx = zmq . Context ( ) # subscribe to listener self . sub = self . ctx . socket ( zmq . PULL ) self . sub . bind ( LST_IPC_URL ) try : self . sub . setsockopt ( zmq . HWM , self . opts [ 'hwm' ] ) # zmq 2 except AttributeError : # zmq 3 self . sub . setsockopt ( zmq . RCVHWM , self . opts [ 'hwm' ] ) # device publishers log . debug ( 'Creating the router ICP on the server' ) self . pub = self . ctx . socket ( zmq . ROUTER ) self . pub . bind ( DEV_IPC_URL ) try : self . pub . setsockopt ( zmq . HWM , self . opts [ 'hwm' ] ) # zmq 2 except AttributeError : # zmq 3 self . pub . setsockopt ( zmq . SNDHWM , self . opts [ 'hwm' ] ) # Pipe to the publishers self . publisher_pub = self . ctx . socket ( zmq . PUB ) self . publisher_pub . connect ( PUB_PX_IPC_URL ) try : self . publisher_pub . setsockopt ( zmq . HWM , self . opts [ 'hwm' ] ) # zmq 2 except AttributeError : # zmq 3 self . publisher_pub . setsockopt ( zmq . SNDHWM , self . opts [ 'hwm' ] )
Setup the IPC pub and sub . Subscript to the listener IPC and publish to the device specific IPC .
390
24
232,552
def _cleanup_buffer ( self ) : if not self . _buffer : return while True : time . sleep ( 60 ) log . debug ( 'Cleaning up buffer' ) items = self . _buffer . items ( ) # The ``items`` function should also cleanup the buffer log . debug ( 'Collected items' ) log . debug ( list ( items ) )
Periodically cleanup the buffer .
78
7
232,553
def _compile_prefixes ( self ) : self . compiled_prefixes = { } for dev_os , os_config in self . config . items ( ) : if not os_config : continue self . compiled_prefixes [ dev_os ] = [ ] for prefix in os_config . get ( 'prefixes' , [ ] ) : values = prefix . get ( 'values' , { } ) line = prefix . get ( 'line' , '' ) if prefix . get ( '__python_fun__' ) : self . compiled_prefixes [ dev_os ] . append ( { '__python_fun__' : prefix [ '__python_fun__' ] , '__python_mod__' : prefix [ '__python_mod__' ] } ) continue # if python profiler defined for this prefix, # no need to go further, but jump to the next prefix # Add 'pri' and 'message' to the line, and values line = '{{pri}}{}{{message}}' . format ( line ) # PRI https://tools.ietf.org/html/rfc5424#section-6.2.1 values [ 'pri' ] = r'\<(\d+)\>' values [ 'message' ] = '(.*)' # We will now figure out which position each value is in so we can use it with the match statement position = { } for key in values . keys ( ) : position [ line . find ( '{' + key + '}' ) ] = key sorted_position = { } for i , elem in enumerate ( sorted ( position . items ( ) ) ) : sorted_position [ elem [ 1 ] ] = i + 1 # Escape the line, then remove the escape for the curly bracets so they can be used when formatting escaped = re . escape ( line ) . replace ( r'\{' , '{' ) . replace ( r'\}' , '}' ) # Replace a whitespace with \s+ escaped = escaped . replace ( r'\ ' , r'\s+' ) self . compiled_prefixes [ dev_os ] . append ( { 'prefix' : re . compile ( escaped . format ( * * values ) ) , 'prefix_positions' : sorted_position , 'raw_prefix' : escaped . format ( * * values ) , 'values' : values } )
Create a dict of all OS prefixes and their compiled regexs
518
13
232,554
def _identify_prefix ( self , msg , data ) : prefix_id = - 1 for prefix in data : msg_dict = { } prefix_id += 1 match = None if '__python_fun__' in prefix : log . debug ( 'Trying to match using the %s custom python profiler' , prefix [ '__python_mod__' ] ) try : match = prefix [ '__python_fun__' ] ( msg ) except Exception : log . error ( 'Exception while parsing %s with the %s python profiler' , msg , prefix [ '__python_mod__' ] , exc_info = True ) else : log . debug ( 'Matching using YAML-defined profiler:' ) log . debug ( prefix [ 'raw_prefix' ] ) match = prefix [ 'prefix' ] . search ( msg ) if not match : log . debug ( 'Match not found' ) continue if '__python_fun__' in prefix : log . debug ( '%s matched using the custom python profiler %s' , msg , prefix [ '__python_mod__' ] ) msg_dict = match # the output as-is from the custom function else : positions = prefix . get ( 'prefix_positions' , { } ) values = prefix . get ( 'values' ) msg_dict = { } for key in values . keys ( ) : msg_dict [ key ] = match . group ( positions . get ( key ) ) # Remove whitespace from the start or end of the message msg_dict [ '__prefix_id__' ] = prefix_id msg_dict [ 'message' ] = msg_dict [ 'message' ] . strip ( ) # The pri has to be an int as it is retrived using regex '\<(\d+)\>' if 'pri' in msg_dict : msg_dict [ 'facility' ] = int ( int ( msg_dict [ 'pri' ] ) / 8 ) msg_dict [ 'severity' ] = int ( int ( msg_dict [ 'pri' ] ) - ( msg_dict [ 'facility' ] * 8 ) ) return msg_dict
Check the message again each OS prefix and if matched return the message dict
465
14
232,555
def _identify_os ( self , msg ) : ret = [ ] for dev_os , data in self . compiled_prefixes . items ( ) : # TODO Should we prevent attepmting to determine the OS for the blacklisted? # [mircea] I think its good from a logging perspective to know at least that # that the server found the matching and it tells that it won't be processed # further. Later, we could potentially add an option to control this. log . debug ( 'Matching under %s' , dev_os ) msg_dict = self . _identify_prefix ( msg , data ) if msg_dict : log . debug ( 'Adding %s to list of matched OS' , dev_os ) ret . append ( ( dev_os , msg_dict ) ) else : log . debug ( 'No match found for %s' , dev_os ) if not ret : log . debug ( 'Not matched any OS, returning original log' ) msg_dict = { 'message' : msg } ret . append ( ( None , msg_dict ) ) return ret
Using the prefix of the syslog message we are able to identify the operating system and then continue parsing .
236
21
232,556
def _setup_ipc ( self ) : self . ctx = zmq . Context ( ) # subscribe to device IPC log . debug ( 'Creating the dealer IPC for %s' , self . _name ) self . sub = self . ctx . socket ( zmq . DEALER ) if six . PY2 : self . sub . setsockopt ( zmq . IDENTITY , self . _name ) elif six . PY3 : self . sub . setsockopt ( zmq . IDENTITY , bytes ( self . _name , 'utf-8' ) ) try : self . sub . setsockopt ( zmq . HWM , self . opts [ 'hwm' ] ) # zmq 2 except AttributeError : # zmq 3 self . sub . setsockopt ( zmq . RCVHWM , self . opts [ 'hwm' ] ) # subscribe to the corresponding IPC pipe self . sub . connect ( DEV_IPC_URL ) # publish to the publisher IPC self . pub = self . ctx . socket ( zmq . PUB ) self . pub . connect ( PUB_PX_IPC_URL ) try : self . pub . setsockopt ( zmq . HWM , self . opts [ 'hwm' ] ) # zmq 2 except AttributeError : # zmq 3 self . pub . setsockopt ( zmq . SNDHWM , self . opts [ 'hwm' ] )
Subscribe to the right topic in the device IPC and publish to the publisher proxy .
339
17
232,557
def _compile_messages ( self ) : self . compiled_messages = [ ] if not self . _config : return for message_dict in self . _config . get ( 'messages' , { } ) : error = message_dict [ 'error' ] tag = message_dict [ 'tag' ] model = message_dict [ 'model' ] match_on = message_dict . get ( 'match_on' , 'tag' ) if '__python_fun__' in message_dict : self . compiled_messages . append ( { 'error' : error , 'tag' : tag , 'match_on' : match_on , 'model' : model , '__python_fun__' : message_dict [ '__python_fun__' ] } ) continue values = message_dict [ 'values' ] line = message_dict [ 'line' ] mapping = message_dict [ 'mapping' ] # We will now figure out which position each value is in so we can use it with the match statement position = { } replace = { } for key in values . keys ( ) : if '|' in key : new_key , replace [ new_key ] = key . replace ( ' ' , '' ) . split ( '|' ) values [ new_key ] = values . pop ( key ) key = new_key position [ line . find ( '{' + key + '}' ) ] = key sorted_position = { } for i , elem in enumerate ( sorted ( position . items ( ) ) ) : sorted_position [ elem [ 1 ] ] = i + 1 # Escape the line, then remove the escape for the curly bracets so they can be used when formatting escaped = re . escape ( line ) . replace ( r'\{' , '{' ) . replace ( r'\}' , '}' ) # Replace a whitespace with \s+ escaped = escaped . replace ( r'\ ' , r'\s+' ) self . compiled_messages . append ( { 'error' : error , 'tag' : tag , 'match_on' : match_on , 'line' : re . compile ( escaped . format ( * * values ) ) , 'positions' : sorted_position , 'values' : values , 'replace' : replace , 'model' : model , 'mapping' : mapping } ) log . debug ( 'Compiled messages:' ) log . debug ( self . compiled_messages )
Create a list of all OS messages and their compiled regexs
541
12
232,558
def _parse ( self , msg_dict ) : error_present = False # log.debug('Matching the message:') # log.debug(msg_dict) for message in self . compiled_messages : # log.debug('Matching using:') # log.debug(message) match_on = message [ 'match_on' ] if match_on not in msg_dict : # log.debug('%s is not a valid key in the partially parsed dict', match_on) continue if message [ 'tag' ] != msg_dict [ match_on ] : continue if '__python_fun__' in message : return { 'model' : message [ 'model' ] , 'error' : message [ 'error' ] , '__python_fun__' : message [ '__python_fun__' ] } error_present = True match = message [ 'line' ] . search ( msg_dict [ 'message' ] ) if not match : continue positions = message . get ( 'positions' , { } ) values = message . get ( 'values' ) ret = { 'model' : message [ 'model' ] , 'mapping' : message [ 'mapping' ] , 'replace' : message [ 'replace' ] , 'error' : message [ 'error' ] } for key in values . keys ( ) : # Check if the value needs to be replaced if key in message [ 'replace' ] : result = napalm_logs . utils . cast ( match . group ( positions . get ( key ) ) , message [ 'replace' ] [ key ] ) else : result = match . group ( positions . get ( key ) ) ret [ key ] = result return ret if error_present is True : log . info ( 'Configured regex did not match for os: %s tag %s' , self . _name , msg_dict . get ( 'tag' , '' ) ) else : log . info ( 'Syslog message not configured for os: %s tag %s' , self . _name , msg_dict . get ( 'tag' , '' ) )
Parse a syslog message and check what OpenConfig object should be generated .
455
16
232,559
def _emit ( self , * * kwargs ) : oc_dict = { } for mapping , result_key in kwargs [ 'mapping' ] [ 'variables' ] . items ( ) : result = kwargs [ result_key ] oc_dict = napalm_logs . utils . setval ( mapping . format ( * * kwargs ) , result , oc_dict ) for mapping , result in kwargs [ 'mapping' ] [ 'static' ] . items ( ) : oc_dict = napalm_logs . utils . setval ( mapping . format ( * * kwargs ) , result , oc_dict ) return oc_dict
Emit an OpenConfig object given a certain combination of fields mappeed in the config to the corresponding hierarchy .
157
23
232,560
def _publish ( self , obj ) : bin_obj = umsgpack . packb ( obj ) self . pub . send ( bin_obj )
Publish the OC object .
33
6
232,561
def _handshake ( self , conn , addr ) : # waiting for the magic request message msg = conn . recv ( len ( MAGIC_REQ ) ) log . debug ( 'Received message %s from %s' , msg , addr ) if msg != MAGIC_REQ : log . warning ( '%s is not a valid REQ message from %s' , msg , addr ) return log . debug ( 'Sending the private key' ) conn . send ( self . __key ) # wait for explicit ACK log . debug ( 'Waiting for the client to confirm' ) msg = conn . recv ( len ( MAGIC_ACK ) ) if msg != MAGIC_ACK : return log . debug ( 'Sending the signature key' ) conn . send ( self . __sgn ) # wait for explicit ACK log . debug ( 'Waiting for the client to confirm' ) msg = conn . recv ( len ( MAGIC_ACK ) ) if msg != MAGIC_ACK : return log . info ( '%s is now authenticated' , addr ) self . keep_alive ( conn )
Ensures that the client receives the AES key .
241
11
232,562
def keep_alive ( self , conn ) : while self . __up : msg = conn . recv ( len ( AUTH_KEEP_ALIVE ) ) if msg != AUTH_KEEP_ALIVE : log . error ( 'Received something other than %s' , AUTH_KEEP_ALIVE ) conn . close ( ) return try : conn . send ( AUTH_KEEP_ALIVE_ACK ) except ( IOError , socket . error ) as err : log . error ( 'Unable to send auth keep alive: %s' , err ) conn . close ( ) return
Maintains auth sessions
127
5
232,563
def verify_cert ( self ) : log . debug ( 'Verifying the %s certificate, keyfile: %s' , self . certificate , self . keyfile ) try : ssl . create_default_context ( ) . load_cert_chain ( self . certificate , keyfile = self . keyfile ) except ssl . SSLError : error_string = 'SSL certificate and key do not match' log . error ( error_string ) raise SSLMismatchException ( error_string ) except IOError : log . error ( 'Unable to open either certificate or key file' ) raise log . debug ( 'Certificate looks good.' )
Checks that the provided cert and key are valid and usable
140
12
232,564
def _create_skt ( self ) : log . debug ( 'Creating the auth socket' ) if ':' in self . auth_address : self . socket = socket . socket ( socket . AF_INET6 , socket . SOCK_STREAM ) else : self . socket = socket . socket ( socket . AF_INET , socket . SOCK_STREAM ) try : self . socket . bind ( ( self . auth_address , self . auth_port ) ) except socket . error as msg : error_string = 'Unable to bind (auth) to port {} on {}: {}' . format ( self . auth_port , self . auth_address , msg ) log . error ( error_string , exc_info = True ) raise BindException ( error_string )
Create the authentication socket .
168
5
232,565
def start ( self ) : # Start suicide polling thread log . debug ( 'Starting the auth process' ) self . verify_cert ( ) self . _create_skt ( ) log . debug ( 'The auth process can receive at most %d parallel connections' , AUTH_MAX_CONN ) self . socket . listen ( AUTH_MAX_CONN ) thread = threading . Thread ( target = self . _suicide_when_without_parent , args = ( os . getppid ( ) , ) ) thread . start ( ) signal . signal ( signal . SIGTERM , self . _exit_gracefully ) self . __up = True while self . __up : try : ( clientsocket , address ) = self . socket . accept ( ) wrapped_auth_skt = ssl . wrap_socket ( clientsocket , server_side = True , certfile = self . certificate , keyfile = self . keyfile ) except ssl . SSLError : log . exception ( 'SSL error' , exc_info = True ) continue except socket . error as error : if self . __up is False : return else : msg = 'Received auth socket error: {}' . format ( error ) log . error ( msg , exc_info = True ) raise NapalmLogsExit ( msg ) log . info ( '%s connected' , address ) log . debug ( 'Starting the handshake' ) client_thread = threading . Thread ( target = self . _handshake , args = ( wrapped_auth_skt , address ) ) client_thread . start ( )
Listen to auth requests and send the AES key . Each client connection starts a new thread .
339
18
232,566
def stop ( self ) : log . info ( 'Stopping auth process' ) self . __up = False self . socket . close ( )
Stop the auth proc .
30
5
232,567
def start ( self ) : log . debug ( 'Creating the consumer using the bootstrap servers: %s and the group ID: %s' , self . bootstrap_servers , self . group_id ) try : self . consumer = kafka . KafkaConsumer ( bootstrap_servers = self . bootstrap_servers , group_id = self . group_id ) except kafka . errors . NoBrokersAvailable as err : log . error ( err , exc_info = True ) raise ListenerException ( err ) log . debug ( 'Subscribing to the %s topic' , self . topic ) self . consumer . subscribe ( topics = [ self . topic ] )
Startup the kafka consumer .
148
8
232,568
def stop ( self ) : log . info ( 'Stopping te kafka listener class' ) self . consumer . unsubscribe ( ) self . consumer . close ( )
Shutdown kafka consumer .
36
7
232,569
def get_transport ( name ) : try : log . debug ( 'Using %s as transport' , name ) return TRANSPORT_LOOKUP [ name ] except KeyError : msg = 'Transport {} is not available. Are the dependencies installed?' . format ( name ) log . error ( msg , exc_info = True ) raise InvalidTransportException ( msg )
Return the transport class .
80
5
232,570
def start ( self ) : zmq_uri = '{protocol}://{address}:{port}' . format ( protocol = self . protocol , address = self . address , port = self . port ) if self . port else '{protocol}://{address}' . format ( # noqa protocol = self . protocol , address = self . address ) log . debug ( 'ZMQ URI: %s' , zmq_uri ) self . ctx = zmq . Context ( ) if hasattr ( zmq , self . type ) : skt_type = getattr ( zmq , self . type ) else : skt_type = zmq . PULL self . sub = self . ctx . socket ( skt_type ) self . sub . connect ( zmq_uri ) if self . hwm is not None : try : self . sub . setsockopt ( zmq . HWM , self . hwm ) except AttributeError : self . sub . setsockopt ( zmq . RCVHWM , self . hwm ) if self . recvtimeout is not None : log . debug ( 'Setting RCVTIMEO to %d' , self . recvtimeout ) self . sub . setsockopt ( zmq . RCVTIMEO , self . recvtimeout ) if self . keepalive is not None : log . debug ( 'Setting TCP_KEEPALIVE to %d' , self . keepalive ) self . sub . setsockopt ( zmq . TCP_KEEPALIVE , self . keepalive ) if self . keepalive_idle is not None : log . debug ( 'Setting TCP_KEEPALIVE_IDLE to %d' , self . keepalive_idle ) self . sub . setsockopt ( zmq . TCP_KEEPALIVE_IDLE , self . keepalive_idle ) if self . keepalive_interval is not None : log . debug ( 'Setting TCP_KEEPALIVE_INTVL to %d' , self . keepalive_interval ) self . sub . setsockopt ( zmq . TCP_KEEPALIVE_INTVL , self . keepalive_interval )
Startup the zmq consumer .
498
8
232,571
def receive ( self ) : try : msg = self . sub . recv ( ) except zmq . Again as error : log . error ( 'Unable to receive messages: %s' , error , exc_info = True ) raise ListenerException ( error ) log . debug ( '[%s] Received %s' , time . time ( ) , msg ) return msg , ''
Return the message received .
82
5
232,572
def stop ( self ) : log . info ( 'Stopping the zmq listener class' ) self . sub . close ( ) self . ctx . term ( )
Shutdown zmq listener .
36
7
232,573
def start ( self ) : if ':' in self . address : self . skt = socket . socket ( socket . AF_INET6 , socket . SOCK_DGRAM ) else : self . skt = socket . socket ( socket . AF_INET , socket . SOCK_DGRAM ) if self . reuse_port : self . skt . setsockopt ( socket . SOL_SOCKET , socket . SO_REUSEADDR , 1 ) if hasattr ( socket , 'SO_REUSEPORT' ) : self . skt . setsockopt ( socket . SOL_SOCKET , socket . SO_REUSEPORT , 1 ) else : log . error ( 'SO_REUSEPORT not supported' ) try : self . skt . bind ( ( self . address , int ( self . port ) ) ) except socket . error as msg : error_string = 'Unable to bind to port {} on {}: {}' . format ( self . port , self . address , msg ) log . error ( error_string , exc_info = True ) raise BindException ( error_string )
Create the UDP listener socket .
244
6
232,574
def _suicide_when_without_parent ( self , parent_pid ) : while True : time . sleep ( 5 ) try : # Check pid alive os . kill ( parent_pid , 0 ) except OSError : # Forcibly exit # Regular sys.exit raises an exception self . stop ( ) log . warning ( 'The parent is not alive, exiting.' ) os . _exit ( 999 )
Kill this process when the parent died .
88
8
232,575
def _setup_buffer ( self ) : if not self . _buffer_cfg or not isinstance ( self . _buffer_cfg , dict ) : return buffer_name = list ( self . _buffer_cfg . keys ( ) ) [ 0 ] buffer_class = napalm_logs . buffer . get_interface ( buffer_name ) log . debug ( 'Setting up buffer interface "%s"' , buffer_name ) if 'expire_time' not in self . _buffer_cfg [ buffer_name ] : self . _buffer_cfg [ buffer_name ] [ 'expire_time' ] = CONFIG . BUFFER_EXPIRE_TIME self . _buffer = buffer_class ( * * self . _buffer_cfg [ buffer_name ] )
Setup the buffer subsystem .
164
5
232,576
def _setup_metrics ( self ) : path = os . environ . get ( "prometheus_multiproc_dir" ) if not os . path . exists ( self . metrics_dir ) : try : log . info ( "Creating metrics directory" ) os . makedirs ( self . metrics_dir ) except OSError : log . error ( "Failed to create metrics directory!" ) raise ConfigurationException ( "Failed to create metrics directory!" ) path = self . metrics_dir elif path != self . metrics_dir : path = self . metrics_dir os . environ [ 'prometheus_multiproc_dir' ] = path log . info ( "Cleaning metrics collection directory" ) log . debug ( "Metrics directory set to: {}" . format ( path ) ) files = os . listdir ( path ) for f in files : if f . endswith ( ".db" ) : os . remove ( os . path . join ( path , f ) ) log . debug ( "Starting metrics exposition" ) if self . metrics_enabled : registry = CollectorRegistry ( ) multiprocess . MultiProcessCollector ( registry ) start_http_server ( port = self . metrics_port , addr = self . metrics_address , registry = registry )
Start metric exposition
276
3
232,577
def _setup_log(self):
    """Configure the root logger from the instance's level and format."""
    # CONFIG.LOGGING_LEVEL maps lowercase level names to logging constants.
    level = CONFIG.LOGGING_LEVEL.get(self.log_level.lower())
    logging.basicConfig(level=level, format=self.log_format)
Setup the log object .
53
5
232,578
def _whitelist_blacklist(self, os_name):
    """Decide whether ``os_name`` passes the configured device filters.

    Delegates to the shared whitelist/blacklist helper with the
    device lists configured by the user.
    """
    allowed = self.device_whitelist
    denied = self.device_blacklist
    return napalm_logs.ext.check_whitelist_blacklist(os_name,
                                                     whitelist=allowed,
                                                     blacklist=denied)
Determines if the OS should be ignored depending on the whitelist - blacklist logic configured by the user .
58
22
232,579
def _extract_yaml_docstring ( stream ) : comment_lines = [ ] lines = stream . read ( ) . splitlines ( ) for line in lines : line_strip = line . strip ( ) if not line_strip : continue if line_strip . startswith ( '#' ) : comment_lines . append ( line_strip . replace ( '#' , '' , 1 ) . strip ( ) ) else : break return ' ' . join ( comment_lines )
Extract the comments at the top of the YAML file from the stream handler . Return the extracted comment as string .
105
25
232,580
def _verify_config_dict(self, valid, config, dev_os, key_path=None):
    """Validate ``config`` for ``dev_os`` against the ``valid`` schema dict.

    ``key_path`` tracks the nesting path for error reporting; each key
    in the schema is checked individually.
    """
    key_path = key_path or []
    for opt_name, opt_spec in valid.items():
        self._verify_config_key(opt_name, opt_spec, valid, config,
                                dev_os, key_path)
Verify if the config dict is valid .
75
9
232,581
def _verify_config(self):
    """Verify that the loaded configuration is correct.

    Raises a config exception when no configuration at all was loaded;
    an empty per-device config is only logged, not fatal.
    """
    if not self.config_dict:
        self._raise_config_exception('No config found')
    # Check for device config; if there isn't anything then just log,
    # do not raise an exception
    for dev_os, dev_config in self.config_dict.items():
        if not dev_config:
            log.warning('No config found for %s', dev_os)
            continue
        # Compare the valid opts with the config
        self._verify_config_dict(CONFIG.VALID_CONFIG, dev_config, dev_os)
    log.debug('Read the config without error')
Verify that the config is correct
148
7
232,582
def _build_config(self):
    """Build the config of the napalm syslog parser.

    Falls back to the native config files shipped next to this module
    when no explicit config dict/path was provided, then overlays the
    extension config (if any) on top of the base config in place.
    """
    if not self.config_dict:
        if not self.config_path:
            # No custom config path requested
            # Read the native config files
            self.config_path = os.path.join(
                os.path.dirname(os.path.realpath(__file__)),
                'config'
            )
        log.info('Reading the configuration from %s', self.config_path)
        self.config_dict = self._load_config(self.config_path)
    # Only load the extension config from disk when it was not already
    # supplied as a dict and its path differs from the base config path.
    if not self.extension_config_dict and self.extension_config_path and os.path.normpath(self.extension_config_path) != os.path.normpath(self.config_path):  # same path?
        # When extension config is not sent as dict
        # But `extension_config_path` is specified
        log.info('Reading extension configuration from %s',
                 self.extension_config_path)
        self.extension_config_dict = self._load_config(self.extension_config_path)
    if self.extension_config_dict:
        # Deep-merge the extension config into the base config.
        napalm_logs.utils.dictupdate(self.config_dict, self.extension_config_dict)
Build the config of the napalm syslog parser .
263
11
232,583
def _start_auth_proc(self):
    """Spawn the authenticator subprocess and return its handle."""
    log.debug('Computing the signing key hex')
    verify_key = self.__signing_key.verify_key
    sgn_verify_hex = verify_key.encode(encoder=nacl.encoding.HexEncoder)
    log.debug('Starting the authenticator subprocess')
    auth = NapalmLogsAuthProc(
        self.certificate,
        self.keyfile,
        self.__priv_key,
        sgn_verify_hex,
        self.auth_address,
        self.auth_port,
    )
    process = Process(target=auth.start)
    process.start()
    process.description = 'Auth process'
    log.debug('Started auth process as %s with PID %s',
              process._name, process.pid)
    return process
Start the authenticator process .
176
6
232,584
def _start_lst_proc(self, listener_type, listener_opts):
    """Spawn the listener subprocess for ``listener_type`` and return it."""
    log.debug('Starting the listener process for %s', listener_type)
    worker = NapalmLogsListenerProc(
        self.opts,
        self.address,
        self.port,
        listener_type,
        listener_opts=listener_opts,
    )
    process = Process(target=worker.start)
    process.start()
    process.description = 'Listener process'
    log.debug('Started listener process as %s with PID %s',
              process._name, process.pid)
    return process
Start the listener process .
128
5
232,585
def _start_srv_proc(self, started_os_proc):
    """Spawn the server subprocess and return its handle.

    ``started_os_proc`` tells the server which per-OS device
    processes were started.
    """
    log.debug('Starting the server process')
    worker = NapalmLogsServerProc(
        self.opts,
        self.config_dict,
        started_os_proc,
        buffer=self._buffer,
    )
    process = Process(target=worker.start)
    process.start()
    process.description = 'Server process'
    log.debug('Started server process as %s with PID %s',
              process._name, process.pid)
    return process
Start the server process .
115
5
232,586
def _start_pub_proc(self, publisher_type, publisher_opts, pub_id):
    """Spawn the publisher subprocess ``pub_id`` and return its handle."""
    log.debug('Starting the publisher process for %s', publisher_type)
    worker = NapalmLogsPublisherProc(
        self.opts,
        self.publish_address,
        self.publish_port,
        publisher_type,
        self.serializer,
        self.__priv_key,
        self.__signing_key,
        publisher_opts,
        disable_security=self.disable_security,
        pub_id=pub_id,
    )
    process = Process(target=worker.start)
    process.start()
    process.description = 'Publisher process'
    log.debug('Started publisher process as %s with PID %s',
              process._name, process.pid)
    return process
Start the publisher process .
168
5
232,587
def _start_dev_proc(self, device_os, device_config):
    """Spawn the per-OS device worker subprocess and return its handle."""
    log.info('Starting the child process for %s', device_os)
    worker = NapalmLogsDeviceProc(device_os, self.opts, device_config)
    process = Process(target=worker.start)
    process.start()
    process.description = '%s device process' % device_os
    log.debug('Started process %s for %s, having PID %s',
              process._name, device_os, process.pid)
    return process
Start the device worker process .
136
6
232,588
def _check_children(self):
    """Watchdog loop: stop the engine as soon as any child process dies.

    Polls once per second while the engine is up.
    """
    while self.up:
        time.sleep(1)
        for child in self._processes:
            if not child.is_alive():
                log.debug('%s is dead. Stopping the napalm-logs engine.',
                          child.description)
                self.stop_engine()
Check that all of the child processes are still running
74
9
232,589
def _setup_ipc(self):
    """Setup the IPC PUB and SUB sockets for the proxy.

    The frontend SUB socket binds and subscribes to everything coming
    from the publishers' side; the backend PUB socket binds the URL the
    downstream consumers connect to. The HWM is applied on both ends.
    """
    log.debug('Setting up the internal IPC proxy')
    self.ctx = zmq.Context()
    # Frontend
    self.sub = self.ctx.socket(zmq.SUB)
    self.sub.bind(PUB_PX_IPC_URL)
    self.sub.setsockopt(zmq.SUBSCRIBE, b'')  # no topic filtering
    log.debug('Setting HWM for the proxy frontend: %d', self.hwm)
    # zmq 2 exposes a single HWM option; zmq 3+ raises AttributeError
    # for it and splits it into SNDHWM/RCVHWM instead.
    try:
        self.sub.setsockopt(zmq.HWM, self.hwm)
        # zmq 2
    except AttributeError:
        # zmq 3
        self.sub.setsockopt(zmq.SNDHWM, self.hwm)
    # Backend
    self.pub = self.ctx.socket(zmq.PUB)
    self.pub.bind(PUB_IPC_URL)
    log.debug('Setting HWM for the proxy backend: %d', self.hwm)
    try:
        self.pub.setsockopt(zmq.HWM, self.hwm)
        # zmq 2
    except AttributeError:
        # zmq 3
        self.pub.setsockopt(zmq.SNDHWM, self.hwm)
Setup the IPC PUB and SUB sockets for the proxy .
292
13
232,590
def _setup_ipc(self):
    """Connect a SUB socket to the internal publisher IPC channel.

    Subscribes to every message so this publisher instance relays the
    full stream on its own transport.
    """
    self.ctx = zmq.Context()
    log.debug('Setting up the %s publisher subscriber #%d',
              self._transport_type, self.pub_id)
    self.sub = self.ctx.socket(zmq.SUB)
    self.sub.connect(PUB_IPC_URL)
    self.sub.setsockopt(zmq.SUBSCRIBE, b'')  # no topic filtering
    try:
        # zmq 2 has a single high-water-mark option.
        self.sub.setsockopt(zmq.HWM, self.opts['hwm'])
    except AttributeError:
        # zmq 3+ splits it; only the receive side matters here.
        self.sub.setsockopt(zmq.RCVHWM, self.opts['hwm'])
Subscribe to the pub IPC and publish the messages on the right transport .
176
15
232,591
def _prepare(self, serialized_obj):
    """Encrypt and sign ``serialized_obj`` for the untrusted channel.

    A fresh random nonce is used per message; the ciphertext is then
    signed so receivers can detect tampering.
    """
    nonce = nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)
    ciphertext = self.__safe.encrypt(serialized_obj, nonce)
    return self.__signing_key.sign(ciphertext)
Prepare the object to be sent over the untrusted channel .
86
14
232,592
def get_listener(name):
    """Return the listener class registered under ``name``.

    Raises:
        InvalidListenerException: when ``name`` is not registered,
            typically because optional dependencies are missing.
    """
    log.debug('Using %s as listener', name)
    try:
        return LISTENER_LOOKUP[name]
    except KeyError:
        msg = 'Listener {} is not available. Are the dependencies installed?'.format(name)
        log.error(msg, exc_info=True)
        raise InvalidListenerException(msg)
Return the listener class .
78
5
232,593
def _start_keep_alive(self):
    """Run the keep-alive loop in a background daemon thread."""
    worker = threading.Thread(target=self.keep_alive)
    worker.daemon = True  # must not block interpreter shutdown
    worker.start()
Start the keep alive thread as a daemon
53
8
232,594
def keep_alive(self):
    """Periodically probe the auth server; reconnect when it goes away.

    Sends AUTH_KEEP_ALIVE every AUTH_KEEP_ALIVE_INTERVAL seconds and
    expects AUTH_KEEP_ALIVE_ACK back. A send failure or a wrong reply
    triggers a full reconnect/re-authentication.
    """
    # The socket timeout bounds both send and recv to one interval.
    self.ssl_skt.settimeout(defaults.AUTH_KEEP_ALIVE_INTERVAL)
    while self.__up:
        try:
            log.debug('Sending keep-alive message to the server')
            self.ssl_skt.send(defaults.AUTH_KEEP_ALIVE)
        except socket.error:
            log.error('Unable to send keep-alive message to the server.')
            log.error('Re-init the SSL socket.')
            self.reconnect()
            log.debug('Trying to re-send the keep-alive message to the server.')
            # One immediate retry on the freshly re-established socket.
            self.ssl_skt.send(defaults.AUTH_KEEP_ALIVE)
        msg = self.ssl_skt.recv(len(defaults.AUTH_KEEP_ALIVE_ACK))
        log.debug('Received %s from the keep-alive server', msg)
        if msg != defaults.AUTH_KEEP_ALIVE_ACK:
            # NOTE(review): 'form' below is a typo for 'from' in the log
            # message; left unchanged as it is runtime output.
            log.error('Received %s instead of %s form the auth keep-alive server',
                      msg, defaults.AUTH_KEEP_ALIVE_ACK)
            log.error('Re-init the SSL socket.')
            self.reconnect()
        time.sleep(defaults.AUTH_KEEP_ALIVE_INTERVAL)
Send a keep alive request periodically to make sure that the server is still alive . If not then try to reconnect .
297
23
232,595
def reconnect(self):
    """Close the SSL socket and re-run the full authentication handshake.

    A close failure (socket already gone) is logged and ignored.
    """
    # Fixed: the message previously said 'SSH socket', but this code
    # closes the SSL socket (see the matching 'Re-opening the SSL
    # socket.' message below).
    log.debug('Closing the SSL socket.')
    try:
        self.ssl_skt.close()
    except socket.error:
        log.error('The socket seems to be closed already.')
    log.debug('Re-opening the SSL socket.')
    self.authenticate()
Try to reconnect and re - authenticate with the server .
69
12
232,596
def authenticate(self):
    """Authenticate against the server and fetch the crypto keys.

    Opens an SSL connection (IPv6-aware), retries with a backoff of
    ``self.timeout`` seconds up to ``self.max_try`` attempts, then
    performs the key-exchange handshake and stores the resulting
    SecretBox and VerifyKey on the instance.

    Raises:
        ClientConnectException: when the connection cannot be
            established within the allowed number of attempts.
    """
    log.debug('Authenticate to %s:%d, using the certificate %s',
              self.address, self.port, self.certificate)
    # A colon in the address means an IPv6 literal.
    if ':' in self.address:
        skt_ver = socket.AF_INET6
    else:
        skt_ver = socket.AF_INET
    skt = socket.socket(skt_ver, socket.SOCK_STREAM)
    self.ssl_skt = ssl.wrap_socket(skt,
                                   ca_certs=self.certificate,
                                   cert_reqs=ssl.CERT_REQUIRED)
    try:
        self.ssl_skt.connect((self.address, self.port))
        self.auth_try_id = 0  # reset the retry counter on success
    except socket.error as err:
        log.error('Unable to open the SSL socket.')
        self.auth_try_id += 1
        if not self.max_try or self.auth_try_id < self.max_try:
            log.error('Trying to authenticate again in %d seconds', self.timeout)
            time.sleep(self.timeout)
            self.authenticate()
            # Fixed: without this return, a *successful* retry fell
            # through to the log.critical/raise below and aborted anyway.
            return
        log.critical('Giving up, unable to authenticate to %s:%d using the certificate %s',
                     self.address, self.port, self.certificate)
        raise ClientConnectException(err)
    # Explicit INIT
    self.ssl_skt.write(defaults.MAGIC_REQ)
    # Receive the private key
    private_key = self.ssl_skt.recv(defaults.BUFFER_SIZE)
    # Send back explicit ACK
    self.ssl_skt.write(defaults.MAGIC_ACK)
    # Read the hex of the verification key
    verify_key_hex = self.ssl_skt.recv(defaults.BUFFER_SIZE)
    # Send back explicit ACK
    self.ssl_skt.write(defaults.MAGIC_ACK)
    self.priv_key = nacl.secret.SecretBox(private_key)
    self.verify_key = nacl.signing.VerifyKey(verify_key_hex,
                                             encoder=nacl.encoding.HexEncoder)
Authenticate the client and return the private and signature keys .
475
12
232,597
def decrypt(self, binary):
    """Verify, decrypt, and unpack a message received from the channel.

    Raises:
        BadSignatureException: when the signature is forged or corrupt.
        CryptoException: when decryption fails.
    """
    try:
        signed_payload = self.verify_key.verify(binary)
    except BadSignatureError:
        log.error('Signature was forged or corrupt', exc_info=True)
        raise BadSignatureException('Signature was forged or corrupt')
    try:
        msgpack_bytes = self.priv_key.decrypt(signed_payload)
    except CryptoError:
        log.error('Unable to decrypt', exc_info=True)
        raise CryptoException('Unable to decrypt')
    return umsgpack.unpackb(msgpack_bytes)
Decrypt and unpack the original OpenConfig object serialized using MessagePack . Raise BadSignatureException when the signature was forged or corrupted .
119
29
232,598
def _client_connection(self, conn, addr):
    """Serve a single TCP client: read messages and queue them.

    Runs until the listener shuts down, the client stops sending, or
    the per-connection timeout fires. The socket is always closed on
    exit.

    Raises:
        ListenerException: when the connection times out while the
            listener is still up.
    """
    log.debug('Established connection with %s:%d', addr[0], addr[1])
    conn.settimeout(self.socket_timeout)
    try:
        while self.__up:
            msg = conn.recv(self.buffer_size)
            if not msg:
                # log.debug('Received empty message from %s', addr)
                # disabled ^ as it was too noisy
                continue
            log.debug('[%s] Received %s from %s. Adding in the queue',
                      time.time(), msg, addr)
            self.buffer.put((msg, '{}:{}'.format(addr[0], addr[1])))
    except socket.timeout:
        if not self.__up:
            return
        # Fixed: arguments were swapped (addr[1], addr[0]), feeding the
        # port into %s and the host string into %d.
        log.debug('Connection %s:%d timed out', addr[0], addr[1])
        raise ListenerException('Connection %s:%d timed out' % addr)
    finally:
        log.debug('Closing connection with %s', addr)
        conn.close()
Handle the connection with one client.
238
9
232,599
def _serve_clients(self):
    """Accept incoming clients, serving each on its own thread.

    Raises:
        ListenerException: on a socket error while the listener is up;
            errors during shutdown are swallowed.
    """
    self.__up = True
    while self.__up:
        log.debug('Waiting for a client to connect')
        try:
            conn, addr = self.skt.accept()
            log.debug('Received connection from %s:%d', addr[0], addr[1])
        except socket.error as error:
            if not self.__up:
                # Shutting down: the accept failure is expected.
                return
            msg = 'Received listener socket error: {}'.format(error)
            log.error(msg, exc_info=True)
            raise ListenerException(msg)
        threading.Thread(target=self._client_connection,
                         args=(conn, addr,)).start()
Accept clients and serve one separate thread per client.
161
11