idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
44,500
def convert_areaSource(self, node):
    """
    Convert the given node into an area source object.

    :param node: the node to convert
    :returns: a ``source.AreaSource`` instance
    """
    geom = node.areaGeometry
    coords = split_coords_2d(~geom.Polygon.exterior.LinearRing.posList)
    polygon = geo.Polygon([geo.Point(*xy) for xy in coords])
    msr = valid.SCALEREL[~node.magScaleRel]()
    # the source-level discretization overrides the job.ini default
    area_discretization = geom.attrib.get(
        'discretization', self.area_source_discretization)
    if area_discretization is None:
        raise ValueError(
            'The source %r has no `discretization` parameter and the job.'
            'ini file has no `area_source_discretization` parameter either' %
            node['id'])
    return source.AreaSource(
        source_id=node['id'],
        name=node['name'],
        tectonic_region_type=node.attrib.get('tectonicRegion'),
        mfd=self.convert_mfdist(node),
        rupture_mesh_spacing=self.rupture_mesh_spacing,
        magnitude_scaling_relationship=msr,
        rupture_aspect_ratio=~node.ruptAspectRatio,
        upper_seismogenic_depth=~geom.upperSeismoDepth,
        lower_seismogenic_depth=~geom.lowerSeismoDepth,
        nodal_plane_distribution=self.convert_npdist(node),
        hypocenter_distribution=self.convert_hpdist(node),
        polygon=polygon,
        area_discretization=area_discretization,
        temporal_occurrence_model=self.get_tom(node))
Convert the given node into an area source object .
44,501
def convert_pointSource(self, node):
    """
    Convert the given node into a point source object.

    :param node: the node to convert
    :returns: a ``source.PointSource`` instance
    """
    geom = node.pointGeometry
    lon_lat = ~geom.Point.pos
    msr = valid.SCALEREL[~node.magScaleRel]()
    return source.PointSource(
        source_id=node['id'],
        name=node['name'],
        tectonic_region_type=node.attrib.get('tectonicRegion'),
        mfd=self.convert_mfdist(node),
        rupture_mesh_spacing=self.rupture_mesh_spacing,
        magnitude_scaling_relationship=msr,
        rupture_aspect_ratio=~node.ruptAspectRatio,
        upper_seismogenic_depth=~geom.upperSeismoDepth,
        lower_seismogenic_depth=~geom.lowerSeismoDepth,
        location=geo.Point(*lon_lat),
        nodal_plane_distribution=self.convert_npdist(node),
        hypocenter_distribution=self.convert_hpdist(node),
        temporal_occurrence_model=self.get_tom(node))
Convert the given node into a point source object .
44,502
def convert_multiPointSource(self, node):
    """
    Convert the given node into a MultiPointSource object.

    :param node: the node to convert
    :returns: a ``source.MultiPointSource`` instance
    """
    geom = node.multiPointGeometry
    lons, lats = zip(*split_coords_2d(~geom.posList))
    msr = valid.SCALEREL[~node.magScaleRel]()
    return source.MultiPointSource(
        source_id=node['id'],
        name=node['name'],
        tectonic_region_type=node.attrib.get('tectonicRegion'),
        mfd=self.convert_mfdist(node),
        magnitude_scaling_relationship=msr,
        rupture_aspect_ratio=~node.ruptAspectRatio,
        upper_seismogenic_depth=~geom.upperSeismoDepth,
        lower_seismogenic_depth=~geom.lowerSeismoDepth,
        nodal_plane_distribution=self.convert_npdist(node),
        hypocenter_distribution=self.convert_hpdist(node),
        # F32 casts the coordinates to single precision for the mesh
        mesh=geo.Mesh(F32(lons), F32(lats)),
        temporal_occurrence_model=self.get_tom(node))
Convert the given node into a MultiPointSource object .
44,503
def convert_simpleFaultSource(self, node):
    """
    Convert the given node into a simple fault object.

    :param node: the node to convert
    :returns: a ``source.SimpleFaultSource`` instance
    """
    geom = node.simpleFaultGeometry
    msr = valid.SCALEREL[~node.magScaleRel]()
    fault_trace = self.geo_line(geom)
    mfd = self.convert_mfdist(node)
    with context(self.fname, node):
        try:
            hypo_list = valid.hypo_list(node.hypoList)
        except AttributeError:
            # hypoList is an optional subnode
            hypo_list = ()
        try:
            slip_list = valid.slip_list(node.slipList)
        except AttributeError:
            # slipList is an optional subnode
            slip_list = ()
        simple = source.SimpleFaultSource(
            source_id=node['id'],
            name=node['name'],
            tectonic_region_type=node.attrib.get('tectonicRegion'),
            mfd=mfd,
            rupture_mesh_spacing=self.rupture_mesh_spacing,
            magnitude_scaling_relationship=msr,
            rupture_aspect_ratio=~node.ruptAspectRatio,
            upper_seismogenic_depth=~geom.upperSeismoDepth,
            lower_seismogenic_depth=~geom.lowerSeismoDepth,
            fault_trace=fault_trace,
            dip=~geom.dip,
            rake=~node.rake,
            temporal_occurrence_model=self.get_tom(node),
            hypo_list=hypo_list,
            slip_list=slip_list)
    return simple
Convert the given node into a simple fault object .
44,504
def convert_complexFaultSource(self, node):
    """
    Convert the given node into a complex fault object.

    :param node: the node to convert
    :returns: a ``source.ComplexFaultSource`` instance
    """
    geom = node.complexFaultGeometry
    edges = self.geo_lines(geom)
    mfd = self.convert_mfdist(node)
    msr = valid.SCALEREL[~node.magScaleRel]()
    with context(self.fname, node):
        cmplx = source.ComplexFaultSource(
            source_id=node['id'],
            name=node['name'],
            tectonic_region_type=node.attrib.get('tectonicRegion'),
            mfd=mfd,
            # complex faults use their own mesh spacing parameter
            rupture_mesh_spacing=self.complex_fault_mesh_spacing,
            magnitude_scaling_relationship=msr,
            rupture_aspect_ratio=~node.ruptAspectRatio,
            edges=edges,
            rake=~node.rake,
            temporal_occurrence_model=self.get_tom(node))
    return cmplx
Convert the given node into a complex fault object .
44,505
def convert_characteristicFaultSource(self, node):
    """
    Convert the given node into a characteristic fault object.

    :param node: the node to convert
    :returns: a ``source.CharacteristicFaultSource`` instance
    """
    char = source.CharacteristicFaultSource(
        source_id=node['id'],
        name=node['name'],
        tectonic_region_type=node.attrib.get('tectonicRegion'),
        mfd=self.convert_mfdist(node),
        surface=self.convert_surfaces(node.surface),
        rake=~node.rake,
        temporal_occurrence_model=self.get_tom(node))
    return char
Convert the given node into a characteristic fault object .
44,506
def convert_nonParametricSeismicSource(self, node):
    """
    Convert the given node into a non parametric source object.

    :param node: the node to convert
    :returns: a ``source.NonParametricSeismicSource`` instance
    """
    trt = node.attrib.get('tectonicRegion')
    rup_pmf_data = []
    rups_weights = None
    if 'rup_weights' in node.attrib:
        tmp = node.attrib.get('rup_weights')
        rups_weights = numpy.array([float(s) for s in tmp.split()])
    for i, rupnode in enumerate(node):
        probs = pmf.PMF(valid.pmf(rupnode['probs_occur']))
        # NOTE(review): RuptureConverter.convert_node is called unbound
        # with `self` of this class — presumably they share the needed
        # attributes; confirm against RuptureConverter
        rup = RuptureConverter.convert_node(self, rupnode)
        rup.tectonic_region_type = trt
        rup.weight = None if rups_weights is None else rups_weights[i]
        rup_pmf_data.append((rup, probs))
    nps = source.NonParametricSeismicSource(
        node['id'], node['name'], trt, rup_pmf_data)
    # a source with explicit rupture weights cannot be split
    nps.splittable = 'rup_weights' not in node.attrib
    return nps
Convert the given node into a non parametric source object .
44,507
def convert_sourceGroup(self, node):
    """
    Convert the given node into a SourceGroup object.

    :param node: the node to convert
    :returns: a SourceGroup instance
    :raises ValueError: on inconsistent tectonic regions, wrong number of
        srcs_weights, or a cluster without a temporal occurrence model
    """
    trt = node['tectonicRegion']
    srcs_weights = node.attrib.get('srcs_weights')
    # group attributes to be transmitted to the underlying sources
    grp_attrs = {k: v for k, v in node.attrib.items()
                 if k not in ('name', 'src_interdep', 'rup_interdep',
                              'srcs_weights')}
    sg = SourceGroup(trt, min_mag=self.minimum_magnitude)
    sg.temporal_occurrence_model = self.get_tom(node)
    sg.name = node.attrib.get('name')
    sg.src_interdep = node.attrib.get('src_interdep', 'indep')
    sg.rup_interdep = node.attrib.get('rup_interdep', 'indep')
    sg.grp_probability = node.attrib.get('grp_probability')
    sg.cluster = node.attrib.get('cluster') == 'true'
    if sg.cluster:
        msg = 'A cluster group requires the definition of a temporal'
        msg += ' occurrence model'
        assert 'tom' in node.attrib, msg
        # BUG FIX: the original tested an undefined name `tom`, raising a
        # NameError for every cluster group; the temporal occurrence model
        # has already been computed and stored on the group above
        tom = sg.temporal_occurrence_model
        if isinstance(tom, PoissonTOM):
            assert hasattr(sg, 'occurrence_rate')
    for src_node in node:
        if self.source_id and self.source_id != src_node['id']:
            continue  # filter a single source when source_id is set
        src = self.convert_node(src_node)
        # transmit the group attributes to the underlying source
        for attr, value in grp_attrs.items():
            if attr == 'tectonicRegion':
                src_trt = src_node.get('tectonicRegion')
                if src_trt and src_trt != trt:
                    with context(self.fname, src_node):
                        raise ValueError('Found %s, expected %s' %
                                         (src_node['tectonicRegion'], trt))
                src.tectonic_region_type = trt
            elif attr == 'grp_probability':
                pass  # do not transmit
            else:
                setattr(src, attr, node[attr])
        sg.update(src)
    if srcs_weights is not None:
        if len(node) and len(srcs_weights) != len(node):
            raise ValueError(
                'There are %d srcs_weights but %d source(s) in %s' %
                (len(srcs_weights), len(node), self.fname))
        for src, sw in zip(sg, srcs_weights):
            src.mutex_weight = sw
    # sanity check (cannot fail here since the model is always set above,
    # but kept as a guard for subclasses overriding get_tom)
    if sg.cluster and not hasattr(sg, 'temporal_occurrence_model'):
        msg = 'The Source Group is a cluster but does not have a '
        msg += 'temporal occurrence model'
        raise ValueError(msg)
    return sg
Convert the given node into a SourceGroup object .
44,508
def _check_list_weights ( parameter , name ) : if not isinstance ( parameter , list ) : raise ValueError ( '%s must be formatted with a list of tuples' % name ) weight = np . sum ( [ val [ 1 ] for val in parameter ] ) if fabs ( weight - 1. ) > 1E-8 : raise ValueError ( '%s weights do not sum to 1.0!' % name ) return parameter
Checks that the weights in a list of tuples sum to 1.0
44,509
def build_fault_model(self, collapse=False, rendered_msr=WC1994(),
                      mfd_config=None):
    """
    Construct a full fault model with epistemic uncertainty by
    enumerating all the possible recurrence models of each fault as
    separate faults, with the recurrence rates multiplied by the
    corresponding weights.

    :param collapse: whether to collapse the branches
    :param rendered_msr: magnitude scaling relation used when rendering
        (NOTE(review): the default ``WC1994()`` instance is created once
        at definition time and shared across calls — harmless only if it
        is stateless; confirm)
    :param mfd_config: optional configuration for the recurrence models
    """
    self.source_model = mtkSourceModel(self.id, self.name)
    for fault in self.faults:
        fault.generate_recurrence_models(collapse,
                                         config=mfd_config,
                                         rendered_msr=rendered_msr)
        src_model, src_weight = fault.generate_fault_source_model()
        for iloc, model in enumerate(src_model):
            new_model = deepcopy(model)
            # make the id unique per enumerated recurrence branch
            new_model.id = str(model.id) + '_%g' % (iloc + 1)
            # scale the occurrence rates by the branch weight
            new_model.mfd.occurrence_rates = \
                (np.array(new_model.mfd.occurrence_rates) *
                 src_weight[iloc]).tolist()
            self.source_model.sources.append(new_model)
Constructs a full fault model with epistemic uncertainty by enumerating all the possible recurrence models of each fault as separate faults with the recurrence rates multiplied by the corresponding weights .
44,510
def read_file(self, start_year=None, end_year=None, use_centroid=None):
    """
    Read the NDK file set on the instance and return the catalogue.

    :param start_year: if not set, inferred from the earliest centroid
    :param end_year: if not set, inferred from the latest centroid
    :param use_centroid: passed through to ``to_hmtk``
    :returns: the populated catalogue
    :raises IOError: if the file length is not a multiple of 5 lines
    """
    raw_data = getlines(self.filename)
    num_lines = len(raw_data)
    # BUG FIX: the original check `(float(n)/5.) - float(n/5) > 1E-9` was
    # written for Python 2 integer division; under Python 3 both sides are
    # true division so the test never fired. A modulo test is exact.
    if num_lines % 5:
        raise IOError('GCMT represented by 5 lines - number in file not'
                      ' a multiple of 5!')
    self.catalogue.number_gcmts = num_lines // 5
    self.catalogue.gcmts = [None] * self.catalogue.number_gcmts
    id0 = 0
    print('Parsing catalogue ...')
    for iloc in range(0, self.catalogue.number_gcmts):
        self.catalogue.gcmts[iloc] = self.read_ndk_event(raw_data, id0)
        id0 += 5  # each event spans exactly 5 lines
    print('complete. Contains %s moment tensors'
          % self.catalogue.get_number_tensors())
    # NOTE(review): when start_year/end_year are given they are not stored
    # on the catalogue — preserved from the original; confirm if intended
    if not start_year:
        min_years = [cent.centroid.date.year
                     for cent in self.catalogue.gcmts]
        self.catalogue.start_year = np.min(min_years)
    if not end_year:
        max_years = [cent.centroid.date.year
                     for cent in self.catalogue.gcmts]
        self.catalogue.end_year = np.max(max_years)
    self.to_hmtk(use_centroid)
    return self.catalogue
Reads the GCMT catalogue file (5 lines per event) and returns the parsed catalogue
44,511
def read_ndk_event(self, raw_data, id0):
    """
    Read a 5-line batch of NDK data starting at index ``id0`` and return
    the corresponding GCMTEvent.
    """
    gcmt = GCMTEvent()
    # line 1: hypocentre
    ndkstring = raw_data[id0].rstrip('\n')
    gcmt.hypocentre = self._read_hypocentre_from_ndk_string(ndkstring)
    # line 2: metadata
    ndkstring = raw_data[id0 + 1].rstrip('\n')
    gcmt = self._get_metadata_from_ndk_string(gcmt, ndkstring)
    # line 3: centroid
    ndkstring = raw_data[id0 + 2].rstrip('\n')
    gcmt.centroid = self._read_centroid_from_ndk_string(ndkstring,
                                                        gcmt.hypocentre)
    # line 4: moment tensor (its exponent is reused below)
    ndkstring = raw_data[id0 + 3].rstrip('\n')
    gcmt.moment_tensor = self._get_moment_tensor_from_ndk_string(ndkstring)
    # line 5: principal axes (cols 3-48), nodal planes (cols 57-) and moment
    ndkstring = raw_data[id0 + 4].rstrip('\n')
    gcmt.principal_axes = self._get_principal_axes_from_ndk_string(
        ndkstring[3:48], exponent=gcmt.moment_tensor.exponent)
    gcmt.nodal_planes = self._get_nodal_planes_from_ndk_string(
        ndkstring[57:])
    gcmt.moment, gcmt.version, gcmt.magnitude = \
        self._get_moment_from_ndk_string(ndkstring,
                                         gcmt.moment_tensor.exponent)
    return gcmt
Reads a 5 - line batch of data into a set of GCMTs
44,512
def _read_hypocentre_from_ndk_string(self, linestring):
    """
    Read the hypocentre data from the ndk string and return an instance
    of the GCMTHypocentre class (fixed-column parse of NDK line 1).
    """
    hypo = GCMTHypocentre()
    hypo.source = linestring[0:4]
    hypo.date = _read_date_from_string(linestring[5:15])
    hypo.time = _read_time_from_string(linestring[16:26])
    hypo.latitude = float(linestring[27:33])
    hypo.longitude = float(linestring[34:41])
    hypo.depth = float(linestring[42:47])
    # NOTE(review): split(' ') assumes exactly one blank between the two
    # magnitudes in columns 48-55 — a double space would yield '' and a
    # ValueError; confirm against the NDK format specification
    magnitudes = [float(x) for x in linestring[48:55].split(' ')]
    if magnitudes[0] > 0.:
        hypo.m_b = magnitudes[0]
    if magnitudes[1] > 0.:
        hypo.m_s = magnitudes[1]
    hypo.location = linestring[56:]
    return hypo
Reads the hypocentre data from the ndk string to return an instance of the GCMTHypocentre class
44,513
def _get_metadata_from_ndk_string(self, gcmt, ndk_string):
    """
    Read the GCMT metadata from line 2 of the ndk batch, attach it to the
    given GCMTEvent and return the event.
    """
    gcmt.identifier = ndk_string[:16]
    # the inversion source data is split on the uppercase labels (e.g. "B:")
    inversion_data = re.split('[A-Z:]+', ndk_string[17:61])
    gcmt.metadata['BODY'] = [float(x) for x in inversion_data[1].split()]
    gcmt.metadata['SURFACE'] = [float(x)
                                for x in inversion_data[2].split()]
    gcmt.metadata['MANTLE'] = [float(x) for x in inversion_data[3].split()]
    further_meta = re.split('[: ]+', ndk_string[62:])
    gcmt.metadata['CMT'] = int(further_meta[1])
    gcmt.metadata['FUNCTION'] = {'TYPE': further_meta[2],
                                 'DURATION': float(further_meta[3])}
    return gcmt
Reads the GCMT metadata from line 2 of the ndk batch
44,514
def _get_principal_axes_from_ndk_string(self, ndk_string, exponent):
    """
    Parse the principal axes from the ndk string and return an instance
    of the GCMTPrincipalAxes class; eigenvalues are rescaled by
    10 ** exponent.
    """
    scale = 10. ** exponent
    axes = GCMTPrincipalAxes()
    # tension, null and pressure axes in fixed columns
    axes.t_axis = {'eigenvalue': scale * float(ndk_string[0:8]),
                   'plunge': float(ndk_string[8:11]),
                   'azimuth': float(ndk_string[11:15])}
    axes.b_axis = {'eigenvalue': scale * float(ndk_string[15:23]),
                   'plunge': float(ndk_string[23:26]),
                   'azimuth': float(ndk_string[26:30])}
    axes.p_axis = {'eigenvalue': scale * float(ndk_string[30:38]),
                   'plunge': float(ndk_string[38:41]),
                   'azimuth': float(ndk_string[41:])}
    return axes
Gets the principal axes from the ndk string and returns an instance of the GCMTPrincipalAxes class
44,515
def _get_moment_from_ndk_string(self, ndk_string, exponent):
    """
    Return the scalar moment (rescaled by 10 ** exponent), the catalogue
    version string and the moment magnitude.
    """
    scalar_moment = float(ndk_string[49:56]) * (10. ** exponent)
    version = ndk_string[:3]
    magnitude = utils.moment_magnitude_scalar(scalar_moment)
    return scalar_moment, version, magnitude
Returns the moment and the moment magnitude
44,516
def serialise_to_nrml(self, filename, use_defaults=False):
    """
    Write the source model to a NRML source model file given by filename.

    :param filename: path of the output file
    :param use_defaults: passed through to the hazardlib conversion
    """
    oq_model = self.convert_to_oqhazardlib(
        PoissonTOM(1.0), 2.0, 2.0, 10.0, use_defaults=use_defaults)
    write_source_model(filename, oq_model, name=self.name)
Writes the source model to a nrml source model file given by the filename
44,517
def input_checks(catalogue, config, completeness):
    """
    Perform a basic set of input checks on the data.

    :returns: the tuple (cmag, ctime, ref_mag, dmag, config)
    :raises ValueError: if the completeness table has not two columns
    """
    if isinstance(completeness, np.ndarray):
        if np.shape(completeness)[1] != 2:
            raise ValueError('Completeness Table incorrectly configured')
        # columns are [year, magnitude]
        cmag = completeness[:, 1]
        ctime = completeness[:, 0]
    elif isinstance(completeness, float):
        # single completeness magnitude, complete since the first event
        cmag = np.array(completeness)
        ctime = np.array(np.min(catalogue.data['year']))
    else:
        # no completeness information: use the catalogue minima
        cmag = np.array(np.min(catalogue.data['magnitude']))
        ctime = np.array(np.min(catalogue.data['year']))
    if not config:
        ref_mag, dmag = 0.0, 0.1
        config = {'reference_magnitude': None, 'magnitude_interval': 0.1}
    else:
        if config.get('reference_magnitude') is None:
            ref_mag = 0.
            config['reference_magnitude'] = None
        else:
            ref_mag = config['reference_magnitude']
        # a missing or falsy interval defaults to 0.1
        dmag = config.get('magnitude_interval') or 0.1
    return cmag, ctime, ref_mag, dmag, config
Performs a basic set of input checks on the data
44,518
def generate_trunc_gr_magnitudes(bval, mmin, mmax, nsamples):
    """
    Generate a random list of magnitudes distributed according to a
    truncated Gutenberg-Richter model, by inverse-CDF sampling of the
    doubly-truncated exponential distribution.

    :param bval: Gutenberg-Richter b-value
    :param mmin: minimum magnitude
    :param mmax: maximum magnitude
    :param nsamples: number of magnitudes to draw
    :returns: a numpy array of nsamples magnitudes in [mmin, mmax]
    """
    u = np.random.uniform(0., 1., nsamples)
    beta = bval * np.log(10.)
    return (-1. / beta) * (
        np.log(1. - u * (1 - np.exp(-beta * (mmax - mmin))))) + mmin
Generate a random list of magnitudes distributed according to a truncated Gutenberg - Richter model
44,519
def generate_synthetic_magnitudes(aval, bval, mmin, mmax, nyears):
    """
    Generate a synthetic catalogue for a specified number of years, with
    magnitudes distributed according to a truncated Gutenberg-Richter
    distribution.

    :returns: a dict with keys 'magnitude' and 'year' (years sorted)
    """
    # expected number of events above mmin over nyears
    n_events = int(np.round(nyears * (10. ** (aval - bval * mmin)), 0))
    years = np.random.randint(0, nyears, n_events)
    mags = generate_trunc_gr_magnitudes(bval, mmin, mmax, n_events)
    return {'magnitude': mags, 'year': np.sort(years)}
Generates a synthetic catalogue for a specified number of years with magnitudes distributed according to a truncated Gutenberg - Richter distribution
44,520
def downsample_completeness_table ( comp_table , sample_width = 0.1 , mmax = None ) : new_comp_table = [ ] for i in range ( comp_table . shape [ 0 ] - 1 ) : mvals = np . arange ( comp_table [ i , 1 ] , comp_table [ i + 1 , 1 ] , d_m ) new_comp_table . extend ( [ [ comp_table [ i , 0 ] , mval ] for mval in mvals ] ) if mmax and ( mmax > comp_table [ - 1 , 1 ] ) : new_comp_table . extend ( [ [ comp_table [ - 1 , 0 ] , mval ] for mval in np . arange ( comp_table [ - 1 , 1 ] , mmax + d_m , d_m ) ] ) return np . array ( new_comp_table )
Re - sample the completeness table to a specified sample_width
44,521
def reset(yes):
    """
    Remove all the datastores and the database of the current user.

    :param yes: skip the interactive confirmation if true
    """
    ok = yes or confirm(
        'Do you really want to destroy all your data? (y/n) ')
    if not ok:
        return
    dbpath = os.path.realpath(os.path.expanduser(config.dbserver.file))
    if os.path.isfile(dbpath) and os.access(dbpath, os.W_OK):
        # the db file exists and we own it: stop the server, then remove it
        if dbserver.get_status() == 'running':
            if config.dbserver.multi_user:
                # never kill a shared dbserver
                sys.exit('The oq dbserver must be stopped '
                         'before proceeding')
            else:
                pid = logs.dbcmd('getpid')
                os.kill(pid, signal.SIGTERM)
                time.sleep(.5)  # give the process time to die
                assert dbserver.get_status() == 'not-running'
                print('dbserver stopped')
        try:
            os.remove(dbpath)
            print('Removed %s' % dbpath)
        except OSError as exc:
            print(exc, file=sys.stderr)
    purge_all(fast=True)
Remove all the datastores and the database of the current user
44,522
def set_status(db, job_id, status):
    """
    Set the status of a job, keeping is_running consistent with it.

    :param db: a database callable
    :param job_id: the job id; if negative, the n-th latest job is used
    :param status: one of created, submitted, executing, complete,
        aborted, failed
    :returns: the number of updated rows (0 if the job does not exist)
    """
    assert status in ('created', 'submitted', 'executing', 'complete',
                      'aborted', 'failed'), status
    # only submitted/executing jobs count as running
    running = 0 if status in ('created', 'complete', 'failed',
                              'aborted') else 1
    if job_id < 0:
        rows = db('SELECT id FROM job ORDER BY id DESC LIMIT ?x', -job_id)
        if not rows:
            return 0
        job_id = rows[-1].id
    cursor = db('UPDATE job SET status=?x, is_running=?x WHERE id=?x',
                status, running, job_id)
    return cursor.rowcount
Set the job status (one of: created, submitted, executing, complete, aborted, failed) consistently with the is_running flag.
44,523
def create_job(db, datadir):
    """
    Create a new job row in the database and return its id.

    :param db: a database callable
    :param datadir: directory containing the calculation datastores
    """
    calc_id = get_calc_id(db, datadir) + 1
    job = {'id': calc_id,
           'is_running': 1,
           'description': 'just created',
           'user_name': 'openquake',
           'calculation_mode': 'to be set',
           'ds_calc_dir': os.path.join('%s/calc_%s' % (datadir, calc_id))}
    return db('INSERT INTO job (?S) VALUES (?X)',
              job.keys(), job.values()).lastrowid
Create job for the given user return it .
44,524
def import_job(db, calc_id, calc_mode, description, user_name, status,
               hc_id, datadir):
    """
    Insert a calculation inside the database, if calc_id is not taken.

    :param db: a database callable
    :param hc_id: id of the parent hazard calculation, if any
    """
    job = {'id': calc_id,
           'calculation_mode': calc_mode,
           'description': description,
           'user_name': user_name,
           'hazard_calculation_id': hc_id,
           'is_running': 0,
           'status': status,
           'ds_calc_dir': os.path.join('%s/calc_%s' % (datadir, calc_id))}
    db('INSERT INTO job (?S) VALUES (?X)', job.keys(), job.values())
Insert a calculation inside the database if calc_id is not taken
44,525
def get_job(db, job_id, username=None):
    """
    If job_id is positive, return that job (restricted to the given user
    if username is set, None if missing); if negative, return the n-th
    latest calculation of the user (or of anybody).
    """
    job_id = int(job_id)
    if job_id > 0:
        filters = dict(id=job_id)
        if username:
            filters['user_name'] = username
        try:
            return db('SELECT * FROM job WHERE ?A', filters, one=True)
        except NotFound:
            return
    # negative or zero job_id: look backwards from the latest job
    if username:
        joblist = db('SELECT * FROM job WHERE user_name=?x '
                     'ORDER BY id DESC LIMIT ?x', username, -job_id)
    else:
        joblist = db('SELECT * FROM job ORDER BY id DESC LIMIT ?x',
                     -job_id)
    return joblist[-1] if joblist else None
If job_id is negative return the last calculation of the current user otherwise returns the job_id unchanged .
44,526
def get_calc_id(db, datadir, job_id=None):
    """
    Return the latest calc_id, by looking both at the datastore and at
    the database.
    """
    calcs = datastore.get_calc_ids(datadir)
    calc_id = calcs[-1] if calcs else 0
    if job_id is None:
        try:
            job_id = db('SELECT seq FROM sqlite_sequence WHERE name="job"',
                        scalar=True)
        except NotFound:
            job_id = 0  # no job table entry yet
    return max(calc_id, job_id)
Return the latest calc_id by looking both at the datastore and the database .
44,527
def list_calculations(db, job_type, user_name):
    """
    Return a summary of the past calculations of the given user as a
    list of strings.
    """
    jobs = db('SELECT *, %s FROM job WHERE user_name=?x '
              'AND job_type=?x ORDER BY start_time' % JOB_TYPE,
              user_name, job_type)
    out = []
    if len(jobs) == 0:
        out.append('None')
    else:
        out.append('job_id | status | start_time | '
                   ' description')
        for job in jobs:
            out.append('%6d | %10s | %s | %s'
                       % (job.id, job.status, job.start_time,
                          job.description))
    return out
Yield a summary of past calculations .
44,528
def create_outputs(db, job_id, keysize, ds_size):
    """
    Build a correspondence between the outputs in the datastore and the
    ones in the database, and update the datastore size in the job table.

    :param keysize: iterable of (datastore key, size in MB) pairs
    :param ds_size: total datastore size in MB
    """
    db('UPDATE job SET size_mb=?x WHERE id=?x', ds_size, job_id)
    rows = [(job_id, DISPLAY_NAME.get(key, key), key, size)
            for key, size in keysize]
    db.insert('output', 'oq_job_id display_name ds_key size_mb'.split(),
              rows)
Build a correspondence between the outputs in the datastore and the ones in the database . Also update the datastore size in the job table .
44,529
def finish(db, job_id, status):
    """
    Set the job columns is_running, status and stop_time.

    :param status: the final status of the job
    """
    updates = dict(is_running=False, status=status,
                   stop_time=datetime.utcnow())
    db('UPDATE job SET ?D WHERE id=?x', updates, job_id)
Set the job columns is_running status and stop_time .
44,530
def del_calc(db, job_id, user):
    """
    Delete a calculation and all associated outputs, if possible.

    :returns: a dict with an "error" key on failure, or a "success" key
        with the removed datastore path
    """
    job_id = int(job_id)
    # refuse to delete if other calculations depend on this one
    dependent = db(
        'SELECT id FROM job WHERE hazard_calculation_id=?x', job_id)
    if dependent:
        return {"error": 'Cannot delete calculation %d: there '
                'are calculations '
                'dependent from it: %s' % (job_id,
                                           [j.id for j in dependent])}
    try:
        owner, path = db(
            'SELECT user_name, ds_calc_dir FROM job WHERE id=?x',
            job_id, one=True)
    except NotFound:
        return {"error": 'Cannot delete calculation %d:'
                ' ID does not exist' % job_id}
    # the DELETE is restricted to the calling user
    deleted = db('DELETE FROM job WHERE id=?x AND user_name=?x',
                 job_id, user).rowcount
    if not deleted:
        return {"error": 'Cannot delete calculation %d: it belongs to '
                '%s and you are %s' % (job_id, owner, user)}
    # also remove the datastore file on disk
    fname = path + ".hdf5"
    try:
        os.remove(fname)
    except OSError as exc:
        # the db row is already gone at this point
        return {"error": 'Could not remove %s: %s' % (fname, exc)}
    return {"success": fname}
Delete a calculation and all associated outputs if possible .
44,531
def log(db, job_id, timestamp, level, process, message):
    """
    Write a log record in the database.
    """
    record = (job_id, timestamp, level, process, message)
    db('INSERT INTO log (job_id, timestamp, level, process, message) '
       'VALUES (?X)', record)
Write a log record in the database .
44,532
def get_log(db, job_id):
    """
    Extract the logs of the given job as a list of formatted strings.
    """
    rows = db('SELECT * FROM log WHERE job_id=?x ORDER BY id', job_id)
    # the timestamp is truncated by 4 characters (sub-millisecond digits)
    return ['[%s #%d %s] %s' % (str(row.timestamp)[:-4], job_id,
                                row.level, row.message)
            for row in rows]
Extract the logs as a big string
44,533
def save_performance(db, job_id, records):
    """
    Save in the database the performance information about the given job.

    :param records: iterable of dicts with keys operation, time_sec,
        memory_mb, counts
    """
    columns = 'job_id operation time_sec memory_mb counts'.split()
    rows = [(job_id, rec['operation'], rec['time_sec'], rec['memory_mb'],
             int(rec['counts'])) for rec in records]
    db.insert('performance', columns, rows)
Save in the database the performance information about the given job .
44,534
def get_traceback(db, job_id):
    """
    Return the traceback of the given calculation as a list of lines.
    The list is empty if the calculation was successful.
    """
    rows = db("SELECT * FROM log WHERE job_id=?x AND level='CRITICAL'",
              job_id)
    if not rows:
        return []
    # only the latest critical record contains the relevant traceback
    return rows[-1].message.splitlines()
Return the traceback of the given calculation as a list of lines . The list is empty if the calculation was successful .
44,535
def webui(cmd, hostport='127.0.0.1:8800', skip_browser=False):
    """
    Start the webui server in foreground or perform other operation on
    the django application.

    :param cmd: the command to run ('start' or another django command)
    :param hostport: the host:port to bind when starting the server
    :param skip_browser: do not open a browser after starting
    """
    dbpath = os.path.realpath(os.path.expanduser(config.dbserver.file))
    # a db file we cannot write to means we are running as the wrong user
    if os.path.isfile(dbpath) and not os.access(dbpath, os.W_OK):
        sys.exit('This command must be run by the proper user: '
                 'see the documentation for details')
    if cmd == 'start':
        dbserver.ensure_on()  # the webui needs a running dbserver
        rundjango('runserver', hostport, skip_browser)
    elif cmd in commands:
        rundjango(cmd)
start the webui server in foreground or perform other operation on the django application
44,536
def _get_basic_term(self, C, rup, dists):
    """
    Compute and return basic form, see page 1030.
    """
    # fictitious depth c4m: full c4 above M5, tapered to 1 between M4-M5
    if rup.mag > 5.:
        c4m = C['c4']
    elif rup.mag > 4.:
        c4m = C['c4'] - (C['c4'] - 1.) * (5. - rup.mag)
    else:
        c4m = 1.
    R = np.sqrt(dists.rrup ** 2. + c4m ** 2.)
    # distance-only part of the basic form
    base_term = C['a1'] * np.ones_like(dists.rrup) + C['a17'] * dists.rrup
    # magnitude-dependent part: three branches split at m1 and m2
    if rup.mag >= C['m1']:
        base_term += (C['a5'] * (rup.mag - C['m1']) +
                      C['a8'] * (8.5 - rup.mag) ** 2. +
                      (C['a2'] + C['a3'] * (rup.mag - C['m1'])) *
                      np.log(R))
    elif rup.mag >= self.CONSTS['m2']:
        # same shape as above but with slope a4 instead of a5
        base_term += (C['a4'] * (rup.mag - C['m1']) +
                      C['a8'] * (8.5 - rup.mag) ** 2. +
                      (C['a2'] + C['a3'] * (rup.mag - C['m1'])) *
                      np.log(R))
    else:
        # below m2: quadratic extrapolation anchored at m2
        base_term += (C['a4'] * (self.CONSTS['m2'] - C['m1']) +
                      C['a8'] * (8.5 - self.CONSTS['m2']) ** 2. +
                      C['a6'] * (rup.mag - self.CONSTS['m2']) +
                      C['a7'] * (rup.mag - self.CONSTS['m2']) ** 2. +
                      (C['a2'] + C['a3'] * (self.CONSTS['m2'] - C['m1'])) *
                      np.log(R))
    return base_term
Compute and return basic form see page 1030 .
44,537
def _get_vs30star ( self , vs30 , imt ) : if imt . name == "SA" : t = imt . period if t <= 0.50 : v1 = 1500.0 elif t < 3.0 : v1 = np . exp ( - 0.35 * np . log ( t / 0.5 ) + np . log ( 1500. ) ) else : v1 = 800.0 elif imt . name == "PGA" : v1 = 1500.0 else : v1 = 1500.0 vs30_star = np . ones_like ( vs30 ) * vs30 vs30_star [ vs30 >= v1 ] = v1 return vs30_star
This computes equations 8 and 9 at page 1034
44,538
def _get_site_response_term ( self , C , imt , vs30 , sa1180 ) : vs30_star = self . _get_vs30star ( vs30 , imt ) site_resp_term = np . zeros_like ( vs30 ) gt_vlin = vs30 >= C [ 'vlin' ] lw_vlin = vs30 < C [ 'vlin' ] vs30_rat = vs30_star / C [ 'vlin' ] site_resp_term [ gt_vlin ] = ( ( C [ 'a10' ] + C [ 'b' ] * self . CONSTS [ 'n' ] ) * np . log ( vs30_rat [ gt_vlin ] ) ) site_resp_term [ lw_vlin ] = ( C [ 'a10' ] * np . log ( vs30_rat [ lw_vlin ] ) - C [ 'b' ] * np . log ( sa1180 [ lw_vlin ] + C [ 'c' ] ) + C [ 'b' ] * np . log ( sa1180 [ lw_vlin ] + C [ 'c' ] * vs30_rat [ lw_vlin ] ** self . CONSTS [ 'n' ] ) ) return site_resp_term
Compute and return site response model term see page 1033
44,539
def _get_hanging_wall_term(self, C, dists, rup):
    """
    Compute and return hanging wall model term, see page 1038.
    """
    if rup.dip == 90.0:
        # vertical fault: no hanging-wall effect
        return np.zeros_like(dists.rx)
    else:
        # Fhw: 1 on the hanging-wall side (rx > 0), 0 elsewhere
        Fhw = np.zeros_like(dists.rx)
        Fhw[dists.rx > 0] = 1.
        # T1: dip taper
        T1 = np.ones_like(dists.rx)
        T1 *= 60. / 45. if rup.dip <= 30. else (90. - rup.dip) / 45.0
        # T2: magnitude taper around M6.5
        T2 = np.zeros_like(dists.rx)
        a2hw = 0.2
        if rup.mag > 6.5:
            T2 += (1. + a2hw * (rup.mag - 6.5))
        elif rup.mag > 5.5:
            T2 += (1. + a2hw * (rup.mag - 6.5) - (1. - a2hw) *
                   (rup.mag - 6.5) ** 2)
        else:
            T2 *= 0.
        # T3: distance taper over the surface-projected fault width
        T3 = np.zeros_like(dists.rx)
        r1 = rup.width * np.cos(np.radians(rup.dip))
        r2 = 3. * r1
        idx = dists.rx < r1
        T3[idx] = (np.ones_like(dists.rx)[idx] * self.CONSTS['h1'] +
                   self.CONSTS['h2'] * (dists.rx[idx] / r1) +
                   self.CONSTS['h3'] * (dists.rx[idx] / r1) ** 2)
        idx = ((dists.rx >= r1) & (dists.rx <= r2))
        T3[idx] = 1. - (dists.rx[idx] - r1) / (r2 - r1)
        # T4: top-of-rupture depth taper (no effect below 10 km)
        T4 = np.zeros_like(dists.rx)
        if rup.ztor <= 10.:
            T4 += (1. - rup.ztor ** 2. / 100.)
        # T5: off-the-end taper based on ry0 and a 20-degree opening
        T5 = np.zeros_like(dists.rx)
        ry1 = dists.rx * np.tan(np.radians(20.))
        idx = (dists.ry0 - ry1) <= 0.0
        T5[idx] = 1.
        idx = (((dists.ry0 - ry1) > 0.0) & ((dists.ry0 - ry1) < 5.0))
        T5[idx] = 1. - (dists.ry0[idx] - ry1[idx]) / 5.0
        return Fhw * C['a13'] * T1 * T2 * T3 * T4 * T5
Compute and return hanging wall model term see page 1038 .
44,540
def _get_top_of_rupture_depth_term ( self , C , imt , rup ) : if rup . ztor >= 20.0 : return C [ 'a15' ] else : return C [ 'a15' ] * rup . ztor / 20.0
Compute and return top of rupture depth term . See paragraph Depth - to - Top of Rupture Model page 1042 .
44,541
def _get_soil_depth_term ( self , C , z1pt0 , vs30 ) : z1ref = self . _get_z1pt0ref ( vs30 ) z10 = copy . deepcopy ( z1pt0 ) idx = z1pt0 < 0 z10 [ idx ] = z1ref [ idx ] factor = np . log ( ( z10 + 0.01 ) / ( z1ref + 0.01 ) ) f2 = interpolate . interp1d ( [ 0.0 , 150 , 250 , 400 , 700 , 1000 , 6000 ] , [ C [ 'a43' ] , C [ 'a43' ] , C [ 'a44' ] , C [ 'a45' ] , C [ 'a46' ] , C [ 'a46' ] , C [ 'a46' ] ] , kind = 'linear' ) return f2 ( vs30 ) * factor
Compute and return soil depth term . See page 1042 .
44,542
def _get_stddevs(self, C, imt, rup, sites, stddev_types, sa1180, dists):
    """
    Return standard deviations as described in the paragraph 'Equations
    for standard deviation', page 1046.
    """
    std_intra = self._get_intra_event_std(C, rup.mag, sa1180, sites.vs30,
                                          sites.vs30measured, dists.rrup)
    std_inter = self._get_inter_event_std(C, rup.mag, sa1180, sites.vs30)
    stddevs = []
    for stype in stddev_types:
        assert stype in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
        if stype == const.StdDev.TOTAL:
            # total = sqrt(within-event^2 + between-event^2)
            stddevs.append(np.sqrt(std_intra ** 2 + std_inter ** 2))
        elif stype == const.StdDev.INTRA_EVENT:
            stddevs.append(std_intra)
        elif stype == const.StdDev.INTER_EVENT:
            stddevs.append(std_inter)
    return stddevs
Return standard deviations as described in paragraph Equations for standard deviation page 1046 .
44,543
def _get_intra_event_std ( self , C , mag , sa1180 , vs30 , vs30measured , rrup ) : phi_al = self . _get_phi_al_regional ( C , mag , vs30measured , rrup ) derAmp = self . _get_derivative ( C , sa1180 , vs30 ) phi_amp = 0.4 idx = phi_al < phi_amp if np . any ( idx ) : phi_amp = 0.4 * np . ones_like ( phi_al ) phi_amp [ idx ] = 0.99 * phi_al [ idx ] phi_b = np . sqrt ( phi_al ** 2 - phi_amp ** 2 ) phi = np . sqrt ( phi_b ** 2 * ( 1 + derAmp ) ** 2 + phi_amp ** 2 ) return phi
Returns Phi as described at pages 1046 and 1047
44,544
def _get_derivative ( self , C , sa1180 , vs30 ) : derAmp = np . zeros_like ( vs30 ) n = self . CONSTS [ 'n' ] c = C [ 'c' ] b = C [ 'b' ] idx = vs30 < C [ 'vlin' ] derAmp [ idx ] = ( b * sa1180 [ idx ] * ( - 1. / ( sa1180 [ idx ] + c ) + 1. / ( sa1180 [ idx ] + c * ( vs30 [ idx ] / C [ 'vlin' ] ) ** n ) ) ) return derAmp
Returns equation 30 page 1047
44,545
def _get_regional_term ( self , C , imt , vs30 , rrup ) : f3 = interpolate . interp1d ( [ 150 , 250 , 350 , 450 , 600 , 850 , 1150 , 2000 ] , [ C [ 'a36' ] , C [ 'a37' ] , C [ 'a38' ] , C [ 'a39' ] , C [ 'a40' ] , C [ 'a41' ] , C [ 'a42' ] , C [ 'a42' ] ] , kind = 'linear' ) return f3 ( vs30 ) + C [ 'a29' ] * rrup
Compute regional term for Japan . See page 1043
44,546
def gc(coeff, mag):
    """
    Return the set of ten coefficients to be used for the calculation of
    GM as a function of earthquake magnitude (two magnitude classes,
    split at M6.5).
    """
    if mag > 6.5:
        first = ('ua', 'ub', 'uc', 'ud', 'ue')
        second = ('ia', 'ib', 'ic', 'id', 'ie')
    else:
        first = ('a', 'b', 'c', 'd', 'e')
        second = ('ma', 'mb', 'mc', 'md', 'me')
    return tuple(coeff[k] for k in first + second)
Returns the set of coefficients to be used for the calculation of GM as a function of earthquake magnitude
44,547
def rbf(ra, coeff, mag):
    """
    Calculate the median ground motion for a given magnitude and
    (equivalent) distance `ra`, using the coefficients selected by
    :func:`gc` for that magnitude.
    """
    (a1ca, a1cb, a1cc, a1cd, a1ce,
     a2ca, a2cb, a2cc, a2cd, a2ce) = gc(coeff, mag)
    # distance-saturation term
    near_source = a1ca + a1cb * mag + \
        a1cc * np.log(ra + a1cd * np.exp(a1ce * mag))
    base = a2ca + a2cb * mag
    offset = a2cd * np.exp(a2ce * mag)
    return np.exp((near_source - base) / a2cc) - offset
Calculate the median ground motion for a given magnitude and distance
44,548
def fnc(ra, *args):
    """
    Residual function used in the minimisation problem: the difference
    between the epicentral distance implied by the trial equivalent
    distance `ra` and the observed epicentral distance.

    :param args: (repi, theta, mag, coeff)
    """
    repi, theta, mag, coeff = args[0], args[1], args[2], args[3]
    rb = rbf(ra, coeff, mag)
    theta_rad = np.radians(theta)
    # elliptical combination of the two radii
    denom = (ra ** 2 * np.sin(theta_rad) ** 2 +
             rb ** 2 * np.cos(theta_rad) ** 2) ** 0.5
    return ra * rb / denom - repi
Function used in the minimisation problem .
44,549
def get_ras(repi, theta, mag, coeff):
    """
    Compute the equivalent distance by a halving-step search on the
    residual function :func:`fnc`, starting from 200 km with an
    initial step of 100 km.
    """
    step = 100.
    ras = 200.
    residual = fnc(ras, repi, theta, mag, coeff)
    while abs(residual) > 1e-3:
        # move against the sign of the residual, then shrink the step
        if residual > 0.:
            ras = ras - step
        else:
            ras = ras + step
        residual = fnc(ras, repi, theta, mag, coeff)
        step = step / 2.
        if step < 1e-3:
            # step exhausted: accept the current estimate
            break
    return ras
Computes equivalent distance
44,550
def _get_stddevs(self, C, stddev_types, rup, imt, num_sites):
    """
    Return standard deviations as defined in eq. 4 and 5, page 744,
    based on table 8, page 744. Eq. 5 yields std dev in natural log,
    so it is converted to log10 before being returned.
    """
    stddevs = []
    for stddev_type in stddev_types:
        # magnitude-dependent component (eq. 4)
        sigma_mean = self._compute_standard_dev(rup, imt, C)
        # combined with the regression component (eq. 5)
        sigma_tot = np.sqrt((sigma_mean ** 2) + (C['SigmaReg'] ** 2))
        # convert from natural log to log10
        sigma_tot = np.log10(np.exp(sigma_tot))
        stddevs.append(sigma_tot + np.zeros(num_sites))
    return stddevs
Return standard deviations as defined in eq . 4 and 5 page 744 based on table 8 page 744 . Eq . 5 yields std dev in natural log so convert to log10
44,551
def _compute_standard_dev ( self , rup , imt , C ) : sigma_mean = 0. if imt . name in "SA PGA" : psi = - 6.898E-3 else : psi = - 3.054E-5 if rup . mag <= 6.5 : sigma_mean = ( C [ 'c12' ] * rup . mag ) + C [ 'c13' ] elif rup . mag > 6.5 : sigma_mean = ( psi * rup . mag ) + C [ 'c14' ] return sigma_mean
Compute the standard deviation in terms of magnitude, as described on page 744, eq. 4
44,552
def insert(self, table, columns, rows):
    """
    Insert several rows with executemany. Return a cursor.

    :param table: name of the target table
    :param columns: sequence of column names
    :param rows: sequence of row tuples (may be empty)
    :returns: the cursor used for the insertion
    """
    cursor = self.conn.cursor()
    if len(rows):
        # build the INSERT template from the first row; all rows are
        # expected to have the same shape
        templ, _args = match('INSERT INTO ?s (?S) VALUES (?X)',
                             table, columns, rows[0])
        cursor.executemany(templ, rows)
    return cursor
Insert several rows with executemany . Return a cursor .
44,553
def _cluster(param, tom, imtls, gsims, grp_ids, pmap):
    """
    Compute the probability map in case of a cluster group, combining
    the per-occurrence-count probabilities with the total probability
    theorem over 0..49 occurrences.

    The `imtls`, `gsims` and `grp_ids` parameters are kept for
    interface compatibility: the AccumDict of ProbabilityMaps built
    from them in the previous implementation was dead code, always
    overwritten on the first loop iteration.
    """
    for nocc in range(0, 50):
        ocr = tom.occurrence_rate
        prob_n_occ = tom.get_probability_n_occurrences(ocr, nocc)
        if nocc == 0:
            # first term initialises the accumulator
            pmapclu = prob_n_occ * (~pmap) ** nocc
        else:
            pmapclu += prob_n_occ * (~pmap) ** nocc
    # invert back from probability of no exceedance
    pmap = ~pmapclu
    return pmap
Computes the probability map in case of a cluster group
44,554
def _get_stddevs(self, C, rup, shape, stddev_types):
    """
    Return standard deviations as defined in p. 971: intra- and
    inter-event components scaled by the magnitude-dependent weight,
    combined in quadrature for the total.
    """
    weight = self._compute_weight_std(C, rup.mag)
    std_intra = weight * C["sd1"] * np.ones(shape)
    std_inter = weight * C["sd2"] * np.ones(shape)
    stddevs = []
    for requested in stddev_types:
        assert requested in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
        if requested == const.StdDev.TOTAL:
            stddevs.append(np.sqrt(std_intra ** 2. + std_inter ** 2.))
        elif requested == const.StdDev.INTRA_EVENT:
            stddevs.append(std_intra)
        elif requested == const.StdDev.INTER_EVENT:
            stddevs.append(std_inter)
    return stddevs
Return standard deviations as defined in p . 971 .
44,555
def _compute_weight_std ( self , C , mag ) : if mag < 6.0 : return C [ 'a1' ] elif mag >= 6.0 and mag < 6.5 : return C [ 'a1' ] + ( C [ 'a2' ] - C [ 'a1' ] ) * ( ( mag - 6.0 ) / 0.5 ) else : return C [ 'a2' ]
Common part of equations 8 and 9 page 971 .
44,556
def _compute_magnitude_scaling_term ( self , C , mag ) : c1 = self . CONSTS [ 'c1' ] if mag <= c1 : return C [ 'b1' ] + C [ 'b2' ] * ( mag - c1 ) + C [ 'b3' ] * ( 8.5 - mag ) ** 2 else : return C [ 'b1' ] + C [ 'b7' ] * ( mag - c1 ) + C [ 'b3' ] * ( 8.5 - mag ) ** 2
Compute and return magnitude scaling term in equation 2 page 970 .
44,557
def _compute_geometric_decay_term ( self , C , mag , dists ) : c1 = self . CONSTS [ 'c1' ] return ( ( C [ 'b4' ] + C [ 'b5' ] * ( mag - c1 ) ) * np . log ( np . sqrt ( dists . rjb ** 2.0 + C [ 'b6' ] ** 2.0 ) ) )
Compute and return geometric decay term in equation 3 page 970 .
44,558
def _compute_anelestic_attenuation_term ( self , C , dists ) : f_aat = np . zeros_like ( dists . rjb ) idx = dists . rjb > 80.0 f_aat [ idx ] = C [ "b10" ] * ( dists . rjb [ idx ] - 80.0 ) return f_aat
Compute and return anelastic attenuation term in equation 5 page 970 .
44,559
def _compute_non_linear_term(self, C, pga_only, sites):
    """
    Compute the non-linear site term, equation 6, page 970.

    :param pga_only: reference PGA used by the non-linear part
    """
    Vref = self.CONSTS['Vref']
    Vcon = self.CONSTS['Vcon']
    c = self.CONSTS['c']
    n = self.CONSTS['n']
    lnS = np.zeros_like(sites.vs30)
    # sites below the reference velocity: non-linear amplification
    idx = sites.vs30 < Vref
    lnS[idx] = (C['sb1'] * np.log(sites.vs30[idx] / Vref) +
                C['sb2'] * np.log(
                    (pga_only[idx] + c * (sites.vs30[idx] / Vref) ** n) /
                    ((pga_only[idx] + c) *
                     (sites.vs30[idx] / Vref) ** n)))
    # sites at or above the reference velocity: linear scaling with
    # vs30 capped at Vcon; fancy indexing returns a copy, so the cap
    # below does not modify sites.vs30
    idx = sites.vs30 >= Vref
    new_sites = sites.vs30[idx]
    new_sites[new_sites > Vcon] = Vcon
    lnS[idx] = C['sb1'] * np.log(new_sites / Vref)
    return lnS
Compute non - linear term equation 6 page 970 .
44,560
def _compute_mean ( self , C , mag , dists , rake ) : mean = ( self . _compute_magnitude_scaling_term ( C , mag ) + self . _compute_geometric_decay_term ( C , mag , dists ) + self . _compute_faulting_style_term ( C , rake ) + self . _compute_anelestic_attenuation_term ( C , dists ) ) return mean
Compute and return mean value without site conditions that is equations 2 - 5 page 970 .
44,561
def get_bounding_box(self, maxdist):
    """
    Bounding box containing all the point sources, enlarged by the
    maximum distance.

    :param maxdist: enlargement distance
    """
    return utils.get_bounding_box([ps.location for ps in self], maxdist)
Bounding box containing all the point sources enlarged by the maximum distance .
44,562
def _compute_standard_dev ( self , rup , imt , C ) : sigma_mean = 0. if rup . mag <= 7.0 : sigma_mean = ( C [ 'c12' ] * rup . mag ) + C [ 'c13' ] elif rup . mag > 7.0 : sigma_mean = ( - 0.00695 * rup . mag ) + C [ 'c14' ] return sigma_mean
Compute the standard deviation in terms of magnitude, as described on p. 1866, eq. 6
44,563
def get_rate_osr_normal_transform(self, threshold_moment, id0):
    """
    Get the seismicity rate for the special case of the ridge
    condition with spreading and a transform component.

    :param threshold_moment: moment threshold for the rate calculation
    :param id0: index selecting the cells in this regime
    """
    # spreading (ridge) component: e1h forced to zero
    e1h_ridge = np.zeros(np.sum(id0), dtype=float)
    e2h_ridge = self.strain.data['e1h'][id0] + \
        self.strain.data['e2h'][id0]
    err_ridge = -(e1h_ridge + e2h_ridge)
    calculated_rate_ridge = self.continuum_seismicity(
        threshold_moment, e1h_ridge, e2h_ridge, err_ridge,
        self.regionalisation['OSRnor'])
    # transform component: e2h = -e1h (pure strike-slip), zero vertical
    e1h_trans = self.strain.data['e1h'][id0]
    e2h_trans = -e1h_trans
    err_trans = np.zeros(np.sum(id0), dtype=float)
    calculated_rate_transform = self.continuum_seismicity(
        threshold_moment, e1h_trans, e2h_trans, err_trans,
        self.regionalisation['OTFmed'])
    # sum of the two components, scaled by the OSRnor adjustment factor
    return (self.regionalisation['OSRnor']['adjustment_factor'] *
            (calculated_rate_ridge + calculated_rate_transform))
Gets seismicity rate for special case of the ridge condition with spreading and transform component
44,564
def get_rate_osr_convergent_transform(self, threshold_moment, id0):
    """
    Calculate the seismicity rate for the special case of the ridge
    condition with convergence and a transform component.

    :param threshold_moment: moment threshold for the rate calculation
    :param id0: index selecting the cells in this regime
    """
    # convergent (OCB) component: e2h forced to zero
    e1h_ocb = self.strain.data['e1h'][id0] + \
        self.strain.data['e2h'][id0]
    e2h_ocb = np.zeros(np.sum(id0), dtype=float)
    err_ocb = -(e1h_ocb + e2h_ocb)
    calculated_rate_ocb = self.continuum_seismicity(
        threshold_moment, e1h_ocb, e2h_ocb, err_ocb,
        self.regionalisation['OCB'])
    # transform component: e1h = -e2h (pure strike-slip), zero vertical
    e2h_trans = self.strain.data['e2h'][id0]
    e1h_trans = -e2h_trans
    err_trans = np.zeros(np.sum(id0), dtype=float)
    calculated_rate_transform = self.continuum_seismicity(
        threshold_moment, e1h_trans, e2h_trans, err_trans,
        self.regionalisation['OTFmed'])
    # sum of the two components, scaled by the OSRnor adjustment factor
    return (self.regionalisation['OSRnor']['adjustment_factor'] *
            (calculated_rate_ocb + calculated_rate_transform))
Calculates seismicity rate for special case of the ridge condition with convergence and transform
44,565
def get_median_area(self, mag, rake):
    """
    Calculate the median fault area from magnitude, with a
    rake-dependent coefficient.
    """
    if rake is None:
        # undefined rake: use the "average" coefficient
        exponent = mag - 4.185
    elif (-45 <= rake <= 45) or (rake >= 135) or (rake <= -135):
        # strike-slip mechanisms
        exponent = mag - 4.18
    else:
        # dip-slip mechanisms
        exponent = mag - 4.19
    return power(10.0, exponent)
Calculates median fault area from magnitude .
44,566
def _get_base_url ( request ) : if request . is_secure ( ) : base_url = 'https://%s' else : base_url = 'http://%s' base_url %= request . META [ 'HTTP_HOST' ] return base_url
Construct a base URL given a request object .
44,567
def _prepare_job(request, candidates):
    """
    Create a temporary directory, move the uploaded files there and
    select the job file by looking at the candidate names.

    :returns: the paths of the selected job files
    """
    temp_dir = tempfile.mkdtemp()
    inifiles = []
    arch = request.FILES.get('archive')
    if arch is None:
        # no archive: move each uploaded file and keep those whose
        # name matches one of the candidates
        for each_file in request.FILES.values():
            new_path = os.path.join(temp_dir, each_file.name)
            shutil.move(each_file.temporary_file_path(), new_path)
            if each_file.name in candidates:
                inifiles.append(new_path)
        return inifiles
    # an archive was uploaded: extract the candidates from the zip
    return readinput.extract_from_zip(arch, candidates)
Creates a temporary directory move uploaded files there and select the job file by looking at the candidate names .
44,568
def ajax_login(request):
    """
    Accept a POST request to login.

    :param request: request with "username" and "password" POST
        parameters
    :returns: a plain-text response: 200 on success, 403 on disabled
        account or invalid credentials
    """
    username = request.POST['username']
    password = request.POST['password']
    user = authenticate(username=username, password=password)
    if user is not None:
        if user.is_active:
            login(request, user)
            return HttpResponse(content='Successful login',
                                content_type='text/plain', status=200)
        else:
            return HttpResponse(content='Disabled account',
                                content_type='text/plain', status=403)
    else:
        return HttpResponse(content='Invalid login',
                            content_type='text/plain', status=403)
Accept a POST request to login .
44,569
def get_available_gsims(request):
    """
    Return a list of strings with the available GSIMs, JSON-encoded.
    """
    gsims = list(gsim.get_available_gsims())
    return HttpResponse(content=json.dumps(gsims), content_type=JSON)
Return a list of strings with the available GSIMs
44,570
def validate_nrml(request):
    """
    Leverage oq-risklib to check if a given XML text is a valid NRML.

    :param request: request with an "xml_text" POST parameter
    :returns: a response describing the validation outcome, including
        the error message and line number when invalid
    """
    xml_text = request.POST.get('xml_text')
    if not xml_text:
        return HttpResponseBadRequest(
            'Please provide the "xml_text" parameter')
    xml_file = gettemp(xml_text, suffix='.xml')
    try:
        nrml.to_python(xml_file)
    except ExpatError as exc:
        # XML parsing error: line number available on the exception
        return _make_response(error_msg=str(exc),
                              error_line=exc.lineno,
                              valid=False)
    except Exception as exc:
        # other validation errors: try to extract a line number from
        # the message text
        exc_msg = exc.args[0]
        if isinstance(exc_msg, bytes):
            exc_msg = exc_msg.decode('utf-8')   # make it a unicode object
        elif isinstance(exc_msg, str):
            pass
        else:
            # if the message is not a string, it is some other object:
            # no line number can be extracted
            return _make_response(error_msg=str(exc_msg),
                                  error_line=None,
                                  valid=False)
        error_msg = exc_msg.split(', line')[0]
        search_match = re.search(r'line \d+', exc_msg)
        if search_match:
            error_line = int(search_match.group(0).split()[1])
        else:
            error_line = None
        return _make_response(
            error_msg=error_msg, error_line=error_line, valid=False)
    else:
        return _make_response(error_msg=None, error_line=None,
                              valid=True)
Leverage oq - risklib to check if a given XML text is a valid NRML
44,571
def calc_list(request, id=None):
    """
    Get a list of calculations and report their id, status,
    calculation_mode, is_running, description, and a url where more
    detailed information can be accessed. This is called several times
    by the Javascript.

    :param id: if given, return information about that single
        calculation only
    """
    base_url = _get_base_url(request)
    calc_data = logs.dbcmd('get_calcs', request.GET,
                           utils.get_valid_users(request),
                           utils.get_acl_on(request), id)
    response_data = []
    username = psutil.Process(os.getpid()).username()
    for (hc_id, owner, status, calculation_mode, is_running, desc, pid,
            parent_id, size_mb) in calc_data:
        url = urlparse.urljoin(base_url, 'v1/calc/%d' % hc_id)
        abortable = False
        if is_running:
            try:
                # a running job is abortable only when its process
                # belongs to the same system user as the server
                if psutil.Process(pid).username() == username:
                    abortable = True
            except psutil.NoSuchProcess:
                pass
        response_data.append(
            dict(id=hc_id, owner=owner,
                 calculation_mode=calculation_mode, status=status,
                 is_running=bool(is_running), description=desc,
                 url=url, parent_id=parent_id, abortable=abortable,
                 size_mb=size_mb))
    # when a single id was requested, unwrap the one-element list
    if id is not None:
        [response_data] = response_data
    return HttpResponse(content=json.dumps(response_data),
                        content_type=JSON)
Get a list of calculations and report their id status calculation_mode is_running description and a url where more detailed information can be accessed . This is called several times by the Javascript .
44,572
def calc_abort(request, calc_id):
    """
    Abort the given calculation, if it is running.
    """
    job = logs.dbcmd('get_job', calc_id)
    if job is None:
        message = {'error': 'Unknown job %s' % calc_id}
        return HttpResponse(content=json.dumps(message),
                            content_type=JSON)
    if job.status not in ('submitted', 'executing'):
        message = {'error': 'Job %s is not running' % job.id}
        return HttpResponse(content=json.dumps(message),
                            content_type=JSON)
    # permission check: only allowed users may abort the job
    if not utils.user_has_permission(request, job.user_name):
        message = {'error': ('User %s has no permission to abort job %s'
                             % (job.user_name, job.id))}
        return HttpResponse(content=json.dumps(message),
                            content_type=JSON, status=403)
    if job.pid:
        try:
            os.kill(job.pid, signal.SIGTERM)
        except Exception as exc:
            logging.error(exc)
        else:
            logging.warning('Aborting job %d, pid=%d', job.id, job.pid)
            logs.dbcmd('set_status', job.id, 'aborted')
        message = {'success': 'Killing job %d' % job.id}
        return HttpResponse(content=json.dumps(message),
                            content_type=JSON)
    message = {'error': 'PID for job %s not found' % job.id}
    return HttpResponse(content=json.dumps(message), content_type=JSON)
Abort the given calculation if it is running
44,573
def calc_remove(request, calc_id):
    """
    Remove the calculation id, on behalf of the requesting user.
    """
    user = utils.get_user(request)
    try:
        message = logs.dbcmd('del_calc', calc_id, user)
    except dbapi.NotFound:
        return HttpResponseNotFound()
    if 'success' in message:
        return HttpResponse(content=json.dumps(message),
                            content_type=JSON, status=200)
    elif 'error' in message:
        logging.error(message['error'])
        return HttpResponse(content=json.dumps(message),
                            content_type=JSON, status=403)
    else:
        # unexpected message shape: report it as an internal error
        logging.error(message)
        return HttpResponse(content=message,
                            content_type='text/plain', status=500)
Remove the calculation id
44,574
def log_to_json(log):
    """
    Convert a log record into a list of four strings:
    truncated ISO timestamp, level, process and message.
    """
    timestamp = log.timestamp.isoformat()[:22]
    return [timestamp, log.level, log.process, log.message]
Convert a log record into a list of strings
44,575
def calc_log_size(request, calc_id):
    """
    Get the current number of lines in the log, as JSON.
    """
    try:
        response_data = logs.dbcmd('get_log_size', calc_id)
    except dbapi.NotFound:
        return HttpResponseNotFound()
    return HttpResponse(content=json.dumps(response_data),
                        content_type=JSON)
Get the current number of lines in the log
44,576
def submit_job(job_ini, username, hazard_job_id=None):
    """
    Create a job object from the given job.ini file in the job
    directory and run it in a new process. Returns the job ID and PID.
    """
    job_id = logs.init('job')
    oq = engine.job_from_file(
        job_ini, job_id, username, hazard_calculation_id=hazard_job_id)
    # serialize the oqparam with protocol 0, so that the pickle can be
    # embedded as text in the generated script
    pik = pickle.dumps(oq, protocol=0)
    code = RUNCALC % dict(job_id=job_id, hazard_job_id=hazard_job_id,
                          pik=pik, username=username)
    tmp_py = gettemp(code, suffix='.py')
    # print(code, tmp_py)  # useful when debugging
    devnull = subprocess.DEVNULL
    # run the calculation in a detached subprocess, with all standard
    # streams redirected to /dev/null
    popen = subprocess.Popen([sys.executable, tmp_py],
                             stdin=devnull, stdout=devnull,
                             stderr=devnull)
    # reap the subprocess in a background thread to avoid zombies
    threading.Thread(target=popen.wait).start()
    logs.dbcmd('update_job', job_id, {'pid': popen.pid})
    return job_id, popen.pid
Create a job object from the given job . ini file in the job directory and run it in a new process . Returns the job ID and PID .
44,577
def calc_result(request, result_id):
    """
    Download a specific result, by ``result_id``.

    :returns: a streamed file response, zipped when the export
        produces more than one file; 404 when the result does not
        exist, 403 when the user lacks permission
    """
    try:
        job_id, job_status, job_user, datadir, ds_key = logs.dbcmd(
            'get_result', result_id)
        if not utils.user_has_permission(request, job_user):
            return HttpResponseForbidden()
    except dbapi.NotFound:
        return HttpResponseNotFound()
    etype = request.GET.get('export_type')
    export_type = etype or DEFAULT_EXPORT_TYPE
    tmpdir = tempfile.mkdtemp()
    try:
        exported = core.export_from_db(
            (ds_key, export_type), job_id, datadir, tmpdir)
    except DataStoreExportError as exc:
        # TODO: there should be a better error page
        return HttpResponse(content='%s: %s' %
                            (exc.__class__.__name__, exc),
                            content_type='text/plain', status=500)
    if not exported:
        return HttpResponseNotFound(
            'Nothing to export for export_type=%s, %s' %
            (export_type, ds_key))
    elif len(exported) > 1:
        # build a zip archive so that a single file can be streamed
        archname = ds_key + '-' + export_type + '.zip'
        zipfiles(exported, os.path.join(tmpdir, archname))
        exported = os.path.join(tmpdir, archname)
    else:
        exported = exported[0]
    content_type = EXPORT_CONTENT_TYPE_MAP.get(
        export_type, DEFAULT_CONTENT_TYPE)
    fname = 'output-%s-%s' % (result_id, os.path.basename(exported))
    stream = FileWrapper(open(exported, 'rb'))
    # remove the temporary directory when the stream is closed
    stream.close = lambda: (FileWrapper.close(stream),
                            shutil.rmtree(tmpdir))
    response = FileResponse(stream, content_type=content_type)
    response['Content-Disposition'] = (
        'attachment; filename=%s' % os.path.basename(fname))
    response['Content-Length'] = str(os.path.getsize(exported))
    return response
Download a specific result by result_id .
44,578
def extract(request, calc_id, what):
    """
    Wrapper over the `oq extract` command: read `what` from the
    calculation datastore and stream it back as a compressed .npz
    file. Only users with permission on the job may retrieve data.
    """
    job = logs.dbcmd('get_job', int(calc_id))
    if job is None:
        return HttpResponseNotFound()
    if not utils.user_has_permission(request, job.user_name):
        return HttpResponseForbidden()
    try:
        # read the data and save them on a temporary .npz file
        with datastore.read(job.ds_calc_dir + '.hdf5') as ds:
            fd, fname = tempfile.mkstemp(
                prefix=what.replace('/', '-'), suffix='.npz')
            os.close(fd)
            # forward the query string to the extract machinery
            n = len(request.path_info)
            query_string = unquote_plus(request.get_full_path()[n:])
            aw = _extract(ds, what + query_string)
            a = {}
            for key, val in vars(aw).items():
                key = str(key)
                if isinstance(val, str):
                    # store strings as utf-8 encoded numpy arrays
                    a[key] = numpy.array(val.encode('utf-8'))
                elif isinstance(val, dict):
                    # only the keys are kept, the values are lost
                    a[key] = list(val)
                else:
                    a[key] = val
            numpy.savez_compressed(fname, **a)
    except Exception as exc:
        tb = ''.join(traceback.format_tb(exc.__traceback__))
        return HttpResponse(
            content='%s: %s\n%s' % (exc.__class__.__name__, exc, tb),
            content_type='text/plain', status=500)
    # stream the file back, removing it when the stream is closed
    stream = FileWrapper(open(fname, 'rb'))
    stream.close = lambda: (FileWrapper.close(stream),
                            os.remove(fname))
    response = FileResponse(stream,
                            content_type='application/octet-stream')
    response['Content-Disposition'] = (
        'attachment; filename=%s' % os.path.basename(fname))
    response['Content-Length'] = str(os.path.getsize(fname))
    return response
Wrapper over the oq extract command . If setting . LOCKDOWN is true only calculations owned by the current user can be retrieved .
44,579
def calc_datastore(request, job_id):
    """
    Download a full datastore file.

    :param job_id: id of the requested calculation
    :returns: a response streaming the .hdf5 datastore; 404 when the
        job is unknown, 403 when the user lacks permission
    """
    job = logs.dbcmd('get_job', int(job_id))
    if job is None:
        return HttpResponseNotFound()
    if not utils.user_has_permission(request, job.user_name):
        return HttpResponseForbidden()
    fname = job.ds_calc_dir + '.hdf5'
    response = FileResponse(
        FileWrapper(open(fname, 'rb')), content_type=HDF5)
    response['Content-Disposition'] = (
        'attachment; filename=%s' % os.path.basename(fname))
    response['Content-Length'] = str(os.path.getsize(fname))
    return response
Download a full datastore file .
44,580
def calc_oqparam(request, job_id):
    """
    Return the calculation parameters as a JSON.
    """
    job = logs.dbcmd('get_job', int(job_id))
    if job is None:
        return HttpResponseNotFound()
    if not utils.user_has_permission(request, job.user_name):
        return HttpResponseForbidden()
    with datastore.read(job.ds_calc_dir + '.hdf5') as ds:
        oq = ds['oqparam']
    return HttpResponse(content=json.dumps(vars(oq)),
                        content_type=JSON)
Return the calculation parameters as a JSON
44,581
def on_same_fs(request):
    """
    Accept a POST request to check access to a filesystem available by
    a client: read the first 32 bytes of `filename` and compare their
    adler32 checksum with the one provided by the client.

    :returns: a JSON response with a 'success' boolean
    """
    filename = request.POST['filename']
    checksum_in = request.POST['checksum']
    checksum = 0
    try:
        # a context manager guarantees the file handle is closed
        # (the previous implementation leaked it)
        with open(filename, 'rb') as f:
            data = f.read(32)
        checksum = zlib.adler32(data, checksum) & 0xffffffff
        if checksum == int(checksum_in):
            return HttpResponse(content=json.dumps({'success': True}),
                                content_type=JSON, status=200)
    except (IOError, ValueError):
        # missing/unreadable file or malformed checksum: fall through
        pass
    return HttpResponse(content=json.dumps({'success': False}),
                        content_type=JSON, status=200)
Accept a POST request to check access to a FS available by a client .
44,582
def classical_damage(riskinputs, riskmodel, param, monitor):
    """
    Core function for a classical damage computation.

    :param riskinputs: the risk inputs to process
    :param riskmodel: object providing `gen_outputs` and `loss_types`
    :param param: extra parameters (unused here)
    :param monitor: monitor passed to `gen_outputs`
    :returns: an AccumDict (loss_type_index, rlzi) ->
        {asset_ordinal: damage}
    """
    result = AccumDict(accum=AccumDict())
    for ri in riskinputs:
        for out in riskmodel.gen_outputs(ri, monitor):
            for l, loss_type in enumerate(riskmodel.loss_types):
                ordinals = ri.assets['ordinal']
                result[l, out.rlzi] += dict(
                    zip(ordinals, out[loss_type]))
    return result
Core function for a classical damage computation .
44,583
def cmp_mat(a, b):
    """
    Compare two matrices element by element on the absolute values,
    returning a negative, zero or positive value (cmp-style) according
    to the first differing pair.

    The Python 2 builtin ``cmp`` was removed in Python 3, so the
    comparison is spelled out explicitly.

    :param a: first matrix (any numpy array, iterated via ``.flat``)
    :param b: second matrix, same shape as `a`
    :returns: -1, 0 or 1
    """
    for x, y in zip(a.flat, b.flat):
        ax, ay = abs(x), abs(y)
        if ax != ay:
            return 1 if ax > ay else -1
    return 0
Compare two matrices element-wise on absolute values, returning a negative, zero or positive value (cmp-style)
44,584
def _get_centroid_time(self, time_diff):
    """
    Shift the event date and time by `time_diff` seconds, updating
    `self.date` and `self.time` in place.
    """
    source_time = datetime.datetime.combine(self.date, self.time)
    second_diff = floor(fabs(time_diff))
    # NOTE(review): 1000. converts the fractional part to
    # milliseconds, not microseconds, yet the value is passed as
    # `microseconds` below — confirm intended units.
    # NOTE(review): for negative time_diff, (time_diff - second_diff)
    # is not the fractional part (e.g. -2.5 - 2 = -4.5) — confirm.
    microsecond_diff = int(1000. * (time_diff - second_diff))
    if time_diff < 0.:
        source_time = source_time - datetime.timedelta(
            seconds=int(second_diff), microseconds=microsecond_diff)
    else:
        source_time = source_time + datetime.timedelta(
            seconds=int(second_diff), microseconds=microsecond_diff)
    self.time = source_time.time()
    self.date = source_time.date()
Adjust the event date and time by the given centroid time difference
44,585
def _to_ned ( self ) : if self . ref_frame is 'USE' : return utils . use_to_ned ( self . tensor ) , utils . use_to_ned ( self . tensor_sigma ) elif self . ref_frame is 'NED' : return self . tensor , self . tensor_sigma else : raise ValueError ( 'Reference frame %s not recognised - cannot ' 'transform to NED!' % self . ref_frame )
Switches the reference frame to NED
44,586
def _to_use ( self ) : if self . ref_frame is 'NED' : return utils . ned_to_use ( self . tensor ) , utils . ned_to_use ( self . tensor_sigma ) elif self . ref_frame is 'USE' : return self . tensor , self . tensor_sigma else : raise ValueError ( 'Reference frame %s not recognised - cannot ' 'transform to USE!' % self . ref_frame )
Returns a tensor in the USE reference frame
44,587
def get_nodal_planes(self):
    """
    Return the nodal planes by eigendecomposition of the moment
    tensor, as a GCMTNodalPlanes instance.

    The pair of candidate rotation matrices is ordered with
    ``functools.cmp_to_key``: ``sorted(..., cmp=...)`` is Python-2
    only and raises a TypeError on Python 3.
    """
    from functools import cmp_to_key  # local import, only needed here
    # work in the NED reference frame
    self.tensor, self.tensor_sigma = self._to_ned()
    self.ref_frame = 'NED'
    _, evect = utils.eigendecompose(self.tensor)
    _, rot_vec = utils.eigendecompose(np.matrix([[0., 0., -1],
                                                 [0., 0., 0.],
                                                 [-1., 0., 0.]]))
    rotation_matrix = (np.matrix(evect * rot_vec.T)).T
    if np.linalg.det(rotation_matrix) < 0.:
        # enforce a proper (right-handed) rotation
        rotation_matrix *= -1.
    flip_dc = np.matrix([[0., 0., -1.], [0., -1., 0.], [-1., 0., 0.]])
    rotation_matrices = sorted(
        [rotation_matrix, flip_dc * rotation_matrix],
        key=cmp_to_key(cmp_mat))
    nodal_planes = GCMTNodalPlanes()
    # first solution -> nodal plane 1
    dip, strike, rake = [(180. / pi) * angle for angle in
                         utils.matrix_to_euler(rotation_matrices[0])]
    nodal_planes.nodal_plane_1 = {'strike': strike % 360,
                                  'dip': dip,
                                  'rake': -rake}
    # second (flipped) solution -> nodal plane 2
    dip, strike, rake = [(180. / pi) * angle for angle in
                         utils.matrix_to_euler(rotation_matrices[1])]
    nodal_planes.nodal_plane_2 = {'strike': strike % 360.,
                                  'dip': dip,
                                  'rake': -rake}
    return nodal_planes
Returns the nodal planes by eigendecomposition of the moment tensor
44,588
def get_principal_axes(self):
    """
    Use the eigendecomposition to extract the principal axes from the
    moment tensor, returning an instance of the GCMTPrincipalAxes
    class.
    """
    # normalised eigendecomposition populates self.eigenvalues and
    # self.eigenvectors as a side effect
    _ = self.eigendecompose(normalise=True)
    principal_axes = GCMTPrincipalAxes()
    # eigenvalue/eigenvector columns 0, 1, 2 map to the P, B and T
    # axes respectively
    principal_axes.p_axis = {'eigenvalue': self.eigenvalues[0]}
    principal_axes.b_axis = {'eigenvalue': self.eigenvalues[1]}
    principal_axes.t_axis = {'eigenvalue': self.eigenvalues[2]}
    # second argument presumably selects degrees — confirm in utils
    azim, plun = utils.get_azimuth_plunge(self.eigenvectors[:, 0], True)
    principal_axes.p_axis['azimuth'] = azim
    principal_axes.p_axis['plunge'] = plun
    azim, plun = utils.get_azimuth_plunge(self.eigenvectors[:, 1], True)
    principal_axes.b_axis['azimuth'] = azim
    principal_axes.b_axis['plunge'] = plun
    azim, plun = utils.get_azimuth_plunge(self.eigenvectors[:, 2], True)
    principal_axes.t_axis['azimuth'] = azim
    principal_axes.t_axis['plunge'] = plun
    return principal_axes
Uses the eigendecomposition to extract the principal axes from the moment tensor - returning an instance of the GCMTPrincipalAxes class
44,589
def select_catalogue_events(self, id0):
    """
    Order the events in the catalogue according to an indexing vector,
    re-indexing every non-empty array/list in `self.data` and the
    moment tensor list.

    :param id0: indexing vector (array or list of positions)
    """
    for key in self.data:
        values = self.data[key]
        if isinstance(values, np.ndarray) and len(values) > 0:
            self.data[key] = values[id0]
        elif isinstance(values, list) and len(values) > 0:
            self.data[key] = [values[pos] for pos in id0]
        # empty containers and other types are left untouched
    if len(self.gcmts) > 0:
        self.gcmts = [self.gcmts[pos] for pos in id0]
        self.number_gcmts = self.get_number_tensors()
Orders the events in the catalogue according to an indexing vector
44,590
def _get_edge_set(self, tol=0.1):
    """
    Retrieve the set of top edges from all of the individual surfaces,
    downsampling the upper edge based on the specified tolerance.
    """
    edges = []
    for surface in self.surfaces:
        if isinstance(surface, GriddedSurface):
            # NOTE(review): list.append() returns None, so this early
            # return yields None rather than a list of edges — confirm
            # whether `edges.append(...); return edges` was intended
            return edges.append(surface.mesh)
        elif isinstance(surface, PlanarSurface):
            # top edge determined from the two upper corner points
            edge = []
            for pnt in [surface.top_left, surface.top_right]:
                edge.append([pnt.longitude, pnt.latitude, pnt.depth])
            edges.append(numpy.array(edge))
        elif isinstance(surface,
                        (ComplexFaultSurface, SimpleFaultSurface)):
            # downsample the upper edge of the fault mesh
            edges.append(downsample_trace(surface.mesh, tol))
        else:
            raise ValueError("Surface %s not recognised" % str(surface))
    return edges
Retrieve set of top edges from all of the individual surfaces downsampling the upper edge based on the specified tolerance
44,591
def get_min_distance(self, mesh):
    """
    For each point in mesh compute the minimum distance to each
    surface element and return the smallest value.
    """
    per_surface = [surface.get_min_distance(mesh)
                   for surface in self.surfaces]
    return numpy.min(numpy.array(per_surface), axis=0)
For each point in mesh compute the minimum distance to each surface element and return the smallest value .
44,592
def get_closest_points(self, mesh):
    """
    For each point in mesh find the closest surface element, and
    return the corresponding closest points as a Mesh.
    """
    # per-surface minimum distances to every (flattened) mesh point
    dists = numpy.array([surf.get_min_distance(mesh).flatten()
                         for surf in self.surfaces])
    # boolean matrix: True where a surface attains the minimum
    # distance for a given point
    idx = dists == numpy.min(dists, axis=0)
    lons = numpy.empty_like(mesh.lons.flatten())
    lats = numpy.empty_like(mesh.lats.flatten())
    depths = None if mesh.depths is None else \
        numpy.empty_like(mesh.depths.flatten())
    for i, surf in enumerate(self.surfaces):
        if not idx[i, :].any():
            # this surface is closest to no point: skip it
            continue
        cps = surf.get_closest_points(mesh)
        lons[idx[i, :]] = cps.lons.flatten()[idx[i, :]]
        lats[idx[i, :]] = cps.lats.flatten()[idx[i, :]]
        if depths is not None:
            depths[idx[i, :]] = cps.depths.flatten()[idx[i, :]]
    # restore the original mesh shape
    lons = lons.reshape(mesh.lons.shape)
    lats = lats.reshape(mesh.lats.shape)
    if depths is not None:
        depths = depths.reshape(mesh.depths.shape)
    return Mesh(lons, lats, depths)
For each point in mesh find the closest surface element and return the corresponding closest point .
44,593
def get_bounding_box(self):
    """
    Compute the bounding box of each surface element, then return the
    bounding box of all those bounding boxes.
    """
    lons, lats = [], []
    for surface in self.surfaces:
        west, east, north, south = surface.get_bounding_box()
        lons += [west, east]
        lats += [north, south]
    return utils.get_spherical_bounding_box(lons, lats)
Compute bounding box for each surface element and then return the bounding box of all surface elements bounding boxes .
44,594
def _get_areas ( self ) : if self . areas is None : self . areas = [ ] for surf in self . surfaces : self . areas . append ( surf . get_area ( ) ) self . areas = numpy . array ( self . areas ) return self . areas
Return surface elements area values in a numpy array .
44,595
def _get_cartesian_edge_set(self):
    """
    For the GC2 calculations a set of cartesian representations of the
    fault edges are needed. Here a common cartesian framework is used
    for all edges, as opposed to defining a separate orthographic
    projection per edge.
    """
    # stack all edges to derive a single common projection
    edge_sets = numpy.vstack(self.edge_set)
    west, east, north, south = utils.get_spherical_bounding_box(
        edge_sets[:, 0], edge_sets[:, 1])
    self.proj = utils.OrthographicProjection(west, east, north, south)
    for edges in self.edge_set:
        # project each edge and cache its endpoints, full cartesian
        # trace and along-edge (cumulative) segment lengths
        px, py = self.proj(edges[:, 0], edges[:, 1])
        self.cartesian_endpoints.append(
            numpy.array([[px[0], py[0], edges[0, 2]],
                         [px[-1], py[-1], edges[-1, 2]]]))
        self.cartesian_edges.append(
            numpy.column_stack([px, py, edges[:, 2]]))
        lengths = numpy.sqrt((px[:-1] - px[1:]) ** 2. +
                             (py[:-1] - py[1:]) ** 2.)
        self.length_set.append(lengths)
        self.cum_length_set.append(
            numpy.hstack([0., numpy.cumsum(lengths)]))
    return edge_sets
For the GC2 calculations a set of cartesian representations of the fault edges are needed . In this present case we use a common cartesian framework for all edges as opposed to defining a separate orthographic projection per edge
44,596
def _get_gc2_coordinates_for_rupture(self, edge_sets):
    """
    Calculate the GC2 coordinates for the nodes of the upper edge of
    the fault and cache the total generalised-coordinate length.
    """
    rup_gc2t, rup_gc2u = self.get_generalised_coordinates(
        edge_sets[:, 0], edge_sets[:, 1])
    # the maximum U coordinate gives the rupture length in GC2 space
    self.gc_length = numpy.max(rup_gc2u)
Calculates the GC2 coordinates for the nodes of the upper edge of the fault
44,597
def _get_ut_i ( self , seg , sx , sy ) : p0x , p0y , p1x , p1y = seg [ 0 , 0 ] , seg [ 0 , 1 ] , seg [ 1 , 0 ] , seg [ 1 , 1 ] t_i_vec = [ p1y - p0y , - ( p1x - p0x ) , 0.0 ] t_i_hat = t_i_vec / numpy . linalg . norm ( t_i_vec ) u_i_vec = [ p1x - p0x , p1y - p0y , 0.0 ] u_i_hat = u_i_vec / numpy . linalg . norm ( u_i_vec ) rsite = numpy . column_stack ( [ sx - p0x , sy - p0y ] ) return numpy . sum ( u_i_hat [ : - 1 ] * rsite , axis = 1 ) , numpy . sum ( t_i_hat [ : - 1 ] * rsite , axis = 1 )
Returns the U and T coordinate for a specific trace segment
44,598
def get_rx_distance(self, mesh):
    """
    For each point determine the corresponding Rx distance (the GC2 T
    coordinate), using the GC2 configuration.
    """
    # recompute the generalised coordinates when no mesh is cached.
    # NOTE(review): the second condition recomputes when the cached
    # mesh EQUALS the new one and reuses the cache when it differs —
    # this looks inverted (one would expect `!=`); confirm against
    # the callers before changing.
    if not self.tmp_mesh or (self.tmp_mesh == mesh):
        self.gc2t, self.gc2u = self.get_generalised_coordinates(
            mesh.lons, mesh.lats)
        # cache a copy of the mesh for the next invocation
        self.tmp_mesh = deepcopy(mesh)
    return self.gc2t
For each point determine the corresponding rx distance using the GC2 configuration .
44,599
def get_ry0_distance(self, mesh):
    """
    For each point determine the corresponding Ry0 distance, using the
    GC2 configuration: zero within the rupture's U extent, and equal
    to the U overshoot beyond either end.
    """
    # NOTE(review): same caching condition as get_rx_distance — it
    # recomputes when the cached mesh equals the new one; confirm the
    # intended comparison (one would expect `!=`).
    if not self.tmp_mesh or (self.tmp_mesh == mesh):
        self.gc2t, self.gc2u = self.get_generalised_coordinates(
            mesh.lons, mesh.lats)
        self.tmp_mesh = deepcopy(mesh)
    ry0 = numpy.zeros_like(self.gc2u, dtype=float)
    # before the start of the rupture: |U|
    neg_gc2u = self.gc2u < 0.0
    ry0[neg_gc2u] = numpy.fabs(self.gc2u[neg_gc2u])
    # beyond the end of the rupture: U - rupture length
    pos_gc2u = self.gc2u >= self.gc_length
    ry0[pos_gc2u] = self.gc2u[pos_gc2u] - self.gc_length
    return ry0
For each point determine the corresponding Ry0 distance using the GC2 configuration .