idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
26,400
def proc(ctx, files):
    """Process calculated structures.

    Reads each calculation file with ASE and, depending on the parent
    command's 'action' parameter, either solves for and prints the
    elastic tensor ('cij') or fits and prints a Birch-Murnaghan EOS
    ('eos').

    NOTE(review): `verbose` is not defined in this function — presumably
    a module-level name injected by the CLI framework; confirm.
    """
    def calc_reader(fn, verb):
        # Optionally report progress on stderr while reading.
        if verb:
            echo('Reading: {:<60s}\r'.format(fn), nl=False, err=True)
        return ase.io.read(fn)
    action = ctx.parent.params['action']
    systems = [calc_reader(calc, verbose) for calc in files]
    if verbose:
        echo('', err=True)
    if action == 'cij':
        # First system is the reference structure; the rest are deformed.
        cij = elastic.get_elastic_tensor(systems[0], systems=systems[1:])
        # cij[1] carries the least-squares diagnostics; [3] holds the
        # singular values used to flag ill-determined components.
        msv = cij[1][3].max()
        eps = 1e-4
        if verbose:
            echo('Cij solution\n' + 30 * '-')
            echo(' Solution rank: {:2d}{}'.format(
                cij[1][2],
                ' (undetermined)' if cij[1][2] < len(cij[0]) else ''))
            if cij[1][2] == len(cij[0]):
                echo(' Square of residuals: {:7.2g}'.format(cij[1][1]))
            echo(' Relative singular values:')
            for sv in cij[1][3] / msv:
                # '*' marks near-singular (poorly determined) directions.
                echo('{:7.4f}{}'.format(sv, '* ' if (sv) < eps else ' '), nl=False)
            echo('\n\nElastic tensor (GPa):')
            for dsc in elastic.elastic.get_cij_order(systems[0]):
                echo('{: >7s} '.format(dsc), nl=False)
            echo('\n' + 30 * '-')
        # Values are printed unconditionally; '*' flags suspect components.
        for c, sv in zip(cij[0], cij[1][3] / msv):
            echo('{:7.2f}{}'.format(
                c / ase.units.GPa, '* ' if sv < eps else ' '), nl=False)
        echo()
    elif action == 'eos':
        eos = elastic.get_BM_EOS(systems[0], systems=systems[1:])
        # Convert B0 from ASE internal units to GPa for display.
        eos[1] /= ase.units.GPa
        if verbose:
            echo('# %7s (A^3) %7s (GPa) %7s' % ("V0", "B0", "B0'"))
        echo(' %7.2f %7.2f %7.2f' % tuple(eos))
Process calculated structures
597
3
26,401
def save_model(self, request, entry, form, change):
    """Fill the entry's content field from its rendered placeholder, then save."""
    rendered = ''
    try:
        context = RequestContext(request)
        rendered = render_placeholder(entry.content_placeholder, context) or ''
    except KeyError:
        # https://github.com/django-blog-zinnia/cmsplugin-zinnia/pull/61
        rendered = ''
    entry.content = rendered
    super(EntryPlaceholderAdmin, self).save_model(request, entry, form, change)
Fill the content field with the interpretation of the placeholder
105
10
26,402
def get_nodes(self, request):
    """Return the menu's nodes for entries.

    Builds a year/month/day archive hierarchy of NavigationNodes (hidden
    from the visible menu) and attaches every published entry under its
    day node.
    """
    nodes = []
    archives = []
    # Archive nodes are created hidden; visibility is toggled elsewhere
    # based on HIDE_ENTRY_MENU.
    attributes = {'hidden': HIDE_ENTRY_MENU}
    for entry in Entry.published.all():
        year = entry.creation_date.strftime('%Y')
        month = entry.creation_date.strftime('%m')
        # 'b' is Django's abbreviated month-name format specifier.
        month_text = format(entry.creation_date, 'b').capitalize()
        day = entry.creation_date.strftime('%d')
        key_archive_year = 'year-%s' % year
        key_archive_month = 'month-%s-%s' % (year, month)
        key_archive_day = 'day-%s-%s-%s' % (year, month, day)
        # Create each archive node only once.
        if key_archive_year not in archives:
            nodes.append(NavigationNode(
                year,
                reverse('zinnia:entry_archive_year', args=[year]),
                key_archive_year,
                attr=attributes))
            archives.append(key_archive_year)
        if key_archive_month not in archives:
            # Month node is parented to its year node.
            nodes.append(NavigationNode(
                month_text,
                reverse('zinnia:entry_archive_month', args=[year, month]),
                key_archive_month,
                key_archive_year,
                attr=attributes))
            archives.append(key_archive_month)
        if key_archive_day not in archives:
            # Day node is parented to its month node.
            nodes.append(NavigationNode(
                day,
                reverse('zinnia:entry_archive_day', args=[year, month, day]),
                key_archive_day,
                key_archive_month,
                attr=attributes))
            archives.append(key_archive_day)
        # The entry itself hangs off the day node, keyed by primary key.
        nodes.append(NavigationNode(
            entry.title,
            entry.get_absolute_url(),
            entry.pk,
            key_archive_day))
    return nodes
Return the menu's nodes for entries
416
6
26,403
def get_nodes(self, request):
    """Return the menu's nodes for categories: a root node plus one per category."""
    root = NavigationNode(
        _('Categories'), reverse('zinnia:category_list'), 'categories')
    nodes = [root]
    nodes.extend(
        NavigationNode(
            category.title, category.get_absolute_url(),
            category.pk, 'categories')
        for category in Category.objects.all())
    return nodes
Return the menu's nodes for categories
94
6
26,404
def get_nodes(self, request):
    """Return the menu's nodes for authors: a root node plus one per author."""
    root = NavigationNode(
        _('Authors'), reverse('zinnia:author_list'), 'authors')
    nodes = [root]
    nodes.extend(
        NavigationNode(
            author.__str__(), author.get_absolute_url(),
            author.pk, 'authors')
        for author in Author.published.all())
    return nodes
Return the menu's nodes for authors
96
6
26,405
def get_nodes(self, request):
    """Return the menu's nodes for tags: a root node plus one per published tag."""
    root = NavigationNode(_('Tags'), reverse('zinnia:tag_list'), 'tags')
    nodes = [root]
    nodes.extend(
        NavigationNode(
            tag.name,
            reverse('zinnia:tag_detail', args=[tag.name]),
            tag.pk, 'tags')
        for tag in tags_published())
    return nodes
Return the menu's nodes for tags
100
6
26,406
def modify ( self , request , nodes , namespace , root_id , post_cut , breadcrumb ) : if breadcrumb : return nodes for node in nodes : if node . attr . get ( 'hidden' ) : node . visible = False return nodes
Modify nodes of a menu
56
6
26,407
def acquire_context(self):
    """Inspect the call stack to acquire the context used to render the
    placeholder.

    Walks the callers' frames looking for a function argument named
    'context' and returns its value; along the way remembers the first
    'request' argument seen so a fresh RequestContext can be built as a
    fallback.
    """
    frame = None
    request = None
    try:
        # Skip our own frame ([1:]) and walk outwards.
        for f in inspect.stack()[1:]:
            frame = f[0]
            args, varargs, keywords, alocals = inspect.getargvalues(frame)
            # Only declared arguments are considered, read via locals.
            if not request and 'request' in args:
                request = alocals['request']
            if 'context' in args:
                return alocals['context']
    finally:
        # Explicitly drop the frame reference to break the reference
        # cycle frame objects create (see the inspect module docs).
        del frame
    return RequestContext(request)
Inspect the stack to acquire the current context used to render the placeholder. I'm really sorry for this, but if you have a better way you are welcome!
100
32
26,408
def get_boto_ses_connection():
    """Shortcut for instantiating and returning a boto SESConnection object.

    Credentials and region are read from Django settings, preferring the
    CUCUMBER_SES_* names and falling back to the generic AWS_* ones.

    :returns: a boto SES connection (region-specific when a region name
        is configured, the default endpoint otherwise).
    """
    access_key_id = getattr(
        settings, 'CUCUMBER_SES_ACCESS_KEY_ID',
        getattr(settings, 'AWS_ACCESS_KEY_ID', None))
    access_key = getattr(
        settings, 'CUCUMBER_SES_SECRET_ACCESS_KEY',
        getattr(settings, 'AWS_SECRET_ACCESS_KEY', None))
    region_name = getattr(
        settings, 'CUCUMBER_SES_REGION_NAME',
        getattr(settings, 'AWS_SES_REGION_NAME', None))
    # PEP 8: identity comparison with None, not `!= None`.
    if region_name is not None:
        return boto.ses.connect_to_region(
            region_name,
            aws_access_key_id=access_key_id,
            aws_secret_access_key=access_key,
        )
    return boto.connect_ses(
        aws_access_key_id=access_key_id,
        aws_secret_access_key=access_key,
    )
Shortcut for instantiating and returning a boto SESConnection object .
245
15
26,409
def run(self, from_email, recipients, message):
    """Connect to Amazon SES via boto and fire off the message.

    :param from_email: envelope sender address.
    :param recipients: destination addresses.
    :param message: raw message text (headers included), DKIM-signed here.
    :returns: False for permanently undeliverable recipients, True on
        success; unexpected errors trigger a task retry.
    """
    self._open_ses_conn()
    try:
        # We use the send_raw_email func here because the Django
        # EmailMessage object we got these values from constructs all of
        # the headers and such.
        self.connection.send_raw_email(
            source=from_email,
            destinations=recipients,
            raw_message=dkim_sign(message),
        )
    # `except X as exc` replaces the Python-2-only `except X, exc`
    # syntax, which is a SyntaxError under Python 3.
    except SESAddressBlacklistedError as exc:
        # Blacklisted users are those which delivery failed for in the
        # last 24 hours. They'll eventually be automatically removed from
        # the blacklist, but for now, this address is marked as
        # undeliverable to.
        logger.warning(
            'Attempted to email a blacklisted user: %s' % recipients,
            exc_info=exc, extra={'trace': True})
        return False
    except SESDomainEndsWithDotError as exc:
        # Domains ending in a dot are simply invalid.
        logger.warning(
            'Invalid recipient, ending in dot: %s' % recipients,
            exc_info=exc, extra={'trace': True})
        return False
    except SESLocalAddressCharacterError as exc:
        # Invalid character, usually in the sender "name".
        logger.warning(
            'Local address contains control or whitespace: %s' % recipients,
            exc_info=exc, extra={'trace': True})
        return False
    except SESIllegalAddressError as exc:
        # A clearly mal-formed address.
        logger.warning(
            'Illegal address: %s' % recipients,
            exc_info=exc, extra={'trace': True})
        return False
    except Exception as exc:
        # Something else happened that we haven't explicitly forbade
        # retry attempts for.
        # noinspection PyUnresolvedReferences
        logger.error(
            'Something went wrong; retrying: %s' % recipients,
            exc_info=exc, extra={'trace': True})
        self.retry(exc=exc)
    else:
        logger.info('An email has been successfully sent: %s' % recipients)
        # We shouldn't ever block long enough to see this, but here it is
        # just in case (for debugging?).
        return True
This does the dirty work . Connects to Amazon SES via boto and fires off the message .
473
21
26,410
def handle(self, *args, **options):
    """Render the report by piecing together the quota and daily-stats printers."""
    # One SES connection is shared by every query below.
    connection = get_boto_ses_connection()
    self._print_quota(connection)
    self._print_daily_stats(connection)
Renders the output by piecing together a few methods that do the dirty work .
63
17
26,411
def _print_quota ( self , conn ) : quota = conn . get_send_quota ( ) quota = quota [ 'GetSendQuotaResponse' ] [ 'GetSendQuotaResult' ] print "--- SES Quota ---" print " 24 Hour Quota: %s" % quota [ 'Max24HourSend' ] print " Sent (Last 24 hours): %s" % quota [ 'SentLast24Hours' ] print " Max sending rate: %s/sec" % quota [ 'MaxSendRate' ]
Prints some basic quota statistics .
115
7
26,412
def sample(self, bqm, init_solution=None, tenure=None, scale_factor=1,
           timeout=20, num_reads=1):
    """Run a tabu search on a given binary quadratic model.

    :param bqm: dimod binary quadratic model to sample from.
    :param init_solution: optional dimod.SampleSet whose first record
        seeds the search (its dimension must match the BQM).
    :param tenure: tabu tenure; defaults to min(20, num_vars // 4) and
        must lie in [0, num_vars - 1].
    :param scale_factor: scaling applied by the underlying TabuSearch.
    :param timeout: per-read time budget passed to TabuSearch.
    :param num_reads: number of independent reads to collect.
    :returns: dimod.SampleSet in the BQM's original vartype.
    """
    # input checking and defaults calculation
    # TODO: one "read" per sample in init_solution sampleset
    if init_solution is not None:
        if not isinstance(init_solution, dimod.SampleSet):
            raise TypeError("'init_solution' should be a 'dimod.SampleSet' instance")
        if len(init_solution.record) < 1:
            raise ValueError("'init_solution' should contain at least one sample")
        if len(init_solution.record[0].sample) != len(bqm):
            raise ValueError("'init_solution' sample dimension different from BQM")
        # Work internally in BINARY vartype; convert the seed accordingly.
        init_sample = self._bqm_sample_to_tabu_sample(
            init_solution.change_vartype(
                dimod.BINARY, inplace=False).record[0].sample,
            bqm.binary)
    else:
        init_sample = None
    # Empty BQM: nothing to search, return an empty sampleset.
    if not bqm:
        return dimod.SampleSet.from_samples([], energy=0, vartype=bqm.vartype)
    if tenure is None:
        tenure = max(min(20, len(bqm) // 4), 0)
    if not isinstance(tenure, int):
        raise TypeError("'tenure' should be an integer in range [0, num_vars - 1]")
    if not 0 <= tenure < len(bqm):
        raise ValueError("'tenure' should be an integer in range [0, num_vars - 1]")
    if not isinstance(num_reads, int):
        raise TypeError("'num_reads' should be a positive integer")
    if num_reads < 1:
        raise ValueError("'num_reads' should be a positive integer")
    qubo = self._bqm_to_tabu_qubo(bqm.binary)
    # run Tabu search
    samples = []
    energies = []
    for _ in range(num_reads):
        # NOTE(review): when a seed was supplied (or once a random seed
        # is drawn on the first read), the SAME init_sample is reused
        # for every subsequent read — confirm this is intended.
        if init_sample is None:
            init_sample = self._bqm_sample_to_tabu_sample(
                self._random_sample(bqm.binary), bqm.binary)
        r = TabuSearch(qubo, init_sample, tenure, scale_factor, timeout)
        sample = self._tabu_sample_to_bqm_sample(
            list(r.bestSolution()), bqm.binary)
        energy = bqm.binary.energy(sample)
        samples.append(sample)
        energies.append(energy)
    # Assemble in BINARY, then convert back to the caller's vartype.
    response = dimod.SampleSet.from_samples(
        samples, energy=energies, vartype=dimod.BINARY)
    response.change_vartype(bqm.vartype, inplace=True)
    return response
Run a tabu search on a given binary quadratic model .
658
14
26,413
def upload_from_url(self, url):
    """Upload a GIF from a URL by asking Gfycat to fetch it."""
    self.check_token()
    r = requests.get(FETCH_URL_ENDPOINT, params={'fetchUrl': url})
    if r.status_code != 200:
        raise GfycatClientError('Error fetching the URL', r.status_code)
    payload = r.json()
    if 'error' in payload:
        raise GfycatClientError(payload['error'])
    return payload
Upload a GIF from a URL .
111
7
26,414
def upload_from_file(self, filename):
    """Upload a local GIF file to Gfycat via the S3 upload form.

    :param filename: path of the local GIF to upload.
    :returns: upload info dict once conversion has finished.
    :raises GfycatClientError: on upload failure or a reported error.
    """
    key = str(uuid.uuid4())[:8]
    form = [('key', key), ('acl', ACL),
            ('AWSAccessKeyId', AWS_ACCESS_KEY_ID),
            ('success_action_status', SUCCESS_ACTION_STATUS),
            ('signature', SIGNATURE), ('Content-Type', CONTENT_TYPE),
            ('policy', POLICY)]
    data = dict(form)
    # Context manager closes the handle even if the POST raises; the
    # previous code opened the file and never closed it (handle leak).
    with open(filename, 'rb') as gif_file:
        r = requests.post(
            FILE_UPLOAD_ENDPOINT, data=data, files={'file': gif_file})
    if r.status_code != 200:
        raise GfycatClientError('Error uploading the GIF', r.status_code)
    info = self.uploaded_file_info(key)
    # The status endpoint reports transient timeouts; poll until settled.
    while 'timeout' in info.get('error', '').lower():
        time.sleep(2)
        info = self.uploaded_file_info(key)
    if 'error' in info:
        raise GfycatClientError(info['error'])
    return info
Upload a local file to Gfycat
268
8
26,415
def uploaded_file_info(self, key):
    """Get information about an uploaded GIF identified by *key*."""
    response = requests.get(FILE_UPLOAD_STATUS_ENDPOINT + key)
    if response.status_code == 200:
        return response.json()
    raise GfycatClientError('Unable to check the status', response.status_code)
Get information about an uploaded GIF .
71
7
26,416
def query_gfy(self, gfyname):
    """Query a gfy name for URLs and more information."""
    self.check_token()
    r = requests.get(QUERY_ENDPOINT + gfyname, headers=self.headers)
    payload = r.json()
    # An explicit error payload takes precedence over the status code.
    if ERROR_KEY in payload:
        raise GfycatClientError(payload[ERROR_KEY], r.status_code)
    if r.status_code != 200:
        raise GfycatClientError('Bad response from Gfycat', r.status_code)
    return payload
Query a gfy name for URLs and more information .
121
11
26,417
def check_link(self, link):
    """Check whether *link* has already been converted by Gfycat."""
    response = requests.get(CHECK_LINK_ENDPOINT + link)
    if response.status_code == 200:
        return response.json()
    raise GfycatClientError('Unable to check the link', response.status_code)
Check if a link has been already converted .
66
9
26,418
def get_token(self):
    """Fetch an OAuth token from Gfycat and prime the request headers."""
    credentials = {
        'grant_type': 'client_credentials',
        'client_id': self.client_id,
        'client_secret': self.client_secret,
    }
    r = requests.post(
        OAUTH_ENDPOINT,
        data=json.dumps(credentials),
        headers={'content-type': 'application/json'})
    response = r.json()
    # An explicit error payload takes precedence over the status code.
    if ERROR_KEY in response:
        raise GfycatClientError(response[ERROR_KEY], r.status_code)
    if r.status_code != 200:
        raise GfycatClientError('Error fetching the OAUTH URL', r.status_code)
    self.token_type = response['token_type']
    self.access_token = response['access_token']
    self.expires_in = response['expires_in']
    # Treat the token as expired 5s early to avoid racing the server.
    self.expires_at = time.time() + self.expires_in - 5
    self.headers = {
        'content-type': 'application/json',
        'Authorization': self.token_type + ' ' + self.access_token,
    }
Gets the authorization token
264
5
26,419
def dict(self):
    """Dictionary representation of the metadata."""
    props = {name: prop.dict for name, prop in self.properties.items()}
    return {'properties': props}
dictionary representation of the metadata .
48
7
26,420
def json(self):
    """JSON representation of the metadata, always ending with a newline."""
    dumped = json.dumps(
        self.dict,
        indent=2,
        sort_keys=True,
        separators=(',', ': '),
        cls=MetadataEncoder)
    return dumped if dumped.endswith('\n') else dumped + '\n'
json representation of the metadata .
80
6
26,421
def _read_json_file ( self ) : with open ( self . json_uri ) as metadata_file : try : metadata = json . load ( metadata_file ) return metadata except ValueError : message = tr ( 'the file %s does not appear to be valid JSON' ) message = message % self . json_uri raise MetadataReadError ( message )
read metadata from a json file .
78
7
26,422
def _read_json_db ( self ) : try : metadata_str = self . db_io . read_metadata_from_uri ( self . layer_uri , 'json' ) except HashNotFoundError : return { } try : metadata = json . loads ( metadata_str ) return metadata except ValueError : message = tr ( 'the file DB entry for %s does not appear to be ' 'valid JSON' ) message %= self . layer_uri raise MetadataReadError ( message )
read metadata from a json string stored in a DB .
107
11
26,423
def _read_xml_file ( self ) : # this raises a IOError if the file doesn't exist root = ElementTree . parse ( self . xml_uri ) root . getroot ( ) return root
read metadata from an xml file .
44
7
26,424
def _read_xml_db ( self ) : try : metadata_str = self . db_io . read_metadata_from_uri ( self . layer_uri , 'xml' ) root = ElementTree . fromstring ( metadata_str ) return root except HashNotFoundError : return None
read metadata from an xml string stored in a DB .
63
11
26,425
def set ( self , name , value , xml_path ) : xml_type = xml_path . split ( '/' ) [ - 1 ] # check if the desired type is supported try : property_class = TYPE_CONVERSIONS [ xml_type ] except KeyError : raise KeyError ( 'The xml type %s is not supported yet' % xml_type ) try : metadata_property = property_class ( name , value , xml_path ) self . _properties [ name ] = metadata_property self . set_last_update_to_now ( ) except TypeError : if self . reading_ancillary_files : # we are parsing files so we want to accept as much as # possible without raising exceptions pass else : raise
Create a new metadata property .
157
6
26,426
def write_to_file(self, destination_path):
    """Write the metadata (json or xml) to *destination_path*.

    The serialization format is inferred from the file extension.

    :returns: the serialized metadata string that was written.
    """
    extension = os.path.splitext(destination_path)[1]
    serialized = self.get_writable_metadata(extension[1:])
    with open(destination_path, 'w', encoding='utf-8') as handle:
        handle.write(serialized)
    return serialized
Writes the metadata json or xml to a file .
82
11
26,427
def get_writable_metadata(self, file_format):
    """Return the metadata serialized as 'json' or 'xml'.

    :raises TypeError: for any other format.
    """
    if file_format == 'json':
        return self.json
    if file_format == 'xml':
        return self.xml
    raise TypeError(
        'The requested file type (%s) is not yet supported' % file_format)
Convert the metadata to a writable form .
70
10
26,428
def read_from_ancillary_file(self, custom_xml=None):
    """Try to read metadata from existing ancillary files or the DB.

    When *custom_xml* is given and self.xml_uri exists, the xml is read
    directly; otherwise json is tried first with xml as a fallback.
    """
    use_xml_directly = bool(custom_xml) and os.path.isfile(self.xml_uri)
    if use_xml_directly:
        self.read_xml()
    elif not self.read_json():
        self.read_xml()
try to read xml and json from existing files or db .
64
12
26,429
def update_from_dict(self, keywords):
    """Set metadata properties from a {name: value} mapping."""
    for name, value in keywords.items():
        setattr(self, name, value)
Set properties of metadata using key and value from keywords
35
10
26,430
def accept(self):
    """Process the layer for multi buffering and generate a new layer.

    Runs multi_buffering on the selected input layer with the
    user-defined radii, saves the result to the chosen (or a temporary)
    location, adds it to the map canvas, and optionally launches the
    keyword wizard.
    """
    # set parameter from dialog
    input_layer = self.layer.currentLayer()
    output_path = self.output_form.text()
    radius = self.get_classification()
    # monkey patch keywords so layer works on multi buffering function
    input_layer.keywords = {'inasafe_fields': {}}
    # run multi buffering
    self.output_layer = multi_buffering(input_layer, radius)
    # save output layer to data store and check whether user
    # provide the output path.
    if output_path:
        self.output_directory, self.output_filename = (
            os.path.split(output_path))
        self.output_filename, self.output_extension = (
            os.path.splitext(self.output_filename))
    # if user do not provide the output path, create a temporary file.
    else:
        self.output_directory = temp_dir(sub_dir='work')
        self.output_filename = (
            unique_filename(
                prefix='hazard_layer',
                suffix='.geojson',
                dir=self.output_directory))
        # Keep only the basename, then split off the extension.
        self.output_filename = os.path.split(self.output_filename)[1]
        self.output_filename, self.output_extension = (
            os.path.splitext(self.output_filename))
    self.data_store = Folder(self.output_directory)
    # Pick the vector format matching the chosen extension.
    if self.output_extension == '.shp':
        self.data_store.default_vector_format = 'shp'
    elif self.output_extension == '.geojson':
        self.data_store.default_vector_format = 'geojson'
    self.data_store.add_layer(self.output_layer, self.output_filename)
    # add output layer to map canvas
    self.output_layer = self.data_store.layer(self.output_filename)
    QgsProject.instance().addMapLayers([self.output_layer])
    self.iface.setActiveLayer(self.output_layer)
    self.iface.zoomToActiveLayer()
    self.done(QtWidgets.QDialog.Accepted)
    if self.keyword_wizard_checkbox.isChecked():
        self.launch_keyword_wizard()
Process the layer for multi buffering and generate a new layer .
507
13
26,431
def on_directory_button_tool_clicked(self):
    """Autoconnect slot activated when the directory button is clicked.

    Opens a save-file dialog pre-filled with a default name derived from
    the input layer's source path, then writes the chosen path into the
    output form.
    """
    # noinspection PyCallByClass,PyTypeChecker
    # set up parameter from dialog
    input_path = self.layer.currentLayer().source()
    input_directory, self.output_filename = os.path.split(input_path)
    file_extension = os.path.splitext(self.output_filename)[1]
    self.output_filename = os.path.splitext(self.output_filename)[0]
    # show Qt file directory dialog
    output_path, __ = QtWidgets.QFileDialog.getSaveFileName(
        self,
        self.tr('Output file'),
        '%s_multi_buffer%s' % (
            os.path.join(input_directory, self.output_filename),
            file_extension),
        'GeoJSON (*.geojson);;Shapefile (*.shp)')
    # set selected path to the dialog
    self.output_form.setText(output_path)
Autoconnect slot activated when directory button is clicked .
227
11
26,432
def get_output_from_input(self):
    """Populate the output form with a default path derived from the input layer."""
    source = self.layer.currentLayer().source()
    base, extension = os.path.splitext(source)
    self.output_form.setText(base + '_multi_buffer' + extension)
Populate output form with default output path based on input layer .
85
13
26,433
def populate_hazard_classification(self):
    """Add the current radius/name pair as a hazard class and refresh
    the hazard class list widget."""
    new_class = {
        'value': self.radius_form.value(),
        'name': self.class_form.text()}
    self.classification.append(new_class)
    # Keep the classes ordered by radius.
    self.classification = sorted(
        self.classification, key=itemgetter('value'))
    # Rebuild the list widget from scratch to reflect the new order.
    self.hazard_class_form.clear()
    for item in self.classification:
        new_item = '{value} - {name}'.format(
            value=item['value'], name=item['name'])
        self.hazard_class_form.addItem(new_item)
    # Reset the input widgets for the next class entry.
    self.radius_form.setValue(0)
    self.class_form.clear()
    self.ok_button_status()
Populate hazard classification on hazard class form .
171
9
26,434
def remove_selected_classification(self):
    """Remove the selected item(s) from the hazard class form.

    NOTE(review): only the *current* item's index is removed from
    self.classification even though every selected item is removed from
    the widget — confirm multi-selection is not allowed here.
    """
    removed_classes = self.hazard_class_form.selectedItems()
    current_item = self.hazard_class_form.currentItem()
    removed_index = self.hazard_class_form.indexFromItem(current_item)
    del self.classification[removed_index.row()]
    for item in removed_classes:
        self.hazard_class_form.takeItem(self.hazard_class_form.row(item))
Remove selected item on hazard class form .
107
8
26,435
def get_classification(self):
    """Return the user-defined hazard classes as an OrderedDict keyed by
    radius, in ascending radius order."""
    by_value = {entry['value']: entry['name'] for entry in self.classification}
    return OrderedDict(sorted(by_value.items()))
Get all hazard class created by user .
69
8
26,436
def directory_button_status(self):
    """Enable the directory button only when an input layer is selected."""
    has_layer = bool(self.layer.currentLayer())
    self.directory_button.setEnabled(has_layer)
Function to enable or disable directory button .
44
8
26,437
def add_class_button_status(self):
    """Enable the add-class button when a class name is set and the
    radius is non-negative."""
    enabled = bool(self.class_form.text()) and self.radius_form.value() >= 0
    self.add_class_button.setEnabled(enabled)
Function to enable or disable add class button .
63
9
26,438
def ok_button_status(self):
    """Enable the OK button only when the dialog inputs are complete."""
    ok_button = self.button_box.button(QtWidgets.QDialogButtonBox.Ok)
    current = self.layer.currentLayer()
    # NOTE(review): `len(...) >= 0` is always true, so the output path is
    # effectively not validated here — preserved as-is.
    enabled = bool(
        current
        and self.hazard_class_form.count() > 0
        and current.name()
        and len(self.output_form.text()) >= 0)
    ok_button.setEnabled(enabled)
Function to enable or disable OK button .
147
8
26,439
def help_toggled(self, flag):
    """Show or hide the help tab in the stacked widget, updating the
    button label to match."""
    if flag:
        label, action = self.tr('Hide Help'), self.show_help
    else:
        label, action = self.tr('Show Help'), self.hide_help
    self.help_button.setText(label)
    action()
Show or hide the help tab in the stacked widget .
69
11
26,440
def convert_mmi_data(
        grid_xml_path, title, source, output_path=None, algorithm=None,
        algorithm_filename_flag=True, smoothing_method=NONE_SMOOTHING,
        smooth_sigma=0.9, extra_keywords=None):
    """Convenience function to convert a single grid.xml file to a raster.

    :param grid_xml_path: path to the shake grid.xml file.
    :param title: title of the converted layer.
    :param source: source attribution for the event.
    :param output_path: optional output path; split into directory and
        (extension-less) basename for the converter.
    :param algorithm: interpolation algorithm passed to mmi_to_raster.
    :param algorithm_filename_flag: forwarded to ShakeGrid.
    :param smoothing_method: smoothing method, default NONE_SMOOTHING.
    :param smooth_sigma: sigma used when smoothing is enabled.
    :param extra_keywords: extra keywords forwarded to ShakeGrid.
    :returns: result of ShakeGrid.mmi_to_raster (the raster path).
    """
    LOGGER.debug(grid_xml_path)
    LOGGER.debug(output_path)
    if output_path is not None:
        output_dir, output_basename = os.path.split(output_path)
        output_basename, _ = os.path.splitext(output_basename)
        LOGGER.debug(
            'output_dir : ' + output_dir
            + 'output_basename : ' + output_basename)
    else:
        # No output path: both stay None so the converter falls back to
        # its own defaults.
        output_dir = output_path
        output_basename = None
    converter = ShakeGrid(
        title, source, grid_xml_path,
        output_dir=output_dir,
        output_basename=output_basename,
        algorithm_filename_flag=algorithm_filename_flag,
        smoothing_method=smoothing_method,
        smooth_sigma=smooth_sigma,
        extra_keywords=extra_keywords)
    return converter.mmi_to_raster(force_flag=True, algorithm=algorithm)
Convenience function to convert a single file .
273
10
26,441
def extract_date_time(self, the_time_stamp):
    """Extract the parts of a date given a timestamp string.

    Assumes an ISO-like layout where characters 0-9 hold 'YYYY-MM-DD'
    and characters 11-18 hold 'HH:MM:SS'. Sets the year/month/day/
    hour/minute/second attributes and a timezone-localized self.time.
    """
    date_tokens = the_time_stamp[0:10].split('-')
    self.year = int(date_tokens[0])
    self.month = int(date_tokens[1])
    self.day = int(date_tokens[2])
    time_tokens = the_time_stamp[11:19].split(':')
    self.hour = int(time_tokens[0])
    self.minute = int(time_tokens[1])
    self.second = int(time_tokens[2])
    # right now only handles Indonesian Timezones
    tz_dict = {
        'WIB': 'Asia/Jakarta',
        'WITA': 'Asia/Makassar',
        'WIT': 'Asia/Jayapura'}
    # Map the local abbreviation to an IANA name when known.
    if self.time_zone in tz_dict:
        self.time_zone = tz_dict.get(self.time_zone, self.time_zone)
    # noinspection PyBroadException
    try:
        if not self.time_zone:
            # default to utc if empty
            tzinfo = pytz.utc
        else:
            tzinfo = timezone(self.time_zone)
    except BaseException:
        # Unknown timezone names fall back to UTC rather than failing.
        tzinfo = pytz.utc
    self.time = datetime(
        self.year, self.month, self.day, self.hour, self.minute, self.second)
    # For now realtime always uses Western Indonesia Time
    self.time = tzinfo.localize(self.time)
Extract the parts of a date given a timestamp as per below example .
363
15
26,442
def grid_file_path(self):
    """Return grid_xml_path after validating it points at an existing file.

    :raises GridXmlFileNotFoundError: when the path is not a file.
    """
    if not os.path.isfile(self.grid_xml_path):
        raise GridXmlFileNotFoundError
    return self.grid_xml_path
Validate that grid file path points to a file .
45
11
26,443
def mmi_to_delimited_text(self):
    """Return the mmi data as 'lon,lat,mmi' delimited text (one row per point)."""
    lines = ['lon,lat,mmi']
    lines.extend(
        '%s,%s,%s' % (row[0], row[1], row[2]) for row in self.mmi_data)
    return '\n'.join(lines) + '\n'
Return the mmi data as a delimited test string .
79
12
26,444
def mmi_to_delimited_file(self, force_flag=True):
    """Save mmi_data to a delimited text file suitable for gdal_grid.

    :param force_flag: when False an existing csv is reused.
    :returns: path to the csv file.
    """
    LOGGER.debug('mmi_to_delimited_text requested.')
    csv_path = os.path.join(self.output_dir, 'mmi.csv')
    # short circuit if the csv is already created.
    if os.path.exists(csv_path) and force_flag is not True:
        return csv_path
    # Context managers guarantee the handles are closed even when a
    # write raises (the previous code leaked them in that case).
    with open(csv_path, 'w') as csv_file:
        csv_file.write(self.mmi_to_delimited_text())
    # Also write the .csvt which contains metadata about field types
    # NOTE(review): the csv is always named 'mmi.csv' but the csvt uses
    # output_basename, so the two may not pair up — preserved as-is.
    csvt_path = os.path.join(
        self.output_dir, self.output_basename + '.csvt')
    with open(csvt_path, 'w') as csvt_file:
        csvt_file.write('"Real","Real","Real"')
    return csv_path
Save mmi_data to delimited text file suitable for gdal_grid .
238
17
26,445
def mmi_to_vrt(self, force_flag=True):
    """Save the mmi_data to an OGR VRT text file.

    :param force_flag: when False an existing vrt is reused.
    :returns: path to the vrt file.
    """
    # Ensure the delimited mmi file exists
    LOGGER.debug('mmi_to_vrt requested.')
    vrt_path = os.path.join(
        self.output_dir, self.output_basename + '.vrt')
    # short circuit if the vrt is already created.
    if os.path.exists(vrt_path) and force_flag is not True:
        return vrt_path
    # NOTE(review): the csv is always regenerated (hard-coded True) even
    # when force_flag is False — confirm this is intended.
    csv_path = self.mmi_to_delimited_file(True)
    vrt_string = (
        '<OGRVRTDataSource>'
        ' <OGRVRTLayer name="mmi">'
        ' <SrcDataSource>%s</SrcDataSource>'
        ' <GeometryType>wkbPoint</GeometryType>'
        ' <GeometryField encoding="PointFromColumns"'
        ' x="lon" y="lat" z="mmi"/>'
        ' </OGRVRTLayer>'
        '</OGRVRTDataSource>' % csv_path)
    with codecs.open(vrt_path, 'w', encoding='utf-8') as f:
        f.write(vrt_string)
    return vrt_path
Save the mmi_data to an ogr vrt text file .
276
15
26,446
def _run_command(self, command):
    """Run a shell command and raise any error as needed.

    :param command: shell command line to execute.
    :raises Exception: wrapping any execution failure, except the
        non-critical Mac OS X 'Errno 4' case which is ignored.
    """
    from subprocess import check_call
    try:
        # call() only returns the exit code and never raises
        # CalledProcessError, so the except branch below was dead code
        # and command failures were silently ignored; check_call raises
        # on a non-zero exit status as this handler expects.
        check_call(command, shell=True)
    except CalledProcessError as e:
        LOGGER.exception('Running command failed %s' % command)
        message = (
            'Error while executing the following shell '
            'command: %s\nError message: %s' % (command, str(e)))
        # shameless hack - see https://github.com/AIFDR/inasafe/issues/141
        if sys.platform == 'darwin':  # Mac OS X
            if 'Errno 4' in str(e):
                # continue as the error seems to be non critical
                pass
            else:
                raise Exception(message)
        else:
            raise Exception(message)
Run a command and raise any error as needed .
158
10
26,447
def mmi_to_shapefile(self, force_flag=False):
    """Convert grid.xml's mmi column to a vector shp file using ogr2ogr.

    :param force_flag: when False an existing shapefile is reused.
    :returns: path to the points shapefile.
    :raises CallGDALError: if ogr2ogr cannot be found on the system.
    """
    LOGGER.debug('mmi_to_shapefile requested.')
    shp_path = os.path.join(
        self.output_dir, '%s-points.shp' % self.output_basename)
    # Short circuit if the tif is already created.
    if os.path.exists(shp_path) and force_flag is not True:
        return shp_path
    # Ensure the vrt mmi file exists (it will generate csv too if needed)
    vrt_path = self.mmi_to_vrt(force_flag)
    # now generate the tif using default interpolation options
    binary_list = which('ogr2ogr')
    LOGGER.debug('Path for ogr2ogr: %s' % binary_list)
    if len(binary_list) < 1:
        raise CallGDALError(
            tr('ogr2ogr could not be found on your computer'))
    # Use the first matching gdalwarp found
    binary = binary_list[0]
    command = (
        ('%(ogr2ogr)s -overwrite -select mmi -a_srs EPSG:4326 '
         '%(shp)s %(vrt)s mmi') % {
            'ogr2ogr': binary,
            'shp': shp_path,
            'vrt': vrt_path})
    LOGGER.info('Created this ogr command:\n%s' % command)
    # Now run ogr2ogr ...
    # noinspection PyProtectedMember
    self._run_command(command)
    # Lastly copy over the standard qml (QGIS Style file) for the mmi.tif
    qml_path = os.path.join(
        self.output_dir, '%s-points.qml' % self.output_basename)
    source_qml = resources_path('converter_data', 'mmi-shape.qml')
    shutil.copyfile(source_qml, qml_path)
    return shp_path
Convert grid.xml's mmi column to a vector shp file using ogr2ogr.
471
22
26,448
def mmi_to_ascii(self, force_flag=False):
    """Convert the grid.xml mmi column to an ESRI ASCII raster file.

    :param force_flag: when False an existing .asc file is reused.
    :returns: path to the ascii raster file.
    """
    ascii_path = os.path.join(
        self.output_dir, '%s.asc' % self.output_basename)
    # Short circuit if the ascii file is already created.
    if os.path.exists(ascii_path) and force_flag is not True:
        return ascii_path
    # Cell size from the x extent only; assumes a regular grid with
    # square cells (the y extent is not used) — TODO confirm.
    cell_size = (self.x_maximum - self.x_minimum) / (self.rows - 1)
    cell_values = np.reshape(
        [v[2] for v in self.mmi_data], (self.rows, self.columns))
    header = (
        'ncols %d\n' % self.columns,
        'nrows %d\n' % self.rows,
        'xllcorner %.3f\n' % self.x_minimum,
        'yllcorner %.3f\n' % self.y_minimum,
        'cellsize %.3f\n' % cell_size,
        'nodata_value -9999\n',
    )
    # Build each row with join instead of repeated string concatenation.
    rows_text = ''.join(
        ''.join(
            '%.3f ' % cell_values[i][j] for j in range(self.columns)
        ) + '\n'
        for i in range(self.rows))
    # Context manager closes the file even when a write raises
    # (the previous code leaked the handle in that case).
    with open(ascii_path, 'w') as asc_file:
        asc_file.writelines(header)
        asc_file.write(rows_text)
    return ascii_path
Convert grid . xml mmi column to a ascii raster file .
367
17
26,449
def set_widgets(self):
    """Set widgets on the Summary tab.

    Builds a small HTML table summarizing the hazard, exposure and
    aggregation layer names and puts it in the summary label.
    """
    if self.parent.aggregation_layer:
        aggr = self.parent.aggregation_layer.name()
    else:
        aggr = self.tr('no aggregation')
    html = self.tr(
        'Please ensure the following information '
        'is correct and press Run.')
    # TODO: update this to use InaSAFE message API rather...
    html += '<br/><table cellspacing="4">'
    html += (
        '<tr>'
        ' <td><b>%s</b></td><td></td><td>%s</td>'
        '</tr><tr>'
        ' <td><b>%s</b></td><td></td><td>%s</td>'
        '</tr><tr>'
        ' <td><b>%s</b></td><td></td><td>%s</td>'
        '</tr><tr>'
        ' <td colspan="3"></td>'
        '</tr>' % (
            # &nbsp; keeps each label on a single line in the table.
            self.tr('hazard layer').capitalize().replace(' ', '&nbsp;'),
            self.parent.hazard_layer.name(),
            self.tr('exposure layer').capitalize().replace(' ', '&nbsp;'),
            self.parent.exposure_layer.name(),
            self.tr('aggregation layer').capitalize().replace(' ', '&nbsp;'),
            aggr))
    self.lblSummary.setText(html)
Set widgets on the Summary tab .
342
7
26,450
def inasafe_analysis_summary_field_value ( field , feature , parent ) : _ = feature , parent # NOQA project_context_scope = QgsExpressionContextUtils . projectScope ( QgsProject . instance ( ) ) registry = QgsProject . instance ( ) key = provenance_layer_analysis_impacted_id [ 'provenance_key' ] if not project_context_scope . hasVariable ( key ) : return None layer = registry . mapLayer ( project_context_scope . variable ( key ) ) if not layer : return None index = layer . fields ( ) . lookupField ( field ) if index < 0 : return None feature = next ( layer . getFeatures ( ) ) return feature [ index ]
Retrieve a value from a field in the analysis summary layer .
160
13
26,451
def inasafe_sub_analysis_summary_field_value ( exposure_key , field , feature , parent ) : _ = feature , parent # NOQA project_context_scope = QgsExpressionContextUtils . projectScope ( QgsProject . instance ( ) ) project = QgsProject . instance ( ) key = ( '{provenance}__{exposure}' ) . format ( provenance = provenance_multi_exposure_analysis_summary_layers_id [ 'provenance_key' ] , exposure = exposure_key ) if not project_context_scope . hasVariable ( key ) : return None analysis_summary_layer = project . mapLayer ( project_context_scope . variable ( key ) ) if not analysis_summary_layer : return None index = analysis_summary_layer . fields ( ) . lookupField ( field ) if index < 0 : return None feature = next ( analysis_summary_layer . getFeatures ( ) ) return feature [ index ]
Retrieve a value from field in the specified exposure analysis layer .
213
13
26,452
def inasafe_exposure_summary_field_values ( field , feature , parent ) : _ = feature , parent # NOQA layer = exposure_summary_layer ( ) if not layer : return None index = layer . fields ( ) . lookupField ( field ) if index < 0 : return None values = [ ] for feat in layer . getFeatures ( ) : value = feat [ index ] values . append ( value ) return str ( values )
Retrieve all values from a field in the exposure summary layer .
96
13
26,453
def inasafe_place_value_name ( number , feature , parent ) : _ = feature , parent # NOQA if number is None : return None rounded_number = round_affected_number ( number , use_rounding = True , use_population_rounding = True ) value , unit = denomination ( rounded_number , 1000 ) if not unit : return None else : return unit [ 'name' ]
Given a number it will return the place value name .
89
11
26,454
def inasafe_place_value_coefficient ( number , feature , parent ) : _ = feature , parent # NOQA if number >= 0 : rounded_number = round_affected_number ( number , use_rounding = True , use_population_rounding = True ) min_number = 1000 value , unit = denomination ( rounded_number , min_number ) if number < min_number : rounded_number = int ( round ( value , 1 ) ) else : rounded_number = round ( value , 1 ) return str ( rounded_number ) else : return None
Given a number it will return the coefficient of the place value name .
123
14
26,455
def inasafe_place_value_percentage ( number , total , feature , parent ) : _ = feature , parent # NOQA if number < 0 : return None percentage_format = '{percentage}%' percentage = round ( ( float ( number ) / float ( total ) ) * 100 , 1 ) return percentage_format . format ( percentage = percentage )
Given a number and total it will return the percentage of the number to the total .
79
17
26,456
def beautify_date ( inasafe_time , feature , parent ) : _ = feature , parent # NOQA datetime_object = parse ( inasafe_time ) date = datetime_object . strftime ( '%Y-%m-%d' ) return date
Given an InaSAFE analysis time it will convert it to a date with year - month - date format .
62
23
26,457
def hazard_extra_keyword ( keyword , feature , parent ) : _ = feature , parent # NOQA hazard_layer_path = QgsExpressionContextUtils . projectScope ( QgsProject . instance ( ) ) . variable ( 'hazard_layer' ) hazard_layer = load_layer ( hazard_layer_path ) [ 0 ] keywords = KeywordIO . read_keywords ( hazard_layer ) extra_keywords = keywords . get ( 'extra_keywords' ) if extra_keywords : value = extra_keywords . get ( keyword ) if value : value_definition = definition ( value ) if value_definition : return value_definition [ 'name' ] return value else : return tr ( 'Keyword %s is not found' % keyword ) return tr ( 'No extra keywords found' )
Given a keyword it will return the value of the keyword from the hazard layer s extra keywords .
177
19
26,458
def to_html ( self ) : if self . items is None : return else : html = '<ol%s>\n' % self . html_attributes ( ) for item in self . items : html += '<li>%s</li>\n' % item . to_html ( ) html += '</ol>' return html
Render a Text MessageElement as html
76
7
26,459
def _check_value_mapping ( layer , exposure_key = None ) : index = layer . fields ( ) . lookupField ( exposure_type_field [ 'field_name' ] ) unique_exposure = layer . uniqueValues ( index ) if layer . keywords [ 'layer_purpose' ] == layer_purpose_hazard [ 'key' ] : if not exposure_key : message = tr ( 'Hazard value mapping missing exposure key.' ) raise InvalidKeywordsForProcessingAlgorithm ( message ) value_map = active_thresholds_value_maps ( layer . keywords , exposure_key ) else : value_map = layer . keywords . get ( 'value_map' ) if not value_map : # The exposure do not have a value_map, we can skip the layer. return layer if layer . keywords [ 'layer_purpose' ] == layer_purpose_hazard [ 'key' ] : if not exposure_key : message = tr ( 'Hazard classification is missing exposure key.' ) raise InvalidKeywordsForProcessingAlgorithm ( message ) classification = active_classification ( layer . keywords , exposure_key ) else : classification = layer . keywords [ 'classification' ] exposure_classification = definition ( classification ) other = None if exposure_classification [ 'key' ] != data_driven_classes [ 'key' ] : other = exposure_classification [ 'classes' ] [ - 1 ] [ 'key' ] exposure_mapped = [ ] for group in list ( value_map . values ( ) ) : exposure_mapped . extend ( group ) diff = list ( unique_exposure - set ( exposure_mapped ) ) if other in list ( value_map . keys ( ) ) : value_map [ other ] . extend ( diff ) else : value_map [ other ] = diff layer . keywords [ 'value_map' ] = value_map layer . keywords [ 'classification' ] = classification return layer
Loop over the exposure type field and check if the value map is correct .
420
15
26,460
def clean_inasafe_fields ( layer ) : fields = [ ] # Exposure if layer . keywords [ 'layer_purpose' ] == layer_purpose_exposure [ 'key' ] : fields = get_fields ( layer . keywords [ 'layer_purpose' ] , layer . keywords [ 'exposure' ] ) # Hazard elif layer . keywords [ 'layer_purpose' ] == layer_purpose_hazard [ 'key' ] : fields = get_fields ( layer . keywords [ 'layer_purpose' ] , layer . keywords [ 'hazard' ] ) # Aggregation elif layer . keywords [ 'layer_purpose' ] == layer_purpose_aggregation [ 'key' ] : fields = get_fields ( layer . keywords [ 'layer_purpose' ] ) # Add displaced_field definition to expected_fields # for minimum needs calculator. # If there is no displaced_field keyword, then pass try : if layer . keywords [ 'inasafe_fields' ] [ displaced_field [ 'key' ] ] : fields . append ( displaced_field ) except KeyError : pass expected_fields = { field [ 'key' ] : field [ 'field_name' ] for field in fields } # Convert the field name and sum up if needed new_keywords = { } for key , val in list ( layer . keywords . get ( 'inasafe_fields' ) . items ( ) ) : if key in expected_fields : if isinstance ( val , str ) : val = [ val ] sum_fields ( layer , key , val ) new_keywords [ key ] = expected_fields [ key ] # Houra, InaSAFE keywords match our concepts ! layer . keywords [ 'inasafe_fields' ] . update ( new_keywords ) to_remove = [ ] # Remove unnecessary fields (the one that is not in the inasafe_fields) for field in layer . fields ( ) . toList ( ) : if field . name ( ) not in list ( layer . keywords [ 'inasafe_fields' ] . values ( ) ) : to_remove . append ( field . name ( ) ) remove_fields ( layer , to_remove ) LOGGER . debug ( 'Fields which have been removed from %s : %s' % ( layer . keywords [ 'layer_purpose' ] , ' ' . join ( to_remove ) ) )
Clean inasafe_fields based on keywords .
506
10
26,461
def _size_is_needed ( layer ) : exposure = layer . keywords . get ( 'exposure' ) if not exposure : # The layer is not an exposure. return False indivisible_exposure_keys = [ f [ 'key' ] for f in indivisible_exposure ] if exposure in indivisible_exposure_keys : # The exposure is not divisible, We don't need to compute the size. return False if layer . geometryType ( ) == QgsWkbTypes . PointGeometry : # The exposure is a point layer. We don't need to compute the size. return False # The layer is divisible and not a point layer. # We need to check if some fields are absolute. fields = layer . keywords [ 'inasafe_fields' ] absolute_field_keys = [ f [ 'key' ] for f in count_fields ] for field in fields : if field in absolute_field_keys : return True else : return False
Checker if we need the size field .
208
9
26,462
def _remove_features ( layer ) : # Get the layer purpose of the layer. layer_purpose = layer . keywords [ 'layer_purpose' ] layer_subcategory = layer . keywords . get ( layer_purpose ) compulsory_field = get_compulsory_fields ( layer_purpose , layer_subcategory ) inasafe_fields = layer . keywords [ 'inasafe_fields' ] # Compulsory fields can be list of field name or single field name. # We need to iterate through all of them field_names = inasafe_fields . get ( compulsory_field [ 'key' ] ) if not isinstance ( field_names , list ) : field_names = [ field_names ] for field_name in field_names : if not field_name : message = 'Keyword %s is missing from %s' % ( compulsory_field [ 'key' ] , layer_purpose ) raise InvalidKeywordsForProcessingAlgorithm ( message ) index = layer . fields ( ) . lookupField ( field_name ) request = QgsFeatureRequest ( ) request . setSubsetOfAttributes ( [ field_name ] , layer . fields ( ) ) layer . startEditing ( ) i = 0 for feature in layer . getFeatures ( request ) : feat_attr = feature . attributes ( ) [ index ] if ( feat_attr is None or ( hasattr ( feat_attr , 'isNull' ) and feat_attr . isNull ( ) ) ) : if layer_purpose == 'hazard' : # Remove the feature if the hazard is null. layer . deleteFeature ( feature . id ( ) ) i += 1 elif layer_purpose == 'aggregation' : # Put the ID if the value is null. layer . changeAttributeValue ( feature . id ( ) , index , str ( feature . id ( ) ) ) elif layer_purpose == 'exposure' : # Put an empty value, the value mapping will take care of # it in the 'other' group. layer . changeAttributeValue ( feature . id ( ) , index , '' ) # Check if there is en empty geometry. geometry = feature . geometry ( ) if not geometry : layer . deleteFeature ( feature . id ( ) ) i += 1 continue # Check if the geometry is empty. if geometry . isEmpty ( ) : layer . deleteFeature ( feature . id ( ) ) i += 1 continue # Check if the geometry is valid. if not geometry . 
isGeosValid ( ) : # polygonize can produce some invalid geometries # For instance a polygon like this, sharing a same point : # _______ # | ___|__ # | |__| | # |________| # layer.deleteFeature(feature.id()) # i += 1 pass # TODO We need to add more tests # like checking if the value is in the value_mapping. layer . commitChanges ( ) if i : LOGGER . critical ( 'Features which have been removed from %s : %s' % ( layer . keywords [ 'layer_purpose' ] , i ) ) else : LOGGER . info ( 'No feature has been removed from %s during the vector layer ' 'preparation' % layer . keywords [ 'layer_purpose' ] )
Remove features which do not have information for InaSAFE or an invalid geometry .
690
17
26,463
def _add_id_column ( layer ) : layer_purpose = layer . keywords [ 'layer_purpose' ] mapping = { layer_purpose_exposure [ 'key' ] : exposure_id_field , layer_purpose_hazard [ 'key' ] : hazard_id_field , layer_purpose_aggregation [ 'key' ] : aggregation_id_field } has_id_column = False for layer_type , field in list ( mapping . items ( ) ) : if layer_purpose == layer_type : safe_id = field if layer . keywords [ 'inasafe_fields' ] . get ( field [ 'key' ] ) : has_id_column = True break if not has_id_column : LOGGER . info ( 'We add an ID column in {purpose}' . format ( purpose = layer_purpose ) ) layer . startEditing ( ) id_field = QgsField ( ) id_field . setName ( safe_id [ 'field_name' ] ) if isinstance ( safe_id [ 'type' ] , list ) : # Use the first element in the list of type id_field . setType ( safe_id [ 'type' ] [ 0 ] ) else : id_field . setType ( safe_id [ 'type' ] [ 0 ] ) id_field . setPrecision ( safe_id [ 'precision' ] ) id_field . setLength ( safe_id [ 'length' ] ) layer . addAttribute ( id_field ) new_index = layer . fields ( ) . lookupField ( id_field . name ( ) ) for feature in layer . getFeatures ( ) : layer . changeAttributeValue ( feature . id ( ) , new_index , feature . id ( ) ) layer . commitChanges ( ) layer . keywords [ 'inasafe_fields' ] [ safe_id [ 'key' ] ] = ( safe_id [ 'field_name' ] )
Add an ID column if it s not present in the attribute table .
419
14
26,464
def _add_default_exposure_class ( layer ) : layer . startEditing ( ) field = create_field_from_definition ( exposure_class_field ) layer . keywords [ 'inasafe_fields' ] [ exposure_class_field [ 'key' ] ] = ( exposure_class_field [ 'field_name' ] ) layer . addAttribute ( field ) index = layer . fields ( ) . lookupField ( exposure_class_field [ 'field_name' ] ) exposure = layer . keywords [ 'exposure' ] request = QgsFeatureRequest ( ) request . setFlags ( QgsFeatureRequest . NoGeometry ) for feature in layer . getFeatures ( request ) : layer . changeAttributeValue ( feature . id ( ) , index , exposure ) layer . commitChanges ( ) return
The layer doesn t have an exposure class we need to add it .
173
14
26,465
def sum_fields ( layer , output_field_key , input_fields ) : field_definition = definition ( output_field_key ) output_field_name = field_definition [ 'field_name' ] # If the fields only has one element if len ( input_fields ) == 1 : # Name is different, copy it if input_fields [ 0 ] != output_field_name : to_rename = { input_fields [ 0 ] : output_field_name } # We copy only, it will be deleted later. # We can't rename the field, we need to copy it as the same # field might be used many times in the FMT tool. copy_fields ( layer , to_rename ) else : # Name is same, do nothing return else : # Creating expression # Put field name in a double quote. See #4248 input_fields = [ '"%s"' % f for f in input_fields ] string_expression = ' + ' . join ( input_fields ) sum_expression = QgsExpression ( string_expression ) context = QgsExpressionContext ( ) context . setFields ( layer . fields ( ) ) sum_expression . prepare ( context ) # Get the output field index output_idx = layer . fields ( ) . lookupField ( output_field_name ) # Output index is not found layer . startEditing ( ) if output_idx == - 1 : output_field = create_field_from_definition ( field_definition ) layer . addAttribute ( output_field ) output_idx = layer . fields ( ) . lookupField ( output_field_name ) # Iterate to all features for feature in layer . getFeatures ( ) : context . setFeature ( feature ) result = sum_expression . evaluate ( context ) feature [ output_idx ] = result layer . updateFeature ( feature ) layer . commitChanges ( )
Sum the value of input_fields and put it as output_field .
406
15
26,466
def get_needs_provenance ( parameters ) : if 'minimum needs' not in parameters : return None needs = parameters [ 'minimum needs' ] provenance = [ p for p in needs if p . name == tr ( 'Provenance' ) ] if provenance : return provenance [ 0 ] return None
Get the provenance of minimum needs .
66
8
26,467
def load ( self ) : self . minimum_needs = self . settings . value ( 'MinimumNeeds' ) if not self . minimum_needs or self . minimum_needs == '' : # Load the most relevant minimum needs # If more than one profile exists, just use defaults so # the user doesn't get confused. profiles = self . get_profiles ( ) if len ( profiles ) == 1 : profile = self . get_profiles ( ) [ 0 ] self . load_profile ( profile ) else : self . minimum_needs = self . _defaults ( )
Load the minimum needs .
121
5
26,468
def load_profile ( self , profile ) : profile_path = os . path . join ( self . root_directory , 'minimum_needs' , profile + '.json' ) self . read_from_file ( profile_path )
Load a specific profile into the current minimum needs .
50
10
26,469
def save_profile ( self , profile ) : profile = profile . replace ( '.json' , '' ) profile_path = os . path . join ( self . root_directory , 'minimum_needs' , profile + '.json' ) self . write_to_file ( profile_path )
Save the current minimum needs into a new profile .
62
10
26,470
def get_profiles ( self , overwrite = False ) : def sort_by_locale ( unsorted_profiles , locale ) : """Sort the profiles by language settings. The profiles that are in the same language as the QGIS' locale will be sorted out first. :param unsorted_profiles: The user profiles profiles :type unsorted_profiles: list :param locale: The language settings string :type locale: str :returns: Ordered profiles :rtype: list """ if locale is None : return unsorted_profiles locale = '_%s' % locale [ : 2 ] profiles_our_locale = [ ] profiles_remaining = [ ] for profile_name in unsorted_profiles : if locale in profile_name : profiles_our_locale . append ( profile_name ) else : profiles_remaining . append ( profile_name ) return profiles_our_locale + profiles_remaining # We ignore empty root_directory to avoid load min needs profile # to test directory when test is running. if not self . root_directory : profiles = [ ] return profiles else : locale_minimum_needs_dir = os . path . join ( self . root_directory , 'minimum_needs' ) path_name = resources_path ( 'minimum_needs' ) if not os . path . exists ( locale_minimum_needs_dir ) : os . makedirs ( locale_minimum_needs_dir ) # load default min needs profile for file_name in os . listdir ( path_name ) : source_file = os . path . join ( path_name , file_name ) destination_file = os . path . join ( locale_minimum_needs_dir , file_name ) if not os . path . exists ( destination_file ) or overwrite : copy ( source_file , destination_file ) # move old min needs profile under user profile to inasafe # subdirectory self . move_old_profile ( locale_minimum_needs_dir ) profiles = [ profile [ : - 5 ] for profile in os . listdir ( locale_minimum_needs_dir ) if profile [ - 5 : ] == '.json' ] profiles = sort_by_locale ( profiles , self . locale ) return profiles
Get all the minimum needs profiles .
483
7
26,471
def get_needs_parameters ( self ) : parameters = [ ] for resource in self . minimum_needs [ 'resources' ] : parameter = ResourceParameter ( ) parameter . name = resource [ 'Resource name' ] parameter . help_text = resource [ 'Resource description' ] # Adding in the frequency property. This is not in the # FloatParameter by default, so maybe we should subclass. parameter . frequency = resource [ 'Frequency' ] parameter . description = self . format_sentence ( resource [ 'Readable sentence' ] , resource ) parameter . minimum_allowed_value = float ( resource [ 'Minimum allowed' ] ) parameter . maximum_allowed_value = float ( resource [ 'Maximum allowed' ] ) parameter . unit . name = resource [ 'Unit' ] parameter . unit . plural = resource [ 'Units' ] parameter . unit . abbreviation = resource [ 'Unit abbreviation' ] parameter . value = float ( resource [ 'Default' ] ) # choose highest precision between resource's parameters # start with default of 1 precisions = [ 1 ] precision_influence = [ 'Maximum allowed' , 'Minimum allowed' , 'Default' ] for element in precision_influence : resource_element = str ( resource [ element ] ) if resource [ element ] is not None and '.' in resource_element : precisions . append ( self . precision_of ( resource_element ) ) parameter . precision = max ( precisions ) parameters . append ( parameter ) prov_parameter = TextParameter ( ) prov_parameter . name = tr ( 'Provenance' ) prov_parameter . description = tr ( 'The provenance of minimum needs' ) prov_parameter . help_text = tr ( 'The provenance of minimum needs' ) try : prov_parameter . value = self . provenance except TypeError : prov_parameter . value = '' parameters . append ( prov_parameter ) return parameters
Get the minimum needs resources in parameter format
409
8
26,472
def format_sentence ( sentence , resource ) : sentence = sentence . split ( '{{' ) updated_sentence = sentence [ 0 ] . rstrip ( ) for part in sentence [ 1 : ] : replace , keep = part . split ( '}}' ) replace = replace . strip ( ) updated_sentence = "%s %s%s" % ( updated_sentence , resource [ replace ] , keep ) return updated_sentence
Populate the placeholders in the sentence .
94
9
26,473
def remove_profile ( self , profile ) : self . remove_file ( os . path . join ( self . root_directory , 'minimum_needs' , profile + '.json' ) )
Remove a profile .
41
4
26,474
def tr ( text , context = '@default' ) : # noinspection PyCallByClass,PyTypeChecker,PyArgumentList if type ( text ) != str : text = str ( text ) translated_text = QCoreApplication . translate ( context , text ) # Check if there is missing container. If so, return the original text. # See #3164 if text . count ( '%' ) == translated_text . count ( '%' ) : return translated_text else : content = ( 'There is a problem in the translation text.\n' 'The original text: "%s".\n' 'The translation: "%s".\n' 'The number of %% character does not match (%s and %s).' 'Please check the translation in transifex for %s.' % ( text , translated_text , text . count ( '%' ) , translated_text . count ( '%s' ) , locale ( ) ) ) LOGGER . warning ( content ) return text
We define a tr function alias here since the utilities implementation below is not a class and does not inherit from QObject .
216
24
26,475
def locale ( qsetting = '' ) : override_flag = QSettings ( qsetting ) . value ( 'locale/overrideFlag' , True , type = bool ) default = 'en_US' if override_flag : locale_name = QSettings ( qsetting ) . value ( 'locale/userLocale' , default , type = str ) else : # noinspection PyArgumentList locale_name = QLocale . system ( ) . name ( ) if locale_name == 'C' : # On travis, locale/userLocale is equal to C. We want 'en'. locale_name = default # NOTES: we split the locale name because we need the first two # character i.e. 'id', 'af, etc locale_name = str ( locale_name ) . split ( '_' ) [ 0 ] return locale_name
Get the name of the currently active locale .
188
9
26,476
def update_band_description ( self ) : self . clear_further_steps ( ) # Set widgets selected_band = self . selected_band ( ) statistics = self . parent . layer . dataProvider ( ) . bandStatistics ( selected_band , QgsRasterBandStats . All , self . parent . layer . extent ( ) , 0 ) band_description = tr ( 'This band contains data from {min_value} to {max_value}' ) . format ( min_value = statistics . minimumValue , max_value = statistics . maximumValue ) self . lblDescribeBandSelector . setText ( band_description )
Helper to update band description .
138
6
26,477
def selected_band ( self ) : item = self . lstBands . currentItem ( ) return item . data ( QtCore . Qt . UserRole )
Obtain the layer mode selected by user .
34
9
26,478
def merge_dictionaries ( base_dict , extra_dict ) : new_dict = base_dict . copy ( ) new_dict . update ( extra_dict ) return new_dict
merge two dictionaries .
41
6
26,479
def read_property_from_xml ( root , path ) : element = root . find ( path , XML_NS ) try : return element . text . strip ( ' \t\n\r' ) except AttributeError : return None
Get the text from an XML property .
51
8
26,480
def north_arrow ( self , north_arrow_path ) : if isinstance ( north_arrow_path , str ) and os . path . exists ( north_arrow_path ) : self . _north_arrow = north_arrow_path else : self . _north_arrow = default_north_arrow_path ( )
Set image that will be used as north arrow in reports .
70
12
26,481
def organisation_logo ( self , logo ) : if isinstance ( logo , str ) and os . path . exists ( logo ) : self . _organisation_logo = logo else : self . _organisation_logo = supporters_logo_path ( )
Set image that will be used as organisation logo in reports .
58
12
26,482
def disclaimer ( self , text ) : if not isinstance ( text , str ) : self . _disclaimer = disclaimer ( ) else : self . _disclaimer = text
Set text that will be used as disclaimer in reports .
36
11
26,483
def output_folder ( self , value ) : self . _output_folder = value if not os . path . exists ( self . _output_folder ) : os . makedirs ( self . _output_folder )
Output folder path for the rendering .
47
7
26,484
def _check_layer_count ( self , layer ) : if layer : if not layer . isValid ( ) : raise ImpactReport . LayerException ( 'Layer is not valid' ) if isinstance ( layer , QgsRasterLayer ) : # can't check feature count of raster layer return feature_count = len ( [ f for f in layer . getFeatures ( ) ] ) if feature_count == 0 : raise ImpactReport . LayerException ( 'Layer contains no features' )
Check for the validity of the layer .
103
8
26,485
def map_title ( self ) : # noinspection PyBroadException try : title = self . _keyword_io . read_keywords ( self . impact , 'map_title' ) return title except KeywordNotFoundError : return None except Exception : # pylint: disable=broad-except return None
Get the map title from the layer keywords if possible .
68
11
26,486
def map_legend_attributes ( self ) : LOGGER . debug ( 'InaSAFE Map getMapLegendAttributes called' ) legend_attribute_list = [ 'legend_notes' , 'legend_units' , 'legend_title' ] legend_attribute_dict = { } for legend_attribute in legend_attribute_list : # noinspection PyBroadException try : legend_attribute_dict [ legend_attribute ] = self . _keyword_io . read_keywords ( self . impact , legend_attribute ) except KeywordNotFoundError : pass except Exception : # pylint: disable=broad-except pass return legend_attribute_dict
Get the map legend attribute from the layer keywords if possible .
146
12
26,487
def _vector_layers ( self ) : layers = [ ] vector_datasource = self . vector_driver . Open ( self . uri . absoluteFilePath ( ) ) if vector_datasource : for i in range ( vector_datasource . GetLayerCount ( ) ) : layers . append ( vector_datasource . GetLayer ( i ) . GetName ( ) ) return layers
Return a list of vector layers available .
87
8
26,488
def _raster_layers ( self ) : layers = [ ] raster_datasource = gdal . Open ( self . uri . absoluteFilePath ( ) ) if raster_datasource : subdatasets = raster_datasource . GetSubDatasets ( ) if len ( subdatasets ) == 0 : metadata = raster_datasource . GetMetadata ( ) layers . append ( metadata [ 'IDENTIFIER' ] ) else : for subdataset in subdatasets : layers . append ( subdataset [ 0 ] . split ( ':' ) [ 2 ] ) return layers
Return a list of raster layers available .
139
9
26,489
def _add_vector_layer ( self , vector_layer , layer_name , save_style = False ) : # Fixme # if not self.is_writable(): # return False, 'The destination is not writable.' geometry = QGIS_OGR_GEOMETRY_MAP [ vector_layer . wkbType ( ) ] spatial_reference = osr . SpatialReference ( ) qgis_spatial_reference = vector_layer . crs ( ) . authid ( ) # Use 4326 as default if the spatial reference is not found epsg = 4326 epsg_string = qgis_spatial_reference if epsg_string : epsg = int ( epsg_string . split ( ':' ) [ 1 ] ) spatial_reference . ImportFromEPSG ( epsg ) vector_datasource = self . vector_driver . Open ( self . uri . absoluteFilePath ( ) , True ) vector_datasource . CreateLayer ( layer_name , spatial_reference , geometry ) uri = '{}|layerid=0' . format ( self . uri . absoluteFilePath ( ) ) vector_layer = QgsVectorLayer ( uri , layer_name , 'ogr' ) data_provider = vector_layer . dataProvider ( ) for feature in vector_layer . getFeatures ( ) : data_provider . addFeatures ( [ feature ] ) return True , layer_name
Add a vector layer to the geopackage .
320
10
26,490
def _add_tabular_layer ( self , tabular_layer , layer_name , save_style = False ) : return self . _add_vector_layer ( tabular_layer , layer_name , save_style )
Add a tabular layer to the geopackage .
50
11
26,491
def selected_canvas_hazlayer ( self ) : if self . lstCanvasHazLayers . selectedItems ( ) : item = self . lstCanvasHazLayers . currentItem ( ) else : return None try : layer_id = item . data ( Qt . UserRole ) except ( AttributeError , NameError ) : layer_id = None # noinspection PyArgumentList layer = QgsProject . instance ( ) . mapLayer ( layer_id ) return layer
Obtain the canvas layer selected by user .
108
9
26,492
def list_compatible_canvas_layers ( self ) : italic_font = QFont ( ) italic_font . setItalic ( True ) list_widget = self . lstCanvasHazLayers # Add compatible layers list_widget . clear ( ) for layer in self . parent . get_compatible_canvas_layers ( 'hazard' ) : item = QListWidgetItem ( layer [ 'name' ] , list_widget ) item . setData ( Qt . UserRole , layer [ 'id' ] ) if not layer [ 'keywords' ] : item . setFont ( italic_font ) list_widget . addItem ( item )
Fill the list widget with compatible layers .
146
8
26,493
def set_widgets ( self ) : # The list is already populated in the previous step, but now we # need to do it again in case we're back from the Keyword Wizard. # First, preserve self.parent.layer before clearing the list last_layer = self . parent . layer and self . parent . layer . id ( ) or None self . lblDescribeCanvasHazLayer . clear ( ) self . list_compatible_canvas_layers ( ) self . auto_select_one_item ( self . lstCanvasHazLayers ) # Try to select the last_layer, if found: if last_layer : layers = [ ] for index in range ( self . lstCanvasHazLayers . count ( ) ) : item = self . lstCanvasHazLayers . item ( index ) layers += [ item . data ( Qt . UserRole ) ] if last_layer in layers : self . lstCanvasHazLayers . setCurrentRow ( layers . index ( last_layer ) ) # Set icon hazard = self . parent . step_fc_functions1 . selected_value ( layer_purpose_hazard [ 'key' ] ) icon_path = get_image_path ( hazard ) self . lblIconIFCWHazardFromCanvas . setPixmap ( QPixmap ( icon_path ) )
Set widgets on the Hazard Layer From TOC tab .
298
11
26,494
def performance_log_message ( self ) : message = m . Message ( ) table = m . Table ( style_class = 'table table-condensed table-striped' ) row = m . Row ( ) row . add ( m . Cell ( tr ( 'Function' ) , header = True ) ) row . add ( m . Cell ( tr ( 'Time' ) , header = True ) ) if setting ( key = 'memory_profile' , expected_type = bool ) : row . add ( m . Cell ( tr ( 'Memory' ) , header = True ) ) table . add ( row ) if self . performance_log is None : message . add ( table ) return message indent = - 1 def display_tree ( tree , space ) : space += 1 new_row = m . Row ( ) # This is a kind of hack to display the tree with indentation text = '|' text += '*' * space if tree . children : text += '\ ' else : text += '| ' text += tree . __str__ ( ) busy = tr ( 'Busy' ) new_row . add ( m . Cell ( text ) ) time = tree . elapsed_time if time is None : time = busy new_row . add ( m . Cell ( time ) ) if setting ( key = 'memory_profile' , expected_type = bool ) : memory_used = tree . memory_used if memory_used is None : memory_used = busy new_row . add ( m . Cell ( memory_used ) ) table . add ( new_row ) if tree . children : for child in tree . children : display_tree ( child , space ) space -= 1 # noinspection PyTypeChecker display_tree ( self . performance_log , indent ) message . add ( table ) return message
Return the profiling log as a message .
388
8
26,495
def requested_extent ( self , extent ) : if isinstance ( extent , QgsRectangle ) : self . _requested_extent = extent self . _is_ready = False else : raise InvalidExtentError ( '%s is not a valid extent.' % extent )
Setter for extent property .
61
6
26,496
def crs ( self , crs ) : if isinstance ( crs , QgsCoordinateReferenceSystem ) : self . _crs = crs self . _is_ready = False else : raise InvalidExtentError ( '%s is not a valid CRS object.' % crs )
Setter for extent_crs property .
64
9
26,497
def datastore ( self , datastore ) : if isinstance ( datastore , DataStore ) : self . _datastore = datastore else : raise Exception ( '%s is not a valid datastore.' % datastore )
Setter for the datastore .
55
8
26,498
def duration ( self ) : if self . end_datetime is None or self . start_datetime is None : return 0 return ( self . end_datetime - self . start_datetime ) . total_seconds ( )
The duration of running the impact function in seconds .
49
10
26,499
def console_progress_callback ( current , maximum , message = None ) : # noinspection PyChainedComparisons if maximum > 1000 and current % 1000 != 0 and current != maximum : return if message is not None : LOGGER . info ( message [ 'description' ] ) LOGGER . info ( 'Task progress: %i of %i' % ( current , maximum ) )
Simple console based callback implementation for tests .
81
8