idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
4,500
def __retrieve_data ( self ) : if self . __eof is True : return b'' logging . debug ( "Reading another block." ) block = self . read ( self . __block_size ) if block == b'' : self . __log . debug ( "We've encountered the EOF." ) self . __eof = True return block
Read more data from the file .
76
7
4,501
def set_mask_from_shapefile ( self , shapefile_path , cell_size ) : # make sure paths are absolute as the working directory changes shapefile_path = os . path . abspath ( shapefile_path ) # ADD MASK with tmp_chdir ( self . project_directory ) : mask_name = '{0}.msk' . format ( self . project_manager . name ) msk_file = WatershedMaskFile ( project_file = self . project_manager , session = self . db_session ) msk_file . generateFromWatershedShapefile ( shapefile_path , cell_size = cell_size , out_raster_path = mask_name , load_raster_to_db = self . load_rasters_to_db )
Adds a mask from a shapefile
174
7
4,502
def set_elevation ( self , elevation_grid_path , mask_shapefile ) : # ADD ELEVATION FILE ele_file = ElevationGridFile ( project_file = self . project_manager , session = self . db_session ) ele_file . generateFromRaster ( elevation_grid_path , mask_shapefile , load_raster_to_db = self . load_rasters_to_db )
Adds elevation file to project
94
5
4,503
def set_outlet ( self , latitude , longitude , outslope ) : self . project_manager . setOutlet ( latitude = latitude , longitude = longitude , outslope = outslope )
Adds outlet point to project
46
5
4,504
def set_event ( self , simulation_start = None , simulation_duration = None , simulation_end = None , rain_intensity = 2 , rain_duration = timedelta ( seconds = 30 * 60 ) , event_type = 'EVENT' , ) : # ADD TEMPORTAL EVENT INFORMAITON if event_type == 'LONG_TERM' : self . event = LongTermMode ( self . project_manager , self . db_session , self . project_directory , simulation_start = simulation_start , simulation_end = simulation_end , simulation_duration = simulation_duration , ) else : # 'EVENT' self . event = EventMode ( self . project_manager , self . db_session , self . project_directory , simulation_start = simulation_start , simulation_duration = simulation_duration , ) self . event . add_uniform_precip_event ( intensity = rain_intensity , duration = rain_duration )
Initializes event for GSSHA model
208
8
4,505
def write ( self ) : # write data self . project_manager . writeInput ( session = self . db_session , directory = self . project_directory , name = self . project_manager . name )
Write project to directory
44
4
4,506
def mirror ( self , handler , path_from , path_to , log_files = False ) : q = deque ( [ '' ] ) while q : path = q . popleft ( ) full_from = ( '%s/%s' % ( path_from , path ) ) if path else path_from full_to = ( '%s/%s' % ( path_to , path ) ) if path else path_to subdirs = handler ( full_from , full_to , log_files ) for subdir in subdirs : q . append ( ( '%s/%s' % ( path , subdir ) ) if path else subdir )
Recursively mirror the contents of path_from into path_to . handler should be self . mirror_to_local_no_recursion or self . mirror_to_remote_no_recursion to represent which way the files are moving .
149
52
4,507
def linkToChannelInputFile ( self , session , channelInputFile , force = False ) : # Only perform operation if the channel input file has not been assigned or the force parameter is true if self . channelInputFile is not None and not force : return # Set the channel input file relationship self . channelInputFile = channelInputFile # Retrieve the fluvial stream links orderedLinks = channelInputFile . getOrderedLinks ( session ) # Retrieve the LinkNodeTimeStep objects timeSteps = self . timeSteps # Link each link dataset in each time step for timeStep in timeSteps : # Retrieve link datasets linkDatasets = timeStep . linkDatasets # Link each node dataset for l , linkDataset in enumerate ( linkDatasets ) : # Get the fluvial link and nodes streamLink = orderedLinks [ l ] streamNodes = streamLink . nodes # Link link datasets to fluvial links linkDataset . link = streamLink # Retrieve node datasets nodeDatasets = linkDataset . nodeDatasets # Link the node dataset with the channel input file nodes if len ( nodeDatasets ) > 0 and len ( streamNodes ) > 0 : for n , nodeDataset in enumerate ( nodeDatasets ) : nodeDataset . node = streamNodes [ n ] session . add ( self ) session . commit ( )
Create database relationships between the link node dataset and the channel input file .
300
14
4,508
def _read ( self , directory , filename , session , path , name , extension , spatial , spatialReferenceID , replaceParamFile ) : # Set file extension property self . fileExtension = extension # Dictionary of keywords/cards and parse function names KEYWORDS = ( 'NUM_LINKS' , 'TIME_STEP' , 'NUM_TS' , 'START_TIME' , 'TS' ) # Parse file into chunks associated with keywords/cards with open ( path , 'r' ) as f : self . name = f . readline ( ) . strip ( ) chunks = pt . chunk ( KEYWORDS , f ) # Parse chunks associated with each key for card , chunkList in iteritems ( chunks ) : # Parse each chunk in the chunk list for chunk in chunkList : schunk = chunk [ 0 ] . strip ( ) . split ( ) # Cases if card == 'NUM_LINKS' : # NUM_LINKS handler self . numLinks = schunk [ 1 ] elif card == 'TIME_STEP' : # TIME_STEP handler self . timeStepInterval = schunk [ 1 ] elif card == 'NUM_TS' : # NUM_TS handler self . numTimeSteps = schunk [ 1 ] elif card == 'START_TIME' : # START_TIME handler self . startTime = '%s %s %s %s %s %s' % ( schunk [ 1 ] , schunk [ 2 ] , schunk [ 3 ] , schunk [ 4 ] , schunk [ 5 ] , schunk [ 6 ] ) elif card == 'TS' : # TS handler for line in chunk : sline = line . strip ( ) . split ( ) token = sline [ 0 ] # Cases if token == 'TS' : # Time Step line handler timeStep = LinkNodeTimeStep ( timeStep = sline [ 1 ] ) timeStep . linkNodeDataset = self else : # Split the line spLinkLine = line . strip ( ) . split ( ) # Create LinkDataset GSSHAPY object linkDataset = LinkDataset ( ) linkDataset . numNodeDatasets = int ( spLinkLine [ 0 ] ) linkDataset . timeStep = timeStep linkDataset . linkNodeDatasetFile = self # Parse line into NodeDatasets NODE_VALUE_INCREMENT = 2 statusIndex = 1 valueIndex = statusIndex + 1 # Parse line into node datasets if linkDataset . numNodeDatasets > 0 : for i in range ( 0 , linkDataset . numNodeDatasets ) : # Create NodeDataset GSSHAPY object nodeDataset = NodeDataset ( ) nodeDataset . status = int ( spLinkLine [ statusIndex ] ) nodeDataset . value = float ( spLinkLine [ valueIndex ] ) nodeDataset . linkDataset = linkDataset nodeDataset . linkNodeDatasetFile = self # Increment to next status/value pair statusIndex += NODE_VALUE_INCREMENT valueIndex += NODE_VALUE_INCREMENT else : nodeDataset = NodeDataset ( ) nodeDataset . value = float ( spLinkLine [ 1 ] ) nodeDataset . linkDataset = linkDataset nodeDataset . linkNodeDatasetFile = self
Link Node Dataset File Read from File Method
745
10
4,509
def _write ( self , session , openFile , replaceParamFile ) : # Retrieve TimeStep objects timeSteps = self . timeSteps # Write Lines openFile . write ( '%s\n' % self . name ) openFile . write ( 'NUM_LINKS %s\n' % self . numLinks ) openFile . write ( 'TIME_STEP %s\n' % self . timeStepInterval ) openFile . write ( 'NUM_TS %s\n' % self . numTimeSteps ) openFile . write ( 'START_TIME %s\n' % self . startTime ) for timeStep in timeSteps : openFile . write ( 'TS %s\n' % timeStep . timeStep ) # Retrieve LinkDataset objects linkDatasets = timeStep . linkDatasets for linkDataset in linkDatasets : # Write number of node datasets values openFile . write ( '{0} ' . format ( linkDataset . numNodeDatasets ) ) # Retrieve NodeDatasets nodeDatasets = linkDataset . nodeDatasets if linkDataset . numNodeDatasets > 0 : for nodeDataset in nodeDatasets : # Write status and value openFile . write ( '{0} {1:.5f} ' . format ( nodeDataset . status , nodeDataset . value ) ) else : for nodeDataset in nodeDatasets : # Write status and value if linkDataset . numNodeDatasets < 0 : openFile . write ( '{0:.5f}' . format ( nodeDataset . value ) ) else : openFile . write ( '{0:.3f}' . format ( nodeDataset . value ) ) # Write new line character after each link dataset openFile . write ( '\n' ) # Insert empty line between time steps openFile . write ( '\n' )
Link Node Dataset File Write to File Method
433
10
4,510
def login ( container ) : columns , lines = shutil . get_terminal_size ( ) # Temporary try : subprocess . check_call ( [ "docker" , "exec" , "--env" , f"COLUMNS={str(columns)},LINES={str(lines)}" , # Temporary "--env" , f"LINES={str(lines)}" , # Temporary "--interactive" , "--tty" , container , "bash" , "--login" ] ) except subprocess . CalledProcessError : raise RuntimeError ( ) from None
Log into container .
125
4
4,511
def _update_simulation_start ( self , simulation_start ) : self . simulation_start = simulation_start if self . simulation_duration is not None and self . simulation_start is not None : self . simulation_end = self . simulation_start + self . simulation_duration self . _update_simulation_start_cards ( )
Update GSSHA simulation start time
74
7
4,512
def _update_simulation_start_cards ( self ) : if self . simulation_start is not None : self . _update_card ( "START_DATE" , self . simulation_start . strftime ( "%Y %m %d" ) ) self . _update_card ( "START_TIME" , self . simulation_start . strftime ( "%H %M" ) )
Update GSSHA cards for simulation start
87
8
4,513
def _update_simulation_end_from_lsm ( self ) : te = self . l2g . xd . lsm . datetime [ - 1 ] simulation_end = te . replace ( tzinfo = utc ) . astimezone ( tz = self . tz ) . replace ( tzinfo = None ) if self . simulation_end is None : self . simulation_end = simulation_end elif self . simulation_end > simulation_end : self . simulation_end = simulation_end self . _update_card ( "END_TIME" , self . simulation_end . strftime ( "%Y %m %d %H %M" ) )
Update simulation end time from LSM
148
7
4,514
def add_precip_file ( self , precip_file_path , interpolation_type = None ) : # precip file read in self . _update_card ( 'PRECIP_FILE' , precip_file_path , True ) if interpolation_type is None : # check if precip type exists already in card if not self . project_manager . getCard ( 'RAIN_INV_DISTANCE' ) and not self . project_manager . getCard ( 'RAIN_THIESSEN' ) : # if no type exists, then make it theissen self . _update_card ( 'RAIN_THIESSEN' , '' ) else : if interpolation_type . upper ( ) not in self . PRECIP_INTERP_TYPES : raise IndexError ( "Invalid interpolation_type {0}" . format ( interpolation_type ) ) interpolation_type = interpolation_type . upper ( ) if interpolation_type == "INV_DISTANCE" : self . _update_card ( 'RAIN_INV_DISTANCE' , '' ) self . project_manager . deleteCard ( 'RAIN_THIESSEN' , self . db_session ) else : self . _update_card ( 'RAIN_THIESSEN' , '' ) self . project_manager . deleteCard ( 'RAIN_INV_DISTANCE' , self . db_session )
Adds a precip file to project with interpolation_type
316
11
4,515
def prepare_gag_lsm ( self , lsm_precip_data_var , lsm_precip_type , interpolation_type = None ) : if self . l2g is None : raise ValueError ( "LSM converter not loaded ..." ) # remove uniform precip cards for unif_precip_card in self . UNIFORM_PRECIP_CARDS : self . project_manager . deleteCard ( unif_precip_card , self . db_session ) with tmp_chdir ( self . project_manager . project_directory ) : # PRECIPITATION CARD out_gage_file = '{0}.gag' . format ( self . project_manager . name ) self . l2g . lsm_precip_to_gssha_precip_gage ( out_gage_file , lsm_data_var = lsm_precip_data_var , precip_type = lsm_precip_type ) # SIMULATION TIME CARDS self . _update_simulation_end_from_lsm ( ) self . set_simulation_duration ( self . simulation_end - self . simulation_start ) # precip file read in self . add_precip_file ( out_gage_file , interpolation_type ) # make sure xarray dataset closed self . l2g . xd . close ( )
Prepares Gage output for GSSHA simulation
312
10
4,516
def prepare_rapid_streamflow ( self , path_to_rapid_qout , connection_list_file ) : ihg_filename = '{0}.ihg' . format ( self . project_manager . name ) with tmp_chdir ( self . project_manager . project_directory ) : # write out IHG file time_index_range = [ ] with RAPIDDataset ( path_to_rapid_qout , out_tzinfo = self . tz ) as qout_nc : time_index_range = qout_nc . get_time_index_range ( date_search_start = self . simulation_start , date_search_end = self . simulation_end ) if len ( time_index_range ) > 0 : time_array = qout_nc . get_time_array ( return_datetime = True , time_index_array = time_index_range ) # GSSHA STARTS INGESTING STREAMFLOW AT SECOND TIME STEP if self . simulation_start is not None : if self . simulation_start == time_array [ 0 ] : log . warning ( "First timestep of streamflow skipped " "in order for GSSHA to capture the streamflow." ) time_index_range = time_index_range [ 1 : ] time_array = time_array [ 1 : ] if len ( time_index_range ) > 0 : start_datetime = time_array [ 0 ] if self . simulation_start is None : self . _update_simulation_start ( start_datetime ) if self . simulation_end is None : self . simulation_end = time_array [ - 1 ] qout_nc . write_flows_to_gssha_time_series_ihg ( ihg_filename , connection_list_file , date_search_start = start_datetime , date_search_end = self . simulation_end , ) else : log . warning ( "No streamflow values found in time range ..." ) if len ( time_index_range ) > 0 : # update cards self . _update_simulation_start_cards ( ) self . _update_card ( "END_TIME" , self . simulation_end . strftime ( "%Y %m %d %H %M" ) ) self . _update_card ( "CHAN_POINT_INPUT" , ihg_filename , True ) # update duration self . set_simulation_duration ( self . simulation_end - self . simulation_start ) # UPDATE GMT CARD self . _update_gmt ( ) else : # cleanup os . remove ( ihg_filename ) self . project_manager . deleteCard ( 'CHAN_POINT_INPUT' , self . db_session )
Prepares RAPID streamflow for GSSHA simulation
614
12
4,517
def add_uniform_precip_event ( self , intensity , duration ) : self . project_manager . setCard ( 'PRECIP_UNIF' , '' ) self . project_manager . setCard ( 'RAIN_INTENSITY' , str ( intensity ) ) self . project_manager . setCard ( 'RAIN_DURATION' , str ( duration . total_seconds ( ) / 60.0 ) )
Add a uniform precip event
95
5
4,518
def _update_gmt ( self ) : if self . simulation_start is not None : # NOTE: Because of daylight savings time, # offset result depends on time of the year offset_string = str ( self . simulation_start . replace ( tzinfo = self . tz ) . utcoffset ( ) . total_seconds ( ) / 3600. ) self . _update_card ( 'GMT' , offset_string )
Based on timezone and start date the GMT card is updated
94
12
4,519
def prepare_hmet_lsm ( self , lsm_data_var_map_array , hmet_ascii_output_folder = None , netcdf_file_path = None ) : if self . l2g is None : raise ValueError ( "LSM converter not loaded ..." ) with tmp_chdir ( self . project_manager . project_directory ) : # GSSHA simulation does not work after HMET data is finished self . _update_simulation_end_from_lsm ( ) # HMET CARDS if netcdf_file_path is not None : self . l2g . lsm_data_to_subset_netcdf ( netcdf_file_path , lsm_data_var_map_array ) self . _update_card ( "HMET_NETCDF" , netcdf_file_path , True ) self . project_manager . deleteCard ( 'HMET_ASCII' , self . db_session ) else : if "{0}" in hmet_ascii_output_folder and "{1}" in hmet_ascii_output_folder : hmet_ascii_output_folder = hmet_ascii_output_folder . format ( self . simulation_start . strftime ( "%Y%m%d%H%M" ) , self . simulation_end . strftime ( "%Y%m%d%H%M" ) ) self . l2g . lsm_data_to_arc_ascii ( lsm_data_var_map_array , main_output_folder = os . path . join ( self . gssha_directory , hmet_ascii_output_folder ) ) self . _update_card ( "HMET_ASCII" , os . path . join ( hmet_ascii_output_folder , 'hmet_file_list.txt' ) , True ) self . project_manager . deleteCard ( 'HMET_NETCDF' , self . db_session ) # UPDATE GMT CARD self . _update_gmt ( )
Prepares HMET data for GSSHA simulation from land surface model data .
465
16
4,520
def get_remaining_width ( sample_string , max_terminal_width = None ) : if max_terminal_width is not None : available_width = min ( terminal_width ( ) , max_terminal_width ) else : available_width = terminal_width ( ) return available_width - len ( sample_string )
Returns the number of characters available if sample string were to be printed in the terminal .
74
17
4,521
def _define_csbi ( ) : if _WindowsCSBI . CSBI is not None : return class COORD ( ctypes . Structure ) : """Windows COORD structure. http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119""" _fields_ = [ ( 'X' , ctypes . c_short ) , ( 'Y' , ctypes . c_short ) ] class SmallRECT ( ctypes . Structure ) : """Windows SMALL_RECT structure. http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311""" _fields_ = [ ( 'Left' , ctypes . c_short ) , ( 'Top' , ctypes . c_short ) , ( 'Right' , ctypes . c_short ) , ( 'Bottom' , ctypes . c_short ) ] class ConsoleScreenBufferInfo ( ctypes . Structure ) : """Windows CONSOLE_SCREEN_BUFFER_INFO structure. http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093 """ _fields_ = [ ( 'dwSize' , COORD ) , ( 'dwCursorPosition' , COORD ) , ( 'wAttributes' , ctypes . wintypes . WORD ) , ( 'srWindow' , SmallRECT ) , ( 'dwMaximumWindowSize' , COORD ) ] _WindowsCSBI . CSBI = ConsoleScreenBufferInfo
Defines structs and populates _WindowsCSBI . CSBI .
325
15
4,522
def initialize ( ) : _WindowsCSBI . _define_csbi ( ) _WindowsCSBI . HANDLE_STDERR = _WindowsCSBI . HANDLE_STDERR or _WindowsCSBI . WINDLL . kernel32 . GetStdHandle ( - 12 ) _WindowsCSBI . HANDLE_STDOUT = _WindowsCSBI . HANDLE_STDOUT or _WindowsCSBI . WINDLL . kernel32 . GetStdHandle ( - 11 ) if _WindowsCSBI . WINDLL . kernel32 . GetConsoleScreenBufferInfo . argtypes : return _WindowsCSBI . WINDLL . kernel32 . GetStdHandle . argtypes = [ ctypes . wintypes . DWORD ] _WindowsCSBI . WINDLL . kernel32 . GetStdHandle . restype = ctypes . wintypes . HANDLE _WindowsCSBI . WINDLL . kernel32 . GetConsoleScreenBufferInfo . restype = ctypes . wintypes . BOOL _WindowsCSBI . WINDLL . kernel32 . GetConsoleScreenBufferInfo . argtypes = [ ctypes . wintypes . HANDLE , ctypes . POINTER ( _WindowsCSBI . CSBI ) ]
Initializes the WINDLL resource and populated the CSBI class variable .
265
15
4,523
def stencil ( * * kwargs ) : alnfile = kwargs . get ( 'alnfile' ) gtypefile = kwargs . get ( 'gtypefile' ) grpfile = kwargs . get ( 'grpfile' ) if grpfile is None : grpfile2chk = os . path . join ( DATA_DIR , 'ref.gene2transcripts.tsv' ) if os . path . exists ( grpfile2chk ) : grpfile = grpfile2chk else : print >> sys . stderr , '[gbrs::stencil] A group file is *not* given. Genotype will be stenciled as is.' # Load alignment incidence matrix ('alnfile' is assumed to be in multiway transcriptome) alnmat = emase . AlignmentPropertyMatrix ( h5file = alnfile , grpfile = grpfile ) # Load genotype calls hid = dict ( zip ( alnmat . hname , np . arange ( alnmat . num_haplotypes ) ) ) gid = dict ( zip ( alnmat . gname , np . arange ( len ( alnmat . gname ) ) ) ) gtmask = np . zeros ( ( alnmat . num_haplotypes , alnmat . num_loci ) ) gtcall_g = dict . fromkeys ( alnmat . gname ) with open ( gtypefile ) as fh : if grpfile is not None : gtcall_t = dict . fromkeys ( alnmat . lname ) for curline in dropwhile ( is_comment , fh ) : item = curline . rstrip ( ) . split ( "\t" ) g , gt = item [ : 2 ] gtcall_g [ g ] = gt hid2set = np . array ( [ hid [ c ] for c in gt ] ) tid2set = np . array ( alnmat . groups [ gid [ g ] ] ) gtmask [ np . meshgrid ( hid2set , tid2set ) ] = 1.0 for t in tid2set : gtcall_t [ alnmat . lname [ t ] ] = gt else : for curline in dropwhile ( is_comment , fh ) : item = curline . rstrip ( ) . split ( "\t" ) g , gt = item [ : 2 ] gtcall_g [ g ] = gt hid2set = np . array ( [ hid [ c ] for c in gt ] ) gtmask [ np . meshgrid ( hid2set , gid [ g ] ) ] = 1.0 alnmat . multiply ( gtmask , axis = 2 ) for h in xrange ( alnmat . num_haplotypes ) : alnmat . data [ h ] . eliminate_zeros ( ) outfile = kwargs . get ( 'outfile' ) if outfile is None : outfile = 'gbrs.stenciled.' + os . path . basename ( alnfile ) alnmat . save ( h5file = outfile )
Applying genotype calls to multi - way alignment incidence matrix
700
12
4,524
def register_items ( self , items ) : for item in items : item . set_parent ( self ) self . items . extend ( items )
Bulk register_item .
31
6
4,525
def endpoints ( self ) : children = [ item . endpoints ( ) for item in self . items ] return self . name , self . endpoint , children
Get all the endpoints under this node in a tree like structure .
33
14
4,526
def absolute_name ( self ) : if self . is_root ( ) or self . parent . is_root ( ) : return utils . slugify ( self . name ) return ':' . join ( [ self . parent . absolute_name , utils . slugify ( self . name ) ] )
Get the absolute name of self .
65
7
4,527
def absolute_url ( self ) : if self . is_root ( ) : return utils . concat_urls ( self . url ) return utils . concat_urls ( self . parent . absolute_url , self . url )
Get the absolute url of self .
53
7
4,528
def split_tracks ( lat , lon , * args ) : tracks = [ ] lt , ln = [ lat [ 0 ] ] , [ lon [ 0 ] ] zz = [ [ z [ 0 ] ] for z in args ] for i in range ( 1 , len ( lon ) ) : lt . append ( lat [ i ] ) for z , a in zip ( zz , args ) : z . append ( a [ i ] ) d1 = abs ( lon [ i ] - lon [ i - 1 ] ) d2 = abs ( ( lon [ i - 1 ] + 360 ) - lon [ i ] ) d3 = abs ( lon [ i - 1 ] - ( lon [ i ] + 360 ) ) if d2 < d1 : ln . append ( lon [ i ] - 360 ) tracks . append ( [ np . array ( lt ) , np . array ( ln ) ] + [ np . array ( z ) for z in zz ] ) lt = [ lat [ i - 1 ] , lat [ i ] ] ln = [ lon [ i - 1 ] + 360 , lon [ i ] ] zz = [ [ z [ i - 1 ] ] for z in args ] elif d3 < d1 : ln . append ( lon [ i ] + 360 ) tracks . append ( [ np . array ( lt ) , np . array ( ln ) ] + [ np . array ( z ) for z in zz ] ) lt = [ lat [ i - 1 ] , lat [ i ] ] ln = [ lon [ i - 1 ] - 360 , lon [ i ] ] zz = [ [ z [ i - 1 ] , z [ i ] ] for z in args ] else : ln . append ( lon [ i ] ) if len ( lt ) : tracks . append ( [ np . array ( lt ) , np . array ( ln ) ] + [ np . array ( z ) for z in zz ] ) return tracks
assumes eastward motion
446
5
4,529
def str_rate ( self ) : # Handle special cases. if not self . _eta . started or self . _eta . stalled or not self . rate : return '--.-KiB/s' unit_rate , unit = UnitByte ( self . _eta . rate_overall if self . done else self . rate ) . auto if unit_rate >= 100 : formatter = '%d' elif unit_rate >= 10 : formatter = '%.1f' else : formatter = '%.2f' return '{0}{1}/s' . format ( locale . format ( formatter , unit_rate , grouping = False ) , unit )
Returns the rate with formatting . If done returns the overall rate instead .
144
14
4,530
def str_rate ( self ) : # Handle special cases. if not self . _eta . started or self . _eta . stalled or not self . rate : return '--- KiB/s' unit_rate , unit = UnitByte ( self . rate ) . auto_no_thousands if unit_rate >= 10 : formatter = '%d' else : formatter = '%0.1f' return '{0} {1}/s' . format ( locale . format ( formatter , unit_rate , grouping = False ) , unit )
Returns the rate with formatting .
120
6
4,531
def init_db ( sqlalchemy_url ) : engine = create_engine ( sqlalchemy_url ) start = time . time ( ) metadata . create_all ( engine ) return time . time ( ) - start
Initialize database with gsshapy tables
47
8
4,532
def get_sessionmaker ( sqlalchemy_url , engine = None ) : if engine is None : engine = create_engine ( sqlalchemy_url ) return sessionmaker ( bind = engine )
Create session with database to work in
42
7
4,533
def get_project_session ( project_name , project_directory , map_type = None ) : sqlalchemy_url , sql_engine = init_sqlite_memory ( ) gdb_sessionmaker = get_sessionmaker ( sqlalchemy_url , sql_engine ) project_manager = ProjectFile ( name = project_name , project_directory = project_directory , map_type = map_type ) return project_manager , gdb_sessionmaker
Load project manager and in memory sqlite db sessionmaker for GSSHA project
100
16
4,534
def get_settings ( config_uri , section = None , defaults = None ) : loader = get_loader ( config_uri ) return loader . get_settings ( section , defaults )
Load the settings from a named section .
39
8
4,535
def find_loaders ( scheme , protocols = None ) : # build a list of all required entry points matching_groups = [ 'plaster.loader_factory' ] if protocols : matching_groups += [ 'plaster.{0}_loader_factory' . format ( proto ) for proto in protocols ] scheme = scheme . lower ( ) # if a distribution is specified then it overrides the default search parts = scheme . split ( '+' , 1 ) if len ( parts ) == 2 : try : distro = pkg_resources . get_distribution ( parts [ 0 ] ) except pkg_resources . DistributionNotFound : pass else : ep = _find_ep_in_dist ( distro , parts [ 1 ] , matching_groups ) # if we got one or more loaders from a specific distribution # then they override everything else so we'll just return them if ep : return [ EntryPointLoaderInfo ( ep , protocols ) ] # find any distributions supporting the default loader protocol possible_entry_points = [ ep for ep in pkg_resources . iter_entry_points ( 'plaster.loader_factory' ) if scheme is None or scheme == ep . name . lower ( ) ] distros = { ep . dist for ep in possible_entry_points } matched_entry_points = list ( filter ( None , [ _find_ep_in_dist ( distro , scheme , matching_groups ) for distro in distros ] ) ) return [ EntryPointLoaderInfo ( ep , protocols = protocols ) for ep in matched_entry_points ]
Find all loaders that match the requested scheme and protocols .
338
12
4,536
def combine_dicts ( * dicts , copy = False , base = None ) : if len ( dicts ) == 1 and base is None : # Only one input dict. cd = dicts [ 0 ] . copy ( ) else : cd = { } if base is None else base # Initialize empty dict. for d in dicts : # Combine dicts. if d : # noinspection PyTypeChecker cd . update ( d ) # Return combined dict. return { k : _copy . deepcopy ( v ) for k , v in cd . items ( ) } if copy else cd
Combines multiple dicts in one .
127
8
4,537
def kk_dict ( * kk , * * adict ) : for k in kk : if isinstance ( k , dict ) : if not set ( k ) . isdisjoint ( adict ) : raise ValueError ( 'keyword argument repeated' ) adict . update ( k ) elif k in adict : raise ValueError ( 'keyword argument repeated' ) else : adict [ k ] = k return adict
Merges and defines dictionaries with values identical to keys .
95
12
4,538
def bypass ( * inputs , copy = False ) : if len ( inputs ) == 1 : inputs = inputs [ 0 ] # Same inputs. return _copy . deepcopy ( inputs ) if copy else inputs
Returns the same arguments .
42
5
4,539
def map_dict ( key_map , * dicts , copy = False , base = None ) : it = combine_dicts ( * dicts ) . items ( ) # Combine dicts. get = key_map . get # Namespace shortcut. # Return mapped dict. return combine_dicts ( { get ( k , k ) : v for k , v in it } , copy = copy , base = base )
Returns a dict with new key values .
90
8
4,540
def map_list ( key_map , * inputs , copy = False , base = None ) : d = { } if base is None else base # Initialize empty dict. for m , v in zip ( key_map , inputs ) : if isinstance ( m , dict ) : map_dict ( m , v , base = d ) # Apply a map dict. elif isinstance ( m , list ) : map_list ( m , * v , base = d ) # Apply a map list. else : d [ m ] = v # Apply map. return combine_dicts ( copy = copy , base = d )
Returns a new dict .
133
5
4,541
def selector ( keys , dictionary , copy = False , output_type = 'dict' , allow_miss = False ) : if not allow_miss : # noinspection PyUnusedLocal def check ( key ) : return True else : def check ( key ) : return key in dictionary if output_type == 'list' : # Select as list. res = [ dictionary [ k ] for k in keys if check ( k ) ] return _copy . deepcopy ( res ) if copy else res elif output_type == 'values' : return bypass ( * [ dictionary [ k ] for k in keys if check ( k ) ] , copy = copy ) # Select as dict. return bypass ( { k : dictionary [ k ] for k in keys if check ( k ) } , copy = copy )
Selects the chosen dictionary keys from the given dictionary .
168
11
4,542
def replicate_value ( value , n = 2 , copy = True ) : return bypass ( * [ value ] * n , copy = copy )
Replicates n times the input value .
30
8
4,543
def stack_nested_keys ( nested_dict , key = ( ) , depth = - 1 ) : if depth != 0 and hasattr ( nested_dict , 'items' ) : for k , v in nested_dict . items ( ) : yield from stack_nested_keys ( v , key = key + ( k , ) , depth = depth - 1 ) else : yield key , nested_dict
Stacks the keys of nested - dictionaries into tuples and yields a list of k - v pairs .
87
22
4,544
def are_in_nested_dicts ( nested_dict , * keys ) : if keys : # noinspection PyBroadException try : return are_in_nested_dicts ( nested_dict [ keys [ 0 ] ] , * keys [ 1 : ] ) except Exception : # Key error or not a dict. return False return True
Nested keys are inside of nested - dictionaries .
74
11
4,545
def combine_nested_dicts ( * nested_dicts , depth = - 1 , base = None ) : if base is None : base = { } for nested_dict in nested_dicts : for k , v in stack_nested_keys ( nested_dict , depth = depth ) : while k : # noinspection PyBroadException try : get_nested_dicts ( base , * k [ : - 1 ] ) [ k [ - 1 ] ] = v break except Exception : # A branch of the nested_dict is longer than the base. k = k [ : - 1 ] v = get_nested_dicts ( nested_dict , * k ) return base
Merge nested - dictionaries .
149
7
4,546
def add_function ( dsp , inputs_kwargs = False , inputs_defaults = False , * * kw ) : def decorator ( f ) : dsp . add_func ( f , inputs_kwargs = inputs_kwargs , inputs_defaults = inputs_defaults , * * kw ) return f return decorator
Decorator to add a function to a dispatcher .
74
11
4,547
def blue ( self , memo = None ) : memo = { } if memo is None else memo if self not in memo : import inspect from . blue import Blueprint , _parent_blue keys = tuple ( inspect . signature ( self . __init__ ) . parameters ) memo [ self ] = Blueprint ( * * { k : _parent_blue ( v , memo ) for k , v in self . __dict__ . items ( ) if k in keys } ) . _set_cls ( self . __class__ ) return memo [ self ]
Constructs a Blueprint out of the current object .
114
10
4,548
def value_from_datadict ( self , data , files , name ) : value = super ( FileSizeWidget , self ) . value_from_datadict ( data , files , name ) if value not in EMPTY_VALUES : try : return parse_size ( value ) except ValueError : pass return value
Given a dictionary of data and this widget s name returns the value of this widget . Returns None if it s not provided .
69
25
4,549
def connect_ssh_with_cb ( ssh_cb , user , host , auth_cb , allow_new = True , verbosity = 0 ) : with connect_ssh ( user , host , auth_cb , allow_new = True , verbosity = 0 ) as ssh : ssh_cb ( ssh )
A managed SSH session . When the session is ready we ll invoke the ssh_cb callback .
66
19
4,550
def connect_sftp_with_cb ( sftp_cb , * args , * * kwargs ) : with _connect_sftp ( * args , * * kwargs ) as ( ssh , sftp ) : sftp_cb ( ssh , sftp )
A managed SFTP session . When the SSH session and an additional SFTP session are ready invoke the sftp_cb callback .
65
27
4,551
def get_key_auth_cb ( key_filepath ) : def auth_cb ( ssh ) : key = ssh_pki_import_privkey_file ( key_filepath ) ssh . userauth_publickey ( key ) return auth_cb
This is just a convenience function for key - based login .
56
12
4,552
def add_edge_fun ( graph ) : # Namespace shortcut for speed. succ , pred , node = graph . _succ , graph . _pred , graph . _node def add_edge ( u , v , * * attr ) : if v not in succ : # Add nodes. succ [ v ] , pred [ v ] , node [ v ] = { } , { } , { } succ [ u ] [ v ] = pred [ v ] [ u ] = attr # Add the edge. return add_edge
Returns a function that adds an edge to the graph checking only the out node .
113
16
4,553
def remove_edge_fun ( graph ) : # Namespace shortcut for speed. rm_edge , rm_node = graph . remove_edge , graph . remove_node from networkx import is_isolate def remove_edge ( u , v ) : rm_edge ( u , v ) # Remove the edge. if is_isolate ( graph , v ) : # Check if v is isolate. rm_node ( v ) # Remove the isolate out node. return remove_edge
Returns a function that removes an edge from the graph .
102
11
4,554
def get_unused_node_id ( graph , initial_guess = 'unknown' , _format = '{}<%d>' ) : has_node = graph . has_node # Namespace shortcut for speed. n = counter ( ) # Counter. node_id_format = _format . format ( initial_guess ) # Node id format. node_id = initial_guess # Initial guess. while has_node ( node_id ) : # Check if node id is used. node_id = node_id_format % n ( ) # Guess. return node_id
Finds an unused node id in graph .
129
9
4,555
def add_func_edges ( dsp , fun_id , nodes_bunch , edge_weights = None , input = True , data_nodes = None ) : # Namespace shortcut for speed. add_edge = _add_edge_dmap_fun ( dsp . dmap , edge_weights ) node , add_data = dsp . dmap . nodes , dsp . add_data remove_nodes = dsp . dmap . remove_nodes_from # Define an error message. msg = 'Invalid %sput id: {} is not a data node' % [ 'out' , 'in' ] [ input ] i , j = ( 'i' , 'o' ) if input else ( 'o' , 'i' ) data_nodes = data_nodes or [ ] # Update data nodes. for u in nodes_bunch : # Iterate nodes. try : if node [ u ] [ 'type' ] != 'data' : # The node is not a data node. data_nodes . append ( fun_id ) # Add function id to be removed. remove_nodes ( data_nodes ) # Remove function and new data nodes. raise ValueError ( msg . format ( u ) ) # Raise error. except KeyError : data_nodes . append ( add_data ( data_id = u ) ) # Add new data node. add_edge ( * * { i : u , j : fun_id , 'w' : u } ) # Add edge. return data_nodes
Adds function node edges .
336
5
4,556
def _add_edge_dmap_fun ( graph , edges_weights = None ) : add = graph . add_edge # Namespace shortcut for speed. if edges_weights is not None : def add_edge ( i , o , w ) : if w in edges_weights : add ( i , o , weight = edges_weights [ w ] ) # Weighted edge. else : add ( i , o ) # Normal edge. else : # noinspection PyUnusedLocal def add_edge ( i , o , w ) : add ( i , o ) # Normal edge. return add_edge
Adds edge to the dispatcher map .
129
7
4,557
def _get_node ( nodes , node_id , fuzzy = True ) : try : return node_id , nodes [ node_id ] # Return dispatcher node and its id. except KeyError as ex : if fuzzy : it = sorted ( nodes . items ( ) ) n = next ( ( ( k , v ) for k , v in it if node_id in k ) , EMPTY ) if n is not EMPTY : return n raise ex
Returns a dispatcher node that match the given node id .
95
11
4,558
def get_full_pipe ( sol , base = ( ) ) : pipe , i = DspPipe ( ) , len ( base ) for p in sol . _pipe : n , s = p [ - 1 ] d = s . dsp p = { 'task' : p } if n in s . _errors : p [ 'error' ] = s . _errors [ n ] node_id = s . full_name + ( n , ) assert base == node_id [ : i ] , '%s != %s' % ( node_id [ : i ] , base ) n_id = node_id [ i : ] n , path = d . get_node ( n , node_attr = None ) if n [ 'type' ] == 'function' and 'function' in n : try : sub_sol = s . workflow . node [ path [ - 1 ] ] [ 'solution' ] sp = get_full_pipe ( sub_sol , base = node_id ) if sp : p [ 'sub_pipe' ] = sp except KeyError : pass pipe [ bypass ( * n_id ) ] = p return pipe
Returns the full pipe of a dispatch run .
249
9
4,559
def connectChunk ( key , chunk ) : schunk = chunk [ 0 ] . strip ( ) . split ( ) result = { 'slinkNumber' : schunk [ 1 ] , 'upSjunc' : schunk [ 2 ] , 'downSjunc' : schunk [ 3 ] } return result
Parse Storm Pipe CONNECT Chunk Method
68
9
4,560
def get_items ( self , page = 1 , order_by = None , filters = None ) : start = ( page - 1 ) * self . per_page query = self . get_query ( ) if order_by is not None : query = query . order_by ( self . _get_field ( order_by ) ) if filters is not None : query = self . _filter ( query , filters ) return query . offset ( start ) . limit ( self . per_page ) , self . count ( query )
Fetch database for items matching .
112
7
4,561
def _read ( self , directory , filename , session , path , name , extension , spatial , spatialReferenceID , replaceParamFile ) : # Persist name and extension of file self . name = name self . fileExtension = extension # Open file and parse into a data structure with open ( path , 'r' ) as f : self . text = f . read ( )
Generic File Read from File Method
79
6
4,562
def isdisjoint ( self , other ) : if isinstance ( other , _sequence_types + ( BaseMultiset , ) ) : pass elif not isinstance ( other , Container ) : other = self . _as_multiset ( other ) return all ( element not in other for element in self . _elements . keys ( ) )
r Return True if the set has no elements in common with other .
76
14
4,563
def difference ( self , * others ) : result = self . __copy__ ( ) _elements = result . _elements _total = result . _total for other in map ( self . _as_multiset , others ) : for element , multiplicity in other . items ( ) : if element in _elements : old_multiplicity = _elements [ element ] new_multiplicity = old_multiplicity - multiplicity if new_multiplicity > 0 : _elements [ element ] = new_multiplicity _total -= multiplicity else : del _elements [ element ] _total -= old_multiplicity result . _total = _total return result
r Return a new multiset with all elements from the others removed .
142
15
4,564
def union ( self , * others ) : result = self . __copy__ ( ) _elements = result . _elements _total = result . _total for other in map ( self . _as_mapping , others ) : for element , multiplicity in other . items ( ) : old_multiplicity = _elements . get ( element , 0 ) if multiplicity > old_multiplicity : _elements [ element ] = multiplicity _total += multiplicity - old_multiplicity result . _total = _total return result
r Return a new multiset with all elements from the multiset and the others with maximal multiplicities .
114
24
4,565
def intersection ( self , * others ) : result = self . __copy__ ( ) _elements = result . _elements _total = result . _total for other in map ( self . _as_mapping , others ) : for element , multiplicity in list ( _elements . items ( ) ) : new_multiplicity = other . get ( element , 0 ) if new_multiplicity < multiplicity : if new_multiplicity > 0 : _elements [ element ] = new_multiplicity _total -= multiplicity - new_multiplicity else : del _elements [ element ] _total -= multiplicity result . _total = _total return result
r Return a new multiset with elements common to the multiset and all others .
141
19
4,566
def symmetric_difference ( self , other ) : other = self . _as_multiset ( other ) result = self . __class__ ( ) _total = 0 _elements = result . _elements self_elements = self . _elements other_elements = other . _elements dist_elements = set ( self_elements . keys ( ) ) | set ( other_elements . keys ( ) ) for element in dist_elements : multiplicity = self_elements . get ( element , 0 ) other_multiplicity = other_elements . get ( element , 0 ) new_multiplicity = ( multiplicity - other_multiplicity if multiplicity > other_multiplicity else other_multiplicity - multiplicity ) _total += new_multiplicity if new_multiplicity > 0 : _elements [ element ] = new_multiplicity result . _total = _total return result
r Return a new set with elements in either the set or other but not both .
198
17
4,567
def times ( self , factor ) : if factor == 0 : return self . __class__ ( ) if factor < 0 : raise ValueError ( 'The factor must no be negative.' ) result = self . __copy__ ( ) _elements = result . _elements for element in _elements : _elements [ element ] *= factor result . _total *= factor return result
Return a new set with each element s multiplicity multiplied with the given scalar factor .
82
18
4,568
def union_update ( self , * others ) : _elements = self . _elements _total = self . _total for other in map ( self . _as_mapping , others ) : for element , multiplicity in other . items ( ) : old_multiplicity = _elements . get ( element , 0 ) if multiplicity > old_multiplicity : _elements [ element ] = multiplicity _total += multiplicity - old_multiplicity self . _total = _total
r Update the multiset adding elements from all others using the maximum multiplicity .
105
17
4,569
def intersection_update ( self , * others ) : for other in map ( self . _as_mapping , others ) : for element , current_count in list ( self . items ( ) ) : multiplicity = other . get ( element , 0 ) if multiplicity < current_count : self [ element ] = multiplicity
r Update the multiset keeping only elements found in it and all others .
69
16
4,570
def difference_update ( self , * others ) : for other in map ( self . _as_multiset , others ) : for element , multiplicity in other . items ( ) : self . discard ( element , multiplicity )
r Remove all elements contained the others from this multiset .
49
13
4,571
def symmetric_difference_update ( self , other ) : other = self . _as_multiset ( other ) elements = set ( self . distinct_elements ( ) ) | set ( other . distinct_elements ( ) ) for element in elements : multiplicity = self [ element ] other_count = other [ element ] self [ element ] = ( multiplicity - other_count if multiplicity > other_count else other_count - multiplicity )
r Update the multiset to contain only elements in either this multiset or the other but not both .
99
23
4,572
def times_update ( self , factor ) : if factor < 0 : raise ValueError ( "The factor must not be negative." ) elif factor == 0 : self . clear ( ) else : _elements = self . _elements for element in _elements : _elements [ element ] *= factor self . _total *= factor
Update each this multiset by multiplying each element s multiplicity with the given scalar factor .
73
20
4,573
def add ( self , element , multiplicity = 1 ) : if multiplicity < 1 : raise ValueError ( "Multiplicity must be positive" ) self . _elements [ element ] += multiplicity self . _total += multiplicity
Adds an element to the multiset .
50
9
4,574
def remove ( self , element , multiplicity = None ) : _elements = self . _elements if element not in _elements : raise KeyError old_multiplicity = _elements . get ( element , 0 ) if multiplicity is None or multiplicity >= old_multiplicity : del _elements [ element ] self . _total -= old_multiplicity elif multiplicity < 0 : raise ValueError ( "Multiplicity must be not be negative" ) elif multiplicity > 0 : _elements [ element ] -= multiplicity self . _total -= multiplicity return old_multiplicity
Removes an element from the multiset .
128
10
4,575
def discard ( self , element , multiplicity = None ) : _elements = self . _elements if element in _elements : old_multiplicity = _elements [ element ] if multiplicity is None or multiplicity >= old_multiplicity : del _elements [ element ] self . _total -= old_multiplicity elif multiplicity < 0 : raise ValueError ( "Multiplicity must not be negative" ) elif multiplicity > 0 : _elements [ element ] -= multiplicity self . _total -= multiplicity return old_multiplicity else : return 0
Removes the element from the multiset .
123
10
4,576
def shutdown_executors ( wait = True ) : return { k : shutdown_executor ( k , wait ) for k in list ( _EXECUTORS . keys ( ) ) }
Clean - up the resources of all initialized executors .
41
11
4,577
def async_thread ( sol , args , node_attr , node_id , * a , * * kw ) : executor = _get_executor ( _executor_name ( kw . get ( 'executor' , False ) , sol . dsp ) ) if not executor : return sol . _evaluate_node ( args , node_attr , node_id , * a , * * kw ) futures = args if node_attr [ 'type' ] == 'data' and ( node_attr [ 'wait_inputs' ] or 'function' in node_attr ) : futures = args [ 0 ] . values ( ) from concurrent . futures import Future futures = { v for v in futures if isinstance ( v , Future ) } def _submit ( ) : return executor . thread ( _async_eval , sol , args , node_attr , node_id , * a , * * kw ) if futures : # Chain results. result = Future ( ) def _set_res ( fut ) : try : result . set_result ( fut . result ( ) ) except BaseException as ex : result . set_exception ( ex ) def _submit_task ( fut = None ) : futures . discard ( fut ) not futures and _submit ( ) . add_done_callback ( _set_res ) for f in list ( futures ) : f . add_done_callback ( _submit_task ) else : result = _submit ( ) timeout = node_attr . get ( 'await_result' , False ) if timeout is not False : return _await_result ( result , timeout , sol , node_id ) n = len ( node_attr . get ( 'outputs' , [ ] ) ) return AsyncList ( future = result , n = n ) if n > 1 else result
Execute sol . _evaluate_node in an asynchronous thread .
394
13
4,578
def await_result ( obj , timeout = None ) : from concurrent . futures import Future return obj . result ( timeout ) if isinstance ( obj , Future ) else obj
Return the result of a Future object .
35
8
4,579
def pivot ( table , left , top , value ) : rs = { } ysort = [ ] xsort = [ ] for row in table : yaxis = tuple ( [ row [ c ] for c in left ] ) # e.g. yaxis = ('Simon',) if yaxis not in ysort : ysort . append ( yaxis ) xaxis = tuple ( [ row [ c ] for c in top ] ) # e.g. xaxis = ('2004',) if xaxis not in xsort : xsort . append ( xaxis ) try : rs [ yaxis ] except KeyError : rs [ yaxis ] = { } if xaxis not in rs [ yaxis ] : rs [ yaxis ] [ xaxis ] = 0 rs [ yaxis ] [ xaxis ] += row [ value ] """ In the following loop we take care of missing data, e.g 'Eric' has a value in 2004 but not in 2005 """ for key in rs : if len ( rs [ key ] ) - len ( xsort ) : for var in xsort : if var not in rs [ key ] . keys ( ) : rs [ key ] [ var ] = '' headings = list ( left ) headings . extend ( xsort ) t = [ ] """ The lists 'sortedkeys' and 'sortedvalues' make sure that even if the field 'top' is unordered, data will be transposed correctly. E.g. in the example above the table rows are not ordered by the year """ for left in ysort : row = list ( left ) sortedkeys = sorted ( rs [ left ] . keys ( ) ) sortedvalues = map ( rs [ left ] . get , sortedkeys ) row . extend ( sortedvalues ) t . append ( dict ( zip ( headings , row ) ) ) return t
Creates a cross - tab or pivot table from a normalised input table . Use this function to denormalize a table of normalized records .
387
29
4,580
def download_hrrr_for_gssha ( main_directory , forecast_start_date_string , #EX. '20160913' forecast_start_hour_string , #EX. '00' to '23' leftlon = - 180 , rightlon = 180 , toplat = 90 , bottomlat = - 90 ) : out_directory = path . join ( main_directory , forecast_start_date_string ) try : mkdir ( out_directory ) except OSError : pass forecast_timestep_hour_string_array = [ '00' , '01' , '02' , '03' , '04' , '05' , '06' , '07' , '08' , '09' , '10' , '11' , '12' , '13' , '14' , '15' , '16' , '17' , '18' ] downloaded_file_list = [ ] for forecast_timestep_hour_string in forecast_timestep_hour_string_array : file_name = 'hrrr.t{0}z.wrfsfcf{1}.grib2' . format ( forecast_start_hour_string , forecast_timestep_hour_string ) payload = { 'file' : file_name , 'lev_10_m_above_ground' : 'on' , 'lev_2_m_above_ground' : 'on' , 'lev_entire_atmosphere' : 'on' , 'lev_surface' : 'on' , 'var_DSWRF' : 'on' , 'var_PRATE' : 'on' , 'var_PRES' : 'on' , 'var_RH' : 'on' , 'var_TMP' : 'on' , 'var_UGRD' : 'on' , 'var_VGRD' : 'on' , 'var_TCDC' : 'on' , 'subregion' : '' , 'leftlon' : str ( leftlon ) , 'rightlon' : str ( rightlon ) , 'toplat' : str ( toplat ) , 'bottomlat' : str ( bottomlat ) , 'dir' : '/hrrr.{0}' . format ( forecast_start_date_string ) , } r = requests . get ( 'http://nomads.ncep.noaa.gov/cgi-bin/filter_hrrr_2d.pl' , params = payload , stream = True ) if r . status_code == requests . codes . ok : out_file = path . join ( out_directory , file_name ) downloaded_file_list . append ( out_file ) with open ( out_file , 'wb' ) as fd : for chunk in r . iter_content ( chunk_size = 1024 ) : fd . write ( chunk ) else : log . error ( "Problem downloading {0}" . format ( file_name ) ) for filename in downloaded_file_list : try : remove ( filename ) except OSError : pass downloaded_file_list = [ ] break return downloaded_file_list
Function to download HRRR data for GSSHA
688
10
4,581
def _patch_resource ( self , method ) : resource = self . client . get_resource ( "" , self . resource . path , method ) if not resource : raise UnsupportedResourceMethodError ( self . resource . path , method ) self . resource = resource
Patch the current RAML ResourceNode by the resource with the correct method if it exists
55
17
4,582
def parse_raml ( self ) : if utils . is_url ( self . ramlfile ) : raml = utils . download_file ( self . ramlfile ) else : with codecs . open ( self . ramlfile , "rb" , encoding = "utf-8" ) as raml_f : raml = raml_f . read ( ) loader = ramlfications . loads ( raml ) config = ramlfications . setup_config ( self . ramlconfig ) self . raml = ramlfications . parse_raml ( loader , config )
Parse RAML file
128
5
4,583
def get_resource ( self , base_resource_path , resource_path , method = None ) : basic_path = base_resource_path + resource_path dynamic_path = base_resource_path + "{" + resource_path + "}" for resource in self . raml . resources : method_matched = method is None or resource . method == method if method_matched and ( resource . path == basic_path or resource . path == basic_path + '/' ) : return resource if resource . path == dynamic_path and method_matched : return NodeParameter ( resource = resource , parameter = resource_path ) return None
Gets a resource by it s path and optional by it s method
134
14
4,584
def auto_no_thousands ( self ) : if self . _value >= 1000000000000 : return self . TiB , 'TiB' if self . _value >= 1000000000 : return self . GiB , 'GiB' if self . _value >= 1000000 : return self . MiB , 'MiB' if self . _value >= 1000 : return self . KiB , 'KiB' else : return self . B , 'B'
Like self . auto but calculates the next unit if > 999 . 99 .
98
15
4,585
def error ( message , code = 1 ) : if message : print ( 'ERROR: {0}' . format ( message ) , file = sys . stderr ) else : print ( file = sys . stderr ) sys . exit ( code )
Prints an error message to stderr and exits with a status of 1 by default .
54
19
4,586
def update_hmet_card_file ( hmet_card_file_path , new_hmet_data_path ) : hmet_card_file_path_temp = "{0}_tmp" . format ( hmet_card_file_path ) try : remove ( hmet_card_file_path_temp ) except OSError : pass copy ( hmet_card_file_path , hmet_card_file_path_temp ) with io_open ( hmet_card_file_path_temp , 'w' , newline = '\r\n' ) as out_hmet_list_file : with open ( hmet_card_file_path ) as old_hmet_list_file : for date_path in old_hmet_list_file : out_hmet_list_file . write ( u"{0}\n" . format ( path . join ( new_hmet_data_path , path . basename ( date_path ) ) ) ) try : remove ( hmet_card_file_path ) except OSError : pass rename ( hmet_card_file_path_temp , hmet_card_file_path )
This function updates the paths in the HMET card file to the new location of the HMET data . This is necessary because the file paths are absolute and will need to be updated if moved .
266
39
4,587
def _set_subset_indices ( self , y_min , y_max , x_min , x_max ) : y_coords , x_coords = self . xd . lsm . coords dx = self . xd . lsm . dx dy = self . xd . lsm . dy lsm_y_indices_from_y , lsm_x_indices_from_y = np . where ( ( y_coords >= ( y_min - 2 * dy ) ) & ( y_coords <= ( y_max + 2 * dy ) ) ) lsm_y_indices_from_x , lsm_x_indices_from_x = np . where ( ( x_coords >= ( x_min - 2 * dx ) ) & ( x_coords <= ( x_max + 2 * dx ) ) ) lsm_y_indices = np . intersect1d ( lsm_y_indices_from_y , lsm_y_indices_from_x ) lsm_x_indices = np . intersect1d ( lsm_x_indices_from_y , lsm_x_indices_from_x ) self . xslice = slice ( np . amin ( lsm_x_indices ) , np . amax ( lsm_x_indices ) + 1 ) self . yslice = slice ( np . amin ( lsm_y_indices ) , np . amax ( lsm_y_indices ) + 1 )
load subset based on extent
345
5
4,588
def _time_to_string ( self , dt , conversion_string = "%Y %m %d %H %M" ) : if self . output_timezone is not None : dt = dt . replace ( tzinfo = utc ) . astimezone ( self . output_timezone ) return dt . strftime ( conversion_string )
This converts a UTC time integer to a string
79
9
4,589
def _load_lsm_data ( self , data_var , conversion_factor = 1 , calc_4d_method = None , calc_4d_dim = None , time_step = None ) : data = self . xd . lsm . getvar ( data_var , yslice = self . yslice , xslice = self . xslice , calc_4d_method = calc_4d_method , calc_4d_dim = calc_4d_dim ) if isinstance ( time_step , datetime ) : data = data . loc [ { self . lsm_time_dim : [ pd . to_datetime ( time_step ) ] } ] elif time_step is not None : data = data [ { self . lsm_time_dim : [ time_step ] } ] data = data . fillna ( 0 ) data . values *= conversion_factor return data
This extracts the LSM data from a folder of netcdf files
200
14
4,590
def _check_lsm_input ( self , data_var_map_array ) : REQUIRED_HMET_VAR_LIST = [ 'Prcp' , 'Pres' , 'Temp' , 'Clod' , 'RlHm' , 'Drad' , 'Grad' , 'WndS' ] # make sure all required variables exist given_hmet_var_list = [ ] for gssha_data_var , lsm_data_var in data_var_map_array : gssha_data_hmet_name = self . netcdf_attributes [ gssha_data_var ] [ 'hmet_name' ] if gssha_data_hmet_name in given_hmet_var_list : raise ValueError ( "Duplicate parameter for HMET variable {0}" . format ( gssha_data_hmet_name ) ) else : given_hmet_var_list . append ( gssha_data_hmet_name ) for REQUIRED_HMET_VAR in REQUIRED_HMET_VAR_LIST : if REQUIRED_HMET_VAR not in given_hmet_var_list : raise ValueError ( "ERROR: HMET param is required to continue " "{0} ..." . format ( REQUIRED_HMET_VAR ) )
This function checks the input var map array to ensure the required input variables exist
305
15
4,591
def _resample_data ( self , gssha_var ) : self . data = self . data . lsm . resample ( gssha_var , self . gssha_grid )
This function resamples the data to match the GSSHA grid IN TESTING MODE
44
18
4,592
def _convert_data_to_hourly ( self , gssha_data_var ) : time_step_hours = np . diff ( self . data . time ) [ 0 ] / np . timedelta64 ( 1 , 'h' ) calc_function = self . _get_calc_function ( gssha_data_var ) resampled_data = None if time_step_hours < 1 : resampled_data = self . data . resample ( '1H' , dim = 'time' , how = calc_function , keep_attrs = True ) elif time_step_hours > 1 : resampled_data = self . data . resample ( '1H' , dim = 'time' , keep_attrs = True ) for time_idx in range ( self . data . dims [ 'time' ] ) : if time_idx + 1 < self . data . dims [ 'time' ] : # interpolate between time steps start_time = self . data . time [ time_idx ] . values end_time = self . data . time [ time_idx + 1 ] . values slope_timeslice = slice ( str ( start_time ) , str ( end_time ) ) slice_size = resampled_data . sel ( time = slope_timeslice ) . dims [ 'time' ] - 1 first_timestep = resampled_data . sel ( time = str ( start_time ) ) [ gssha_data_var ] slope = ( resampled_data . sel ( time = str ( end_time ) ) [ gssha_data_var ] - first_timestep ) / float ( slice_size ) data_timeslice = slice ( str ( start_time + np . timedelta64 ( 1 , 'm' ) ) , str ( end_time - np . timedelta64 ( 1 , 'm' ) ) ) data_subset = resampled_data . sel ( time = data_timeslice ) for xidx in range ( data_subset . dims [ 'time' ] ) : data_subset [ gssha_data_var ] [ xidx ] = first_timestep + slope * ( xidx + 1 ) else : # just continue to repeat the timestep start_time = self . data . time [ time_idx ] . values end_time = resampled_data . time [ - 1 ] . values if end_time > start_time : first_timestep = resampled_data . sel ( time = str ( start_time ) ) [ gssha_data_var ] data_timeslice = slice ( str ( start_time ) , str ( end_time ) ) data_subset = resampled_data . sel ( time = data_timeslice ) slice_size = 1 if calc_function == "mean" : slice_size = data_subset . dims [ 'time' ] for xidx in range ( data_subset . dims [ 'time' ] ) : data_subset [ gssha_data_var ] [ xidx ] = first_timestep / float ( slice_size ) if resampled_data is not None : # make sure coordinates copied if self . data . lsm . x_var not in resampled_data . coords : resampled_data . coords [ self . data . lsm . x_var ] = self . data . coords [ self . data . lsm . x_var ] if self . data . lsm . y_var not in resampled_data . coords : resampled_data . coords [ self . data . lsm . y_var ] = self . data . coords [ self . data . lsm . y_var ] self . data = resampled_data
This function converts the data to hourly data and then puts it into the data_np_array USED WHEN GENERATING HMET DATA ONLY
868
29
4,593
def lsm_var_to_grid ( self , out_grid_file , lsm_data_var , gssha_convert_var , time_step = 0 , ascii_format = 'grass' ) : self . _load_converted_gssha_data_from_lsm ( gssha_convert_var , lsm_data_var , 'grid' , time_step ) gssha_data_var_name = self . netcdf_attributes [ gssha_convert_var ] [ 'gssha_name' ] self . data = self . data . lsm . to_projection ( gssha_data_var_name , projection = self . gssha_grid . projection ) self . _resample_data ( gssha_data_var_name ) arr_grid = ArrayGrid ( in_array = self . data [ gssha_data_var_name ] . values , wkt_projection = self . data . lsm . projection . ExportToWkt ( ) , geotransform = self . data . lsm . geotransform ) if ascii_format . strip ( ) . lower ( ) == 'grass' : arr_grid . to_grass_ascii ( out_grid_file ) elif ascii_format . strip ( ) . lower ( ) == 'arc' : arr_grid . to_arc_ascii ( out_grid_file ) else : raise ValueError ( "Invalid argument for 'ascii_format'. Only 'grass' or 'arc' allowed." )
This function takes array data and writes out a GSSHA ascii grid .
357
17
4,594
def _write_hmet_card_file ( self , hmet_card_file_path , main_output_folder ) : with io_open ( hmet_card_file_path , 'w' ) as out_hmet_list_file : for hour_time in self . data . lsm . datetime : date_str = self . _time_to_string ( hour_time , "%Y%m%d%H" ) out_hmet_list_file . write ( u"{0}\n" . format ( path . join ( main_output_folder , date_str ) ) )
This function writes the HMET_ASCII card file with ASCII file list for input to GSSHA
136
21
4,595
def lsm_data_to_arc_ascii ( self , data_var_map_array , main_output_folder = "" ) : self . _check_lsm_input ( data_var_map_array ) if not main_output_folder : main_output_folder = path . join ( self . gssha_project_folder , "hmet_ascii_data" ) try : mkdir ( main_output_folder ) except OSError : pass log . info ( "Outputting HMET data to {0}" . format ( main_output_folder ) ) #PART 2: DATA for data_var_map in data_var_map_array : gssha_data_var , lsm_data_var = data_var_map gssha_data_hmet_name = self . netcdf_attributes [ gssha_data_var ] [ 'hmet_name' ] gssha_data_var_name = self . netcdf_attributes [ gssha_data_var ] [ 'gssha_name' ] self . _load_converted_gssha_data_from_lsm ( gssha_data_var , lsm_data_var , 'ascii' ) self . _convert_data_to_hourly ( gssha_data_var_name ) self . data = self . data . lsm . to_projection ( gssha_data_var_name , projection = self . gssha_grid . projection ) for time_idx in range ( self . data . dims [ 'time' ] ) : arr_grid = ArrayGrid ( in_array = self . data [ gssha_data_var_name ] [ time_idx ] . values , wkt_projection = self . data . lsm . projection . ExportToWkt ( ) , geotransform = self . data . lsm . geotransform , nodata_value = - 9999 ) date_str = self . _time_to_string ( self . data . lsm . datetime [ time_idx ] , "%Y%m%d%H" ) ascii_file_path = path . join ( main_output_folder , "{0}_{1}.asc" . format ( date_str , gssha_data_hmet_name ) ) arr_grid . to_arc_ascii ( ascii_file_path ) #PART 3: HMET_ASCII card input file with ASCII file list hmet_card_file_path = path . join ( main_output_folder , 'hmet_file_list.txt' ) self . _write_hmet_card_file ( hmet_card_file_path , main_output_folder )
Writes extracted data to Arc ASCII file format into folder to be read in by GSSHA . Also generates the HMET_ASCII card file for GSSHA in the folder named hmet_file_list . txt .
625
48
4,596
def lsm_data_to_subset_netcdf ( self , netcdf_file_path , data_var_map_array , resample_method = None ) : self . _check_lsm_input ( data_var_map_array ) output_datasets = [ ] #DATA for gssha_var , lsm_var in data_var_map_array : if gssha_var in self . netcdf_attributes : self . _load_converted_gssha_data_from_lsm ( gssha_var , lsm_var , 'netcdf' ) #previously just added data, but needs to be hourly gssha_data_var_name = self . netcdf_attributes [ gssha_var ] [ 'gssha_name' ] self . _convert_data_to_hourly ( gssha_data_var_name ) if resample_method : self . _resample_data ( gssha_data_var_name ) else : self . data = self . data . lsm . to_projection ( gssha_data_var_name , projection = self . gssha_grid . projection ) output_datasets . append ( self . data ) else : raise ValueError ( "Invalid GSSHA variable name: {0} ..." . format ( gssha_var ) ) output_dataset = xr . merge ( output_datasets ) #add global attributes output_dataset . attrs [ 'Convention' ] = 'CF-1.6' output_dataset . attrs [ 'title' ] = 'GSSHA LSM Input' output_dataset . attrs [ 'history' ] = 'date_created: {0}' . format ( datetime . utcnow ( ) ) output_dataset . attrs [ 'proj4' ] = self . data . attrs [ 'proj4' ] output_dataset . attrs [ 'geotransform' ] = self . data . attrs [ 'geotransform' ] output_dataset . to_netcdf ( netcdf_file_path )
Writes extracted data to the NetCDF file format
494
11
4,597
def export ( self , * * kwargs ) : query_params = { "_actions" : "false" , "_links" : "true" , "_embedded" : "true" } path_params = { } headers = { } body = None if "applicationId" in kwargs : path_params [ "applicationId" ] = kwargs [ "applicationId" ] if "query" in kwargs : body = kwargs [ "query" ] if "losantdomain" in kwargs : headers [ "losantdomain" ] = kwargs [ "losantdomain" ] if "_actions" in kwargs : query_params [ "_actions" ] = kwargs [ "_actions" ] if "_links" in kwargs : query_params [ "_links" ] = kwargs [ "_links" ] if "_embedded" in kwargs : query_params [ "_embedded" ] = kwargs [ "_embedded" ] path = "/applications/{applicationId}/data/export" . format ( * * path_params ) return self . client . request ( "POST" , path , params = query_params , headers = headers , body = body )
Creates a csv file from a query of devices and attributes over a time range .
266
18
4,598
def _generate ( self , message ) : raw_params = { "INPUT_TEXT" : message . encode ( 'UTF8' ) , "INPUT_TYPE" : self . input_type , "OUTPUT_TYPE" : self . output_type , "LOCALE" : self . _locale , "AUDIO" : self . audio , "VOICE" : self . _voice , } params = urlencode ( raw_params ) headers = { } logging . debug ( 'maryclient: generate, raw_params=%s' % repr ( raw_params ) ) # Open connection to self._host, self._port. conn = httplib . HTTPConnection ( self . _host , self . _port ) #conn.set_debuglevel(5) conn . request ( "POST" , "/process" , params , headers ) response = conn . getresponse ( ) if response . status != 200 : logging . error ( response . getheaders ( ) ) raise Exception ( "{0}: {1}" . format ( response . status , response . reason ) ) return response . read ( )
Given a message in message return a response in the appropriate format .
239
13
4,599
def receive ( self , msg ) : if msg [ TYPE ] == TELL and msg [ METHOD ] == 'stop' : self . running = False self . future_manager . stop ( ) else : result = None try : invoke = getattr ( self . _obj , msg [ METHOD ] ) params = msg [ PARAMS ] result = invoke ( * params [ 0 ] , * * params [ 1 ] ) except Exception , e : if msg [ TYPE ] == TELL : print e return result = e self . send_response ( result , msg )
The message received from the queue specify a method of the class the actor represents . This invokes it . If the communication is an ASK sends the result back to the channel included in the message as an ASKRESPONSE .
118
48