idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
33,200 | def parse_token ( self , token ) : try : data = self . unsign ( token ) except signing . SignatureExpired : logger . debug ( "Expired token: %s" , token ) return except signing . BadSignature : logger . debug ( "Bad token: %s" , token ) return except Exception : logger . exception ( "Valid signature but unexpected toke... | Obtain a user from a signed token . |
33,201 | def authenticate ( self , request , url_auth_token = None ) : try : return self . parse_token ( url_auth_token ) except TypeError : backend = "%s.%s" % ( self . __module__ , self . __class__ . __name__ ) logger . exception ( "TypeError in %s, here's the traceback before " "Django swallows it:" , backend ) raise | Check the token and return the corresponding user . |
33,202 | def _add_pos1 ( token ) : result = token . copy ( ) result [ 'pos1' ] = _POSMAP [ token [ 'pos' ] . split ( "(" ) [ 0 ] ] return result | Adds a pos1 element to a frog token . |
33,203 | def frog_to_saf ( tokens ) : tokens = [ _add_pos1 ( token ) for token in tokens ] module = { 'module' : "frog" , "started" : datetime . datetime . now ( ) . isoformat ( ) } return { "header" : { 'format' : "SAF" , 'format-version' : "0.0" , 'processed' : [ module ] } , "tokens" : tokens } | Convert frog tokens into a new SAF document |
33,204 | def copy_cwl_files ( from_dir = CWL_PATH , to_dir = None ) : cwl_files = glob . glob ( '{}{}*.cwl' . format ( from_dir , os . sep ) ) if len ( cwl_files ) > 0 : create_dirs ( to_dir ) for fi in cwl_files : fo = os . path . join ( to_dir , os . path . basename ( fi ) ) shutil . copy2 ( fi , fo ) return len ( cwl_files ) | Copy cwl files to a directory where the cwl - runner can find them . |
33,205 | def main ( to_dir , from_dir ) : num = copy_cwl_files ( from_dir = from_dir , to_dir = to_dir ) if num > 0 : click . echo ( 'Copied {} CWL files to "{}".' . format ( num , to_dir ) ) else : msg = 'No CWL files found in "{}". Copied 0 files' . format ( from_dir ) click . echo ( msg ) | Copy CWL files . |
33,206 | def find ( self , datum ) : if isinstance ( datum . value , dict ) and self . expressions : return datum if isinstance ( datum . value , dict ) or isinstance ( datum . value , list ) : key = ( functools . cmp_to_key ( self . _compare ) if self . expressions else None ) return [ jsonpath_rw . DatumInContext . wrap ( [ v... | Return sorted value of This if list or dict . |
33,207 | def save ( self , fname , mode = None , validate = True , wd = False , inline = False , relative = True , pack = False , encoding = 'utf-8' ) : super ( WorkflowGenerator , self ) . save ( fname , mode = mode , validate = validate , wd = wd , inline = inline , relative = relative , pack = pack , encoding = encoding ) | Save workflow to file |
33,208 | def match ( pattern , data , ** parse_kwargs ) : return [ m . value for m in parse ( pattern , ** parse_kwargs ) . find ( data ) ] | Returns all matched values of pattern in data |
33,209 | def match1 ( pattern , data , ** parse_kwargs ) : matches = match ( pattern , data , ** parse_kwargs ) return matches [ 0 ] if matches else None | Returns first matched value of pattern in data or None if no matches |
33,210 | def create_chunked_list ( in_dir , size , out_dir , out_name ) : create_dirs ( out_dir ) in_files = get_files ( in_dir ) chunks = chunk ( in_files , size ) division = { } for i , files in enumerate ( chunks ) : division [ i ] = [ os . path . basename ( f ) for f in files ] out_file = os . path . join ( out_dir , out_na... | Create a division of the input files in chunks . |
33,211 | def remove_ext ( fname ) : bn = os . path . basename ( fname ) return os . path . splitext ( bn ) [ 0 ] | Removes the extension from a filename |
33,212 | def out_file_name ( out_dir , fname , ext = None ) : if ext is None : return os . path . join ( out_dir , os . path . basename ( fname ) ) fname = remove_ext ( fname ) return os . path . join ( out_dir , '{}.{}' . format ( fname , ext ) ) | Return path of output file given a directory file name and extension . |
33,213 | def get_files ( directory , recursive = False ) : files_out = [ ] if recursive : for root , dirs , files in os . walk ( os . path . abspath ( directory ) ) : files = [ os . path . join ( root , f ) for f in files ] files_out . append ( files ) files_out = list ( itertools . chain ( * files_out ) ) else : files_out = [ ... | Return a list of all files in the directory . |
33,214 | def _reformat ( p , buf ) : if numpy . ndim ( buf ) != 1 : raise ValueError ( "Buffer ``buf`` must be 1-d." ) if hasattr ( p , 'keys' ) : ans = _gvar . BufferDict ( p ) if ans . size != len ( buf ) : raise ValueError ( "p, buf size mismatch: %d, %d" % ( ans . size , len ( buf ) ) ) ans = _gvar . BufferDict ( ans , buf ... | Apply format of p to data in 1 - d array buf . |
33,215 | def _unpack_gvars ( g ) : if g is not None : g = _gvar . gvar ( g ) if not hasattr ( g , 'flat' ) : g = numpy . asarray ( g ) return g | Unpack collection of GVars to BufferDict or numpy array . |
33,216 | def _unpack_p0 ( p0 , p0file , prior ) : if p0file is not None : try : with open ( p0file , "rb" ) as f : p0 = pickle . load ( f ) except ( IOError , EOFError ) : if prior is None : raise IOError ( "No prior and can't read parameters from " + p0file ) else : p0 = None if p0 is not None : if p0 is True : p0 = next ( _gv... | Create proper p0 . |
33,217 | def _unpack_fcn ( fcn , p0 , y , x ) : if y . shape is not None : if p0 . shape is not None : def nfcn ( p , x = x , fcn = fcn , pshape = p0 . shape ) : po = p . reshape ( pshape ) ans = fcn ( po ) if x is False else fcn ( x , po ) if hasattr ( ans , 'flat' ) : return ans . flat else : return numpy . array ( ans ) . fl... | reconfigure fitting fcn so inputs outputs = flat arrays ; hide x |
33,218 | def check_roundoff ( self , rtol = 0.25 , atol = 1e-6 ) : psdev = _gvar . sdev ( self . p . flat ) paltsdev = _gvar . sdev ( self . palt . flat ) if not numpy . allclose ( psdev , paltsdev , rtol = rtol , atol = atol ) : warnings . warn ( "Possible roundoff errors in fit.p; try svd cut." ) | Check for roundoff errors in fit . p . |
33,219 | def plot_residuals ( self , plot = None ) : if plot is None : import matplotlib . pyplot as plot x = numpy . arange ( 1 , len ( self . residuals ) + 1 ) y = _gvar . mean ( self . residuals ) yerr = _gvar . sdev ( self . residuals ) plot . errorbar ( x = x , y = y , yerr = yerr , fmt = 'o' , color = 'b' ) plot . ylabel ... | Plot normalized fit residuals . |
33,220 | def load_parameters ( filename ) : warnings . warn ( "nonlinear_fit.load_parameters deprecated; use pickle.load or gvar.load instead" , DeprecationWarning , ) with open ( filename , "rb" ) as f : return pickle . load ( f ) | Load parameters stored in file filename . |
33,221 | def simulated_fit_iter ( self , n = None , pexact = None , add_priornoise = False , bootstrap = None , ** kargs ) : pexact = self . pmean if pexact is None else pexact if bootstrap is not None : add_priornoise = bootstrap fargs = dict ( fcn = self . fcn , svdcut = None , p0 = pexact , fitter = self . fitter , ) fargs .... | Iterator that returns simulation copies of a fit . |
33,222 | def simulated_data_iter ( self , n = None , pexact = None , add_priornoise = False , bootstrap = None ) : pexact = self . pmean if pexact is None else pexact if bootstrap is not None : add_priornoise = bootstrap f = self . fcn ( pexact ) if self . x is False else self . fcn ( self . x , pexact ) y = copy . deepcopy ( s... | Iterator that returns simulated data based upon a fit s data . |
33,223 | def formatall ( self , * args , ** kargs ) : " Add-on method for fits returned by chained_nonlinear_fit. " ans = '' for x in self . chained_fits : ans += 10 * '=' + ' ' + str ( x ) + '\n' ans += self . chained_fits [ x ] . format ( * args , ** kargs ) ans += '\n' return ans [ : - 1 ] | Add - on method for fits returned by chained_nonlinear_fit . |
33,224 | def set ( self , ** kargs ) : kwords = set ( [ 'mopt' , 'fast' , 'ratio' , 'wavg_kargs' , 'wavg_all' , 'fitterargs' , 'fitname' , ] ) kargs = dict ( kargs ) oldkargs = { } fargs = { } for k in list ( kargs . keys ( ) ) : if k in kwords : oldkargs [ k ] = getattr ( self , k ) setattr ( self , k , kargs [ k ] ) kwords . ... | Reset default keyword parameters . |
33,225 | def buildfitfcn ( self ) : def _fitfcn ( p , flatmodels = self . flatmodels ) : ans = gvar . BufferDict ( ) for m in flatmodels : ans [ m . datatag ] = ( m . fitfcn ( p ) if m . ncg <= 1 else MultiFitter . coarse_grain ( m . fitfcn ( p ) , m . ncg ) ) return ans return _fitfcn | Create fit function to fit models in list models . |
33,226 | def builddata ( self , mopt = None , data = None , pdata = None , prior = None ) : if pdata is None : if data is None : raise ValueError ( 'no data or pdata' ) pdata = gvar . BufferDict ( ) for m in self . flatmodels : pdata [ m . datatag ] = ( m . builddata ( data ) if m . ncg <= 1 else MultiFitter . coarse_grain ( m ... | Rebuild pdata to account for marginalization . |
33,227 | def buildprior ( self , prior , mopt = None ) : nprior = gvar . BufferDict ( ) for m in self . flatmodels : nprior . update ( m . buildprior ( prior , mopt = mopt , ) ) if not self . fast : for k in prior : if k not in nprior : nprior [ k ] = prior [ k ] return nprior | Create prior to fit models in list models . |
33,228 | def _flatten_models ( tasklist ) : " Create 1d-array containing all disctinct models from ``tasklist``. " ans = gvar . BufferDict ( ) for task , mlist in tasklist : if task != 'fit' : continue for m in mlist : id_m = id ( m ) if id_m not in ans : ans [ id_m ] = m return ans . buf . tolist ( ) | Create 1d - array containing all disctinct models from tasklist . |
33,229 | def flatten_models ( models ) : " Create 1d-array containing all disctinct models from ``models``. " if isinstance ( models , MultiFitterModel ) : ans = [ models ] else : tasklist = MultiFitter . _compile_models ( models ) ans = MultiFitter . _flatten_models ( tasklist ) return ans | Create 1d - array containing all disctinct models from models . |
33,230 | def lsqfit ( self , data = None , pdata = None , prior = None , p0 = None , ** kargs ) : if prior is None : raise ValueError ( 'no prior' ) kargs , oldargs = self . set ( ** kargs ) fitter_args_kargs = ( self . chained_lsqfit , dict ( data = data , prior = prior , pdata = pdata , models = self . models ) , dict ( kargs... | Compute least - squares fit of models to data . |
33,231 | def _compile_models ( models ) : tasklist = [ ] for m in models : if isinstance ( m , MultiFitterModel ) : tasklist += [ ( 'fit' , [ m ] ) ] tasklist += [ ( 'update-prior' , None ) ] elif hasattr ( m , 'keys' ) : tasklist += [ ( 'update-kargs' , m ) ] elif isinstance ( m , tuple ) : tasklist += [ ( 'fit' , list ( m ) )... | Convert models into a list of tasks . |
33,232 | def coarse_grain ( G , ncg ) : if ncg <= 1 : return G G = numpy . asarray ( G ) nbin , remainder = divmod ( G . shape [ - 1 ] , ncg ) if remainder != 0 : nbin += 1 return numpy . transpose ( [ numpy . sum ( G [ ... , i : i + ncg ] , axis = - 1 ) / G [ ... , i : i + ncg ] . shape [ - 1 ] for i in numpy . arange ( 0 , nc... | Coarse - grain last index of array G . |
33,233 | def process_data ( data , models ) : pdata = gvar . BufferDict ( ) for m in MultiFitter . flatten_models ( models ) : pdata [ m . datatag ] = ( m . builddata ( data ) if m . ncg <= 1 else MultiFitter . coarse_grain ( m . builddata ( data ) , ncg = m . ncg ) ) return pdata | Convert data to processed data using models . |
33,234 | def process_dataset ( dataset , models , ** kargs ) : dset = collections . OrderedDict ( ) for m in MultiFitter . flatten_models ( models ) : dset [ m . datatag ] = ( m . builddataset ( dataset ) if m . ncg <= 1 else MultiFitter . coarse_grain ( m . builddataset ( dataset ) , ncg = m . ncg ) ) return gvar . dataset . a... | Convert dataset to processed data using models . |
33,235 | def buildprior ( self , prior , mopt = None , extend = False ) : " Extract the model's parameters from prior. " newprior = { } intercept , slope = gv . get_dictkeys ( prior , [ self . intercept , self . slope ] ) newprior [ intercept ] = prior [ intercept ] if mopt is None : newprior [ slope ] = prior [ slope ] return ... | Extract the model s parameters from prior . |
33,236 | def show_plot ( t_array , th_array ) : th_mean = gv . mean ( th_array ) th_sdev = gv . sdev ( th_array ) thp = th_mean + th_sdev thm = th_mean - th_sdev plt . fill_between ( t_array , thp , thm , color = '0.8' ) plt . plot ( t_array , th_mean , linewidth = 0.5 ) plt . xlabel ( '$t$' ) plt . ylabel ( r'$\theta(t)$' ) pl... | Display theta vs t plot . |
33,237 | def aggregated_records ( all_records , key_fields = KEY_FIELDS ) : flow_table = defaultdict ( _FlowStats ) for flow_record in all_records : key = tuple ( getattr ( flow_record , attr ) for attr in key_fields ) if any ( x is None for x in key ) : continue flow_table [ key ] . update ( flow_record ) for key in flow_table... | Yield dicts that correspond to aggregates of the flow records given by the sequence of FlowRecords in all_records . Skips incomplete records . This will consume the all_records iterator and requires enough memory to be able to read it entirely . key_fields optionally contains the fields over which to aggregate . By def... |
33,238 | def action_print ( reader , * args ) : arg_count = len ( args ) if arg_count == 0 : stop_after = 0 elif arg_count == 1 : stop_after = int ( args [ 0 ] ) else : raise RuntimeError ( "0 or 1 arguments expected for action 'print'" ) for i , record in enumerate ( reader , 1 ) : print ( record . to_message ( ) ) if i == sto... | Simply print the Flow Log records to output . |
33,239 | def action_ipset ( reader , * args ) : ip_set = set ( ) for record in reader : if record . log_status in ( SKIPDATA , NODATA ) : continue ip_set . add ( record . srcaddr ) ip_set . add ( record . dstaddr ) for ip in ip_set : print ( ip ) | Show the set of IPs seen in Flow Log records . |
33,240 | def action_findip ( reader , * args ) : target_ips = set ( args ) for record in reader : if ( record . srcaddr in target_ips ) or ( record . dstaddr in target_ips ) : print ( record . to_message ( ) ) | Find Flow Log records involving a specific IP or IPs . |
33,241 | def action_aggregate ( reader , * args ) : all_aggregated = aggregated_records ( reader ) first_row = next ( all_aggregated ) keys = sorted ( first_row . keys ( ) ) print ( * keys , sep = '\t' ) iterable = chain ( [ first_row ] , all_aggregated ) for item in iterable : print ( * [ item [ k ] for k in keys ] , sep = '\t... | Aggregate flow records by 5 - tuple and print a tab - separated stream |
33,242 | def get_poly_area_geo ( poly ) : minx , miny , maxx , maxy = poly . bounds reprojected_for_area = Proj ( "+proj=aea +lat_1={0} +lat_1={1} " "+lat_0={2} +lon_0={3}" . format ( miny , maxy , ( miny + maxy ) / 2.0 , ( minx + maxx ) / 2.0 ) ) geographic_proj = Proj ( init = 'epsg:4326' ) project_func = partial ( transform ... | Calculates the area in meters squared of the individual polygon |
33,243 | def CreateWeightTableECMWF ( in_ecmwf_nc , in_catchment_shapefile , river_id , in_connectivity_file , out_weight_table , area_id = None , file_geodatabase = None ) : data_ecmwf_nc = Dataset ( in_ecmwf_nc ) variables_list = data_ecmwf_nc . variables . keys ( ) in_ecmwf_lat_var = 'lat' if 'latitude' in variables_list : i... | Create Weight Table for ECMWF Grids |
33,244 | def CreateWeightTableLDAS ( in_ldas_nc , in_nc_lon_var , in_nc_lat_var , in_catchment_shapefile , river_id , in_connectivity_file , out_weight_table , area_id = None , file_geodatabase = None ) : data_ldas_nc = Dataset ( in_ldas_nc ) variables_list = data_ldas_nc . variables . keys ( ) if in_nc_lon_var not in variables... | Create Weight Table for NLDAS GLDAS grids as well as for 2D Joules or LIS Grids |
33,245 | def generate_single_seasonal_average ( args ) : qout_file = args [ 0 ] seasonal_average_file = args [ 1 ] day_of_year = args [ 2 ] mp_lock = args [ 3 ] min_day = day_of_year - 3 max_day = day_of_year + 3 with RAPIDDataset ( qout_file ) as qout_nc_file : time_indices = [ ] for idx , t in enumerate ( qout_nc_file . get_t... | This function calculates the seasonal average for a single day of the year for all river segments |
33,246 | def generate_seasonal_averages ( qout_file , seasonal_average_file , num_cpus = multiprocessing . cpu_count ( ) ) : with RAPIDDataset ( qout_file ) as qout_nc_file : print ( "Generating seasonal average file ..." ) seasonal_avg_nc = Dataset ( seasonal_average_file , 'w' ) seasonal_avg_nc . createDimension ( 'rivid' , q... | This function loops through a CF compliant rapid streamflow file to produce a netCDF file with a seasonal average for 365 days a year |
33,247 | def open_shapefile ( shapefile_path , file_geodatabase = None ) : if file_geodatabase : gdb_driver = ogr . GetDriverByName ( "OpenFileGDB" ) ogr_shapefile = gdb_driver . Open ( file_geodatabase ) ogr_shapefile_lyr = ogr_shapefile . GetLayer ( shapefile_path ) else : ogr_shapefile = ogr . Open ( shapefile_path ) ogr_sha... | Opens a shapefile using either a shapefile path or a file geodatabase |
33,248 | def _get_cygwin_path ( self , windows_path ) : conv_cmd = [ os . path . join ( self . _cygwin_bin_location , "cygpath.exe" ) , "-u" , windows_path ] process = Popen ( conv_cmd , stdout = PIPE , stderr = PIPE , shell = False ) out , err = process . communicate ( ) if err : print ( err ) raise Exception ( err ) return ou... | Convert windows path to cygpath |
33,249 | def _create_symlink_cygwin ( self , initial_path , final_path ) : symlink_cmd = [ os . path . join ( self . _cygwin_bin_location , "ln.exe" ) , "-s" , self . _get_cygwin_path ( initial_path ) , self . _get_cygwin_path ( final_path ) ] process = Popen ( symlink_cmd , stdout = PIPE , stderr = PIPE , shell = False ) out ,... | Use cygqin to generate symbolic link |
33,250 | def _dos2unix_cygwin ( self , file_path ) : dos2unix_cmd = [ os . path . join ( self . _cygwin_bin_location , "dos2unix.exe" ) , self . _get_cygwin_path ( file_path ) ] process = Popen ( dos2unix_cmd , stdout = PIPE , stderr = PIPE , shell = False ) process . communicate ( ) | Use cygwin to convert file to unix format |
33,251 | def update_reach_number_data ( self ) : if not self . rapid_connect_file : log ( "Missing rapid_connect_file. " "Please set before running this function ..." , "ERROR" ) if not self . riv_bas_id_file : log ( "Missing riv_bas_id_file. " "Please set before running this function ..." , "ERROR" ) rapid_connect_table = np .... | Update the reach number data for the namelist based on input files . |
33,252 | def generate_namelist_file ( self , rapid_namelist_file ) : log ( "Generating RAPID namelist file ..." , "INFO" ) try : os . remove ( rapid_namelist_file ) except OSError : pass with open ( rapid_namelist_file , 'w' ) as new_file : new_file . write ( '&NL_namelist\n' ) for attr , value in sorted ( list ( self . __dict_... | Generate rapid_namelist file . |
33,253 | def update_namelist_file ( self , rapid_namelist_file , new_namelist_file = None ) : if os . path . exists ( rapid_namelist_file ) and rapid_namelist_file : log ( "Adding missing inputs from RAPID input file ..." , "INFO" ) with open ( rapid_namelist_file , 'r' ) as old_file : for line in old_file : line = line . strip... | Update existing namelist file with new parameters |
33,254 | def generate_qinit_from_past_qout ( self , qinit_file , time_index = - 1 , out_datetime = None ) : if not self . Qout_file or not os . path . exists ( self . Qout_file ) : log ( 'Missing Qout_file. ' 'Please set before running this function ...' , "ERROR" ) if not self . rapid_connect_file or not self . rapid_connect_f... | Generate qinit from a RAPID qout file |
33,255 | def generate_seasonal_intitialization ( self , qinit_file , datetime_start_initialization = datetime . datetime . utcnow ( ) ) : if not self . Qout_file or not os . path . exists ( self . Qout_file ) : log ( "Missing Qout_file. " "Please set before running this function ..." , "ERROR" ) if not self . rapid_connect_file... | This creates a seasonal qinit file from a RAPID qout file . This requires a simulation Qout file with a longer time period of record and to be CF compliant . It takes the average of the current date + - 3 days and goes back as far as possible . |
33,256 | def read_in_weight_table ( self , in_weight_table ) : print ( "Reading the weight table..." ) with open_csv ( in_weight_table , "r" ) as csvfile : reader = csv . reader ( csvfile ) header_row = next ( reader ) if len ( header_row ) < len ( self . header_wt ) : raise Exception ( self . error_messages [ 4 ] ) if header_r... | Read in weight table |
33,257 | def _write_lat_lon ( data_out_nc , rivid_lat_lon_z_file ) : if rivid_lat_lon_z_file and os . path . exists ( rivid_lat_lon_z_file ) : lookup_table = np . loadtxt ( rivid_lat_lon_z_file , delimiter = "," , usecols = ( 0 , 1 , 2 ) , skiprows = 1 , dtype = { 'names' : ( 'rivid' , 'lat' , 'lon' ) , 'formats' : ( 'i8' , 'f8... | Add latitude and longitude each netCDF feature Lookup table is a CSV file with rivid Lat Lon columns . Columns must be in that order and these must be the first three columns . |
33,258 | def filter_nan ( s , o ) : data = np . array ( [ s . flatten ( ) , o . flatten ( ) ] ) data = np . transpose ( data ) data = data [ ~ np . isnan ( data ) . any ( 1 ) ] return data [ : , 0 ] , data [ : , 1 ] | this functions removed the data from simulated and observed data whereever the observed data contains nan |
33,259 | def find_goodness_of_fit ( rapid_qout_file , reach_id_file , observed_file , out_analysis_file , daily = False ) : reach_id_list = np . loadtxt ( reach_id_file , delimiter = "," , usecols = ( 0 , ) , ndmin = 1 , dtype = np . int32 ) data_nc = RAPIDDataset ( rapid_qout_file ) observed_table = np . loadtxt ( observed_fil... | Finds the goodness of fit comparing observed streamflow in a rapid Qout file with simulated flows in a csv file . |
33,260 | def find_goodness_of_fit_csv ( observed_simulated_file , out_file = None ) : observed_simulated_table = np . loadtxt ( observed_simulated_file , ndmin = 2 , delimiter = "," , usecols = ( 0 , 1 ) ) observed_array , simulated_array = filter_nan ( observed_simulated_table [ : , 0 ] , observed_simulated_table [ : , 1 ] ) i... | Finds the goodness of fit comparing observed and simulated flows In the file the first column is the observed flows and the second column is the simulated flows . |
33,261 | def log ( message , severity = "INFO" , print_debug = True ) : print_me = [ 'WARNING' , 'INFO' , 'DEBUG' ] if severity in print_me : if severity == 'DEBUG' : if print_debug : print ( "{0}: {1}" . format ( severity , message ) ) else : print ( "{0}: {1}" . format ( severity , message ) ) else : raise Exception ( "{0}: {... | Logs prints or raises a message . |
33,262 | def csv_to_list ( csv_file , delimiter = ',' ) : with open_csv ( csv_file ) as csv_con : if len ( delimiter ) > 1 : dialect = csv . Sniffer ( ) . sniff ( csv_con . read ( 1024 ) , delimiters = delimiter ) csv_con . seek ( 0 ) reader = csv . reader ( csv_con , dialect ) else : reader = csv . reader ( csv_con , delimiter... | Reads in a CSV file and returns the contents as list where every row is stored as a sublist and each element in the sublist represents 1 cell in the table . |
33,263 | def add_latlon_metadata ( lat_var , lon_var ) : lat_var . long_name = 'latitude' lat_var . standard_name = 'latitude' lat_var . units = 'degrees_north' lat_var . axis = 'Y' lon_var . long_name = 'longitude' lon_var . standard_name = 'longitude' lon_var . units = 'degrees_east' lon_var . axis = 'X' | Adds latitude and longitude metadata |
33,264 | def generate_inflows_from_runoff ( args ) : runoff_file_list = args [ 0 ] file_index_list = args [ 1 ] weight_table_file = args [ 2 ] grid_type = args [ 3 ] rapid_inflow_file = args [ 4 ] rapid_inflow_tool = args [ 5 ] mp_lock = args [ 6 ] time_start_all = datetime . utcnow ( ) if not isinstance ( runoff_file_list , li... | prepare runoff inflow file for rapid |
33,265 | def determine_start_end_timestep ( lsm_file_list , file_re_match = None , file_datetime_pattern = None , expected_time_step = None , lsm_grid_info = None ) : if lsm_grid_info is None : lsm_grid_info = identify_lsm_grid ( lsm_file_list [ 0 ] ) if None in ( lsm_grid_info [ 'time_var' ] , lsm_grid_info [ 'time_dim' ] ) or... | Determine the start and end date from LSM input files |
33,266 | def _get_voronoi_centroid_array ( lsm_lat_array , lsm_lon_array , extent ) : YMin = extent [ 2 ] YMax = extent [ 3 ] XMin = extent [ 0 ] XMax = extent [ 1 ] ptList = [ ] if ( lsm_lat_array . ndim == 2 ) and ( lsm_lon_array . ndim == 2 ) : if extent : lsm_dx = np . max ( np . absolute ( np . diff ( lsm_lon_array ) ) ) l... | This function generates a voronoi centroid point list from arrays of latitude and longitude |
33,267 | def _get_voronoi_poly_points ( vert_index_list , voronoi_vertices , voronoi_centroid ) : voronoi_poly_points = [ ] if - 1 not in vert_index_list and len ( vert_index_list ) > 3 : voronoi_poly_points = voronoi_vertices [ vert_index_list ] elif vert_index_list . size > 0 : vert_index_list = vert_index_list [ vert_index_l... | This function returns the corner points for a polygon from scipy voronoi information |
33,268 | def pointsToVoronoiGridShapefile ( lat , lon , vor_shp_path , extent = None ) : voronoi_centroids = _get_voronoi_centroid_array ( lat , lon , extent ) log ( "Creating output polygon shp {0}" . format ( os . path . basename ( vor_shp_path ) ) ) if os . path . exists ( vor_shp_path ) : os . remove ( vor_shp_path ) drv = ... | Converts points to shapefile grid via voronoi |
33,269 | def pointsToVoronoiGridArray ( lat , lon , extent = None ) : voronoi_centroids = _get_voronoi_centroid_array ( lat , lon , extent ) log ( "Building Voronoi polygons..." ) voronoi_manager = Voronoi ( voronoi_centroids ) voronoi_vertices = voronoi_manager . vertices voronoi_regions = voronoi_manager . regions feature_lis... | Converts points to grid array via voronoi |
33,270 | def _generate_time_values ( self ) : log ( 'writing times' , 'INFO' ) d1970 = datetime ( 1970 , 1 , 1 , tzinfo = utc ) time_array = [ [ int ( ( self . start_datetime - d1970 ) . total_seconds ( ) ) ] ] datetime_nc_start_simulation = self . start_datetime for raw_nc_index , raw_nc in enumerate ( self . raw_nc_list ) : r... | Generates time values for out nc file |
33,271 | def convert ( self ) : try : log ( 'Processing %s ...' % self . rapid_output_file_list [ 0 ] ) time_start_conversion = datetime . utcnow ( ) log ( 'validating input netCDF file' , 'INFO' ) id_len , time_len = self . _validate_raw_nc ( ) log ( 'initializing output' , 'INFO' ) self . _initialize_output ( time_len , id_le... | Copies data from RAPID netCDF output to a CF - compliant netCDF file . |
33,272 | def CreateMuskingumKFile ( lambda_k , in_kfac_file , out_k_file ) : kfac_table = csv_to_list ( in_kfac_file ) with open_csv ( out_k_file , 'w' ) as kfile : k_writer = csv_writer ( kfile ) for row in kfac_table : k_writer . writerow ( [ lambda_k * float ( row [ 0 ] ) ] ) | Creates muskingum k file from kfac file . |
33,273 | def CreateMuskingumXFileFromDranageLine ( in_drainage_line , x_id , out_x_file , file_geodatabase = None ) : ogr_drainage_line_shapefile_lyr , ogr_drainage_line_shapefile = open_shapefile ( in_drainage_line , file_geodatabase ) with open_csv ( out_x_file , 'w' ) as kfile : x_writer = csv_writer ( kfile ) for drainage_l... | Create muskingum X file from drainage line . |
33,274 | def CreateConstMuskingumXFile ( x_value , in_connectivity_file , out_x_file ) : num_rivers = 0 with open_csv ( in_connectivity_file , "r" ) as csvfile : reader = csv_reader ( csvfile ) for _ in reader : num_rivers += 1 with open_csv ( out_x_file , 'w' ) as kfile : x_writer = csv_writer ( kfile ) for _ in xrange ( num_r... | Create muskingum X file from value that is constant all the way through for each river segment . |
33,275 | def StreamIDNextDownIDToConnectivity ( stream_id_array , next_down_id_array , out_csv_file ) : list_all = [ ] max_count_upstream = 0 for hydroid in np . sort ( stream_id_array ) : list_upstreamID = stream_id_array [ next_down_id_array == hydroid ] count_upstream = len ( list_upstreamID ) if count_upstream > max_count_u... | Creates RAPID connect file from stream_id array and next down id array |
33,276 | def CreateNetworkConnectivity ( in_drainage_line , river_id , next_down_id , out_connectivity_file , file_geodatabase = None ) : ogr_drainage_line_shapefile_lyr , ogr_drainage_line_shapefile = open_shapefile ( in_drainage_line , file_geodatabase ) stream_id_array = [ ] next_down_id_array = [ ] for drainage_line_feature... | Creates Network Connectivity input CSV file for RAPID based on the Drainage Line shapefile with river ID and next downstream ID fields . |
33,277 | def CreateNetworkConnectivityTauDEMTree ( network_connectivity_tree_file , out_csv_file ) : stream_id_array = [ ] next_down_id_array = [ ] with open_csv ( network_connectivity_tree_file , "r" ) as csvfile : for row in csvfile : split_row = row . split ( ) stream_id_array . append ( split_row [ 0 ] . strip ( ) ) next_do... | Creates Network Connectivity input CSV file for RAPID based on the TauDEM network connectivity tree file |
33,278 | def CreateNetworkConnectivityNHDPlus ( in_drainage_line , out_connectivity_file , file_geodatabase = None ) : ogr_drainage_line_shapefile_lyr , ogr_drainage_line_shapefile = open_shapefile ( in_drainage_line , file_geodatabase ) ogr_drainage_line_definition = ogr_drainage_line_shapefile_lyr . GetLayerDefn ( ) orig_fiel... | Creates Network Connectivity input CSV file for RAPID based on the NHDPlus drainage lines with COMID FROMNODE TONODE and DIVERGENCE fields . |
33,279 | def CreateSubsetFile ( in_drainage_line , river_id , out_riv_bas_id_file , file_geodatabase = None ) : ogr_drainage_line_shapefile_lyr , ogr_drainage_line_shapefile = open_shapefile ( in_drainage_line , file_geodatabase ) ogr_drainage_line_definition = ogr_drainage_line_shapefile_lyr . GetLayerDefn ( ) orig_field_names... | Creates River Basin ID subset input CSV file for RAPID based on the Drainage Line shapefile with river ID and next downstream ID fields |
33,280 | def CreateAllStaticECMWFFiles ( in_catchment , catchment_river_id , rapid_output_folder , rapid_connect_file , file_geodatabase = None ) : lsm_grid_folder = os . path . join ( os . path . dirname ( os . path . realpath ( __file__ ) ) , 'lsm_grids' ) ecmwf_t1279_grid_file = os . path . join ( lsm_grid_folder , 'runoff_e... | This creates all of the ECMWF grid weight tables using an area weighted method based on Esri s RAPID_Toolbox . |
33,281 | def CreateAllStaticECMWFRAPIDFiles ( in_drainage_line , river_id , length_id , slope_id , next_down_id , in_catchment , catchment_river_id , rapid_output_folder , kfac_celerity = 1000.0 / 3600.0 , kfac_formula_type = 3 , kfac_length_units = "km" , lambda_k = 0.35 , x_value = 0.3 , nhdplus = False , taudem_network_conne... | This creates all of the static RAPID files and ECMWF grid weight tables . |
33,282 | def compare_qout_files ( dataset1_path , dataset2_path ) : qout_same = False d1 = RAPIDDataset ( dataset1_path ) d2 = RAPIDDataset ( dataset2_path ) if len ( d1 . get_river_id_array ( ) ) != len ( d2 . get_river_id_array ( ) ) : log ( "Length of COMID/rivid input not the same." , "ERROR" ) if not ( d1 . get_river_id_ar... | This function compares the output of RAPID Qout and tells you where they are different . |
33,283 | def is_time_variable_valid ( self ) : time_var_valid = False if 'time' in self . qout_nc . variables . keys ( ) : if len ( self . qout_nc . dimensions [ 'time' ] ) > 0 : if not is_masked ( self . qout_nc . variables [ 'time' ] [ : ] ) : try : timestep = ( datetime . datetime . utcfromtimestamp ( self . qout_nc . variab... | This function returns whether or not the time variable is valid . |
33,284 | def get_time_array ( self , datetime_simulation_start = None , simulation_time_step_seconds = None , return_datetime = False , time_index_array = None ) : if datetime_simulation_start is not None : self . datetime_simulation_start = datetime_simulation_start if simulation_time_step_seconds is not None : self . simulati... | This method extracts or generates an array of time . The new version of RAPID output has the time array stored . However the old version requires the user to know when the simulation began and the time step of the output . |
33,285 | def get_time_index_range ( self , date_search_start = None , date_search_end = None , time_index_start = None , time_index_end = None , time_index = None ) : time_range = None if ( ( self . is_time_variable_valid ( ) or self . _is_legacy_time_valid ( ) ) and ( date_search_start is not None or date_search_end is not Non... | Generates a time index range based on time bounds given . This is useful for subset data extraction . |
def get_river_index(self, river_id):
    """Return the index of *river_id* within the dataset's river ID array.

    Parameters
    ----------
    river_id : int
        The river ID to locate.

    Returns
    -------
    int
        Position of ``river_id`` in ``self.get_river_id_array()``.

    Raises
    ------
    IndexError
        If the river ID is not present in the dataset.
    """
    try:
        # np.where returns a tuple of index arrays; take the first match.
        return np.where(self.get_river_id_array() == river_id)[0][0]
    except IndexError:
        # Re-raise with a message identifying the missing river ID.
        raise IndexError("ERROR: River ID {0} not found in dataset "
                         "...".format(river_id))
33,287 | def get_subset_riverid_index_list ( self , river_id_list ) : netcdf_river_indices_list = [ ] valid_river_ids = [ ] missing_river_ids = [ ] for river_id in river_id_list : try : netcdf_river_indices_list . append ( self . get_river_index ( river_id ) ) valid_river_ids . append ( river_id ) except IndexError : log ( "Rea... | Gets the subset riverid_list from the netcdf file . Optional returns include the list of valid river ids in the dataset as well as a list of missing river ids .
33,288 | def get_qout ( self , river_id_array = None , date_search_start = None , date_search_end = None , time_index_start = None , time_index_end = None , time_index = None , time_index_array = None , daily = False , pd_filter = None , filter_mode = "mean" , as_dataframe = False ) : riverid_index_list_subset = None if river_i... | This method extracts streamflow data by a single river ID or by a river ID array . It has options to extract by date or by date index . |
33,289 | def get_qout_index ( self , river_index_array = None , date_search_start = None , date_search_end = None , time_index_start = None , time_index_end = None , time_index = None , time_index_array = None , daily = False , pd_filter = None , filter_mode = "mean" , as_dataframe = False ) : if river_index_array is not None :... | This method extracts streamflow data by river index . It allows for extracting single or multiple river streamflow arrays It has options to extract by date or by date index . |
33,290 | def write_flows_to_csv ( self , path_to_output_file , river_index = None , river_id = None , date_search_start = None , date_search_end = None , daily = False , filter_mode = "mean" ) : if river_id is not None : river_index = self . get_river_index ( river_id ) elif river_id is None and river_index is None : raise Valu... | Write out RAPID output to CSV file . |
33,291 | def write_flows_to_gssha_time_series_xys ( self , path_to_output_file , series_name , series_id , river_index = None , river_id = None , date_search_start = None , date_search_end = None , daily = False , filter_mode = "mean" ) : if river_id is not None : river_index = self . get_river_index ( river_id ) elif river_id ... | Write out RAPID output to GSSHA WMS time series xys file . |
33,292 | def write_flows_to_gssha_time_series_ihg ( self , path_to_output_file , connection_list_file , date_search_start = None , date_search_end = None , daily = False , filter_mode = "mean" ) : self . raise_time_valid ( ) with open_csv ( path_to_output_file , 'w' ) as out_ts : connection_list = np . loadtxt ( connection_list... | Write out RAPID output to GSSHA time series ihg file |
def case_insensitive_file_search(directory, pattern):
    """Return the path of the first file in *directory* whose name matches
    *pattern*, ignoring case.

    Parameters
    ----------
    directory : str
        Directory to search (non-recursive).
    pattern : str
        Regular expression searched against each file name.

    Returns
    -------
    str
        ``os.path.join(directory, filename)`` for the first matching name.
        NOTE(review): "first" follows ``os.listdir`` order, which is
        arbitrary — assumes patterns match at most one file; confirm.

    Raises
    ------
    IndexError
        If no file name matches the pattern (also printed for visibility).
    """
    try:
        return os.path.join(
            directory,
            [filename for filename in os.listdir(directory)
             if re.search(pattern, filename, re.IGNORECASE)][0])
    except IndexError:
        # Keep the legacy print-then-raise behavior so callers that catch
        # IndexError still work while interactive users see the message.
        print("{0} not found".format(pattern))
        raise
def partition(lst, n):
    """Divide *lst* into *n* near-equal contiguous parts.

    The first ``len(lst) % n`` parts receive one extra element each, so
    part sizes differ by at most one.

    Parameters
    ----------
    lst : sequence
        The sequence to split.
    n : int
        Number of parts (must be >= 1).

    Returns
    -------
    tuple(list, list)
        ``(parts, index_lists)`` where ``parts[i]`` is the i-th slice of
        ``lst`` and ``index_lists[i]`` holds the original indices covered
        by that slice.
    """
    # NOTE: the original used Python-2-only ``xrange``, which raises
    # NameError on Python 3; ``range`` is the direct replacement.
    q, r = divmod(len(lst), n)
    # Boundary positions: the first r parts are one element longer.
    indices = [q * i + min(i, r) for i in range(n + 1)]
    parts = [lst[indices[i]:indices[i + 1]] for i in range(n)]
    index_lists = [list(range(indices[i], indices[i + 1])) for i in range(n)]
    return parts, index_lists
33,295 | def get_valid_directory_list ( input_directory ) : valid_input_directories = [ ] for directory in os . listdir ( input_directory ) : if os . path . isdir ( os . path . join ( input_directory , directory ) ) : valid_input_directories . append ( directory ) else : print ( "{0} not a directory. Skipping ..." . format ( di... | Get a list of folders |
33,296 | def _run_mpi_cmd ( self , cmd ) : log ( "Number of Processes: {0}" . format ( self . num_processors ) ) time_start = datetime . utcnow ( ) cmd = [ self . mpiexec_path , '-n' , str ( self . num_processors ) ] + cmd log ( "Command Line: {0}" . format ( " " . join ( cmd ) ) ) process = Popen ( cmd , stdout = PIPE , stderr... | This runs the command you send in |
33,297 | def _add_prj_file ( original_gis_file , new_gis_file ) : out_prj_file = "{0}.prj" . format ( os . path . splitext ( new_gis_file ) [ 0 ] ) if original_gis_file . endswith ( ".shp" ) : dataset = ogr . Open ( original_gis_file ) layer = dataset . GetLayer ( ) spatial_ref = layer . GetSpatialRef ( ) spatial_ref . MorphToE... | Adds projection file |
33,298 | def extractSubNetwork ( network_file , out_subset_network_file , outlet_ids , river_id_field , next_down_id_field , river_magnitude_field , safe_mode = True ) : network_shapefile = ogr . Open ( network_file ) network_layer = network_shapefile . GetLayer ( ) number_of_features = network_layer . GetFeatureCount ( ) netwo... | Extracts a subset river network from the main river network based on the outlet IDs . |
33,299 | def extractLargestSubNetwork ( cls , network_file , out_subset_network_file , river_id_field , next_down_id_field , river_magnitude_field , safe_mode = True ) : network_shapefile = ogr . Open ( network_file ) network_layer = network_shapefile . GetLayer ( ) number_of_features = network_layer . GetFeatureCount ( ) riv_m... | Extracts the largest sub network from the watershed based on the magnitude parameter .
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.