_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 31 13.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q20000 | modflow_hob_to_instruction_file | train | def modflow_hob_to_instruction_file(hob_file):
"""write an instruction file for a modflow head observation file
Parameters
----------
hob_file : str
modflow hob file
Returns
-------
df : pandas.DataFrame
pandas DataFrame with control file observation information
"""
hob_df = pd.read_csv(hob_file,delim_whitespace=True,skiprows=1,
header=None,names=["simval","obsval","obsnme"])
hob_df.loc[:,"obsnme"] = hob_df.obsnme.apply(str.lower)
| python | {
"resource": ""
} |
q20001 | modflow_hydmod_to_instruction_file | train | def modflow_hydmod_to_instruction_file(hydmod_file):
"""write an instruction file for a modflow hydmod file
Parameters
----------
hydmod_file : str
modflow hydmod file
Returns
-------
df : pandas.DataFrame
pandas DataFrame with control file observation information
Note
----
calls modflow_read_hydmod_file()
"""
hydmod_df, hydmod_outfile = modflow_read_hydmod_file(hydmod_file)
hydmod_df.loc[:,"ins_line"] = hydmod_df.obsnme.apply(lambda x:"l1 w !{0:s}!".format(x))
ins_file = hydmod_outfile + ".ins"
with open(ins_file, 'w') as f_ins:
f_ins.write("pif ~\nl1\n")
f_ins.write(hydmod_df.loc[:,["ins_line"]].to_string(col_space=0,
| python | {
"resource": ""
} |
q20002 | modflow_read_hydmod_file | train | def modflow_read_hydmod_file(hydmod_file, hydmod_outfile=None):
""" read in a binary hydmod file and return a dataframe of the results
Parameters
----------
hydmod_file : str
modflow hydmod binary file
hydmod_outfile : str
output file to write. If None, use <hydmod_file>.dat.
Default is None
Returns
-------
df : pandas.DataFrame
pandas DataFrame with hymod_file values
Note
----
requires flopy
"""
try:
import flopy.utils as fu
except Exception as e:
print('flopy is not installed - cannot read {0}\n{1}'.format(hydmod_file, e))
return
#print('Starting to read HYDMOD data from {0}'.format(hydmod_file))
obs = fu.HydmodObs(hydmod_file)
hyd_df = obs.get_dataframe()
hyd_df.columns = [i[2:] if i.lower() != 'totim' else i for i in hyd_df.columns]
#hyd_df.loc[:,"datetime"] = hyd_df.index
hyd_df['totim'] = hyd_df.index.map(lambda x: x.strftime("%Y%m%d"))
hyd_df.rename(columns={'totim': 'datestamp'}, inplace=True)
# reshape into a single column
hyd_df = pd.melt(hyd_df, id_vars='datestamp')
hyd_df.rename(columns={'value': 'obsval'}, inplace=True)
| python | {
"resource": ""
} |
q20003 | apply_mtlist_budget_obs | train | def apply_mtlist_budget_obs(list_filename,gw_filename="mtlist_gw.dat",
sw_filename="mtlist_sw.dat",
start_datetime="1-1-1970"):
""" process an MT3D list file to extract mass budget entries.
Parameters
----------
list_filename : str
the mt3d list file
gw_filename : str
the name of the output file with gw mass budget information.
Default is "mtlist_gw.dat"
sw_filename : str
the name of the output file with sw mass budget information.
Default is "mtlist_sw.dat"
start_datetime : str
an str that can be cast to a pandas.TimeStamp. Used to give
observations a meaningful name
Returns
-------
gw : pandas.DataFrame
the gw mass dataframe
sw : pandas.DataFrame (optional)
the sw mass dataframe
Note
----
requires flopy
if SFT is not active, no SW mass budget will be returned
"""
try:
import flopy
except Exception as e:
raise Exception("error import flopy: {0}".format(str(e)))
mt = flopy.utils.MtListBudget(list_filename)
gw, sw = mt.parse(start_datetime=start_datetime, diff=True)
| python | {
"resource": ""
} |
q20004 | setup_mflist_budget_obs | train | def setup_mflist_budget_obs(list_filename,flx_filename="flux.dat",
vol_filename="vol.dat",start_datetime="1-1'1970",prefix='',
save_setup_file=False):
""" setup observations of budget volume and flux from modflow list file. writes
an instruction file and also a _setup_.csv to use when constructing a pest
control file
Parameters
----------
list_filename : str
modflow list file
flx_filename : str
output filename that will contain the budget flux observations. Default is
"flux.dat"
vol_filename : str)
output filename that will contain the budget volume observations. Default
is "vol.dat"
start_datetime : str
an str that can be parsed into a pandas.TimeStamp. used to give budget
observations meaningful names
prefix : str
a prefix to add to the water budget observations. Useful if processing
more than one list file as part of the forward run process. Default is ''.
save_setup_file : (boolean)
a flag to save _setup_<list_filename>.csv file that contains useful
control file information
Returns
-------
df : pandas.DataFrame
a dataframe with information for constructing a control file. If INSCHEK fails
to run, returns None
Note
----
This function uses INSCHEK to get observation values; the observation values are
the values of the list file list_filename. If INSCHEK fails to run, the observation
values are set to 1.0E+10
the instruction files are named <flux_file>.ins and <vol_file>.ins, respectively
It is recommended to use the default values for flux_file and vol_file.
"""
flx,vol = apply_mflist_budget_obs(list_filename,flx_filename,vol_filename,
| python | {
"resource": ""
} |
q20005 | apply_mflist_budget_obs | train | def apply_mflist_budget_obs(list_filename,flx_filename="flux.dat",
vol_filename="vol.dat",
start_datetime="1-1-1970"):
""" process a MODFLOW list file to extract flux and volume water budget entries.
Parameters
----------
list_filename : str
the modflow list file
flx_filename : str
the name of the output file with water budget flux information.
Default is "flux.dat"
vol_filename : str
the name of the output file with water budget volume information.
| python | {
"resource": ""
} |
q20006 | apply_hds_obs | train | def apply_hds_obs(hds_file):
""" process a modflow head save file. A companion function to
setup_hds_obs that is called during the forward run process
Parameters
----------
hds_file : str
a modflow head save filename. if hds_file ends with 'ucn',
then the file is treated as a UcnFile type.
Note
----
requires flopy
writes <hds_file>.dat
expects <hds_file>.dat.ins to exist
uses pyemu.pst_utils.parse_ins_file to get observation names
"""
try:
import flopy
except Exception as e:
raise Exception("apply_hds_obs(): error importing flopy: {0}".\
format(str(e)))
from .. import pst_utils
assert os.path.exists(hds_file)
out_file = hds_file+".dat"
ins_file = out_file + ".ins"
assert os.path.exists(ins_file)
df = pd.DataFrame({"obsnme":pst_utils.parse_ins_file(ins_file)})
df.index = df.obsnme
# populate metadata
items = ["k","i","j","kper"]
for i,item in enumerate(items):
df.loc[:,item] = df.obsnme.apply(lambda x: int(x.split('_')[i+1]))
if | python | {
"resource": ""
} |
q20007 | modflow_sfr_gag_to_instruction_file | train | def modflow_sfr_gag_to_instruction_file(gage_output_file, ins_file=None, parse_filename=False):
"""writes an instruction file for an SFR gage output file to read Flow only at all times
Parameters
----------
gage_output_file : str
the gage output filename (ASCII).
ins_file : str
the name of the instruction file to create. If None, the name
is <gage_output_file>.ins. Default is None
parse_filename : bool
if True, get the gage_num parameter by parsing the gage output file filename
if False, get the gage number from the file itself
Returns
-------
df : pandas.DataFrame
a dataframe with obsnme and obsval for the sfr simulated flows.
If inschek was not successfully run, then returns None
ins_file : str
file name of instructions file relating to gage output.
obs_file : str
file name of processed gage output for all times
Note
----
sets up observations for gage outputs only for the Flow column.
if parse_filename is true, only text up to first '.' is used as the gage_num
TODO : allow other observation types and align explicitly with times - now returns all values
"""
if ins_file is None:
ins_file = gage_output_file + '.ins'
# navigate the file to be sure the header makes sense
indat = [line.strip() for line in open(gage_output_file, 'r').readlines()]
header = [i for i in indat if i.startswith('"')]
# yank out the gage number to identify the observation names
if parse_filename: | python | {
"resource": ""
} |
q20008 | Schur.pandas | train | def pandas(self):
"""get a pandas dataframe of prior and posterior for all predictions
Returns:
pandas.DataFrame : pandas.DataFrame
a dataframe with prior and posterior uncertainty estimates
for all forecasts (predictions)
"""
names,prior,posterior = [],[],[]
for iname,name in enumerate(self.posterior_parameter.row_names):
names.append(name)
posterior.append(np.sqrt(float(
self.posterior_parameter[iname, iname]. x)))
iprior = self.parcov.row_names.index(name)
prior.append(np.sqrt(float(self.parcov[iprior, | python | {
"resource": ""
} |
q20009 | Schur.map_parameter_estimate | train | def map_parameter_estimate(self):
""" get the posterior expectation for parameters using Bayes linear
estimation
Returns
-------
post_expt : pandas.DataFrame
a dataframe with prior and posterior parameter expectations
"""
res = self.pst.res
assert res is not None
# build the prior expectation parameter vector
prior_expt = self.pst.parameter_data.loc[:,["parval1"]].copy()
islog = self.pst.parameter_data.partrans == "log"
prior_expt.loc[islog] = prior_expt.loc[islog].apply(np.log10)
prior_expt = Matrix.from_dataframe(prior_expt)
prior_expt.col_names = ["prior_expt"]
| python | {
"resource": ""
} |
q20010 | Schur.get_parameter_summary | train | def get_parameter_summary(self,include_map=False):
"""get a summary of the parameter uncertainty
Parameters
----------
include_map : bool
if True, add the prior and posterior expectations
and report standard deviation instead of variance
Returns
-------
pandas.DataFrame : pandas.DataFrame
dataframe of prior,posterior variances and percent
uncertainty reduction of each parameter
Note
----
this is the primary method for accessing parameter uncertainty
estimates - use this!
Example
-------
``>>>import matplotlib.pyplot as plt``
``>>>import pyemu``
``>>>sc = pyemu.Schur(jco="pest.jcb")``
``>>>sc = pyemu.Schur(jco="pest.jcb",forecasts=["fore1","fore2"])``
``>>>par_sum = sc.get_parameter_summary()``
``>>>par_sum.plot(kind="bar")``
``>>>plt.show()``
"""
prior_mat = self.parcov.get(self.posterior_parameter.col_names)
if prior_mat.isdiagonal:
prior = prior_mat.x.flatten()
else:
prior = np.diag(prior_mat.x)
post = np.diag(self.posterior_parameter.x)
if include_map:
par_data = self.map_parameter_estimate
| python | {
"resource": ""
} |
q20011 | Schur.get_forecast_summary | train | def get_forecast_summary(self, include_map=False):
"""get a summary of the forecast uncertainty
Parameters
----------
include_map : bool
if True, add the prior and posterior expectations
and report standard deviation instead of variance
Returns
-------
pandas.DataFrame : pandas.DataFrame
dataframe of prior,posterior variances and percent
uncertainty reduction of each parameter
Note
----
this is the primary method for accessing forecast uncertainty
estimates - use this!
Example
-------
``>>>import matplotlib.pyplot as plt``
``>>>import pyemu``
This usage assumes you have set the ``++forecasts()`` argument in the
control file:
``>>>sc = pyemu.Schur(jco="pest.jcb")``
or, you can pass the forecasts directly, assuming the forecasts are
names of zero-weight observations:
``>>>sc = pyemu.Schur(jco="pest.jcb",forecasts=["fore1","fore2"])``
``>>>fore_sum = sc.get_forecast_summary()``
``>>>fore_sum.plot(kind="bar")``
``>>>plt.show()``
"""
sum = {"prior_var":[], "post_var":[], "percent_reduction":[]}
for forecast in self.prior_forecast.keys():
pr = self.prior_forecast[forecast]
pt = self.posterior_forecast[forecast]
| python | {
"resource": ""
} |
q20012 | Schur.__contribution_from_parameters | train | def __contribution_from_parameters(self, parameter_names):
"""private method get the prior and posterior uncertainty reduction as a result of
some parameter becoming perfectly known
Parameters
----------
parameter_names : list
parameter that are perfectly known
Returns
-------
dict : dict
dictionary of forecast name, [prior uncertainty w/o parameter_names,
% posterior uncertainty w/o parameter names]
Note
----
this method is used by get_parameter_contribution() method - don't
call this | python | {
"resource": ""
} |
q20013 | Schur.get_conditional_instance | train | def get_conditional_instance(self, parameter_names):
""" get a new Schur instance that includes conditional update from
some parameters becoming known perfectly
Parameters
----------
parameter_names : list
parameters that are to be treated as notionally perfectly
known
Returns
-------
la_cond : Schur
a new Schur instance conditional on perfect knowledge
of some parameters
Note
----
this method is used by the get_parameter_contribution() method -
don't call this method directly
"""
if not isinstance(parameter_names, list):
parameter_names = [parameter_names]
for iname, name in enumerate(parameter_names):
name = str(name).lower()
parameter_names[iname] = name
assert name in self.jco.col_names,\
"contribution parameter " + name + " not found jco"
keep_names = []
for name in self.jco.col_names:
if name not in parameter_names:
keep_names.append(name)
if len(keep_names) == 0:
| python | {
"resource": ""
} |
q20014 | Schur.get_par_contribution | train | def get_par_contribution(self,parlist_dict=None,include_prior_results=False):
"""get a dataframe the prior and posterior uncertainty
reduction as a result of some parameter becoming perfectly known
Parameters
----------
parlist_dict : dict
a nested dictionary-list of groups of parameters
that are to be treated as perfectly known. key values become
row labels in returned dataframe. If None, each adjustable parameter
is sequentially treated as known and the returned dataframe
has row labels for each adjustable parameter
include_prior_results : bool
flag to return a multi-indexed dataframe with both conditional
prior and posterior forecast uncertainty estimates. Default is False
Returns
-------
pandas.DataFrame : pandas.DataFrame
a dataframe that summarizes the parameter contribution analysis.
The dataframe has index (row labels) of the keys in parlist_dict
and a column labels of forecast names. The values in the dataframe
are the posterior variance of the forecast conditional on perfect
knowledge of the parameters in the values of parlist_dict. Varies
depending on `include_prior_results`.
Example
-------
``>>>import pyemu``
``>>>sc = pyemu.Schur(jco="pest.jcb")``
``>>>df = sc.get_par_contribution()``
"""
self.log("calculating contribution from parameters")
if parlist_dict is None:
parlist_dict = {}#dict(zip(self.pst.adj_par_names,self.pst.adj_par_names))
# make sure all of the adjustable pars are in the jco
for pname in self.pst.adj_par_names:
if pname in self.jco.col_names:
parlist_dict[pname] = pname
else:
if type(parlist_dict) == list:
parlist_dict = dict(zip(parlist_dict,parlist_dict))
results = {}
names = ["base"]
for forecast in self.prior_forecast.keys():
pr = self.prior_forecast[forecast]
pt = self.posterior_forecast[forecast]
#reduce = 100.0 * ((pr - pt) / pr)
results[(forecast,"prior")] = [pr]
| python | {
"resource": ""
} |
q20015 | ErrVar.omitted_jco | train | def omitted_jco(self):
"""get the omitted jco
Returns
-------
omitted_jco : pyemu.Jco
Note
----
returns a reference
if ErrorVariance.__omitted_jco is None,
then dynamically load the attribute before returning
"""
| python | {
"resource": ""
} |
q20016 | ErrVar.omitted_parcov | train | def omitted_parcov(self):
"""get the omitted prior parameter covariance matrix
Returns
-------
omitted_parcov : pyemu.Cov
Note
----
returns a reference
| python | {
"resource": ""
} |
q20017 | ErrVar.get_identifiability_dataframe | train | def get_identifiability_dataframe(self,singular_value=None,precondition=False):
"""get the parameter identifiability as a pandas dataframe
Parameters
----------
singular_value : int
the singular spectrum truncation point. Defaults to minimum of
non-zero-weighted observations and adjustable parameters
precondition : bool
flag to use the preconditioned hessian (xtqt + sigma_theta^-1).
Default is False
Returns
-------
pandas.DataFrame : pandas.DataFrame
A pandas dataframe of the V_1**2 Matrix with the
| python | {
"resource": ""
} |
q20018 | ErrVar.variance_at | train | def variance_at(self, singular_value):
"""get the error variance of all three terms at a singular value
Parameters
----------
singular_value : int
singular value to test
Returns
-------
dict : dict
dictionary of (err var term,prediction_name), standard_deviation pairs
| python | {
"resource": ""
} |
q20019 | ErrVar.I_minus_R | train | def I_minus_R(self,singular_value):
"""get I - R at singular value
Parameters
----------
singular_value : int
singular value to calc R at
Returns
-------
I - R : pyemu.Matrix
identity matrix minus resolution matrix at singular_value
"""
if self.__I_R is not None and singular_value == self.__I_R_sv:
return self.__I_R
else:
if singular_value > self.jco.ncol:
| python | {
"resource": ""
} |
q20020 | ErrVar.third_prediction | train | def third_prediction(self,singular_value):
"""get the omitted parameter contribution to prediction error variance
at a singular value. used to construct error variance dataframe
Parameters
----------
singular_value : int
singular value to calc third term at
Returns
-------
dict : dict
dictionary of ("third",prediction_names),error variance
"""
if not self.predictions:
raise Exception("ErrVar.third(): not predictions are set")
if self.__need_omitted is False:
zero_preds = {}
for pred in self.predictions_iter:
zero_preds[("third", pred.col_names[0])] = 0.0
return zero_preds
self.log("calc third term prediction @" + str(singular_value))
mn = min(self.jco.shape)
try:
mn = min(self.pst.npar_adj, self.pst.nnz_obs)
except:
pass
if singular_value > mn:
inf_pred = {}
for pred in self.predictions_iter:
| python | {
"resource": ""
} |
q20021 | pp_file_to_dataframe | train | def pp_file_to_dataframe(pp_filename):
""" read a pilot point file to a pandas Dataframe
Parameters
----------
pp_filename : str
pilot point file
Returns
-------
| python | {
"resource": ""
} |
q20022 | pp_tpl_to_dataframe | train | def pp_tpl_to_dataframe(tpl_filename):
""" read a pilot points template file to a pandas dataframe
Parameters
----------
tpl_filename : str
pilot points template file
Returns
-------
df : pandas.DataFrame
a dataframe with "parnme" included
"""
inlines = open(tpl_filename, 'r').readlines()
header = inlines.pop(0)
marker = header.strip().split()[1]
assert len(marker) == 1
usecols = [0,1,2,3]
| python | {
"resource": ""
} |
q20023 | write_pp_shapfile | train | def write_pp_shapfile(pp_df,shapename=None):
"""write pilot points dataframe to a shapefile
Parameters
----------
pp_df : pandas.DataFrame or str
pilot point dataframe or a pilot point filename. Dataframe
must include "x" and "y"
shapename : str
shapefile name. If None, pp_df must be str and shapefile
is saved as <pp_df>.shp
Note
----
requires pyshp
"""
try:
import shapefile
except Exception as e:
raise Exception("error importing shapefile: {0}, \ntry pip install pyshp...".format(str(e)))
if not isinstance(pp_df,list):
pp_df = [pp_df]
dfs = []
for pp in pp_df:
if isinstance(pp,pd.DataFrame):
dfs.append(pp)
elif isinstance(pp,str):
dfs.append(pp_file_to_dataframe(pp))
else:
raise Exception("unsupported arg type:{0}".format(type(pp)))
if shapename is None:
shapename = "pp_locs.shp"
try:
shp = shapefile.Writer(shapeType=shapefile.POINT)
except:
shp = shapefile.Writer(target=shapename, shapeType=shapefile.POINT)
for name, dtype in dfs[0].dtypes.iteritems():
if dtype == object:
shp.field(name=name, fieldType='C', size=50)
elif dtype in [int, np.int, np.int64, np.int32]:
| python | {
"resource": ""
} |
q20024 | write_pp_file | train | def write_pp_file(filename,pp_df):
"""write a pilot points dataframe to a pilot points file
Parameters
----------
filename : str
pilot points file to write
pp_df : pandas.DataFrame
a dataframe that has columns "x","y","zone", and "value"
"""
with open(filename,'w') as f:
f.write(pp_df.to_string(col_space=0,
columns=PP_NAMES,
| python | {
"resource": ""
} |
q20025 | pilot_points_to_tpl | train | def pilot_points_to_tpl(pp_file,tpl_file=None,name_prefix=None):
"""write a template file for a pilot points file
Parameters
----------
pp_file : str
pilot points file
tpl_file : str
template file name to write. If None, append ".tpl" to
the pp_file arg. Default is None
name_prefix : str
name to prepend to parameter names for each pilot point. For example,
if ``name_prefix = "hk_"``, then each pilot point parameter will be named
"hk_0001","hk_0002", etc. If None, parameter names from pp_df.name
are used. Default is None.
Returns
-------
pp_df : pandas.DataFrame
a dataframe with pilot point information (name,x,y,zone,parval1)
with the parameter information (parnme,tpl_str)
"""
if isinstance(pp_file,pd.DataFrame):
pp_df = pp_file
assert tpl_file is not None
else:
assert os.path.exists(pp_file)
pp_df = pd.read_csv(pp_file, delim_whitespace=True,
header=None, names=PP_NAMES)
if tpl_file is None:
tpl_file = pp_file + ".tpl"
if name_prefix is not None:
digits = str(len(str(pp_df.shape[0])))
fmt = "{0:0"+digits+"d}"
names = [name_prefix+fmt.format(i) | python | {
"resource": ""
} |
q20026 | run | train | def run(cmd_str,cwd='.',verbose=False):
""" an OS agnostic function to execute command
Parameters
----------
cmd_str : str
the str to execute with os.system()
cwd : str
the directory to execute the command in
verbose : bool
flag to echo to stdout complete cmd str
Note
----
uses platform to detect OS and adds .exe or ./ as appropriate
| python | {
"resource": ""
} |
q20027 | condition_on_par_knowledge | train | def condition_on_par_knowledge(cov,par_knowledge_dict):
""" experimental function to include conditional prior information
for one or more parameters in a full covariance matrix
"""
missing = []
for parnme in par_knowledge_dict.keys():
if parnme not in cov.row_names:
missing.append(parnme)
if len(missing):
raise Exception("par knowledge dict parameters not found: {0}".\
format(','.join(missing)))
| python | {
"resource": ""
} |
q20028 | kl_setup | train | def kl_setup(num_eig,sr,struct,prefixes,
factors_file="kl_factors.dat",islog=True, basis_file=None,
tpl_dir="."):
"""setup a karhuenen-Loeve based parameterization for a given
geostatistical structure.
Parameters
----------
num_eig : int
number of basis vectors to retain in the reduced basis
sr : flopy.reference.SpatialReference
struct : str or pyemu.geostats.Geostruct
geostatistical structure (or file containing one)
array_dict : dict
a dict of arrays to setup as KL-based parameters. The key becomes the
parameter name prefix. The total number of parameters is
len(array_dict) * num_eig
basis_file : str
the name of the PEST-format binary file where the reduced basis will be saved
tpl_file : str
the name of the template file to make. The template
file is a csv file with the parameter names, the
original factor values,and the template entries.
The original values can be used to set the parval1
entries in the control file
Returns
-------
back_array_dict : dict
a dictionary of back transformed arrays. This is useful to see
how much "smoothing" is taking place compared to the original
arrays.
Note
----
requires flopy
Example
-------
``>>>import flopy``
``>>>import pyemu``
``>>>m = flopy.modflow.Modflow.load("mymodel.nam")``
``>>>a_dict = {"hk":m.lpf.hk[0].array}``
``>>>ba_dict = pyemu.helpers.kl_setup(10,m.sr,"struct.dat",a_dict)``
"""
try:
import flopy
except Exception as e:
raise Exception("error import flopy: {0}".format(str(e)))
assert isinstance(sr,flopy.utils.SpatialReference)
# for name,array in array_dict.items():
# assert isinstance(array,np.ndarray)
# assert array.shape[0] == sr.nrow
# assert array.shape[1] == sr.ncol
# assert len(name) + len(str(num_eig)) <= 12,"name too long:{0}".\
# format(name)
if isinstance(struct,str):
assert os.path.exists(struct)
gs = pyemu.utils.read_struct_file(struct)
else:
gs = struct
names = []
for i in range(sr.nrow):
names.extend(["i{0:04d}j{1:04d}".format(i,j) for j in range(sr.ncol)])
cov = gs.covariance_matrix(sr.xcentergrid.flatten(),
sr.ycentergrid.flatten(),
names=names)
eig_names = ["eig_{0:04d}".format(i) for i in range(cov.shape[0])]
trunc_basis = cov.u
| python | {
"resource": ""
} |
q20029 | zero_order_tikhonov | train | def zero_order_tikhonov(pst, parbounds=True,par_groups=None,
reset=True):
"""setup preferred-value regularization
Parameters
----------
pst : pyemu.Pst
the control file instance
parbounds : bool
flag to weight the prior information equations according
to parameter bound width - approx the KL transform. Default
is True
par_groups : list
parameter groups to build PI equations for. If None, all
adjustable parameters are used. Default is None
reset : bool
flag to reset the prior_information attribute of the pst
instance. Default is True
Example
-------
``>>>import pyemu``
``>>>pst = pyemu.Pst("pest.pst")``
``>>>pyemu.helpers.zero_order_tikhonov(pst)``
"""
if par_groups is None:
par_groups = pst.par_groups
pilbl, obgnme, weight, equation = [], [], [], []
for idx, row in pst.parameter_data.iterrows():
| python | {
"resource": ""
} |
q20030 | first_order_pearson_tikhonov | train | def first_order_pearson_tikhonov(pst,cov,reset=True,abs_drop_tol=1.0e-3):
"""setup preferred-difference regularization from a covariance matrix.
The weights on the prior information equations are the Pearson
correlation coefficients implied by covariance matrix.
Parameters
----------
pst : pyemu.Pst
pst instance
cov : pyemu.Cov
covariance matrix instance
reset : bool
drop all other pi equations. If False, append to
existing pi equations
abs_drop_tol : float
tolerance to control how many pi equations are written.
If the Pearson C is less than abs_drop_tol, the prior information
equation will not be included in the control file
Example
-------
``>>>import pyemu``
``>>>pst = pyemu.Pst("pest.pst")``
``>>>cov = pyemu.Cov.from_ascii("prior.cov")``
``>>>pyemu.helpers.first_order_pearson_tikhonov(pst,cov,abs_drop_tol=0.25)``
"""
assert isinstance(cov,pyemu.Cov)
print("getting CC matrix")
cc_mat = cov.get(pst.adj_par_names).to_pearson()
#print(pst.parameter_data.dtypes)
try:
ptrans = pst.parameter_data.partrans.apply(lambda x:x.decode()).to_dict()
except:
ptrans = pst.parameter_data.partrans.to_dict()
pi_num = pst.prior_information.shape[0] + 1
pilbl, obgnme, weight, equation = [], [], [], []
sadj_names = set(pst.adj_par_names)
print("processing")
for i,iname in enumerate(cc_mat.row_names):
if iname not in sadj_names:
continue
for j,jname in enumerate(cc_mat.row_names[i+1:]):
if jname not in sadj_names:
| python | {
"resource": ""
} |
q20031 | apply_array_pars | train | def apply_array_pars(arr_par_file="arr_pars.csv"):
""" a function to apply array-based multipler parameters. Used to implement
the parameterization constructed by PstFromFlopyModel during a forward run
Parameters
----------
arr_par_file : str
path to csv file detailing parameter array multipliers
Note
----
"arr_pars.csv" - is written by PstFromFlopy
the function should be added to the forward_run.py script but can be called on any correctly formatted csv
"""
df = pd.read_csv(arr_par_file)
# for fname in df.model_file:
# try:
# os.remove(fname)
# except:
# print("error removing mult array:{0}".format(fname))
if 'pp_file' in df.columns:
for pp_file,fac_file,mlt_file in zip(df.pp_file,df.fac_file,df.mlt_file):
if pd.isnull(pp_file):
continue
pyemu.geostats.fac2real(pp_file=pp_file,factors_file=fac_file,
out_file=mlt_file,lower_lim=1.0e-10)
for model_file in df.model_file.unique():
# find all mults that need to be applied to this array
df_mf = df.loc[df.model_file==model_file,:]
results = []
org_file = df_mf.org_file.unique()
if org_file.shape[0] != 1:
raise Exception("wrong number of org_files for {0}".
| python | {
"resource": ""
} |
q20032 | PstFromFlopyModel.setup_sfr_obs | train | def setup_sfr_obs(self):
"""setup sfr ASCII observations"""
if not self.sfr_obs:
return
if self.m.sfr is None:
self.logger.lraise("no sfr package found...")
org_sfr_out_file = os.path.join(self.org_model_ws,"{0}.sfr.out".format(self.m.name))
if not os.path.exists(org_sfr_out_file):
self.logger.lraise("setup_sfr_obs() error: could not locate existing sfr out file: {0}".
format(org_sfr_out_file))
new_sfr_out_file = os.path.join(self.m.model_ws,os.path.split(org_sfr_out_file)[-1])
shutil.copy2(org_sfr_out_file,new_sfr_out_file)
seg_group_dict = None
| python | {
"resource": ""
} |
q20033 | PstFromFlopyModel.setup_mult_dirs | train | def setup_mult_dirs(self):
""" setup the directories to use for multiplier parameterization. Directories
are make within the PstFromFlopyModel.m.model_ws directory
"""
# setup dirs to hold the original and multiplier model input quantities
set_dirs = []
# if len(self.pp_props) > 0 or len(self.zone_props) > 0 or \
# len(self.grid_props) > 0:
if self.pp_props is not None or \
self.zone_props is not None or \
self.grid_props is not None or\
self.const_props is not None or \
| python | {
"resource": ""
} |
q20034 | PstFromFlopyModel.setup_model | train | def setup_model(self,model,org_model_ws,new_model_ws):
""" setup the flopy.mbase instance for use with multipler parameters.
Changes model_ws, sets external_path and writes new MODFLOW input
files
Parameters
----------
model : flopy.mbase
flopy model instance
org_model_ws : str
the orginal model working space
new_model_ws : str
the new model working space
"""
split_new_mws = [i for i in os.path.split(new_model_ws) if len(i) > 0]
if len(split_new_mws) != 1:
self.logger.lraise("new_model_ws can only be 1 folder-level deep:{0}".
format(str(split_new_mws)))
if isinstance(model,str):
self.log("loading flopy model")
try:
import flopy
except:
raise Exception("from_flopy_model() requires flopy")
# prepare the flopy model
self.org_model_ws = org_model_ws
self.new_model_ws = new_model_ws
self.m = flopy.modflow.Modflow.load(model,model_ws=org_model_ws,
check=False,verbose=True,forgive=False)
self.log("loading flopy model")
else:
| python | {
"resource": ""
} |
q20035 | PstFromFlopyModel.get_count | train | def get_count(self,name):
""" get the latest counter for a certain parameter type.
Parameters
----------
name : str
the parameter type
Returns
-------
count : int
the latest count for a parameter type | python | {
"resource": ""
} |
q20036 | PstFromFlopyModel.write_u2d | train | def write_u2d(self, u2d):
""" write a flopy.utils.Util2D instance to an ASCII text file using the
Util2D filename
Parameters
----------
u2d : flopy.utils.Util2D
| python | {
"resource": ""
} |
q20037 | PstFromFlopyModel.write_grid_tpl | train | def write_grid_tpl(self,name,tpl_file,zn_array):
""" write a template file a for grid-based multiplier parameters
Parameters
----------
name : str
the base parameter name
tpl_file : str
the template file to write
zn_array : numpy.ndarray
an array used to skip inactive cells
Returns
-------
df : pandas.DataFrame
a dataframe with parameter information
"""
parnme,x,y = [],[],[]
with open(os.path.join(self.m.model_ws,tpl_file),'w') as f:
f.write("ptf ~\n")
for i in range(self.m.nrow):
| python | {
"resource": ""
} |
q20038 | PstFromFlopyModel.grid_prep | train | def grid_prep(self):
""" prepare grid-based parameterizations
"""
if len(self.grid_props) == 0:
return
if self.grid_geostruct is None:
self.logger.warn("grid_geostruct is None,"\
" using ExpVario with contribution=1 and a=(max(delc,delr)*10")
dist = 10 * float(max(self.m.dis.delr.array.max(),
| python | {
"resource": ""
} |
q20039 | PstFromFlopyModel.kl_prep | train | def kl_prep(self,mlt_df):
""" prepare KL based parameterizations
Parameters
----------
mlt_df : pandas.DataFrame
a dataframe with multiplier array information
Note
----
calls pyemu.helpers.setup_kl()
"""
if len(self.kl_props) == 0:
return
if self.kl_geostruct is None:
self.logger.warn("kl_geostruct is None,"\
" using ExpVario with contribution=1 and a=(10.0*max(delr,delc))")
kl_dist = 10.0 * float(max(self.m.dis.delr.array.max(),
self.m.dis.delc.array.max()))
v = pyemu.geostats.ExpVario(contribution=1.0,a=kl_dist)
self.kl_geostruct = pyemu.geostats.GeoStruct(variograms=v)
kl_df = mlt_df.loc[mlt_df.suffix==self.kl_suffix,:]
layers = kl_df.layer.unique()
#kl_dict = {l:list(kl_df.loc[kl_df.layer==l,"prefix"].unique()) for l in layers}
# big assumption here - if prefix is listed more than once, use the lowest layer index
#for i,l in enumerate(layers):
# p = set(kl_dict[l])
| python | {
"resource": ""
} |
q20040 | PstFromFlopyModel.setup_observations | train | def setup_observations(self):
""" main entry point for setting up observations
"""
obs_methods = [self.setup_water_budget_obs,self.setup_hyd,
self.setup_smp,self.setup_hob,self.setup_hds,
self.setup_sfr_obs]
obs_types = ["mflist water budget obs","hyd file",
| python | {
"resource": ""
} |
q20041 | PstFromFlopyModel.draw | train | def draw(self, num_reals=100, sigma_range=6):
""" draw like a boss!
Parameters
----------
num_reals : int
number of realizations to generate. Default is 100
sigma_range : float
number of standard deviations represented by the parameter bounds. Default
is 6.
Returns
-------
cov : pyemu.Cov
a full covariance matrix
"""
self.log("drawing realizations")
struct_dict = {}
if self.pp_suffix in self.par_dfs.keys():
pp_df = self.par_dfs[self.pp_suffix]
pp_dfs = []
for pargp in pp_df.pargp.unique():
gp_df = pp_df.loc[pp_df.pargp==pargp,:]
p_df = gp_df.drop_duplicates(subset="parnme")
pp_dfs.append(p_df)
#pp_dfs = [pp_df.loc[pp_df.pargp==pargp,:].copy() for pargp in pp_df.pargp.unique()]
struct_dict[self.pp_geostruct] = pp_dfs
if self.gr_suffix in self.par_dfs.keys():
gr_df = self.par_dfs[self.gr_suffix]
gr_dfs = []
| python | {
"resource": ""
} |
q20042 | PstFromFlopyModel.write_forward_run | train | def write_forward_run(self):
""" write the forward run script forward_run.py
"""
with open(os.path.join(self.m.model_ws,self.forward_run_file),'w') as f:
f.write("import os\nimport numpy as np\nimport pandas as pd\nimport flopy\n")
f.write("import pyemu\n")
for ex_imp in self.extra_forward_imports:
f.write('import {0}\n'.format(ex_imp))
for tmp_file in self.tmp_files:
f.write("try:\n")
f.write(" os.remove('{0}')\n".format(tmp_file))
f.write("except Exception | python | {
"resource": ""
} |
q20043 | PstFromFlopyModel.parse_k | train | def parse_k(self,k,vals):
""" parse the iterable from a property or boundary condition argument
Parameters
----------
k : int or iterable int
the iterable
vals : iterable of ints
the acceptable values that k may contain
Returns
-------
k_vals : iterable of int
| python | {
"resource": ""
} |
q20044 | PstFromFlopyModel.parse_pakattr | train | def parse_pakattr(self,pakattr):
""" parse package-iterable pairs from a property or boundary condition
argument
Parameters
----------
pakattr : iterable len 2
Returns
-------
pak : flopy.PakBase
the flopy package from the model instance
attr : (varies)
the flopy attribute from pak. Could be Util2D, Util3D,
Transient2D, or MfList
attrname : (str)
the name of the attribute for MfList type. Only returned if
attr is MfList. For example, if attr is MfList and pak is
flopy.modflow.ModflowWel, then attrname can only be "flux"
"""
raw = pakattr.lower().split('.')
if len(raw) != 2:
self.logger.lraise("pakattr is wrong:{0}".format(pakattr))
pakname = raw[0]
attrname = raw[1]
pak = self.m.get_package(pakname)
if pak is None:
if pakname == "extra":
self.logger.statement("'extra' pak detected:{0}".format(pakattr))
ud = flopy.utils.Util3d(self.m,(self.m.nlay,self.m.nrow,self.m.ncol),np.float32,1.0,attrname)
return "extra",ud
self.logger.lraise("pak {0} not found".format(pakname))
if hasattr(pak,attrname):
attr = getattr(pak,attrname)
return pak,attr
| python | {
"resource": ""
} |
q20045 | PstFromFlopyModel.setup_list_pars | train | def setup_list_pars(self):
""" main entry point for setting up list multiplier
parameters
"""
tdf = self.setup_temporal_list_pars()
sdf = self.setup_spatial_list_pars()
if tdf is None and sdf is None:
return
os.chdir(self.m.model_ws)
try:
apply_list_pars()
except Exception as e:
os.chdir("..")
self.logger.lraise("error test running | python | {
"resource": ""
} |
q20046 | PstFromFlopyModel.list_helper | train | def list_helper(self,k,pak,attr,col):
""" helper to setup list multiplier parameters for a given
k, pak, attr set.
Parameters
----------
k : int or iterable of int
the zero-based stress period indices
pak : flopy.PakBase=
the MODFLOW package
attr : MfList
the MfList instance
col : str
the column name in the MfList recarray to parameterize
"""
# special case for horrible HFB6 exception
# if type(pak) == flopy.modflow.mfhfb.ModflowHfb:
| python | {
"resource": ""
} |
q20047 | PstFromFlopyModel.setup_smp | train | def setup_smp(self):
""" setup observations from PEST-style SMP file pairs
"""
if self.obssim_smp_pairs is None:
return
if len(self.obssim_smp_pairs) == 2:
if isinstance(self.obssim_smp_pairs[0],str):
self.obssim_smp_pairs = [self.obssim_smp_pairs]
for obs_smp,sim_smp in self.obssim_smp_pairs:
self.log("processing {0} and {1} smp files".format(obs_smp,sim_smp))
if not os.path.exists(obs_smp):
self.logger.lraise("couldn't find obs smp: {0}".format(obs_smp))
if not os.path.exists(sim_smp):
self.logger.lraise("couldn't find sim smp: {0}".format(sim_smp))
| python | {
"resource": ""
} |
q20048 | PstFromFlopyModel.setup_hob | train | def setup_hob(self):
""" setup observations from the MODFLOW HOB package
"""
if self.m.hob is None:
return
hob_out_unit = self.m.hob.iuhobsv
new_hob_out_fname = os.path.join(self.m.model_ws,self.m.get_output_attribute(unit=hob_out_unit))
org_hob_out_fname = os.path.join(self.org_model_ws,self.m.get_output_attribute(unit=hob_out_unit))
if not os.path.exists(org_hob_out_fname):
self.logger.warn("could not find hob out file: {0}...skipping".format(hob_out_fname))
| python | {
"resource": ""
} |
q20049 | PstFromFlopyModel.setup_hyd | train | def setup_hyd(self):
""" setup observations from the MODFLOW HYDMOD package
"""
if self.m.hyd is None:
return
if self.mfhyd:
org_hyd_out = os.path.join(self.org_model_ws,self.m.name+".hyd.bin")
if not os.path.exists(org_hyd_out):
self.logger.warn("can't find existing hyd out file:{0}...skipping".
format(org_hyd_out))
return
new_hyd_out = os.path.join(self.m.model_ws,os.path.split(org_hyd_out)[-1])
shutil.copy2(org_hyd_out,new_hyd_out)
df = pyemu.gw_utils.modflow_hydmod_to_instruction_file(new_hyd_out)
df.loc[:,"obgnme"] = df.obsnme.apply(lambda x: '_'.join(x.split('_')[:-1]))
| python | {
"resource": ""
} |
q20050 | PstFromFlopyModel.setup_water_budget_obs | train | def setup_water_budget_obs(self):
""" setup observations from the MODFLOW list file for
volume and flux water buget information
"""
if self.mflist_waterbudget:
org_listfile = os.path.join(self.org_model_ws,self.m.lst.file_name[0])
if os.path.exists(org_listfile):
shutil.copy2(org_listfile,os.path.join(self.m.model_ws,
self.m.lst.file_name[0]))
else:
self.logger.warn("can't find existing list file:{0}...skipping".
format(org_listfile))
return
list_file = os.path.join(self.m.model_ws,self.m.lst.file_name[0])
flx_file = os.path.join(self.m.model_ws,"flux.dat")
vol_file = os.path.join(self.m.model_ws,"vol.dat")
df = | python | {
"resource": ""
} |
q20051 | read_resfile | train | def read_resfile(resfile):
"""load a residual file into a pandas.DataFrame
Parameters
----------
resfile : str
residual file name
Returns
-------
pandas.DataFrame : pandas.DataFrame
"""
assert os.path.exists(resfile),"read_resfile() error: resfile " +\
"{0} not found".format(resfile)
converters = | python | {
"resource": ""
} |
q20052 | res_from_en | train | def res_from_en(pst,enfile):
"""load ensemble file for residual into a pandas.DataFrame
Parameters
----------
enfile : str
ensemble file name
Returns
-------
pandas.DataFrame : pandas.DataFrame
"""
converters = {"name": str_con, "group": str_con}
try: #substitute ensemble for res, 'base' if there, otherwise mean
obs=pst.observation_data
if isinstance(enfile,str):
df=pd.read_csv(enfile,converters=converters)
| python | {
"resource": ""
} |
q20053 | read_parfile | train | def read_parfile(parfile):
"""load a pest-compatible .par file into a pandas.DataFrame
Parameters
----------
parfile : str
pest parameter file name
Returns
-------
pandas.DataFrame : pandas.DataFrame
"""
assert os.path.exists(parfile), "Pst.parrep(): parfile not | python | {
"resource": ""
} |
q20054 | write_parfile | train | def write_parfile(df,parfile):
""" write a pest parameter file from a dataframe
Parameters
----------
df : (pandas.DataFrame)
dataframe with column names that correspond to the entries
in the parameter data section of a pest control file
parfile : str
name of the parameter file to write
"""
columns = ["parnme","parval1","scale","offset"]
formatters = {"parnme":lambda x:"{0:20s}".format(x),
"parval1":lambda x:"{0:20.7E}".format(x),
"scale":lambda x:"{0:20.7E}".format(x),
"offset":lambda x:"{0:20.7E}".format(x)}
for col in columns:
| python | {
"resource": ""
} |
q20055 | parse_tpl_file | train | def parse_tpl_file(tpl_file):
""" parse a pest template file to get the parameter names
Parameters
----------
tpl_file : str
template file name
Returns
-------
par_names : list
list of parameter names
"""
par_names = set()
with open(tpl_file,'r') as f:
try:
header = f.readline().strip().split()
assert header[0].lower() in ["ptf","jtf"],\
"template file error: must start with [ptf,jtf], not:" +\
str(header[0])
assert len(header) == 2,\
"template file error: header line must have two entries: " +\
str(header)
marker = header[1]
assert len(marker) == 1,\
"template file error: marker must be a single character, not:" +\
str(marker)
for line in f:
par_line = set(line.lower().strip().split(marker)[1::2])
| python | {
"resource": ""
} |
q20056 | write_to_template | train | def write_to_template(parvals,tpl_file,in_file):
""" write parameter values to model input files using template files
Parameters
----------
parvals : dict or pandas.Series
a way to look up parameter values using parameter names
tpl_file : str
template file
in_file : str
input file
"""
f_in = open(in_file,'w')
f_tpl = open(tpl_file,'r')
header = f_tpl.readline().strip().split()
assert header[0].lower() in ["ptf", "jtf"], \
"template file error: must start with [ptf,jtf], not:" + \
str(header[0])
assert len(header) == 2, \
"template file error: header line must have two entries: " + \
str(header)
marker = header[1]
assert len(marker) == 1, \
"template file error: marker must be a single character, not:" + \
str(marker)
for line in f_tpl:
if marker not in line:
f_in.write(line)
else:
line = line.rstrip()
par_names = line.lower().split(marker)[1::2]
par_names = [name.strip() for name in par_names]
start,end = get_marker_indices(marker,line)
| python | {
"resource": ""
} |
q20057 | parse_ins_file | train | def parse_ins_file(ins_file):
"""parse a pest instruction file to get observation names
Parameters
----------
ins_file : str
instruction file name
Returns
-------
list of observation names
"""
obs_names = []
with open(ins_file,'r') as f:
header = f.readline().strip().split()
assert header[0].lower() in ["pif","jif"],\
"instruction file error: must start with [pif,jif], not:" +\
str(header[0])
marker = header[1]
assert len(marker) == 1,\
"instruction file error: marker must be a single character, not:" +\
| python | {
"resource": ""
} |
q20058 | parse_ins_string | train | def parse_ins_string(string):
""" split up an instruction file line to get the observation names
Parameters
----------
string : str
instruction file line
Returns
-------
obs_names : list
list of observation names
"""
istart_markers = ["[","(","!"]
iend_markers = ["]",")","!"]
obs_names = []
idx = 0
while True:
if idx >= len(string) - 1:
break
char = string[idx]
if char in istart_markers:
em = iend_markers[istart_markers.index(char)]
# print("\n",idx)
| python | {
"resource": ""
} |
q20059 | populate_dataframe | train | def populate_dataframe(index,columns, default_dict, dtype):
""" helper function to populate a generic Pst dataframe attribute. This
function is called as part of constructing a generic Pst instance
Parameters
----------
index : (varies)
something to use as the dataframe index
columns: (varies)
something to use as the dataframe columns
default_dict : (dict)
dictionary of default values for columns
dtype : numpy.dtype
dtype used to cast dataframe columns
Returns
-------
new_df : pandas.DataFrame
"""
| python | {
"resource": ""
} |
q20060 | generic_pst | train | def generic_pst(par_names=["par1"],obs_names=["obs1"],addreg=False):
"""generate a generic pst instance. This can used to later fill in
the Pst parts programatically.
Parameters
----------
par_names : (list)
parameter names to setup
obs_names : (list)
observation names to setup
Returns
-------
new_pst : pyemu.Pst
"""
if not isinstance(par_names,list):
par_names = list(par_names)
if not isinstance(obs_names,list):
obs_names = list(obs_names)
new_pst = pyemu.Pst("pest.pst",load=False)
pargp_data = populate_dataframe(["pargp"], new_pst.pargp_fieldnames,
new_pst.pargp_defaults, new_pst.pargp_dtype)
new_pst.parameter_groups = pargp_data
par_data | python | {
"resource": ""
} |
q20061 | try_run_inschek | train | def try_run_inschek(pst):
""" attempt to run INSCHEK for each instruction file, model output
file pair in a pyemu.Pst. If the run is successful, the INSCHEK written
.obf file is used to populate the pst.observation_data.obsval attribute
| python | {
"resource": ""
} |
q20062 | get_phi_comps_from_recfile | train | def get_phi_comps_from_recfile(recfile):
"""read the phi components from a record file by iteration
Parameters
----------
recfile : str
pest record file name
Returns
-------
iters : dict
nested dictionary of iteration number, {group,contribution}
"""
iiter = 1
iters = {}
f = open(recfile,'r')
while True:
line = f.readline()
if line == '':
break
if "starting phi for this iteration" in line.lower() or \
"final phi" in line.lower():
contributions = {}
while True:
line = f.readline()
if line == '':
break
| python | {
"resource": ""
} |
q20063 | res_from_obseravtion_data | train | def res_from_obseravtion_data(observation_data):
"""create a generic residual dataframe filled with np.NaN for
missing information
Parameters
----------
observation_data : pandas.DataFrame
pyemu.Pst.observation_data
Returns
-------
res_df : pandas.DataFrame
"""
res_df = observation_data.copy()
res_df.loc[:, | python | {
"resource": ""
} |
q20064 | clean_missing_exponent | train | def clean_missing_exponent(pst_filename,clean_filename="clean.pst"):
"""fixes the issue where some terrible fortran program may have
written a floating point format without the 'e' - like 1.0-3, really?!
Parameters
----------
pst_filename : str
the pest control file
clean_filename : str
the new pest control file to write. Default is "clean.pst"
Returns
| python | {
"resource": ""
} |
q20065 | Pst.phi | train | def phi(self):
"""get the weighted total objective function
Returns
-------
phi : float
sum of squared residuals
"""
sum = | python | {
"resource": ""
} |
q20066 | Pst.phi_components | train | def phi_components(self):
""" get the individual components of the total objective function
Returns
-------
dict : dict
dictionary of observation group, contribution to total phi
Raises
------
Assertion error if Pst.observation_data groups don't match
Pst.res groups
"""
# calculate phi components for each obs group
components = {}
ogroups = self.observation_data.groupby("obgnme").groups
rgroups = self.res.groupby("group").groups
self.res.index = self.res.name
for og,onames in ogroups.items():
#assert og in rgroups.keys(),"Pst.phi_componentw obs group " +\
# "not found: " + str(og)
#og_res_df = self.res.ix[rgroups[og]]
og_res_df = self.res.loc[onames,:].dropna()
#og_res_df.index = og_res_df.name
og_df = self.observation_data.ix[ogroups[og]]
og_df.index = og_df.obsnme
#og_res_df = og_res_df.loc[og_df.index,:]
assert og_df.shape[0] == og_res_df.shape[0],\
" Pst.phi_components error: group residual dataframe row length" +\
"doesn't match observation data group dataframe row length" + \
str(og_df.shape) + " vs. " + str(og_res_df.shape)
components[og] = np.sum((og_res_df["residual"] *
og_df["weight"]) ** 2)
| python | {
"resource": ""
} |
q20067 | Pst.phi_components_normalized | train | def phi_components_normalized(self):
""" get the individual components of the total objective function
normalized to the total PHI being 1.0
Returns
-------
dict : dict
dictionary of observation group, normalized contribution to total phi
| python | {
"resource": ""
} |
q20068 | Pst.set_res | train | def set_res(self,res):
""" reset the private Pst.res attribute
Parameters
----------
res : (varies)
something to use as Pst.res attribute
"""
| python | {
"resource": ""
} |
q20069 | Pst.res | train | def res(self):
"""get the residuals dataframe attribute
Returns
-------
res : pandas.DataFrame
Note
----
if the Pst.__res attribute has not been loaded,
this call loads the res dataframe from a file
"""
if self.__res is not None:
return self.__res
else:
if self.resfile is not None:
assert os.path.exists(self.resfile),"Pst.res: self.resfile " +\
str(self.resfile) + " does not exist"
else:
self.resfile = self.filename.replace(".pst", ".res")
if not os.path.exists(self.resfile):
self.resfile = self.resfile.replace(".res", ".rei")
if not os.path.exists(self.resfile):
self.resfile = self.resfile.replace(".rei", ".base.rei")
if not os.path.exists(self.resfile):
if self.new_filename is not None:
self.resfile = self.new_filename.replace(".pst",".res")
if not os.path.exists(self.resfile):
self.resfile = self.resfile.replace(".res","rei")
if not os.path.exists(self.resfile):
raise Exception("Pst.res: " +
| python | {
"resource": ""
} |
q20070 | Pst.nprior | train | def nprior(self):
"""number of prior information equations
Returns
-------
nprior : int
the number of prior info equations
"""
| python | {
"resource": ""
} |
q20071 | Pst.nnz_obs | train | def nnz_obs(self):
""" get the number of non-zero weighted observations
Returns
-------
nnz_obs : int
the number of non-zeros weighted observations
"""
| python | {
"resource": ""
} |
q20072 | Pst.nobs | train | def nobs(self):
"""get the number of observations
Returns
-------
nobs : int
| python | {
"resource": ""
} |
q20073 | Pst.npar | train | def npar(self):
"""get number of parameters
Returns
-------
npar : int
| python | {
"resource": ""
} |
q20074 | Pst.pars_in_groups | train | def pars_in_groups(self):
"""
return a dictionary of parameter names in each parameter group.
Returns:
dictionary
"""
pargp = self.par_groups
allpars = dict()
| python | {
"resource": ""
} |
q20075 | Pst.obs_groups | train | def obs_groups(self):
"""get the observation groups
Returns
-------
obs_groups : list
| python | {
"resource": ""
} |
q20076 | Pst.nnz_obs_groups | train | def nnz_obs_groups(self):
""" get the observation groups that contain at least one non-zero weighted
observation
Returns
-------
nnz_obs_groups : list
a list of observation groups that contain at
least one non-zero weighted observation
| python | {
"resource": ""
} |
q20077 | Pst.adj_par_groups | train | def adj_par_groups(self):
"""get the parameter groups with atleast one adjustable parameter
Returns
-------
adj_par_groups : list
a list of parameter groups with at least one adjustable parameter
"""
adj_pargp = []
for pargp in self.par_groups: | python | {
"resource": ""
} |
q20078 | Pst.prior_groups | train | def prior_groups(self):
"""get the prior info groups
Returns
-------
prior_groups : list
| python | {
"resource": ""
} |
q20079 | Pst.prior_names | train | def prior_names(self):
""" get the prior information names
Returns
-------
prior_names : list
a list of prior information names
"""
| python | {
"resource": ""
} |
q20080 | Pst.nnz_obs_names | train | def nnz_obs_names(self):
"""get the non-zero weight observation names
Returns
-------
nnz_obs_names : list
a list of non-zero weighted observation names
"""
| python | {
"resource": ""
} |
q20081 | Pst.zero_weight_obs_names | train | def zero_weight_obs_names(self):
""" get the zero-weighted observation names
Returns
-------
zero_weight_obs_names : list
a list of zero-weighted observation names
"""
self.observation_data.index = self.observation_data.obsnme
groups = self.observation_data.groupby(
| python | {
"resource": ""
} |
q20082 | Pst._read_df | train | def _read_df(f,nrows,names,converters,defaults=None):
""" a private method to read part of an open file into a pandas.DataFrame.
Parameters
----------
f : file object
nrows : int
number of rows to read
names : list
names to set the columns of the dataframe with
converters : dict
dictionary of lambda functions to convert strings
to numerical format
defaults : dict
dictionary of default values to assign columns.
Default is None
Returns
-------
pandas.DataFrame : pandas.DataFrame
"""
seek_point = f.tell()
line = f.readline()
raw = line.strip().split()
if raw[0].lower() == "external":
filename = raw[1]
assert os.path.exists(filename),"Pst._read_df() error: external file '{0}' not found".format(filename)
df = pd.read_csv(filename,index_col=False,comment='#')
df.columns = df.columns.str.lower()
for name in names:
assert name in df.columns,"Pst._read_df() error: name" +\
"'{0}' not in external file '{1}' columns".format(name,filename)
if name in converters:
df.loc[:,name] = df.loc[:,name].apply(converters[name])
if defaults is not None:
for name in names:
df.loc[:, name] = df.loc[:, name].fillna(defaults[name])
else:
if nrows is None:
raise Exception("Pst._read_df() error: non-external sections require nrows")
f.seek(seek_point)
df = pd.read_csv(f, header=None,names=names,
nrows=nrows,delim_whitespace=True,
converters=converters, index_col=False,
| python | {
"resource": ""
} |
q20083 | Pst.rectify_pgroups | train | def rectify_pgroups(self):
""" private method to synchronize parameter groups section with
the parameter data section
"""
# add any parameters groups
pdata_groups = list(self.parameter_data.loc[:,"pargp"].\
value_counts().keys())
#print(pdata_groups)
need_groups = []
existing_groups = list(self.parameter_groups.pargpnme)
for pg in pdata_groups:
if pg not in existing_groups:
need_groups.append(pg)
if len(need_groups) > 0:
#print(need_groups)
defaults = copy.copy(pst_utils.pst_config["pargp_defaults"])
for grp in need_groups:
| python | {
"resource": ""
} |
q20084 | Pst._parse_pi_par_names | train | def _parse_pi_par_names(self):
""" private method to get the parameter names from prior information
equations. Sets a 'names' column in Pst.prior_information that is a list
of parameter names
"""
if self.prior_information.shape[0] == 0:
return
if "names" in self.prior_information.columns:
self.prior_information.pop("names")
if "rhs" in self.prior_information.columns:
self.prior_information.pop("rhs")
def parse(eqs):
raw = eqs.split('=')
rhs = float(raw[1])
raw = [i for i in re.split('[###]',
raw[0].lower().strip().replace(' + ','###').replace(' - ','###')) if i != '']
# in case of a leading '-' or '+'
if len(raw[0]) == 0:
raw = raw[1:]
# pnames = []
| python | {
"resource": ""
} |
q20085 | Pst.add_pi_equation | train | def add_pi_equation(self,par_names,pilbl=None,rhs=0.0,weight=1.0,
obs_group="pi_obgnme",coef_dict={}):
""" a helper to construct a new prior information equation.
Parameters
----------
par_names : list
parameter names in the equation
pilbl : str
name to assign the prior information equation. If None,
a generic equation name is formed. Default is None
rhs : (float)
the right-hand side of the equation
weight : (float)
the weight of the equation
obs_group : str
the observation group for the equation. Default is 'pi_obgnme'
coef_dict : dict
a dictionary of parameter name, coefficient pairs to assign
leading coefficients for one or more parameters in the equation.
If a parameter is not listed, 1.0 is used for its coefficients.
Default is {}
"""
if pilbl is None:
pilbl = "pilbl_{0}".format(self.__pi_count)
self.__pi_count += 1
missing,fixed = [],[]
for par_name in par_names:
if par_name not in self.parameter_data.parnme:
missing.append(par_name)
elif self.parameter_data.loc[par_name,"partrans"] in ["fixed","tied"]:
fixed.append(par_name)
if len(missing) > 0:
| python | {
"resource": ""
} |
q20086 | Pst.write | train | def write(self,new_filename,update_regul=True,version=None):
"""main entry point to write a pest control file.
Parameters
----------
new_filename : str
name of the new pest control file
update_regul : (boolean)
flag to update zero-order Tikhonov prior information
equations to prefer the current parameter values
version : int
flag for which version of control file to write (must be 1 or 2).
if None, uses Pst._version, which set in the constructor and modified
during the load
| python | {
"resource": ""
} |
q20087 | Pst.parrep | train | def parrep(self, parfile=None,enforce_bounds=True):
"""replicates the pest parrep util. replaces the parval1 field in the
parameter data section dataframe
Parameters
----------
parfile : str
parameter file to use. If None, try to use
a parameter file that corresponds to the case name.
Default is None
enforce_hounds : bool
flag to enforce parameter bounds after parameter values are updated.
This is useful because PEST and PEST++ round the parameter values in the
par file, which may cause slight bound violations
"""
if parfile is None:
parfile = self.filename.replace(".pst", ".par")
par_df | python | {
"resource": ""
} |
q20088 | Pst.adjust_weights_recfile | train | def adjust_weights_recfile(self, recfile=None,original_ceiling=True):
"""adjusts the weights by group of the observations based on the phi components
in a pest record file so that total phi is equal to the number of
non-zero weighted observations
Parameters
----------
recfile : str
record file name. If None, try to use a record file
with the Pst case name. Default is None
original_ceiling : bool
flag to keep weights from increasing - this is generally a good idea.
Default is True
"""
if recfile is None:
recfile = self.filename.replace(".pst", ".rec")
assert os.path.exists(recfile), \
"Pst.adjust_weights_recfile(): recfile not found: " +\
str(recfile)
iter_components = pst_utils.get_phi_comps_from_recfile(recfile)
iters = iter_components.keys()
iters.sort()
obs = self.observation_data
ogroups = obs.groupby("obgnme").groups
last_complete_iter = None
for ogroup, idxs in ogroups.iteritems():
for iiter in iters[::-1]:
| python | {
"resource": ""
} |
q20089 | Pst.adjust_weights_resfile | train | def adjust_weights_resfile(self, resfile=None,original_ceiling=True):
"""adjusts the weights by group of the observations based on the phi components
in a pest residual file so that total phi is equal to the number of
non-zero weighted observations
Parameters
----------
resfile : str
residual file name. If None, try to use a residual file
with the Pst case name. Default is None
original_ceiling : bool
flag to keep weights | python | {
"resource": ""
} |
q20090 | Pst.adjust_weights_discrepancy | train | def adjust_weights_discrepancy(self, resfile=None,original_ceiling=True):
"""adjusts the weights of each non-zero weight observation based
on the residual in the pest residual file so each observations contribution
to phi is 1.0
Parameters
----------
resfile : str
residual file name. If None, try to use a residual file
with the Pst case name. Default is None
original_ceiling : bool
flag to keep weights from increasing - this is generally a good idea.
Default is True
"""
| python | {
"resource": ""
} |
q20091 | Pst._adjust_weights_by_phi_components | train | def _adjust_weights_by_phi_components(self, components,original_ceiling):
"""resets the weights of observations by group to account for
residual phi components.
Parameters
----------
components : dict
a dictionary of obs group:phi contribution pairs
original_ceiling : bool
flag to keep weights from increasing
"""
obs = self.observation_data
nz_groups = obs.groupby(obs["weight"].map(lambda x: x == 0)).groups
ogroups = obs.groupby("obgnme").groups
for ogroup, idxs in ogroups.items():
if self.control_data.pestmode.startswith("regul") \
and "regul" in ogroup.lower():
continue
og_phi = components[ogroup]
nz_groups = obs.loc[idxs,:].groupby(obs.loc[idxs,"weight"].\
map(lambda x: x == 0)).groups
og_nzobs = 0
| python | {
"resource": ""
} |
q20092 | Pst.__reset_weights | train | def __reset_weights(self, target_phis, res_idxs, obs_idxs):
"""private method to reset weights based on target phi values
for each group. This method should not be called directly
Parameters
----------
target_phis : dict
target phi contribution for groups to reweight
res_idxs : dict
the index positions of each group of interest
in the res dataframe
obs_idxs : dict
the index positions of each group of interest
in the observation data dataframe
"""
for item in target_phis.keys():
assert item in res_idxs.keys(),\
"Pst.__reset_weights(): " + str(item) +\
" not in residual group indices"
assert item in obs_idxs.keys(), \
"Pst.__reset_weights(): " + str(item) +\
" not in observation group indices"
| python | {
"resource": ""
} |
q20093 | Pst.adjust_weights | train | def adjust_weights(self,obs_dict=None,
obsgrp_dict=None):
"""reset the weights of observation groups to contribute a specified
amount to the composite objective function
Parameters
----------
obs_dict : dict
dictionary of obs name,new contribution pairs
obsgrp_dict : dict
dictionary of obs group name,contribution pairs
Note
----
if all observations in a named obs group have zero weight, they will be
assigned a non-zero weight so that the request phi contribution
can be met. Similarly, any observations listed in obs_dict with zero
weight will also be reset
"""
self.observation_data.index = self.observation_data.obsnme
self.res.index = self.res.name
if obsgrp_dict is not None:
# reset groups with all zero weights
obs = self.observation_data
for grp in obsgrp_dict.keys():
if obs.loc[obs.obgnme==grp,"weight"].sum() == 0.0:
obs.loc[obs.obgnme==grp,"weight"] = 1.0
res_groups = self.res.groupby("group").groups
obs_groups | python | {
"resource": ""
} |
q20094 | Pst.proportional_weights | train | def proportional_weights(self, fraction_stdev=1.0, wmax=100.0,
leave_zero=True):
"""setup weights inversely proportional to the observation value
Parameters
----------
fraction_stdev : float
the fraction portion of the observation
val to treat as the standard deviation. set to 1.0 for
inversely proportional
| python | {
"resource": ""
} |
q20095 | Pst.calculate_pertubations | train | def calculate_pertubations(self):
""" experimental method to calculate finite difference parameter
pertubations. The pertubation values are added to the
Pst.parameter_data attribute
Note
----
user beware!
"""
self.build_increments()
self.parameter_data.loc[:,"pertubation"] = \
self.parameter_data.parval1 + \
self.parameter_data.increment
self.parameter_data.loc[:,"out_forward"] = \
self.parameter_data.loc[:,"pertubation"] > \
self.parameter_data.loc[:,"parubnd"]
out_forward = self.parameter_data.groupby("out_forward").groups
if True in out_forward:
self.parameter_data.loc[out_forward[True],"pertubation"] = \
self.parameter_data.loc[out_forward[True],"parval1"] - \
self.parameter_data.loc[out_forward[True],"increment"] | python | {
"resource": ""
} |
q20096 | Pst.build_increments | train | def build_increments(self):
""" experimental method to calculate parameter increments for use
in the finite difference pertubation calculations
Note
----
user beware!
"""
self.enforce_bounds()
self.add_transform_columns()
par_groups = self.parameter_data.groupby("pargp").groups
inctype = self.parameter_groups.groupby("inctyp").groups
for itype,inc_groups in inctype.items():
pnames = []
for group in inc_groups:
pnames.extend(par_groups[group])
derinc = self.parameter_groups.loc[group,"derinc"]
self.parameter_data.loc[par_groups[group],"derinc"] = derinc
if itype == "absolute":
self.parameter_data.loc[pnames,"increment"] = \
self.parameter_data.loc[pnames,"derinc"]
elif itype == "relative":
self.parameter_data.loc[pnames,"increment"] = \
self.parameter_data.loc[pnames,"derinc"] * \
self.parameter_data.loc[pnames,"parval1"]
| python | {
"resource": ""
} |
q20097 | Pst.add_transform_columns | train | def add_transform_columns(self):
""" add transformed values to the Pst.parameter_data attribute
"""
for col in ["parval1","parlbnd","parubnd","increment"]:
if col not in self.parameter_data.columns:
continue
self.parameter_data.loc[:,col+"_trans"] = (self.parameter_data.loc[:,col] *
| python | {
"resource": ""
} |
q20098 | Pst.enforce_bounds | train | def enforce_bounds(self):
""" enforce bounds violation resulting from the
parameter pertubation calculations
"""
too_big = self.parameter_data.loc[:,"parval1"] > \
self.parameter_data.loc[:,"parubnd"]
| python | {
"resource": ""
} |
q20099 | Pst.from_io_files | train | def from_io_files(cls,tpl_files,in_files,ins_files,out_files,pst_filename=None):
""" create a Pst instance from model interface files. Assigns generic values for
parameter info. Tries to use INSCHEK to set somewhat meaningful observation
values
Parameters
----------
tpl_files : list
list of template file names
in_files : list
list of model input file names (pairs with template files)
| python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.