text
stringlengths
81
112k
Configure the |SequenceManager| object available in module |pub| following the definitions of the actual XML `reader` or `writer` element when available; if not use those of the XML `series_io` element. Compare the following results with `single_run.xml` to see that the first `writer` element defines the input file type specifically, that the second `writer` element defines a general file type, and that the third `writer` element does not define any file type (the principle mechanism is the same for other options, e.g. the aggregation mode): >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import HydPy, TestIO, XMLInterface, pub >>> hp = HydPy('LahnH') >>> with TestIO(): ... hp.prepare_network() ... interface = XMLInterface('single_run.xml') >>> series_io = interface.series_io >>> with TestIO(): ... series_io.writers[0].prepare_sequencemanager() >>> pub.sequencemanager.inputfiletype 'asc' >>> pub.sequencemanager.fluxfiletype 'npy' >>> pub.sequencemanager.fluxaggregation 'none' >>> with TestIO(): ... series_io.writers[1].prepare_sequencemanager() >>> pub.sequencemanager.statefiletype 'nc' >>> pub.sequencemanager.stateoverwrite False >>> with TestIO(): ... series_io.writers[2].prepare_sequencemanager() >>> pub.sequencemanager.statefiletype 'npy' >>> pub.sequencemanager.fluxaggregation 'mean' >>> pub.sequencemanager.inputoverwrite True >>> pub.sequencemanager.inputdirpath 'LahnH/series/input' def prepare_sequencemanager(self) -> None: """Configure the |SequenceManager| object available in module |pub| following the definitions of the actual XML `reader` or `writer` element when available; if not use those of the XML `series_io` element. 
Compare the following results with `single_run.xml` to see that the first `writer` element defines the input file type specifically, that the second `writer` element defines a general file type, and that the third `writer` element does not define any file type (the principle mechanism is the same for other options, e.g. the aggregation mode): >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import HydPy, TestIO, XMLInterface, pub >>> hp = HydPy('LahnH') >>> with TestIO(): ... hp.prepare_network() ... interface = XMLInterface('single_run.xml') >>> series_io = interface.series_io >>> with TestIO(): ... series_io.writers[0].prepare_sequencemanager() >>> pub.sequencemanager.inputfiletype 'asc' >>> pub.sequencemanager.fluxfiletype 'npy' >>> pub.sequencemanager.fluxaggregation 'none' >>> with TestIO(): ... series_io.writers[1].prepare_sequencemanager() >>> pub.sequencemanager.statefiletype 'nc' >>> pub.sequencemanager.stateoverwrite False >>> with TestIO(): ... series_io.writers[2].prepare_sequencemanager() >>> pub.sequencemanager.statefiletype 'npy' >>> pub.sequencemanager.fluxaggregation 'mean' >>> pub.sequencemanager.inputoverwrite True >>> pub.sequencemanager.inputdirpath 'LahnH/series/input' """ for config, convert in ( ('filetype', lambda x: x), ('aggregation', lambda x: x), ('overwrite', lambda x: x.lower() == 'true'), ('dirpath', lambda x: x)): xml_special = self.find(config) xml_general = self.master.find(config) for name_manager, name_xml in zip( ('input', 'flux', 'state', 'node'), ('inputs', 'fluxes', 'states', 'nodes')): value = None for xml, attr_xml in zip( (xml_special, xml_special, xml_general, xml_general), (name_xml, 'general', name_xml, 'general')): try: value = find(xml, attr_xml).text except AttributeError: continue break setattr(hydpy.pub.sequencemanager, f'{name_manager}{config}', convert(value))
A nested |collections.defaultdict| containing the model specific information provided by the XML `sequences` element. >>> from hydpy.auxs.xmltools import XMLInterface >>> from hydpy import data >>> interface = XMLInterface('single_run.xml', data.get_path('LahnH')) >>> series_io = interface.series_io >>> model2subs2seqs = series_io.writers[2].model2subs2seqs >>> for model, subs2seqs in sorted(model2subs2seqs.items()): ... for subs, seq in sorted(subs2seqs.items()): ... print(model, subs, seq) hland_v1 fluxes ['pc', 'tf'] hland_v1 states ['sm'] hstream_v1 states ['qjoints'] def model2subs2seqs(self) -> Dict[str, Dict[str, List[str]]]: """A nested |collections.defaultdict| containing the model specific information provided by the XML `sequences` element. >>> from hydpy.auxs.xmltools import XMLInterface >>> from hydpy import data >>> interface = XMLInterface('single_run.xml', data.get_path('LahnH')) >>> series_io = interface.series_io >>> model2subs2seqs = series_io.writers[2].model2subs2seqs >>> for model, subs2seqs in sorted(model2subs2seqs.items()): ... for subs, seq in sorted(subs2seqs.items()): ... print(model, subs, seq) hland_v1 fluxes ['pc', 'tf'] hland_v1 states ['sm'] hstream_v1 states ['qjoints'] """ model2subs2seqs = collections.defaultdict( lambda: collections.defaultdict(list)) for model in self.find('sequences'): model_name = strip(model.tag) if model_name == 'node': continue for group in model: group_name = strip(group.tag) for sequence in group: seq_name = strip(sequence.tag) model2subs2seqs[model_name][group_name].append(seq_name) return model2subs2seqs
A |collections.defaultdict| containing the node-specific information provided by XML `sequences` element. >>> from hydpy.auxs.xmltools import XMLInterface >>> from hydpy import data >>> interface = XMLInterface('single_run.xml', data.get_path('LahnH')) >>> series_io = interface.series_io >>> subs2seqs = series_io.writers[2].subs2seqs >>> for subs, seq in sorted(subs2seqs.items()): ... print(subs, seq) node ['sim', 'obs'] def subs2seqs(self) -> Dict[str, List[str]]: """A |collections.defaultdict| containing the node-specific information provided by XML `sequences` element. >>> from hydpy.auxs.xmltools import XMLInterface >>> from hydpy import data >>> interface = XMLInterface('single_run.xml', data.get_path('LahnH')) >>> series_io = interface.series_io >>> subs2seqs = series_io.writers[2].subs2seqs >>> for subs, seq in sorted(subs2seqs.items()): ... print(subs, seq) node ['sim', 'obs'] """ subs2seqs = collections.defaultdict(list) nodes = find(self.find('sequences'), 'node') if nodes is not None: for seq in nodes: subs2seqs['node'].append(strip(seq.tag)) return subs2seqs
Call |IOSequence.activate_ram| of all sequences selected by the given output element of the actual XML file. Use the memory argument to pass in already prepared sequences; newly prepared sequences will be added. >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import HydPy, TestIO, XMLInterface >>> hp = HydPy('LahnH') >>> with TestIO(): ... hp.prepare_network() ... hp.init_models() ... interface = XMLInterface('single_run.xml') >>> interface.update_timegrids() >>> series_io = interface.series_io >>> memory = set() >>> pc = hp.elements.land_dill.model.sequences.fluxes.pc >>> pc.ramflag False >>> series_io.writers[0].prepare_series(memory) >>> pc in memory True >>> pc.ramflag True >>> pc.deactivate_ram() >>> pc.ramflag False >>> series_io.writers[0].prepare_series(memory) >>> pc.ramflag False def prepare_series(self, memory: set) -> None: """Call |IOSequence.activate_ram| of all sequences selected by the given output element of the actual XML file. Use the memory argument to pass in already prepared sequences; newly prepared sequences will be added. >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import HydPy, TestIO, XMLInterface >>> hp = HydPy('LahnH') >>> with TestIO(): ... hp.prepare_network() ... hp.init_models() ... interface = XMLInterface('single_run.xml') >>> interface.update_timegrids() >>> series_io = interface.series_io >>> memory = set() >>> pc = hp.elements.land_dill.model.sequences.fluxes.pc >>> pc.ramflag False >>> series_io.writers[0].prepare_series(memory) >>> pc in memory True >>> pc.ramflag True >>> pc.deactivate_ram() >>> pc.ramflag False >>> series_io.writers[0].prepare_series(memory) >>> pc.ramflag False """ for sequence in self._iterate_sequences(): if sequence not in memory: memory.add(sequence) sequence.activate_ram()
Load time series data as defined by the actual XML `reader` element. >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import HydPy, TestIO, XMLInterface >>> hp = HydPy('LahnH') >>> with TestIO(): ... hp.prepare_network() ... hp.init_models() ... interface = XMLInterface('single_run.xml') ... interface.update_options() ... interface.update_timegrids() ... series_io = interface.series_io ... series_io.prepare_series() ... series_io.load_series() >>> from hydpy import print_values >>> print_values( ... hp.elements.land_dill.model.sequences.inputs.t.series[:3]) -0.298846, -0.811539, -2.493848 def load_series(self) -> None: """Load time series data as defined by the actual XML `reader` element. >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import HydPy, TestIO, XMLInterface >>> hp = HydPy('LahnH') >>> with TestIO(): ... hp.prepare_network() ... hp.init_models() ... interface = XMLInterface('single_run.xml') ... interface.update_options() ... interface.update_timegrids() ... series_io = interface.series_io ... series_io.prepare_series() ... series_io.load_series() >>> from hydpy import print_values >>> print_values( ... hp.elements.land_dill.model.sequences.inputs.t.series[:3]) -0.298846, -0.811539, -2.493848 """ kwargs = {} for keyword in ('flattennetcdf', 'isolatenetcdf', 'timeaxisnetcdf'): argument = getattr(hydpy.pub.options, keyword, None) if argument is not None: kwargs[keyword[:-6]] = argument hydpy.pub.sequencemanager.open_netcdf_reader(**kwargs) self.prepare_sequencemanager() for sequence in self._iterate_sequences(): sequence.load_ext() hydpy.pub.sequencemanager.close_netcdf_reader()
Save time series data as defined by the actual XML `writer` element. >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import HydPy, TestIO, XMLInterface >>> hp = HydPy('LahnH') >>> with TestIO(): ... hp.prepare_network() ... hp.init_models() ... interface = XMLInterface('single_run.xml') ... interface.update_options() >>> interface.update_timegrids() >>> series_io = interface.series_io >>> series_io.prepare_series() >>> hp.elements.land_dill.model.sequences.fluxes.pc.series[2, 3] = 9.0 >>> hp.nodes.lahn_2.sequences.sim.series[4] = 7.0 >>> with TestIO(): ... series_io.save_series() >>> import numpy >>> with TestIO(): ... os.path.exists( ... 'LahnH/series/output/land_lahn_2_flux_pc.npy') ... os.path.exists( ... 'LahnH/series/output/land_lahn_3_flux_pc.npy') ... numpy.load( ... 'LahnH/series/output/land_dill_flux_pc.npy')[13+2, 3] ... numpy.load( ... 'LahnH/series/output/lahn_2_sim_q_mean.npy')[13+4] True False 9.0 7.0 def save_series(self) -> None: """Save time series data as defined by the actual XML `writer` element. >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import HydPy, TestIO, XMLInterface >>> hp = HydPy('LahnH') >>> with TestIO(): ... hp.prepare_network() ... hp.init_models() ... interface = XMLInterface('single_run.xml') ... interface.update_options() >>> interface.update_timegrids() >>> series_io = interface.series_io >>> series_io.prepare_series() >>> hp.elements.land_dill.model.sequences.fluxes.pc.series[2, 3] = 9.0 >>> hp.nodes.lahn_2.sequences.sim.series[4] = 7.0 >>> with TestIO(): ... series_io.save_series() >>> import numpy >>> with TestIO(): ... os.path.exists( ... 'LahnH/series/output/land_lahn_2_flux_pc.npy') ... os.path.exists( ... 'LahnH/series/output/land_lahn_3_flux_pc.npy') ... numpy.load( ... 'LahnH/series/output/land_dill_flux_pc.npy')[13+2, 3] ... numpy.load( ... 
'LahnH/series/output/lahn_2_sim_q_mean.npy')[13+4] True False 9.0 7.0 """ hydpy.pub.sequencemanager.open_netcdf_writer( flatten=hydpy.pub.options.flattennetcdf, isolate=hydpy.pub.options.isolatenetcdf) self.prepare_sequencemanager() for sequence in self._iterate_sequences(): sequence.save_ext() hydpy.pub.sequencemanager.close_netcdf_writer()
ToDo >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import HydPy, TestIO, XMLInterface, pub >>> hp = HydPy('LahnH') >>> pub.timegrids = '1996-01-01', '1996-01-06', '1d' >>> with TestIO(): ... hp.prepare_everything() ... interface = XMLInterface('multiple_runs.xml') >>> var = interface.exchange.itemgroups[0].models[0].subvars[0].vars[0] >>> item = var.item >>> item.value array(2.0) >>> hp.elements.land_dill.model.parameters.control.alpha alpha(1.0) >>> item.update_variables() >>> hp.elements.land_dill.model.parameters.control.alpha alpha(2.0) >>> var = interface.exchange.itemgroups[0].models[2].subvars[0].vars[0] >>> item = var.item >>> item.value array(5.0) >>> hp.elements.stream_dill_lahn_2.model.parameters.control.lag lag(0.0) >>> item.update_variables() >>> hp.elements.stream_dill_lahn_2.model.parameters.control.lag lag(5.0) >>> var = interface.exchange.itemgroups[1].models[0].subvars[0].vars[0] >>> item = var.item >>> item.name 'sm_lahn_2' >>> item.value array(123.0) >>> hp.elements.land_lahn_2.model.sequences.states.sm sm(138.31396, 135.71124, 147.54968, 145.47142, 154.96405, 153.32805, 160.91917, 159.62434, 165.65575, 164.63255) >>> item.update_variables() >>> hp.elements.land_lahn_2.model.sequences.states.sm sm(123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0) >>> var = interface.exchange.itemgroups[1].models[0].subvars[0].vars[1] >>> item = var.item >>> item.name 'sm_lahn_1' >>> item.value array([ 110., 120., 130., 140., 150., 160., 170., 180., 190., 200., 210., 220., 230.]) >>> hp.elements.land_lahn_1.model.sequences.states.sm sm(99.27505, 96.17726, 109.16576, 106.39745, 117.97304, 115.56252, 125.81523, 123.73198, 132.80035, 130.91684, 138.95523, 137.25983, 142.84148) >>> from hydpy import pub >>> with pub.options.warntrim(False): ... 
item.update_variables() >>> hp.elements.land_lahn_1.model.sequences.states.sm sm(110.0, 120.0, 130.0, 140.0, 150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 206.0, 206.0, 206.0) >>> for element in pub.selections.headwaters.elements: ... element.model.parameters.control.rfcf(1.1) >>> for element in pub.selections.nonheadwaters.elements: ... element.model.parameters.control.rfcf(1.0) >>> for subvars in interface.exchange.itemgroups[2].models[0].subvars: ... for var in subvars.vars: ... var.item.update_variables() >>> for element in hp.elements.catchment: ... print(element, repr(element.model.parameters.control.sfcf)) land_dill sfcf(1.4) land_lahn_1 sfcf(1.4) land_lahn_2 sfcf(1.2) land_lahn_3 sfcf(field=1.1, forest=1.2) >>> var = interface.exchange.itemgroups[3].models[0].subvars[1].vars[0] >>> hp.elements.land_dill.model.sequences.states.sm = 1.0 >>> for name, target in var.item.yield_name2value(): ... print(name, target) # doctest: +ELLIPSIS land_dill_states_sm [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, \ 1.0, 1.0, 1.0] land_lahn_1_states_sm [110.0, 120.0, 130.0, 140.0, 150.0, 160.0, \ 170.0, 180.0, 190.0, 200.0, 206.0, 206.0, 206.0] land_lahn_2_states_sm [123.0, 123.0, 123.0, 123.0, 123.0, 123.0, \ 123.0, 123.0, 123.0, 123.0] land_lahn_3_states_sm [101.3124...] >>> vars_ = interface.exchange.itemgroups[3].models[0].subvars[0].vars >>> qt = hp.elements.land_dill.model.sequences.fluxes.qt >>> qt(1.0) >>> qt.series = 2.0 >>> for var in vars_: ... for name, target in var.item.yield_name2value(): ... print(name, target) # doctest: +ELLIPSIS land_dill_fluxes_qt 1.0 land_dill_fluxes_qt_series [2.0, 2.0, 2.0, 2.0, 2.0] >>> var = interface.exchange.itemgroups[3].nodes[0].vars[0] >>> hp.nodes.dill.sequences.sim.series = range(5) >>> for name, target in var.item.yield_name2value(): ... print(name, target) # doctest: +ELLIPSIS dill_nodes_sim_series [0.0, 1.0, 2.0, 3.0, 4.0] >>> for name, target in var.item.yield_name2value(2, 4): ... 
print(name, target) # doctest: +ELLIPSIS dill_nodes_sim_series [2.0, 3.0] def item(self): """ ToDo >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import HydPy, TestIO, XMLInterface, pub >>> hp = HydPy('LahnH') >>> pub.timegrids = '1996-01-01', '1996-01-06', '1d' >>> with TestIO(): ... hp.prepare_everything() ... interface = XMLInterface('multiple_runs.xml') >>> var = interface.exchange.itemgroups[0].models[0].subvars[0].vars[0] >>> item = var.item >>> item.value array(2.0) >>> hp.elements.land_dill.model.parameters.control.alpha alpha(1.0) >>> item.update_variables() >>> hp.elements.land_dill.model.parameters.control.alpha alpha(2.0) >>> var = interface.exchange.itemgroups[0].models[2].subvars[0].vars[0] >>> item = var.item >>> item.value array(5.0) >>> hp.elements.stream_dill_lahn_2.model.parameters.control.lag lag(0.0) >>> item.update_variables() >>> hp.elements.stream_dill_lahn_2.model.parameters.control.lag lag(5.0) >>> var = interface.exchange.itemgroups[1].models[0].subvars[0].vars[0] >>> item = var.item >>> item.name 'sm_lahn_2' >>> item.value array(123.0) >>> hp.elements.land_lahn_2.model.sequences.states.sm sm(138.31396, 135.71124, 147.54968, 145.47142, 154.96405, 153.32805, 160.91917, 159.62434, 165.65575, 164.63255) >>> item.update_variables() >>> hp.elements.land_lahn_2.model.sequences.states.sm sm(123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0) >>> var = interface.exchange.itemgroups[1].models[0].subvars[0].vars[1] >>> item = var.item >>> item.name 'sm_lahn_1' >>> item.value array([ 110., 120., 130., 140., 150., 160., 170., 180., 190., 200., 210., 220., 230.]) >>> hp.elements.land_lahn_1.model.sequences.states.sm sm(99.27505, 96.17726, 109.16576, 106.39745, 117.97304, 115.56252, 125.81523, 123.73198, 132.80035, 130.91684, 138.95523, 137.25983, 142.84148) >>> from hydpy import pub >>> with pub.options.warntrim(False): ... 
item.update_variables() >>> hp.elements.land_lahn_1.model.sequences.states.sm sm(110.0, 120.0, 130.0, 140.0, 150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 206.0, 206.0, 206.0) >>> for element in pub.selections.headwaters.elements: ... element.model.parameters.control.rfcf(1.1) >>> for element in pub.selections.nonheadwaters.elements: ... element.model.parameters.control.rfcf(1.0) >>> for subvars in interface.exchange.itemgroups[2].models[0].subvars: ... for var in subvars.vars: ... var.item.update_variables() >>> for element in hp.elements.catchment: ... print(element, repr(element.model.parameters.control.sfcf)) land_dill sfcf(1.4) land_lahn_1 sfcf(1.4) land_lahn_2 sfcf(1.2) land_lahn_3 sfcf(field=1.1, forest=1.2) >>> var = interface.exchange.itemgroups[3].models[0].subvars[1].vars[0] >>> hp.elements.land_dill.model.sequences.states.sm = 1.0 >>> for name, target in var.item.yield_name2value(): ... print(name, target) # doctest: +ELLIPSIS land_dill_states_sm [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, \ 1.0, 1.0, 1.0] land_lahn_1_states_sm [110.0, 120.0, 130.0, 140.0, 150.0, 160.0, \ 170.0, 180.0, 190.0, 200.0, 206.0, 206.0, 206.0] land_lahn_2_states_sm [123.0, 123.0, 123.0, 123.0, 123.0, 123.0, \ 123.0, 123.0, 123.0, 123.0] land_lahn_3_states_sm [101.3124...] >>> vars_ = interface.exchange.itemgroups[3].models[0].subvars[0].vars >>> qt = hp.elements.land_dill.model.sequences.fluxes.qt >>> qt(1.0) >>> qt.series = 2.0 >>> for var in vars_: ... for name, target in var.item.yield_name2value(): ... print(name, target) # doctest: +ELLIPSIS land_dill_fluxes_qt 1.0 land_dill_fluxes_qt_series [2.0, 2.0, 2.0, 2.0, 2.0] >>> var = interface.exchange.itemgroups[3].nodes[0].vars[0] >>> hp.nodes.dill.sequences.sim.series = range(5) >>> for name, target in var.item.yield_name2value(): ... print(name, target) # doctest: +ELLIPSIS dill_nodes_sim_series [0.0, 1.0, 2.0, 3.0, 4.0] >>> for name, target in var.item.yield_name2value(2, 4): ... 
print(name, target) # doctest: +ELLIPSIS dill_nodes_sim_series [2.0, 3.0] """ target = f'{self.master.name}.{self.name}' if self.master.name == 'nodes': master = self.master.name itemgroup = self.master.master.name else: master = self.master.master.name itemgroup = self.master.master.master.name itemclass = _ITEMGROUP2ITEMCLASS[itemgroup] if itemgroup == 'getitems': return self._get_getitem(target, master, itemclass) return self._get_changeitem(target, master, itemclass, itemgroup)
Write the complete base schema file `HydPyConfigBase.xsd` based on the template file `HydPyConfigBase.xsdt`. Method |XSDWriter.write_xsd| adds model specific information to the general information of template file `HydPyConfigBase.xsdt` regarding reading and writing of time series data and exchanging parameter and sequence values e.g. during calibration. The following example shows that after writing a new schema file, method |XMLInterface.validate_xml| does not raise an error when either applied on the XML configuration files `single_run.xml` or `multiple_runs.xml` of the `LahnH` example project: >>> import os >>> from hydpy.auxs.xmltools import XSDWriter, XMLInterface >>> if os.path.exists(XSDWriter.filepath_target): ... os.remove(XSDWriter.filepath_target) >>> os.path.exists(XSDWriter.filepath_target) False >>> XSDWriter.write_xsd() >>> os.path.exists(XSDWriter.filepath_target) True >>> from hydpy import data >>> for configfile in ('single_run.xml', 'multiple_runs.xml'): ... XMLInterface(configfile, data.get_path('LahnH')).validate_xml() def write_xsd(cls) -> None: """Write the complete base schema file `HydPyConfigBase.xsd` based on the template file `HydPyConfigBase.xsdt`. Method |XSDWriter.write_xsd| adds model specific information to the general information of template file `HydPyConfigBase.xsdt` regarding reading and writing of time series data and exchanging parameter and sequence values e.g. during calibration. The following example shows that after writing a new schema file, method |XMLInterface.validate_xml| does not raise an error when either applied on the XML configuration files `single_run.xml` or `multiple_runs.xml` of the `LahnH` example project: >>> import os >>> from hydpy.auxs.xmltools import XSDWriter, XMLInterface >>> if os.path.exists(XSDWriter.filepath_target): ... 
os.remove(XSDWriter.filepath_target) >>> os.path.exists(XSDWriter.filepath_target) False >>> XSDWriter.write_xsd() >>> os.path.exists(XSDWriter.filepath_target) True >>> from hydpy import data >>> for configfile in ('single_run.xml', 'multiple_runs.xml'): ... XMLInterface(configfile, data.get_path('LahnH')).validate_xml() """ with open(cls.filepath_source) as file_: template = file_.read() template = template.replace( '<!--include model sequence groups-->', cls.get_insertion()) template = template.replace( '<!--include exchange items-->', cls.get_exchangeinsertion()) with open(cls.filepath_target, 'w') as file_: file_.write(template)
Return a sorted |list| containing all application model names. >>> from hydpy.auxs.xmltools import XSDWriter >>> print(XSDWriter.get_modelnames()) # doctest: +ELLIPSIS [...'dam_v001', 'dam_v002', 'dam_v003', 'dam_v004', 'dam_v005',...] def get_modelnames() -> List[str]: """Return a sorted |list| containing all application model names. >>> from hydpy.auxs.xmltools import XSDWriter >>> print(XSDWriter.get_modelnames()) # doctest: +ELLIPSIS [...'dam_v001', 'dam_v002', 'dam_v003', 'dam_v004', 'dam_v005',...] """ return sorted(str(fn.split('.')[0]) for fn in os.listdir(models.__path__[0]) if (fn.endswith('.py') and (fn != '__init__.py')))
Return the complete string to be inserted into the string of the template file. >>> from hydpy.auxs.xmltools import XSDWriter >>> print(XSDWriter.get_insertion()) # doctest: +ELLIPSIS <element name="arma_v1" substitutionGroup="hpcb:sequenceGroup" type="hpcb:arma_v1Type"/> <BLANKLINE> <complexType name="arma_v1Type"> <complexContent> <extension base="hpcb:sequenceGroupType"> <sequence> <element name="fluxes" minOccurs="0"> <complexType> <sequence> <element name="qin" minOccurs="0"/> ... </complexType> </element> </sequence> </extension> </complexContent> </complexType> <BLANKLINE> def get_insertion(cls) -> str: """Return the complete string to be inserted into the string of the template file. >>> from hydpy.auxs.xmltools import XSDWriter >>> print(XSDWriter.get_insertion()) # doctest: +ELLIPSIS <element name="arma_v1" substitutionGroup="hpcb:sequenceGroup" type="hpcb:arma_v1Type"/> <BLANKLINE> <complexType name="arma_v1Type"> <complexContent> <extension base="hpcb:sequenceGroupType"> <sequence> <element name="fluxes" minOccurs="0"> <complexType> <sequence> <element name="qin" minOccurs="0"/> ... </complexType> </element> </sequence> </extension> </complexContent> </complexType> <BLANKLINE> """ indent = 1 blanks = ' ' * (indent+4) subs = [] for name in cls.get_modelnames(): subs.extend([ f'{blanks}<element name="{name}"', f'{blanks} substitutionGroup="hpcb:sequenceGroup"', f'{blanks} type="hpcb:{name}Type"/>', f'', f'{blanks}<complexType name="{name}Type">', f'{blanks} <complexContent>', f'{blanks} <extension base="hpcb:sequenceGroupType">', f'{blanks} <sequence>']) model = importtools.prepare_model(name) subs.append(cls.get_modelinsertion(model, indent + 4)) subs.extend([ f'{blanks} </sequence>', f'{blanks} </extension>', f'{blanks} </complexContent>', f'{blanks}</complexType>', f'' ]) return '\n'.join(subs)
Return the insertion string required for the given application model. >>> from hydpy.auxs.xmltools import XSDWriter >>> from hydpy import prepare_model >>> model = prepare_model('hland_v1') >>> print(XSDWriter.get_modelinsertion(model, 1)) # doctest: +ELLIPSIS <element name="inputs" minOccurs="0"> <complexType> <sequence> <element name="p" minOccurs="0"/> ... </element> <element name="fluxes" minOccurs="0"> ... </element> <element name="states" minOccurs="0"> ... </element> def get_modelinsertion(cls, model, indent) -> str: """Return the insertion string required for the given application model. >>> from hydpy.auxs.xmltools import XSDWriter >>> from hydpy import prepare_model >>> model = prepare_model('hland_v1') >>> print(XSDWriter.get_modelinsertion(model, 1)) # doctest: +ELLIPSIS <element name="inputs" minOccurs="0"> <complexType> <sequence> <element name="p" minOccurs="0"/> ... </element> <element name="fluxes" minOccurs="0"> ... </element> <element name="states" minOccurs="0"> ... </element> """ texts = [] for name in ('inputs', 'fluxes', 'states'): subsequences = getattr(model.sequences, name, None) if subsequences: texts.append( cls.get_subsequencesinsertion(subsequences, indent)) return '\n'.join(texts)
Return the insertion string required for the given group of sequences. >>> from hydpy.auxs.xmltools import XSDWriter >>> from hydpy import prepare_model >>> model = prepare_model('hland_v1') >>> print(XSDWriter.get_subsequencesinsertion( ... model.sequences.fluxes, 1)) # doctest: +ELLIPSIS <element name="fluxes" minOccurs="0"> <complexType> <sequence> <element name="tmean" minOccurs="0"/> <element name="tc" minOccurs="0"/> ... <element name="qt" minOccurs="0"/> </sequence> </complexType> </element> def get_subsequencesinsertion(cls, subsequences, indent) -> str: """Return the insertion string required for the given group of sequences. >>> from hydpy.auxs.xmltools import XSDWriter >>> from hydpy import prepare_model >>> model = prepare_model('hland_v1') >>> print(XSDWriter.get_subsequencesinsertion( ... model.sequences.fluxes, 1)) # doctest: +ELLIPSIS <element name="fluxes" minOccurs="0"> <complexType> <sequence> <element name="tmean" minOccurs="0"/> <element name="tc" minOccurs="0"/> ... <element name="qt" minOccurs="0"/> </sequence> </complexType> </element> """ blanks = ' ' * (indent*4) lines = [f'{blanks}<element name="{subsequences.name}"', f'{blanks} minOccurs="0">', f'{blanks} <complexType>', f'{blanks} <sequence>'] for sequence in subsequences: lines.append(cls.get_sequenceinsertion(sequence, indent + 3)) lines.extend([f'{blanks} </sequence>', f'{blanks} </complexType>', f'{blanks}</element>']) return '\n'.join(lines)
Return the complete string related to the definition of exchange items to be inserted into the string of the template file. >>> from hydpy.auxs.xmltools import XSDWriter >>> print(XSDWriter.get_exchangeinsertion()) # doctest: +ELLIPSIS <complexType name="arma_v1_mathitemType"> ... <element name="setitems"> ... <complexType name="arma_v1_setitemsType"> ... <element name="additems"> ... <element name="getitems"> ... def get_exchangeinsertion(cls): """Return the complete string related to the definition of exchange items to be inserted into the string of the template file. >>> from hydpy.auxs.xmltools import XSDWriter >>> print(XSDWriter.get_exchangeinsertion()) # doctest: +ELLIPSIS <complexType name="arma_v1_mathitemType"> ... <element name="setitems"> ... <complexType name="arma_v1_setitemsType"> ... <element name="additems"> ... <element name="getitems"> ... """ indent = 1 subs = [cls.get_mathitemsinsertion(indent)] for groupname in ('setitems', 'additems', 'getitems'): subs.append(cls.get_itemsinsertion(groupname, indent)) subs.append(cls.get_itemtypesinsertion(groupname, indent)) return '\n'.join(subs)
Return a string defining a model specific XML type extending `ItemType`. >>> from hydpy.auxs.xmltools import XSDWriter >>> print(XSDWriter.get_mathitemsinsertion(1)) # doctest: +ELLIPSIS <complexType name="arma_v1_mathitemType"> <complexContent> <extension base="hpcb:setitemType"> <choice> <element name="control.responses"/> ... <element name="logs.logout"/> </choice> </extension> </complexContent> </complexType> <BLANKLINE> <complexType name="dam_v001_mathitemType"> ... def get_mathitemsinsertion(cls, indent) -> str: """Return a string defining a model specific XML type extending `ItemType`. >>> from hydpy.auxs.xmltools import XSDWriter >>> print(XSDWriter.get_mathitemsinsertion(1)) # doctest: +ELLIPSIS <complexType name="arma_v1_mathitemType"> <complexContent> <extension base="hpcb:setitemType"> <choice> <element name="control.responses"/> ... <element name="logs.logout"/> </choice> </extension> </complexContent> </complexType> <BLANKLINE> <complexType name="dam_v001_mathitemType"> ... """ blanks = ' ' * (indent*4) subs = [] for modelname in cls.get_modelnames(): model = importtools.prepare_model(modelname) subs.extend([ f'{blanks}<complexType name="{modelname}_mathitemType">', f'{blanks} <complexContent>', f'{blanks} <extension base="hpcb:setitemType">', f'{blanks} <choice>']) for subvars in cls._get_subvars(model): for var in subvars: subs.append( f'{blanks} ' f'<element name="{subvars.name}.{var.name}"/>') subs.extend([ f'{blanks} </choice>', f'{blanks} </extension>', f'{blanks} </complexContent>', f'{blanks}</complexType>', f'']) return '\n'.join(subs)
Return a string defining the XML element for the given exchange item group. >>> from hydpy.auxs.xmltools import XSDWriter >>> print(XSDWriter.get_itemsinsertion( ... 'setitems', 1)) # doctest: +ELLIPSIS <element name="setitems"> <complexType> <sequence> <element ref="hpcb:selections" minOccurs="0"/> <element ref="hpcb:devices" minOccurs="0"/> ... <element name="hland_v1" type="hpcb:hland_v1_setitemsType" minOccurs="0" maxOccurs="unbounded"/> ... <element name="nodes" type="hpcb:nodes_setitemsType" minOccurs="0" maxOccurs="unbounded"/> </sequence> <attribute name="info" type="string"/> </complexType> </element> <BLANKLINE> def get_itemsinsertion(cls, itemgroup, indent) -> str: """Return a string defining the XML element for the given exchange item group. >>> from hydpy.auxs.xmltools import XSDWriter >>> print(XSDWriter.get_itemsinsertion( ... 'setitems', 1)) # doctest: +ELLIPSIS <element name="setitems"> <complexType> <sequence> <element ref="hpcb:selections" minOccurs="0"/> <element ref="hpcb:devices" minOccurs="0"/> ... <element name="hland_v1" type="hpcb:hland_v1_setitemsType" minOccurs="0" maxOccurs="unbounded"/> ... 
<element name="nodes" type="hpcb:nodes_setitemsType" minOccurs="0" maxOccurs="unbounded"/> </sequence> <attribute name="info" type="string"/> </complexType> </element> <BLANKLINE> """ blanks = ' ' * (indent*4) subs = [] subs.extend([ f'{blanks}<element name="{itemgroup}">', f'{blanks} <complexType>', f'{blanks} <sequence>', f'{blanks} <element ref="hpcb:selections"', f'{blanks} minOccurs="0"/>', f'{blanks} <element ref="hpcb:devices"', f'{blanks} minOccurs="0"/>']) for modelname in cls.get_modelnames(): type_ = cls._get_itemstype(modelname, itemgroup) subs.append(f'{blanks} <element name="{modelname}"') subs.append(f'{blanks} type="hpcb:{type_}"') subs.append(f'{blanks} minOccurs="0"') subs.append(f'{blanks} maxOccurs="unbounded"/>') if itemgroup in ('setitems', 'getitems'): type_ = f'nodes_{itemgroup}Type' subs.append(f'{blanks} <element name="nodes"') subs.append(f'{blanks} type="hpcb:{type_}"') subs.append(f'{blanks} minOccurs="0"') subs.append(f'{blanks} maxOccurs="unbounded"/>') subs.extend([ f'{blanks} </sequence>', f'{blanks} <attribute name="info" type="string"/>', f'{blanks} </complexType>', f'{blanks}</element>', f'']) return '\n'.join(subs)
def get_itemtypesinsertion(cls, itemgroup, indent) -> str:
    """Return a string defining the required types for the given
    exchange item group.

    >>> from hydpy.auxs.xmltools import XSDWriter
    >>> print(XSDWriter.get_itemtypesinsertion(
    ...     'setitems', 1))    # doctest: +ELLIPSIS
        <complexType name="arma_v1_setitemsType">
    ...
        </complexType>
    <BLANKLINE>
        <complexType name="dam_v001_setitemsType">
    ...
        <complexType name="nodes_setitemsType">
    ...
    """
    # One type definition per application model, followed by the
    # node-specific type definition.
    parts = [cls.get_itemtypeinsertion(itemgroup, modelname, indent)
             for modelname in cls.get_modelnames()]
    parts.append(cls.get_nodesitemtypeinsertion(itemgroup, indent))
    return '\n'.join(parts)
def get_itemtypeinsertion(cls, itemgroup, modelname, indent) -> str:
    """Return a string defining the required types for the given
    combination of an exchange item group and an application model.

    >>> from hydpy.auxs.xmltools import XSDWriter
    >>> print(XSDWriter.get_itemtypeinsertion(
    ...     'setitems', 'hland_v1', 1))    # doctest: +ELLIPSIS
        <complexType name="hland_v1_setitemsType">
            <sequence>
                <element ref="hpcb:selections"
                         minOccurs="0"/>
                <element ref="hpcb:devices"
                         minOccurs="0"/>
                <element name="control"
                         minOccurs="0"
                         maxOccurs="unbounded">
    ...
            </sequence>
        </complexType>
    <BLANKLINE>
    """
    blanks = ' ' * (indent * 4)
    # Model-specific type name, e.g. `hland_v1_setitemsType`.
    type_ = cls._get_itemstype(modelname, itemgroup)
    subs = [
        f'{blanks}<complexType name="{type_}">',
        f'{blanks}    <sequence>',
        f'{blanks}        <element ref="hpcb:selections"',
        f'{blanks}                 minOccurs="0"/>',
        f'{blanks}        <element ref="hpcb:devices"',
        f'{blanks}                 minOccurs="0"/>',
        # The subgroup elements are nested two indentation levels
        # deeper than this type definition.
        cls.get_subgroupsiteminsertion(itemgroup, modelname, indent+2),
        f'{blanks}    </sequence>',
        f'{blanks}</complexType>',
        f'']
    return '\n'.join(subs)
def get_nodesitemtypeinsertion(cls, itemgroup, indent) -> str:
    """Return a string defining the required types for the given
    combination of an exchange item group and |Node| objects.

    >>> from hydpy.auxs.xmltools import XSDWriter
    >>> print(XSDWriter.get_nodesitemtypeinsertion(
    ...     'setitems', 1))    # doctest: +ELLIPSIS
        <complexType name="nodes_setitemsType">
            <sequence>
                <element ref="hpcb:selections"
                         minOccurs="0"/>
                <element ref="hpcb:devices"
                         minOccurs="0"/>
                <element name="sim"
                         type="hpcb:setitemType"
                         minOccurs="0"
                         maxOccurs="unbounded"/>
                <element name="obs"
                         type="hpcb:setitemType"
                         minOccurs="0"
                         maxOccurs="unbounded"/>
                <element name="sim.series"
                         type="hpcb:setitemType"
                         minOccurs="0"
                         maxOccurs="unbounded"/>
                <element name="obs.series"
                         type="hpcb:setitemType"
                         minOccurs="0"
                         maxOccurs="unbounded"/>
            </sequence>
        </complexType>
    <BLANKLINE>
    """
    blanks = ' ' * (indent * 4)
    subs = [
        f'{blanks}<complexType name="nodes_{itemgroup}Type">',
        f'{blanks}    <sequence>',
        f'{blanks}        <element ref="hpcb:selections"',
        f'{blanks}                 minOccurs="0"/>',
        f'{blanks}        <element ref="hpcb:devices"',
        f'{blanks}                 minOccurs="0"/>']
    # `getitems` references the read-oriented `getitemType`; every
    # other group references `setitemType`.
    type_ = 'getitemType' if itemgroup == 'getitems' else 'setitemType'
    # Elements for current values (`sim`, `obs`) and for complete time
    # series (`sim.series`, `obs.series`).
    for name in ('sim', 'obs', 'sim.series', 'obs.series'):
        subs.extend([
            f'{blanks}        <element name="{name}"',
            f'{blanks}                 type="hpcb:{type_}"',
            f'{blanks}                 minOccurs="0"',
            f'{blanks}                 maxOccurs="unbounded"/>'])
    subs.extend([
        f'{blanks}    </sequence>',
        f'{blanks}</complexType>',
        f''])
    return '\n'.join(subs)
def get_subgroupsiteminsertion(cls, itemgroup, modelname, indent) -> str:
    """Return a string defining the required types for the given
    combination of an exchange item group and an application model.

    >>> from hydpy.auxs.xmltools import XSDWriter
    >>> print(XSDWriter.get_subgroupsiteminsertion(
    ...     'setitems', 'hland_v1', 1))    # doctest: +ELLIPSIS
        <element name="control"
    ...
        </element>
        <element name="inputs"
    ...
        <element name="fluxes"
    ...
        <element name="states"
    ...
        <element name="logs"
    ...
    """
    # Instantiate the application model once and emit one insertion
    # per variable subgroup (control, inputs, fluxes, ...).
    model = importtools.prepare_model(modelname)
    return '\n'.join(
        cls.get_subgroupiteminsertion(itemgroup, model, subvars, indent)
        for subvars in cls._get_subvars(model))
def get_subgroupiteminsertion(
        cls, itemgroup, model, subgroup, indent) -> str:
    """Return a string defining the required types for the given
    combination of an exchange item group and a specific variable
    subgroup of an application model or class |Node|.

    Note that for `setitems` and `getitems` `setitemType` and
    `getitemType` are referenced, respectively, and for all others
    the model specific `mathitemType`:

    >>> from hydpy import prepare_model
    >>> model = prepare_model('hland_v1')
    >>> from hydpy.auxs.xmltools import XSDWriter
    >>> print(XSDWriter.get_subgroupiteminsertion(    # doctest: +ELLIPSIS
    ...     'setitems', model, model.parameters.control, 1))
        <element name="control"
                 minOccurs="0"
                 maxOccurs="unbounded">
            <complexType>
                <sequence>
                    <element ref="hpcb:selections"
                             minOccurs="0"/>
                    <element ref="hpcb:devices"
                             minOccurs="0"/>
                    <element name="area"
                             type="hpcb:setitemType"
                             minOccurs="0"
                             maxOccurs="unbounded"/>
                    <element name="nmbzones"
    ...
                </sequence>
            </complexType>
        </element>

    >>> print(XSDWriter.get_subgroupiteminsertion(    # doctest: +ELLIPSIS
    ...     'getitems', model, model.parameters.control, 1))
        <element name="control"
    ...
                    <element name="area"
                             type="hpcb:getitemType"
                             minOccurs="0"
                             maxOccurs="unbounded"/>
    ...

    >>> print(XSDWriter.get_subgroupiteminsertion(    # doctest: +ELLIPSIS
    ...     'additems', model, model.parameters.control, 1))
        <element name="control"
    ...
                    <element name="area"
                             type="hpcb:hland_v1_mathitemType"
                             minOccurs="0"
                             maxOccurs="unbounded"/>
    ...

    For sequence classes, additional "series" elements are added:

    >>> print(XSDWriter.get_subgroupiteminsertion(    # doctest: +ELLIPSIS
    ...     'setitems', model, model.sequences.fluxes, 1))
        <element name="fluxes"
    ...
                    <element name="tmean"
                             type="hpcb:setitemType"
                             minOccurs="0"
                             maxOccurs="unbounded"/>
                    <element name="tmean.series"
                             type="hpcb:setitemType"
                             minOccurs="0"
                             maxOccurs="unbounded"/>
                    <element name="tc"
    ...
                </sequence>
            </complexType>
        </element>
    """
    blanks1 = ' ' * (indent * 4)
    # Continuation lines of the variable elements are aligned with the
    # attributes of their opening tag (12 blanks nesting + 9 characters
    # of `<element ` = (indent+5)*4+1).
    blanks2 = ' ' * ((indent+5) * 4 + 1)
    subs = [
        f'{blanks1}<element name="{subgroup.name}"',
        f'{blanks1}         minOccurs="0"',
        f'{blanks1}         maxOccurs="unbounded">',
        f'{blanks1}    <complexType>',
        f'{blanks1}        <sequence>',
        f'{blanks1}            <element ref="hpcb:selections"',
        f'{blanks1}                     minOccurs="0"/>',
        f'{blanks1}            <element ref="hpcb:devices"',
        f'{blanks1}                     minOccurs="0"/>']
    # Only the `control` subgroup gets no additional `.series`
    # variants of its variables.
    seriesflags = (False,) if subgroup.name == 'control' else (False, True)
    for variable in subgroup:
        for series in seriesflags:
            name = f'{variable.name}.series' if series else variable.name
            subs.append(f'{blanks1}            <element name="{name}"')
            # `setitems`/`getitems` use the general item types; all
            # other groups use the model-specific math item type.
            if itemgroup == 'setitems':
                subs.append(f'{blanks2}type="hpcb:setitemType"')
            elif itemgroup == 'getitems':
                subs.append(f'{blanks2}type="hpcb:getitemType"')
            else:
                subs.append(
                    f'{blanks2}type="hpcb:{model.name}_mathitemType"')
            subs.append(f'{blanks2}minOccurs="0"')
            subs.append(f'{blanks2}maxOccurs="unbounded"/>')
    subs.extend([
        f'{blanks1}        </sequence>',
        f'{blanks1}    </complexType>',
        f'{blanks1}</element>'])
    return '\n'.join(subs)
def array2mask(cls, array=None, **kwargs):
    """Create a new mask object based on the given |numpy.ndarray|
    and return it."""
    # Force a boolean dtype without mutating the caller-visible kwargs
    # semantics (a fresh dict is built either way).
    options = dict(kwargs, dtype=bool)
    if array is not None:
        return numpy.asarray(array, **options).view(cls)
    # No data given: return an empty (length-zero) mask.
    return numpy.ndarray.__new__(cls, 0, **options)
def new(cls, variable, **kwargs):
    """Return a new |DefaultMask| object associated with the
    given |Variable| object."""
    # The default mask marks every entry of the variable as relevant.
    all_true = numpy.full(variable.shape, True)
    return cls.array2mask(all_true)
def new(cls, variable, **kwargs):
    """Return a new |IndexMask| object of the same shape as the
    parameter referenced by |property| |IndexMask.refindices|.
    Entries are only |True|, if the integer values of the
    respective entries of the referenced parameter are contained
    in the |IndexMask| class attribute tuple `RELEVANT_VALUES`.
    """
    refindices = cls.get_refindices(variable)
    # Index values below one indicate an unprepared index parameter.
    if numpy.min(getattr(refindices, 'values', 0)) < 1:
        raise RuntimeError(
            f'The mask of parameter {objecttools.elementphrase(variable)} '
            f'cannot be determined, as long as parameter '
            f'`{refindices.name}` is not prepared properly.')
    # Mark every entry whose index value is one of the relevant ones.
    mask = numpy.isin(refindices.values, cls.RELEVANT_VALUES)
    return cls.array2mask(mask, **kwargs)
def relevantindices(self) -> List[int]:
    """A |list| of all currently relevant indices, calculated as an
    intercection of the (constant) class attribute `RELEVANT_VALUES`
    and the (variable) property |IndexMask.refindices|."""
    relevant = set(self.RELEVANT_VALUES)
    # `numpy.unique` returns the distinct index values in sorted order.
    return [value for value in numpy.unique(self.refindices.values)
            if value in relevant]
def calc_qref_v1(self):
    """Determine the reference discharge within the given space-time
    interval.

    Required state sequences:
      |QZ|
      |QA|

    Calculated flux sequence:
      |QRef|

    Basic equation:
      :math:`QRef = \\frac{QZ_{new}+QZ_{old}+QA_{old}}{3}`

    Example:

        >>> from hydpy.models.lstream import *
        >>> parameterstep()
        >>> states.qz.new = 3.0
        >>> states.qz.old = 2.0
        >>> states.qa.old = 1.0
        >>> model.calc_qref_v1()
        >>> fluxes.qref
        qref(2.0)
    """
    states_new = self.sequences.states.fastaccess_new
    states_old = self.sequences.states.fastaccess_old
    fluxes = self.sequences.fluxes.fastaccess
    # Average the new inflow with the old inflow and the old outflow.
    fluxes.qref = (states_new.qz + states_old.qz + states_old.qa) / 3.
def calc_rk_v1(self):
    """Determine the actual traveling time of the water (not of the
    wave!).

    Required derived parameter:
      |Sek|

    Required flux sequences:
      |AG|
      |QRef|

    Calculated flux sequence:
      |RK|

    Basic equation:
      :math:`RK = \\frac{Laen \\cdot A}{QRef}`

    Examples:

        Note that the traveling time is determined in the unit of the
        actual simulation step size:

        >>> from hydpy.models.lstream import *
        >>> parameterstep()
        >>> laen(25.0)
        >>> derived.sek(24*60*60)
        >>> fluxes.ag = 10.0
        >>> fluxes.qref = 1.0
        >>> model.calc_rk_v1()
        >>> fluxes.rk
        rk(2.893519)

        Zero or negative values of |AG| or |QRef| result in a |RK|
        value of zero:

        >>> fluxes.ag = 0.0
        >>> fluxes.qref = 1.0
        >>> model.calc_rk_v1()
        >>> fluxes.rk
        rk(0.0)

        >>> fluxes.ag = 10.0
        >>> fluxes.qref = 0.0
        >>> model.calc_rk_v1()
        >>> fluxes.rk
        rk(0.0)
    """
    con = self.parameters.control.fastaccess
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    if (flu.ag > 0.) and (flu.qref > 0.):
        # Reach volume (factor 1000 converts `laen` from km to m)
        # divided by discharge, expressed in simulation steps.
        volume = 1000. * con.laen * flu.ag
        flu.rk = volume / (der.sek * flu.qref)
    else:
        flu.rk = 0.
def calc_am_um_v1(self):
    """Calculate the flown through area and the wetted perimeter of the
    main channel.

    Note that the main channel is assumed to have identical slopes on
    both sides and that water flowing exactly above the main channel is
    contributing to |AM|.  Both theoretical surfaces separating water
    above the main channel from water above both forelands are
    contributing to |UM|.

    Required control parameters:
      |HM|
      |BM|
      |BNM|

    Required flux sequence:
      |H|

    Calculated flux sequence:
      |AM|
      |UM|

    Examples:

        Generally, a trapezoid with reflection symmetry is assumed.
        Here its smaller base (bottom) has a length of 2 meters, its
        legs show an inclination of 1 meter per 4 meters, and its
        height (depths) is 1 meter:

        >>> from hydpy.models.lstream import *
        >>> parameterstep()
        >>> bm(2.0)
        >>> bnm(4.0)
        >>> hm(1.0)

        Normal flow conditions, water stays within the main channel
        (|H| < |HM|):

        >>> fluxes.h = 0.5
        >>> model.calc_am_um_v1()
        >>> fluxes.am
        am(2.0)
        >>> fluxes.um
        um(6.123106)

        High flow conditions, water flows over the forelands as well
        (|H| > |HM|):

        >>> fluxes.h = 1.5
        >>> model.calc_am_um_v1()
        >>> fluxes.am
        am(11.0)
        >>> fluxes.um
        um(11.246211)

        Special case of a main channel with zero height:

        >>> hm(0.0)
        >>> model.calc_am_um_v1()
        >>> fluxes.am
        am(3.0)
        >>> fluxes.um
        um(5.0)

        Special case of an actual water stage not larger than zero
        (empty channel):

        >>> fluxes.h = 0.0
        >>> hm(1.0)
        >>> model.calc_am_um_v1()
        >>> fluxes.am
        am(0.0)
        >>> fluxes.um
        um(0.0)
    """
    con = self.parameters.control.fastaccess
    flu = self.sequences.fluxes.fastaccess
    stage = flu.h
    if stage <= 0.:
        # Empty channel.
        flu.am = 0.
        flu.um = 0.
    elif stage < con.hm:
        # Water table within the trapezoidal main channel.
        flu.am = stage * (con.bm + stage * con.bnm)
        flu.um = con.bm + 2. * stage * (1. + con.bnm ** 2) ** .5
    else:
        # Water table above the main channel: full trapezoid plus a
        # rectangle of width `top` for the excess stage.
        excess = stage - con.hm
        flu.am = (con.hm * (con.bm + con.hm * con.bnm) +
                  (excess * (con.bm + 2. * con.hm * con.bnm)))
        flu.um = (con.bm + (2. * con.hm * (1. + con.bnm ** 2) ** .5) +
                  (2 * excess))
def calc_qm_v1(self):
    """Calculate the discharge of the main channel after
    Manning-Strickler.

    Required control parameters:
      |EKM|
      |SKM|
      |Gef|

    Required flux sequence:
      |AM|
      |UM|

    Calculated flux sequence:
      |lstream_fluxes.QM|

    Examples:

        For appropriate strictly positive values:

        >>> from hydpy.models.lstream import *
        >>> parameterstep()
        >>> ekm(2.0)
        >>> skm(50.0)
        >>> gef(0.01)
        >>> fluxes.am = 3.0
        >>> fluxes.um = 7.0
        >>> model.calc_qm_v1()
        >>> fluxes.qm
        qm(17.053102)

        For zero or negative values of the flown through surface or
        the wetted perimeter:

        >>> fluxes.am = -1.0
        >>> fluxes.um = 7.0
        >>> model.calc_qm_v1()
        >>> fluxes.qm
        qm(0.0)

        >>> fluxes.am = 3.0
        >>> fluxes.um = 0.0
        >>> model.calc_qm_v1()
        >>> fluxes.qm
        qm(0.0)
    """
    con = self.parameters.control.fastaccess
    flu = self.sequences.fluxes.fastaccess
    area, perimeter = flu.am, flu.um
    if (area > 0.) and (perimeter > 0.):
        # Manning-Strickler: Q = e*k * A^(5/3) / U^(2/3) * I^(1/2).
        flu.qm = (con.ekm * con.skm *
                  area ** (5. / 3.) / perimeter ** (2. / 3.) *
                  con.gef ** .5)
    else:
        flu.qm = 0.
def calc_av_uv_v1(self):
    """Calculate the flown through area and the wetted perimeter of
    both forelands.

    Note that each foreland lies between the main channel and one
    outer embankment and that water flowing exactly above a foreland
    is contributing to |AV|.  The theoretical surface separating water
    above the main channel from water above the foreland is not
    contributing to |UV|, but the surface separating water above the
    foreland from water above its outer embankment is contributing to
    |UV|.

    Required control parameters:
      |HM|
      |BV|
      |BNV|

    Required derived parameter:
      |HV|

    Required flux sequence:
      |H|

    Calculated flux sequence:
      |AV|
      |UV|

    Examples:

        Generally, right trapezoids are assumed.  Here, for
        simplicity, both forelands are assumed to be symmetrical.
        Their smaller bases (bottoms) have a length of 2 meters, their
        non-vertical legs show an inclination of 1 meter per 4 meters,
        and their height (depths) is 1 meter.  Both forelands lie
        1 meter above the main channels bottom.

        >>> from hydpy.models.lstream import *
        >>> parameterstep()
        >>> hm(1.0)
        >>> bv(2.0)
        >>> bnv(4.0)
        >>> derived.hv(1.0)

        Normal flow conditions, water stays within the main channel
        (|H| < |HM|):

        >>> fluxes.h = 0.5
        >>> model.calc_av_uv_v1()
        >>> fluxes.av
        av(0.0, 0.0)
        >>> fluxes.uv
        uv(0.0, 0.0)

        Moderate high flow conditions, water flows over both
        forelands, but not over their embankments
        (|HM| < |H| < (|HM| + |HV|)):

        >>> fluxes.h = 1.5
        >>> model.calc_av_uv_v1()
        >>> fluxes.av
        av(1.5, 1.5)
        >>> fluxes.uv
        uv(4.061553, 4.061553)

        Extreme high flow conditions, water flows over both forelands
        and their outer embankments ((|HM| + |HV|) < |H|):

        >>> fluxes.h = 2.5
        >>> model.calc_av_uv_v1()
        >>> fluxes.av
        av(7.0, 7.0)
        >>> fluxes.uv
        uv(6.623106, 6.623106)

        Zero widths or heights of the forelands are handled properly:

        >>> bv.left = 0.0
        >>> derived.hv.right = 0.0
        >>> model.calc_av_uv_v1()
        >>> fluxes.av
        av(4.0, 3.0)
        >>> fluxes.uv
        uv(4.623106, 3.5)
    """
    con = self.parameters.control.fastaccess
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    for idx in range(2):
        height = der.hv[idx]
        width = con.bv[idx]
        slope = con.bnv[idx]
        if flu.h <= con.hm:
            # Water table below the foreland bottom.
            flu.av[idx] = 0.
            flu.uv[idx] = 0.
        elif flu.h <= (con.hm + height):
            # Water table within the right-trapezoidal foreland.
            depth = flu.h - con.hm
            flu.av[idx] = depth * (width + depth * slope / 2.)
            flu.uv[idx] = width + depth * (1. + slope ** 2) ** .5
        else:
            # Water table above the foreland: full trapezoid plus a
            # rectangle of the foreland's top width for the excess.
            excess = flu.h - (con.hm + height)
            flu.av[idx] = (height * (width + height * slope / 2.) +
                           (excess * (width + height * slope)))
            flu.uv[idx] = ((width) + (height * (1. + slope ** 2) ** .5) +
                           (excess))
def calc_qv_v1(self):
    """Calculate the discharge of both forelands after
    Manning-Strickler.

    Required control parameters:
      |EKV|
      |SKV|
      |Gef|

    Required flux sequence:
      |AV|
      |UV|

    Calculated flux sequence:
      |lstream_fluxes.QV|

    Examples:

        For appropriate strictly positive values:

        >>> from hydpy.models.lstream import *
        >>> parameterstep()
        >>> ekv(2.0)
        >>> skv(50.0)
        >>> gef(0.01)
        >>> fluxes.av = 3.0
        >>> fluxes.uv = 7.0
        >>> model.calc_qv_v1()
        >>> fluxes.qv
        qv(17.053102, 17.053102)

        For zero or negative values of the flown through surface or
        the wetted perimeter:

        >>> fluxes.av = -1.0, 3.0
        >>> fluxes.uv = 7.0, 0.0
        >>> model.calc_qv_v1()
        >>> fluxes.qv
        qv(0.0, 0.0)
    """
    con = self.parameters.control.fastaccess
    flu = self.sequences.fluxes.fastaccess
    for idx in range(2):
        area = flu.av[idx]
        perimeter = flu.uv[idx]
        if (area > 0.) and (perimeter > 0.):
            # Manning-Strickler per foreland side.
            flu.qv[idx] = (con.ekv[idx] * con.skv[idx] *
                           area ** (5. / 3.) / perimeter ** (2. / 3.) *
                           con.gef ** .5)
        else:
            flu.qv[idx] = 0.
def calc_avr_uvr_v1(self):
    """Calculate the flown through area and the wetted perimeter of
    both outer embankments.

    Note that each outer embankment lies beyond its foreland and
    that all water flowing exactly above an embankment is added to
    |AVR|.  The theoretical surface separating water above the
    foreland from water above its embankment is not contributing to
    |UVR|.

    Required control parameters:
      |HM|
      |BNVR|

    Required derived parameter:
      |HV|

    Required flux sequence:
      |H|

    Calculated flux sequence:
      |AVR|
      |UVR|

    Examples:

        Generally, triangles are assumed, with the vertical side
        separating the foreland from its outer embankment.  Here, for
        simplicity, both embankments are assumed to be symmetrical.
        Their inclinations are 1 meter per 4 meters and their lowest
        point is 1 meter above the forelands bottom and 2 meters
        above the main channels bottom:

        >>> from hydpy.models.lstream import *
        >>> parameterstep()
        >>> hm(1.0)
        >>> bnvr(4.0)
        >>> derived.hv(1.0)

        Moderate high flow conditions, water flows over the
        forelands, but not over their outer embankments
        (|HM| < |H| < (|HM| + |HV|)):

        >>> fluxes.h = 1.5
        >>> model.calc_avr_uvr_v1()
        >>> fluxes.avr
        avr(0.0, 0.0)
        >>> fluxes.uvr
        uvr(0.0, 0.0)

        Extreme high flow conditions, water flows over both forelands
        and their outer embankments ((|HM| + |HV|) < |H|):

        >>> fluxes.h = 2.5
        >>> model.calc_avr_uvr_v1()
        >>> fluxes.avr
        avr(0.5, 0.5)
        >>> fluxes.uvr
        uvr(2.061553, 2.061553)
    """
    con = self.parameters.control.fastaccess
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    for idx in range(2):
        crest = con.hm + der.hv[idx]
        if flu.h <= crest:
            # Water table below the embankment's lowest point.
            flu.avr[idx] = 0.
            flu.uvr[idx] = 0.
        else:
            # Triangular cross section above the embankment's lowest
            # point.
            flu.avr[idx] = (flu.h - crest) ** 2 * con.bnvr[idx] / 2.
            flu.uvr[idx] = (flu.h - crest) * (1. + con.bnvr[idx] ** 2) ** .5
def calc_qvr_v1(self):
    """Calculate the discharge of both outer embankments after
    Manning-Strickler.

    Required control parameters:
      |EKV|
      |SKV|
      |Gef|

    Required flux sequence:
      |AVR|
      |UVR|

    Calculated flux sequence:
      |QVR|

    Examples:

        For appropriate strictly positive values:

        >>> from hydpy.models.lstream import *
        >>> parameterstep()
        >>> ekv(2.0)
        >>> skv(50.0)
        >>> gef(0.01)
        >>> fluxes.avr = 3.0
        >>> fluxes.uvr = 7.0
        >>> model.calc_qvr_v1()
        >>> fluxes.qvr
        qvr(17.053102, 17.053102)

        For zero or negative values of the flown through surface or
        the wetted perimeter:

        >>> fluxes.avr = -1.0, 3.0
        >>> fluxes.uvr = 7.0, 0.0
        >>> model.calc_qvr_v1()
        >>> fluxes.qvr
        qvr(0.0, 0.0)
    """
    con = self.parameters.control.fastaccess
    flu = self.sequences.fluxes.fastaccess
    for idx in range(2):
        area = flu.avr[idx]
        perimeter = flu.uvr[idx]
        if (area > 0.) and (perimeter > 0.):
            # Manning-Strickler per embankment side.
            flu.qvr[idx] = (con.ekv[idx] * con.skv[idx] *
                            area ** (5. / 3.) / perimeter ** (2. / 3.) *
                            con.gef ** .5)
        else:
            flu.qvr[idx] = 0.
def calc_ag_v1(self):
    """Sum the flown through areas of the total cross section.

    Required flux sequences: |AM| |AV| |AVR|
    Calculated flux sequence: |AG|

    Example:

        >>> from hydpy.models.lstream import *
        >>> parameterstep()
        >>> fluxes.am = 1.0
        >>> fluxes.av= 2.0, 3.0
        >>> fluxes.avr = 4.0, 5.0
        >>> model.calc_ag_v1()
        >>> fluxes.ag
        ag(15.0)
    """
    fluxes = self.sequences.fluxes.fastaccess
    # Accumulate in the original left-to-right order: main channel,
    # both forelands, both outer embankments.
    total = fluxes.am
    total = total+fluxes.av[0]
    total = total+fluxes.av[1]
    total = total+fluxes.avr[0]
    total = total+fluxes.avr[1]
    fluxes.ag = total
def calc_qg_v1(self):
    """Calculate the discharge of the total cross section.

    Method |calc_qg_v1| applies the actual versions of all methods for
    calculating the flown through areas, wetted perimeters and
    discharges of the different cross section compartments.  Hence its
    requirements might be different for various application models.
    """
    fluxes = self.sequences.fluxes.fastaccess
    # Refresh all compartment geometries and discharges in the
    # original order before summing up.
    for submethod in (self.calc_am_um, self.calc_qm,
                      self.calc_av_uv, self.calc_qv,
                      self.calc_avr_uvr, self.calc_qvr):
        submethod()
    fluxes.qg = (fluxes.qm +
                 fluxes.qv[0]+fluxes.qv[1] +
                 fluxes.qvr[0]+fluxes.qvr[1])
def calc_hmin_qmin_hmax_qmax_v1(self):
    """Determine a starting interval for iteration methods like the one
    implemented in method |calc_h_v1|.

    The resulting interval is determined in a manner, that on the one
    hand :math:`Qmin \\leq QRef \\leq Qmax` is fulfilled and on the
    other hand the results of method |calc_qg_v1| are continuous for
    :math:`Hmin \\leq H \\leq Hmax`.

    Required control parameter: |HM|
    Required derived parameters: |HV| |lstream_derived.QM|
    |lstream_derived.QV|
    Required flux sequence: |QRef|
    Calculated aide sequences: |HMin| |HMax| |QMin| |QMax|

    Besides the mentioned required parameters and sequences, those of
    the actual method for calculating the discharge of the total cross
    section might be required.  This is the case whenever water flows
    over both outer embankments.  In such occasions no previously
    determined upper boundary values are available and method
    |calc_hmin_qmin_hmax_qmax_v1| needs to increase the value of
    :math:`HMax` successively until the condition
    :math:`QG \\leq QMax` is met.
    """
    con = self.parameters.control.fastaccess
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    aid = self.sequences.aides.fastaccess
    if flu.qref <= der.qm:
        # Reference discharge fits into the main channel.
        aid.hmin = 0.
        aid.qmin = 0.
        aid.hmax = con.hm
        aid.qmax = der.qm
    elif flu.qref <= min(der.qv[0], der.qv[1]):
        # Water rises at most to the lower of both foreland crests.
        aid.hmin = con.hm
        aid.qmin = der.qm
        aid.hmax = con.hm+min(der.hv[0], der.hv[1])
        aid.qmax = min(der.qv[0], der.qv[1])
    elif flu.qref < max(der.qv[0], der.qv[1]):
        # Stage lies between the two foreland crest levels.
        aid.hmin = con.hm+min(der.hv[0], der.hv[1])
        aid.qmin = min(der.qv[0], der.qv[1])
        aid.hmax = con.hm+max(der.hv[0], der.hv[1])
        aid.qmax = max(der.qv[0], der.qv[1])
    else:
        # Water flows over both outer embankments; no precalculated
        # upper boundary exists, so double the stage until the
        # resulting discharge exceeds the reference discharge.
        flu.h = con.hm+max(der.hv[0], der.hv[1])
        aid.hmin = flu.h
        # NOTE(review): `flu.qg` is read here before `calc_qg` has been
        # evaluated for this stage, so `aid.qmin` takes whatever
        # discharge value was stored last -- confirm this is intended.
        aid.qmin = flu.qg
        while True:
            flu.h *= 2.
            self.calc_qg()
            if flu.qg < flu.qref:
                # Still below the reference: tighten the lower bound.
                aid.hmin = flu.h
                aid.qmin = flu.qg
            else:
                # Reference discharge bracketed: fix the upper bound.
                aid.hmax = flu.h
                aid.qmax = flu.qg
                break
def calc_h_v1(self):
    """Approximate the water stage resulting in a certain reference
    discharge with the Pegasus iteration method.

    Required control parameters: |QTol| |HTol|
    Required flux sequence: |QRef|
    Modified aide sequences: |HMin| |HMax| |QMin| |QMax|
    Calculated flux sequence: |H|

    Besides the parameters and sequences given above, those of the
    actual method for calculating the discharge of the total cross
    section are required.

    Essentially, the Pegasus method is a root finding algorithm which
    sequentially decreases its search radius (like the simple bisection
    algorithm) and shows superlinear convergence properties (like the
    Newton-Raphson algorithm).  Ideally, its convergence should be
    proved for each application model to be derived from
    HydPy-L-Stream.  The iteration starts from the bracketing interval
    ([HMin, HMax], [QMin, QMax]) prepared by a method like
    |calc_hmin_qmin_hmax_qmax_v1| and stops as soon as either the
    discharge error falls below |QTol| or the stage interval becomes
    narrower than |HTol|.
    """
    con = self.parameters.control.fastaccess
    flu = self.sequences.fluxes.fastaccess
    aid = self.sequences.aides.fastaccess
    # Work with discharge residuals relative to the reference value,
    # so the sought root is zero.
    aid.qmin -= flu.qref
    aid.qmax -= flu.qref
    if modelutils.fabs(aid.qmin) < con.qtol:
        # The lower bracket endpoint is already accurate enough.
        flu.h = aid.hmin
        self.calc_qg()
    elif modelutils.fabs(aid.qmax) < con.qtol:
        # The upper bracket endpoint is already accurate enough.
        flu.h = aid.hmax
        self.calc_qg()
    elif modelutils.fabs(aid.hmax-aid.hmin) < con.htol:
        # The bracket is already narrower than the stage tolerance:
        # take its midpoint.
        flu.h = (aid.hmin+aid.hmax)/2.
        self.calc_qg()
    else:
        while True:
            # Secant (regula falsi) estimate of the root within the
            # current bracket.
            flu.h = aid.hmin-aid.qmin*(aid.hmax-aid.hmin)/(aid.qmax-aid.qmin)
            self.calc_qg()
            aid.qtest = flu.qg-flu.qref
            if modelutils.fabs(aid.qtest) < con.qtol:
                return
            if (((aid.qmax < 0.) and (aid.qtest < 0.)) or
                    ((aid.qmax > 0.) and (aid.qtest > 0.))):
                # New residual has the same sign as the upper one: the
                # root lies between HMin and the new stage.  Rescale the
                # retained lower residual -- the Pegasus modification
                # that avoids regula falsi's one-sided stagnation.
                aid.qmin *= aid.qmax/(aid.qmax+aid.qtest)
            else:
                # Root lies between the new stage and HMax: the old
                # upper endpoint becomes the new lower endpoint.
                aid.hmin = aid.hmax
                aid.qmin = aid.qmax
            # The new stage always becomes the upper endpoint.
            aid.hmax = flu.h
            aid.qmax = aid.qtest
            if modelutils.fabs(aid.hmax-aid.hmin) < con.htol:
                return
def calc_qa_v1(self):
    """Calculate outflow.

    The working equation is the analytical solution of the linear
    storage equation under the assumption of constant change in inflow
    during the simulation time step.

    Required flux sequence: |RK|
    Required state sequence: |QZ|
    Updated state sequence: |QA|

    Basic equation:
       :math:`QA_{neu} = QA_{alt} +
       (QZ_{alt}-QA_{alt}) \\cdot (1-exp(-RK^{-1})) +
       (QZ_{neu}-QZ_{alt}) \\cdot (1-RK\\cdot(1-exp(-RK^{-1})))`

    Examples:

        A normal test case:

        >>> from hydpy.models.lstream import *
        >>> parameterstep()
        >>> fluxes.rk(0.1)
        >>> states.qz.old = 2.0
        >>> states.qz.new = 4.0
        >>> states.qa.old = 3.0
        >>> model.calc_qa_v1()
        >>> states.qa
        qa(3.800054)

        First extreme test case (zero division is circumvented):

        >>> fluxes.rk(0.0)
        >>> model.calc_qa_v1()
        >>> states.qa
        qa(4.0)

        Second extreme test case (numerical overflow is circumvented):

        >>> fluxes.rk(1e201)
        >>> model.calc_qa_v1()
        >>> states.qa
        qa(5.0)
    """
    flu = self.sequences.fluxes.fastaccess
    old = self.sequences.states.fastaccess_old
    new = self.sequences.states.fastaccess_new
    aid = self.sequences.aides.fastaccess
    if flu.rk <= 0.:
        # Vanishing retention time: outflow equals current inflow
        # (also avoids division by zero below).
        new.qa = new.qz
        return
    if flu.rk > 1e200:
        # Extremely large retention time: use the limit case to avoid
        # numerical overflow in the exponential term.
        new.qa = old.qa+new.qz-old.qz
        return
    aid.temp = (1.-modelutils.exp(-1./flu.rk))
    new.qa = (old.qa +
              (old.qz-old.qa)*aid.temp +
              (new.qz-old.qz)*(1.-flu.rk*aid.temp))
def pick_q_v1(self):
    """Update inflow by summing the discharge of all inlet nodes."""
    states = self.sequences.states.fastaccess
    inlets = self.sequences.inlets.fastaccess
    # Accumulate locally before writing the final sum back.
    total = 0.
    for idx in range(inlets.len_q):
        total += inlets.q[idx][0]
    states.qz = total
def pass_q_v1(self):
    """Update outflow by adding the calculated outflow to the
    outlet node."""
    states = self.sequences.states.fastaccess
    outlets = self.sequences.outlets.fastaccess
    # Add (not assign): other models may contribute to the same node.
    outlets.q[0] = outlets.q[0]+states.qa
def calc_tc_v1(self):
    """Adjust the measured air temperature to the altitude of the
    individual zones.

    Required control parameters: |NmbZones| |TCAlt| |ZoneZ| |ZRelT|
    Required input sequence: |hland_inputs.T|
    Calculated flux sequences: |TC|

    Basic equation:
        :math:`TC = T - TCAlt \\cdot (ZoneZ-ZRelT)`

    Examples:

        Prepare two zones, the first one lying at the reference
        height and the second one 200 meters above:

        >>> from hydpy.models.hland import *
        >>> parameterstep('1d')
        >>> nmbzones(2)
        >>> zrelt(2.0)
        >>> zonez(2.0, 4.0)

        Applying the usual temperature lapse rate of 0.6°C/100m does
        not affect the temperature of the first zone but reduces the
        temperature of the second zone by 1.2°C:

        >>> tcalt(0.6)
        >>> inputs.t = 5.0
        >>> model.calc_tc_v1()
        >>> fluxes.tc
        tc(5.0, 3.8)
    """
    control = self.parameters.control.fastaccess
    inputs = self.sequences.inputs.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    for zone in range(control.nmbzones):
        # Lapse-rate correction relative to the reference height.
        offset = control.tcalt[zone]*(control.zonez[zone]-control.zrelt)
        fluxes.tc[zone] = inputs.t-offset
def calc_tmean_v1(self):
    """Calculate the areal mean temperature of the subbasin.

    Required derived parameter: |RelZoneArea|
    Required flux sequence: |TC|
    Calculated flux sequences: |TMean|

    Examples:

        Prepare two zones, the first one being twice as large
        as the second one:

        >>> from hydpy.models.hland import *
        >>> parameterstep('1d')
        >>> nmbzones(2)
        >>> derived.relzonearea(2.0/3.0, 1.0/3.0)

        With temperature values of 5°C and 8°C of the respective
        zones, the mean temperature is 6°C:

        >>> fluxes.tc = 5.0, 8.0
        >>> model.calc_tmean_v1()
        >>> fluxes.tmean
        tmean(6.0)
    """
    control = self.parameters.control.fastaccess
    derived = self.parameters.derived.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    # Area-weighted average over all zones.
    mean = 0.
    for zone in range(control.nmbzones):
        mean += derived.relzonearea[zone]*fluxes.tc[zone]
    fluxes.tmean = mean
def calc_fracrain_v1(self):
    """Determine the temperature-dependent fraction of (liquid)
    rainfall and (total) precipitation.

    Required control parameters: |NmbZones| |TT| |TTInt|
    Required flux sequence: |TC|
    Calculated flux sequences: |FracRain|

    Basic equation:
        :math:`FracRain = \\frac{TC-(TT-\\frac{TTInt}{2})}{TTInt}`

    Restriction:
        :math:`0 \\leq FracRain \\leq 1`

    Examples:

        The threshold temperature of seven zones is 0°C and the
        corresponding temperature interval of mixed precipitation 2°C:

        >>> from hydpy.models.hland import *
        >>> parameterstep('1d')
        >>> nmbzones(7)
        >>> tt(0.0)
        >>> ttint(2.0)

        The fraction of rainfall is zero below -1°C, is one above 1°C
        and increases linearly in between:

        >>> fluxes.tc = -10.0, -1.0, -0.5, 0.0, 0.5, 1.0, 10.0
        >>> model.calc_fracrain_v1()
        >>> fluxes.fracrain
        fracrain(0.0, 0.0, 0.25, 0.5, 0.75, 1.0, 1.0)

        Note the special case of a zero temperature interval.  With an
        actual temperature being equal to the threshold temperature,
        the rainfall fraction is one:

        >>> ttint(0.0)
        >>> model.calc_fracrain_v1()
        >>> fluxes.fracrain
        fracrain(0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0)
    """
    control = self.parameters.control.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    for zone in range(control.nmbzones):
        half = control.ttint[zone]/2.
        lower = control.tt[zone]-half
        upper = control.tt[zone]+half
        temperature = fluxes.tc[zone]
        if temperature >= upper:
            # Pure rainfall (also covers the zero-interval case at the
            # threshold temperature itself).
            fluxes.fracrain[zone] = 1.
        elif temperature <= lower:
            # Pure snowfall.
            fluxes.fracrain[zone] = 0.
        else:
            # Linear transition within the mixed-precipitation range.
            fluxes.fracrain[zone] = (temperature-lower)/control.ttint[zone]
def calc_rfc_sfc_v1(self):
    """Calculate the corrected fractions of rainfall/snowfall and
    total precipitation.

    Required control parameters: |NmbZones| |RfCF| |SfCF|
    Required flux sequence: |FracRain|
    Calculated flux sequences: |RfC| |SfC|

    Basic equations:
        :math:`RfC = RfCF \\cdot FracRain` \n
        :math:`SfC = SfCF \\cdot (1 - FracRain)`

    Examples:

        Assume five zones with different fractions of rainfall and
        total precipitation:

        >>> from hydpy.models.hland import *
        >>> parameterstep('1d')
        >>> nmbzones(5)
        >>> fluxes.fracrain = 0.0, 0.25, 0.5, 0.75, 1.0

        With no rainfall and no snowfall correction (implied by the
        respective factors being one), the corrected fraction related
        to rainfall is identical with the original fraction and the
        corrected fraction related to snowfall behaves opposite:

        >>> rfcf(1.0)
        >>> sfcf(1.0)
        >>> model.calc_rfc_sfc_v1()
        >>> fluxes.rfc
        rfc(0.0, 0.25, 0.5, 0.75, 1.0)
        >>> fluxes.sfc
        sfc(1.0, 0.75, 0.5, 0.25, 0.0)

        With a negative rainfall correction of 20% and a positive
        snowfall correction of 20% the corrected fractions are:

        >>> rfcf(0.8)
        >>> sfcf(1.2)
        >>> model.calc_rfc_sfc_v1()
        >>> fluxes.rfc
        rfc(0.0, 0.2, 0.4, 0.6, 0.8)
        >>> fluxes.sfc
        sfc(1.2, 0.9, 0.6, 0.3, 0.0)
    """
    control = self.parameters.control.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    for zone in range(control.nmbzones):
        frac = fluxes.fracrain[zone]
        fluxes.rfc[zone] = frac*control.rfcf[zone]
        fluxes.sfc[zone] = (1.-frac)*control.sfcf[zone]
def calc_pc_v1(self):
    """Apply the precipitation correction factors and adjust
    precipitation to the altitude of the individual zones.

    Required control parameters: |NmbZones| |PCorr| |PCAlt| |ZoneZ|
    |ZRelP|
    Required input sequence: |P|
    Required flux sequences: |RfC| |SfC|
    Calculated flux sequences: |PC|

    Basic equation:
        :math:`PC = P \\cdot PCorr
        \\cdot (1+PCAlt \\cdot (ZoneZ-ZRelP)) \\cdot (RfC + SfC)`

    Examples:

        Five zones are at an elevation of 200 m.  A precipitation
        value of 5 mm has been measured at a gauge at an elevation
        of 300 m:

        >>> from hydpy.models.hland import *
        >>> parameterstep('1d')
        >>> nmbzones(5)
        >>> zrelp(2.0)
        >>> zonez(3.0)
        >>> inputs.p = 5.0

        >>> pcorr(1.3, 1.0, 1.0, 1.0, 1.3)
        >>> pcalt(0.0, 0.1, 0.0, 0.0, 0.1)
        >>> fluxes.rfc = 0.5, 0.5, 0.4, 0.5, 0.4
        >>> fluxes.sfc = 0.5, 0.5, 0.5, 0.7, 0.7
        >>> model.calc_pc_v1()
        >>> fluxes.pc
        pc(6.5, 5.5, 4.5, 6.0, 7.865)

        Negative values of |PCAlt| are allowed, to reflect possible
        negative relationships between precipitation and altitude.  To
        prevent from calculating negative precipitation when too large
        negative values are applied, a truncation is performed:

        >>> pcalt(-1.0)
        >>> model.calc_pc_v1()
        >>> fluxes.pc
        pc(0.0, 0.0, 0.0, 0.0, 0.0)
    """
    control = self.parameters.control.fastaccess
    inputs = self.sequences.inputs.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    for zone in range(control.nmbzones):
        # Altitude adjustment of the measured precipitation.
        adjusted = inputs.p*(
            1.+control.pcalt[zone]*(control.zonez[zone]-control.zrelp))
        if adjusted <= 0.:
            # Truncate negative values possibly resulting from
            # negative altitude gradients.
            fluxes.pc[zone] = 0.
        else:
            fluxes.pc[zone] = adjusted*(
                control.pcorr[zone]*(fluxes.rfc[zone]+fluxes.sfc[zone]))
def calc_ep_v1(self):
    """Adjust potential norm evaporation to the actual temperature.

    Required control parameters: |NmbZones| |ETF|
    Required input sequences: |EPN| |TN|
    Required flux sequence: |TMean|
    Calculated flux sequences: |EP|

    Basic equation:
        :math:`EP = EPN \\cdot (1 + ETF \\cdot (TMean - TN))`

    Restriction:
        :math:`0 \\leq EP \\leq 2 \\cdot EPN`

    Examples:

        Assume four zones with different values of the temperature
        related factor for the adjustment of evaporation (the negative
        value of the first zone is not meaningful, but used for
        illustration purposes):

        >>> from hydpy.models.hland import *
        >>> parameterstep('1d')
        >>> nmbzones(4)
        >>> etf(-0.5, 0.0, 0.1, 0.5)
        >>> inputs.tn = 20.0
        >>> inputs.epn = 2.0

        With mean temperature equal to norm temperature, actual
        (uncorrected) evaporation is equal to norm evaporation:

        >>> fluxes.tmean = 20.0
        >>> model.calc_ep_v1()
        >>> fluxes.ep
        ep(2.0, 2.0, 2.0, 2.0)

        With mean temperature 5°C higher than norm temperature,
        potential evaporation is increased by 1 mm for the third zone.
        For the first zone, potential evaporation is 0 mm (the
        smallest value allowed), and for the fourth zone it is the
        double norm evaporation (the largest value allowed):

        >>> fluxes.tmean = 25.0
        >>> model.calc_ep_v1()
        >>> fluxes.ep
        ep(0.0, 2.0, 3.0, 4.0)
    """
    control = self.parameters.control.fastaccess
    inputs = self.sequences.inputs.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    for zone in range(control.nmbzones):
        adjusted = inputs.epn*(
            1.+control.etf[zone]*(fluxes.tmean-inputs.tn))
        # Clamp the result to the interval [0, 2*EPN].
        fluxes.ep[zone] = min(max(adjusted, 0.), 2.*inputs.epn)
def calc_epc_v1(self):
    """Apply the evaporation correction factors and adjust evaporation
    to the altitude of the individual zones.

    Basic equation:
      :math:`EPC = EP \\cdot ECorr \\cdot (1 - ECAlt \\cdot (ZoneZ - ZRelE))
      \\cdot exp(-EPF \\cdot PC)`

    Required control parameters: |NmbZones|, |ECorr|, |ECAlt|, |ZoneZ|,
    |ZRelE|, |EPF|
    Required flux sequences: |EP|, |PC|
    Calculated flux sequence: |EPC|

    Three corrections are combined: a general correction factor, an
    altitude-related reduction, and a precipitation-related reduction.
    Whenever the altitude correction would result in a negative value
    (possible for large |ECAlt| values), the result is truncated to zero
    and the (always positive) precipitation correction is skipped.
    """
    control = self.parameters.control.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    for zone in range(control.nmbzones):
        altitudefactor = 1.-control.ecalt[zone]*(control.zonez[zone]-control.zrele)
        corrected = fluxes.ep[zone]*control.ecorr[zone]*altitudefactor
        if corrected > 0.:
            fluxes.epc[zone] = corrected*modelutils.exp(
                -control.epf[zone]*fluxes.pc[zone])
        else:
            # truncate negative results of the altitude correction
            fluxes.epc[zone] = 0.
def calc_tf_ic_v1(self):
    """Calculate throughfall and update the interception storage
    accordingly.

    Basic equation:
      :math:`TF = \\Bigl \\lbrace
      { {PC \\ | \\ Ic = IcMax} \\atop {0 \\ | \\ Ic < IcMax} }`

    Required control parameters: |NmbZones|, |ZoneType|, |IcMax|
    Required flux sequence: |PC|
    Calculated flux sequence: |TF|
    Updated state sequence: |Ic|

    The interception routine applies to fields and forests only.  For all
    other zone types (glaciers and internal lakes), precipitation is
    passed through completely and the interception storage is kept empty.
    For fields and forests, precipitation fills the remaining interception
    capacity first; only the exceedance becomes throughfall.
    """
    control = self.parameters.control.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    states = self.sequences.states.fastaccess
    for zone in range(control.nmbzones):
        if control.zonetype[zone] not in (FIELD, FOREST):
            # no interception for glaciers and internal lakes
            fluxes.tf[zone] = fluxes.pc[zone]
            states.ic[zone] = 0.
        else:
            freecapacity = control.icmax[zone]-states.ic[zone]
            fluxes.tf[zone] = max(fluxes.pc[zone]-freecapacity, 0.)
            states.ic[zone] += fluxes.pc[zone]-fluxes.tf[zone]
def calc_ei_ic_v1(self):
    """Calculate interception evaporation and update the interception
    storage accordingly.

    Basic equation:
      :math:`EI = \\Bigl \\lbrace
      { {EPC \\ | \\ Ic > 0} \\atop {0 \\ | \\ Ic = 0} }`

    Required control parameters: |NmbZones|, |ZoneType|
    Required flux sequence: |EPC|
    Calculated flux sequence: |EI|
    Updated state sequence: |Ic|

    Interception evaporation equals potential evaporation as long as the
    interception storage of a field or forest zone holds enough water.
    For glaciers and internal lakes, neither interception evaporation nor
    an interception storage exists.
    """
    control = self.parameters.control.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    states = self.sequences.states.fastaccess
    for zone in range(control.nmbzones):
        if control.zonetype[zone] not in (FIELD, FOREST):
            # interception does not apply to glaciers and internal lakes
            fluxes.ei[zone] = 0.
            states.ic[zone] = 0.
        else:
            evaporation = min(fluxes.epc[zone], states.ic[zone])
            states.ic[zone] -= evaporation
            fluxes.ei[zone] = evaporation
def calc_sp_wc_v1(self):
    """Add throughfall to the snow layer.

    Basic equations:
      :math:`\\frac{dSP}{dt} = TF \\cdot \\frac{SfC}{SfC+RfC}` \n
      :math:`\\frac{dWC}{dt} = TF \\cdot \\frac{RfC}{SfC+RfC}`

    Required control parameters: |NmbZones|, |ZoneType|
    Required flux sequences: |TF|, |RfC|, |SfC|
    Updated state sequences: |WC|, |SP|

    The corrected snowfall and rainfall fractions are applied in a
    relative manner, as the total amount of water yield has been
    corrected within the interception module already.  When both
    fractions are zero, neither the liquid nor the frozen water content
    of the snow layer changes.  For internal lakes, the snow routine
    does not apply and both storages are kept empty.
    """
    control = self.parameters.control.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    states = self.sequences.states.fastaccess
    for zone in range(control.nmbzones):
        if control.zonetype[zone] == ILAKE:
            states.wc[zone] = 0.
            states.sp[zone] = 0.
            continue
        total = fluxes.rfc[zone]+fluxes.sfc[zone]
        if total > 0.:
            states.wc[zone] += fluxes.tf[zone]*fluxes.rfc[zone]/total
            states.sp[zone] += fluxes.tf[zone]*fluxes.sfc[zone]/total
def calc_refr_sp_wc_v1(self):
    """Calculate refreezing of the water content within the snow layer
    and update both the snow layer's ice and water content.

    Basic equations:
      :math:`\\frac{dSP}{dt} = + Refr` \n
      :math:`\\frac{dWC}{dt} = - Refr` \n
      :math:`Refr = min(cfr \\cdot cfmax \\cdot (TTM-TC), WC)`

    Required control parameters: |NmbZones|, |ZoneType|, |CFMax|, |CFR|
    Required derived parameter: |TTM|
    Required flux sequence: |TC|
    Calculated flux sequence: |Refr|
    Required state sequence: |WC|
    Updated state sequence: |SP|

    Refreezing only occurs when the actual temperature lies below the
    threshold temperature and is limited by the available liquid water
    content of the snow layer.  For internal lakes, no snow layer exists
    and all related values are kept at zero.
    """
    control = self.parameters.control.fastaccess
    derived = self.parameters.derived.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    states = self.sequences.states.fastaccess
    for zone in range(control.nmbzones):
        if control.zonetype[zone] == ILAKE:
            fluxes.refr[zone] = 0.
            states.wc[zone] = 0.
            states.sp[zone] = 0.
            continue
        if fluxes.tc[zone] >= derived.ttm[zone]:
            # at or above the threshold temperature, nothing refreezes
            fluxes.refr[zone] = 0.
            continue
        potential = (control.cfr[zone]*control.cfmax[zone] *
                     (derived.ttm[zone]-fluxes.tc[zone]))
        actual = min(potential, states.wc[zone])
        fluxes.refr[zone] = actual
        states.sp[zone] += actual
        states.wc[zone] -= actual
def calc_in_wc_v1(self):
    """Calculate the actual water release from the snow layer due to the
    exceedance of the snow layer's capacity for (liquid) water.

    Basic equations:
      :math:`\\frac{dWC}{dt} = -In` \n
      :math:`-In = max(WC - WHC \\cdot SP, 0)`

    Required control parameters: |NmbZones|, |ZoneType|, |WHC|
    Required state sequence: |SP|
    Required flux sequence: |TF|
    Calculated flux sequence: |In_|
    Updated state sequence: |WC|

    The snow layer can retain liquid water up to the fraction |WHC| of
    its frozen water content; only the exceedance is released.  For
    internal lakes, the snow routine does not apply and stand
    precipitation is passed to |In_| directly.
    """
    control = self.parameters.control.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    states = self.sequences.states.fastaccess
    for zone in range(control.nmbzones):
        if control.zonetype[zone] == ILAKE:
            # lakes: route stand precipitation directly
            fluxes.in_[zone] = fluxes.tf[zone]
            states.wc[zone] = 0.
        else:
            retainable = control.whc[zone]*states.sp[zone]
            fluxes.in_[zone] = max(states.wc[zone]-retainable, 0.)
            states.wc[zone] -= fluxes.in_[zone]
def calc_glmelt_in_v1(self):
    """Calculate melting from glaciers which are actually not covered by
    a snow layer and add it to the water release of the snow module.

    Basic equation:
      :math:`GlMelt = \\Bigl \\lbrace
      { {max(GMelt \\cdot (TC-TTM), 0) \\ | \\ SP = 0}
      \\atop {0 \\ | \\ SP > 0} }`

    Required control parameters: |NmbZones|, |ZoneType|, |GMelt|
    Required derived parameter: |TTM|
    Required state sequence: |SP|
    Required flux sequence: |TC|
    Calculated flux sequence: |GlMelt|
    Updated flux sequence: |In_|

    Glacier melt occurs only for snow-free glacier zones with an actual
    temperature above the threshold temperature; it follows the simple
    degree-day approach and is added to |In_|.
    """
    control = self.parameters.control.fastaccess
    derived = self.parameters.derived.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    states = self.sequences.states.fastaccess
    for zone in range(control.nmbzones):
        snowfree = states.sp[zone] <= 0.
        warm = fluxes.tc[zone] > derived.ttm[zone]
        if (control.zonetype[zone] == GLACIER) and snowfree and warm:
            melt = control.gmelt[zone]*(fluxes.tc[zone]-derived.ttm[zone])
            fluxes.glmelt[zone] = melt
            fluxes.in_[zone] += melt
        else:
            fluxes.glmelt[zone] = 0.
def calc_r_sm_v1(self):
    """Calculate effective precipitation and update soil moisture.

    Basic equations:
      :math:`\\frac{dSM}{dt} = IN - R` \n
      :math:`R = IN \\cdot \\left(\\frac{SM}{FC}\\right)^{Beta}`

    Required control parameters: |NmbZones|, |ZoneType|, |FC|, |Beta|
    Required flux sequence: |In_|
    Calculated flux sequence: |R|
    Updated state sequence: |SM|

    The soil routine applies to fields and forests only; glaciers and
    internal lakes route all input as effective precipitation.  The
    discharge coefficient grows with relative soil moisture; a lower
    bound on |R| prevents the soil moisture from exceeding the field
    capacity.  A zero field capacity implies a discharge coefficient
    of 100 %.
    """
    control = self.parameters.control.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    states = self.sequences.states.fastaccess
    for zone in range(control.nmbzones):
        if control.zonetype[zone] not in (FIELD, FOREST):
            # no soil routine for glaciers and internal lakes
            fluxes.r[zone] = fluxes.in_[zone]
            states.sm[zone] = 0.
            continue
        if control.fc[zone] > 0.:
            relative = states.sm[zone]/control.fc[zone]
            release = fluxes.in_[zone]*relative**control.beta[zone]
            # never let soil moisture exceed field capacity
            excess = states.sm[zone]+fluxes.in_[zone]-control.fc[zone]
            fluxes.r[zone] = max(release, excess)
        else:
            fluxes.r[zone] = fluxes.in_[zone]
        states.sm[zone] += fluxes.in_[zone]-fluxes.r[zone]
def calc_cf_sm_v1(self):
    """Calculate capillary flow and update soil moisture.

    Required control parameters: |NmbZones|, |ZoneType|, |FC|, |CFlux|
    Required flux sequence: |R|
    Required state sequence: |UZ|
    Calculated flux sequence: |CF|
    Updated state sequence: |SM|

    Basic equations:
      :math:`\\frac{dSM}{dt} = CF`

      :math:`CF = CFLUX \\cdot (1 - \\frac{SM}{FC})`

    Capillary return flow is limited by the water available in the upper
    zone layer plus effective precipitation and by the remaining soil
    moisture deficit, so soils never become overfilled.
    """
    control = self.parameters.control.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    states = self.sequences.states.fastaccess
    for idx in range(control.nmbzones):
        if control.zonetype[idx] not in (FIELD, FOREST):
            # Glaciers and internal lakes possess no soil storage.
            fluxes.cf[idx] = 0.
            states.sm[idx] = 0.
            continue
        if control.fc[idx] > 0.:
            # Demand driven by the relative moisture deficit, capped by
            # the available water (UZ plus effective precipitation) and
            # by the free soil storage volume.
            fluxes.cf[idx] = min(
                control.cflux[idx]*(1.-states.sm[idx]/control.fc[idx]),
                states.uz+fluxes.r[idx],
                control.fc[idx]-states.sm[idx])
        else:
            fluxes.cf[idx] = 0.
        states.sm[idx] += fluxes.cf[idx]
def calc_ea_sm_v1(self):
    """Calculate soil evaporation and update soil moisture.

    Required control parameters: |NmbZones|, |ZoneType|, |FC|, |LP|, |ERed|
    Required flux sequences: |EPC|, |EI|
    Required state sequence: |SP|
    Calculated flux sequence: |EA|
    Updated state sequence: |SM|

    Basic equations:
      :math:`\\frac{dSM}{dt} = - EA`

      :math:`EA_{temp} = \\biggl \\lbrace
      {
      {EPC \\cdot min\\left(\\frac{SM}{LP \\cdot FC}, 1\\right)
      \\ | \\ SP = 0}
      \\atop
      {0 \\ | \\ SP > 0}
      }`

      :math:`EA = EA_{temp} - max(ERED \\cdot (EA_{temp} + EI - EPC), 0)`

    Any snow cover suppresses soil evaporation completely; the |ERed|
    parameter optionally reduces soil evaporation when interception
    evaporation plus soil evaporation exceed potential evaporation.
    """
    control = self.parameters.control.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    states = self.sequences.states.fastaccess
    for idx in range(control.nmbzones):
        if control.zonetype[idx] not in (FIELD, FOREST):
            # No soil storage, hence no soil evaporation.
            fluxes.ea[idx] = 0.
            states.sm[idx] = 0.
            continue
        if states.sp[idx] <= 0.:
            # Moisture threshold below which evaporation is reduced.
            d_thresh = control.lp[idx]*control.fc[idx]
            if d_thresh > 0.:
                fluxes.ea[idx] = min(
                    fluxes.epc[idx]*states.sm[idx]/d_thresh,
                    fluxes.epc[idx])
            else:
                fluxes.ea[idx] = fluxes.epc[idx]
            # Optionally reduce the excess of total evaporation over
            # potential evaporation; never evaporate more than available.
            fluxes.ea[idx] -= max(
                control.ered[idx] *
                (fluxes.ea[idx]+fluxes.ei[idx]-fluxes.epc[idx]), 0.)
            fluxes.ea[idx] = min(fluxes.ea[idx], states.sm[idx])
        else:
            # A snow layer suppresses soil evaporation completely.
            fluxes.ea[idx] = 0.
        states.sm[idx] -= fluxes.ea[idx]
def calc_inuz_v1(self):
    """Accumulate the total inflow into the upper zone layer.

    Required control parameters: |NmbZones|, |ZoneType|
    Required derived parameter: |RelLandZoneArea|
    Required flux sequences: |R|, |CF|
    Calculated flux sequence: |InUZ|

    Basic equation:
      :math:`InUZ = R - CF`

    Internal lakes do not contribute to the upper zone layer; all other
    zone types contribute in proportion to their relative `land` area.
    """
    control = self.parameters.control.fastaccess
    derived = self.parameters.derived.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    fluxes.inuz = 0.
    for idx in range(control.nmbzones):
        if control.zonetype[idx] == ILAKE:
            continue
        fluxes.inuz += (
            derived.rellandzonearea[idx]*(fluxes.r[idx]-fluxes.cf[idx]))
def calc_contriarea_v1(self):
    """Determine the relative size of the contributing area of the
    whole subbasin.

    Required control parameters: |NmbZones|, |ZoneType|, |RespArea|,
    |FC|, |Beta|
    Required derived parameter: |RelSoilArea|
    Required state sequence: |SM|
    Calculated flux sequence: |ContriArea|

    Basic equation:
      :math:`ContriArea = \\left( \\frac{SM}{FC} \\right)^{Beta}`

    Only field and forest zones enter the estimate.  With the response
    area option disabled, with a zero total soil area, or for zones with
    zero field capacity, the (partial) contributing area defaults to
    100 %.
    """
    control = self.parameters.control.fastaccess
    derived = self.parameters.derived.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    states = self.sequences.states.fastaccess
    fluxes.contriarea = 1.
    if control.resparea and (derived.relsoilarea > 0.):
        d_sum = 0.
        for idx in range(control.nmbzones):
            if control.zonetype[idx] in (FIELD, FOREST):
                if control.fc[idx] > 0.:
                    d_sum += (
                        derived.relsoilzonearea[idx] *
                        (states.sm[idx]/control.fc[idx])**control.beta[idx])
                else:
                    # Zero field capacity: the zone always contributes
                    # completely.
                    d_sum += derived.relsoilzonearea[idx]
        fluxes.contriarea = d_sum
def calc_q0_perc_uz_v1(self):
    """Perform the upper zone layer routine, which determines percolation
    to the lower zone layer and the fast response of the hland model.

    Note that the system behaviour of this method depends strongly on the
    specifications of the options |RespArea| and |RecStep|.

    Required control parameters: |RecStep|, |PercMax|, |K|, |Alpha|
    Required derived parameter: |DT|
    Required flux sequences: |InUZ|, |ContriArea|
    Calculated flux sequences: |Perc|, |Q0|
    Updated state sequence: |UZ|

    Basic equations:
      :math:`\\frac{dUZ}{dt} = InUZ - Perc - Q0`

      :math:`Perc = PercMax \\cdot ContriArea`

      :math:`Q0 = K * \\cdot \\left( \\frac{UZ}{ContriArea} \\right)^{1+Alpha}`

    The underlying ordinary differential equation is solved sequentially
    (input, then percolation, then fast runoff) with first order accuracy;
    |RecStep| substeps per simulation step increase numerical accuracy.
    """
    con = self.parameters.control.fastaccess
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    sta = self.sequences.states.fastaccess
    flu.perc = 0.
    flu.q0 = 0.
    for dummy in range(con.recstep):
        # First state update related to the upper zone input.
        sta.uz += der.dt*flu.inuz
        # Second state update related to percolation (capped by the
        # available storage).
        d_perc = min(der.dt*con.percmax*flu.contriarea, sta.uz)
        sta.uz -= d_perc
        flu.perc += d_perc
        # Third state update related to the fast runoff response.
        # (The former `else: d_q0 = 0.` branch was dead code: d_q0 is
        # never read after the conditional.)
        if sta.uz > 0.:
            if flu.contriarea > 0.:
                d_q0 = (der.dt*con.k *
                        (sta.uz/flu.contriarea)**(1.+con.alpha))
                d_q0 = min(d_q0, sta.uz)
            else:
                # Zero contributing area: drain the storage completely.
                d_q0 = sta.uz
            sta.uz -= d_q0
            flu.q0 += d_q0
def calc_lz_v1(self):
    """Update the lower zone layer in accordance with percolation from
    upper groundwater to lower groundwater and/or in accordance with
    lake precipitation.

    Required control parameters: |NmbZones|, |ZoneType|
    Required derived parameters: |RelLandArea|, |RelZoneArea|
    Required flux sequences: |PC|, |Perc|
    Updated state sequence: |LZ|

    Basic equation:
      :math:`\\frac{dLZ}{dt} = Perc + Pc`

    Percolation enters weighted by the relative land area; precipitation
    on internal lakes falls onto the lower zone layer directly, weighted
    by the relative area of each lake zone.
    """
    control = self.parameters.control.fastaccess
    derived = self.parameters.derived.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    states = self.sequences.states.fastaccess
    states.lz += derived.rellandarea*fluxes.perc
    for idx in range(control.nmbzones):
        if control.zonetype[idx] != ILAKE:
            continue
        states.lz += derived.relzonearea[idx]*fluxes.pc[idx]
def calc_el_lz_v1(self):
    """Calculate lake evaporation.

    Required control parameters: |NmbZones|, |ZoneType|, |TTIce|
    Required derived parameter: |RelZoneArea|
    Required flux sequences: |TC|, |EPC|
    Calculated flux sequence: |EL|
    Updated state sequence: |LZ|

    Basic equations:
      :math:`\\frac{dLZ}{dt} = -EL`

      :math:`EL = \\Bigl \\lbrace
      {
      {EPC \\ | \\ TC > TTIce}
      \\atop
      {0 \\ | \\ TC \\leq TTIce}
      }`

    Only internal lakes evaporate, and only when no ice layer is assumed,
    i.e. when the zone temperature exceeds the threshold temperature.
    Note that internal lakes always contain water, so the lower zone
    storage may become negative.
    """
    control = self.parameters.control.fastaccess
    derived = self.parameters.derived.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    states = self.sequences.states.fastaccess
    for idx in range(control.nmbzones):
        fluxes.el[idx] = 0.
        if control.zonetype[idx] == ILAKE:
            if fluxes.tc[idx] > control.ttice[idx]:
                fluxes.el[idx] = fluxes.epc[idx]
                states.lz -= derived.relzonearea[idx]*fluxes.el[idx]
def calc_q1_lz_v1(self):
    """Calculate the slow response of the lower zone layer.

    Required control parameters:
      |K4|
      |Gamma|

    Calculated fluxes sequence:
      |Q1|

    Updated state sequence:
      |LZ|

    Basic equations:
      :math:`\\frac{dLZ}{dt} = -Q1` \n
      :math:`Q1 = \\Bigl \\lbrace {
      {K4 \\cdot LZ^{1+Gamma} \\ | \\ LZ > 0} \\atop {0 \\ | \\ LZ\\leq 0} }`

    Examples:

        As long as the lower zone storage is negative...

        >>> from hydpy.models.hland import *
        >>> parameterstep('1d')
        >>> simulationstep('12h')
        >>> k4(0.2)
        >>> gamma(0.0)
        >>> states.lz = -2.0
        >>> model.calc_q1_lz_v1()
        >>> fluxes.q1
        q1(0.0)
        >>> states.lz
        lz(-2.0)

        ...or zero, no slow discharge response occurs:

        >>> states.lz = 0.0
        >>> model.calc_q1_lz_v1()
        >>> fluxes.q1
        q1(0.0)
        >>> states.lz
        lz(0.0)

        For storage values above zero the linear...

        >>> states.lz = 2.0
        >>> model.calc_q1_lz_v1()
        >>> fluxes.q1
        q1(0.2)
        >>> states.lz
        lz(1.8)

        ...or nonlinear storage routing equation applies:

        >>> gamma(1.)
        >>> states.lz = 2.0
        >>> model.calc_q1_lz_v1()
        >>> fluxes.q1
        q1(0.4)
        >>> states.lz
        lz(1.6)

        Note that the assumed length of the simulation step is only
        a half day.  Hence the effective value of the storage
        coefficient is not 0.2 but 0.1:

        >>> k4
        k4(0.2)
        >>> k4.value
        0.1
    """
    con = self.parameters.control.fastaccess
    flu = self.sequences.fluxes.fastaccess
    sta = self.sequences.states.fastaccess
    # An empty (or negative) storage releases no water at all.
    if sta.lz <= 0.:
        flu.q1 = 0.
    else:
        flu.q1 = con.k4*sta.lz**(1.+con.gamma)
    sta.lz -= flu.q1
Calculate the unit hydrograph input. Required derived parameters: |RelLandArea| Required flux sequences: |Q0| |Q1| Calculated flux sequence: |InUH| Basic equation: :math:`InUH = Q0 + Q1` Example: The unit hydrographs receives base flow from the whole subbasin and direct flow from zones of type field, forest and glacier only. In the following example, these occupy only one half of the subbasin, which is why the partial input of q0 is halved: >>> from hydpy.models.hland import * >>> parameterstep('1d') >>> derived.rellandarea = 0.5 >>> fluxes.q0 = 4.0 >>> fluxes.q1 = 1.0 >>> model.calc_inuh_v1() >>> fluxes.inuh inuh(3.0) def calc_inuh_v1(self): """Calculate the unit hydrograph input. Required derived parameters: |RelLandArea| Required flux sequences: |Q0| |Q1| Calculated flux sequence: |InUH| Basic equation: :math:`InUH = Q0 + Q1` Example: The unit hydrographs receives base flow from the whole subbasin and direct flow from zones of type field, forest and glacier only. In the following example, these occupy only one half of the subbasin, which is why the partial input of q0 is halved: >>> from hydpy.models.hland import * >>> parameterstep('1d') >>> derived.rellandarea = 0.5 >>> fluxes.q0 = 4.0 >>> fluxes.q1 = 1.0 >>> model.calc_inuh_v1() >>> fluxes.inuh inuh(3.0) """ der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess flu.inuh = der.rellandarea*flu.q0+flu.q1
def calc_outuh_quh_v1(self):
    """Calculate the unit hydrograph output (convolution).

    Required derived parameters:
      |UH|

    Required flux sequences:
      |Q0|
      |Q1|
      |InUH|

    Updated log sequence:
      |QUH|

    Calculated flux sequence:
      |OutUH|

    Examples:

        Prepare a unit hydrograph with only three ordinates ---
        representing a fast catchment response compared to the selected
        step size:

        >>> from hydpy.models.hland import *
        >>> parameterstep('1d')
        >>> derived.uh.shape = 3
        >>> derived.uh = 0.3, 0.5, 0.2
        >>> logs.quh.shape = 3
        >>> logs.quh = 1.0, 3.0, 0.0

        Without new input, the actual output is simply the first value
        stored in the logging sequence and the values of the logging
        sequence are shifted to the left:

        >>> fluxes.inuh = 0.0
        >>> model.calc_outuh_quh_v1()
        >>> fluxes.outuh
        outuh(1.0)
        >>> logs.quh
        quh(3.0, 0.0, 0.0)

        With an new input of 4mm, the actual output consists of the
        first value stored in the logging sequence and the input value
        multiplied with the first unit hydrograph ordinate.  The
        updated logging sequence values result from the multiplication
        of the input values and the remaining ordinates:

        >>> fluxes.inuh = 4.0
        >>> model.calc_outuh_quh_v1()
        >>> fluxes.outuh
        outuh(4.2)
        >>> logs.quh
        quh(2.0, 0.8, 0.0)

        The next example demonstates the updating of non empty logging
        sequence:

        >>> fluxes.inuh = 4.0
        >>> model.calc_outuh_quh_v1()
        >>> fluxes.outuh
        outuh(3.2)
        >>> logs.quh
        quh(2.8, 0.8, 0.0)

        A unit hydrograph with only one ordinate results in the direct
        routing of the input:

        >>> derived.uh.shape = 1
        >>> derived.uh = 1.0
        >>> fluxes.inuh = 0.0
        >>> logs.quh.shape = 1
        >>> logs.quh = 0.0
        >>> model.calc_outuh_quh_v1()
        >>> fluxes.outuh
        outuh(0.0)
        >>> logs.quh
        quh(0.0)
        >>> fluxes.inuh = 4.0
        >>> model.calc_outuh_quh()
        >>> fluxes.outuh
        outuh(4.0)
        >>> logs.quh
        quh(0.0)
    """
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    log = self.sequences.logs.fastaccess
    # Current output: oldest logged value plus the new input weighted
    # by the first unit hydrograph ordinate.
    flu.outuh = log.quh[0]+der.uh[0]*flu.inuh
    # Shift the log to the left, adding the new input weighted by the
    # remaining ordinates.
    for idx in range(1, len(der.uh)):
        log.quh[idx-1] = log.quh[idx]+der.uh[idx]*flu.inuh
Calculate the total discharge after possible abstractions. Required control parameter: |Abstr| Required flux sequence: |OutUH| Calculated flux sequence: |QT| Basic equation: :math:`QT = max(OutUH - Abstr, 0)` Examples: Trying to abstract less then available, as much as available and less then available results in: >>> from hydpy.models.hland import * >>> parameterstep('1d') >>> simulationstep('12h') >>> abstr(2.0) >>> fluxes.outuh = 2.0 >>> model.calc_qt_v1() >>> fluxes.qt qt(1.0) >>> fluxes.outuh = 1.0 >>> model.calc_qt_v1() >>> fluxes.qt qt(0.0) >>> fluxes.outuh = 0.5 >>> model.calc_qt_v1() >>> fluxes.qt qt(0.0) Note that "negative abstractions" are allowed: >>> abstr(-2.0) >>> fluxes.outuh = 1.0 >>> model.calc_qt_v1() >>> fluxes.qt qt(2.0) def calc_qt_v1(self): """Calculate the total discharge after possible abstractions. Required control parameter: |Abstr| Required flux sequence: |OutUH| Calculated flux sequence: |QT| Basic equation: :math:`QT = max(OutUH - Abstr, 0)` Examples: Trying to abstract less then available, as much as available and less then available results in: >>> from hydpy.models.hland import * >>> parameterstep('1d') >>> simulationstep('12h') >>> abstr(2.0) >>> fluxes.outuh = 2.0 >>> model.calc_qt_v1() >>> fluxes.qt qt(1.0) >>> fluxes.outuh = 1.0 >>> model.calc_qt_v1() >>> fluxes.qt qt(0.0) >>> fluxes.outuh = 0.5 >>> model.calc_qt_v1() >>> fluxes.qt qt(0.0) Note that "negative abstractions" are allowed: >>> abstr(-2.0) >>> fluxes.outuh = 1.0 >>> model.calc_qt_v1() >>> fluxes.qt qt(2.0) """ con = self.parameters.control.fastaccess flu = self.sequences.fluxes.fastaccess flu.qt = max(flu.outuh-con.abstr, 0.)
Save all defined auxiliary control files. The target path is taken from the |ControlManager| object stored in module |pub|. Hence we initialize one and override its |property| `currentpath` with a simple |str| object defining the test target path: >>> from hydpy import pub >>> pub.projectname = 'test' >>> from hydpy.core.filetools import ControlManager >>> class Test(ControlManager): ... currentpath = 'test_directory' >>> pub.controlmanager = Test() Normally, the control files would be written to disk, of course. But to show (and test) the results in the following doctest, file writing is temporarily redirected via |Open|: >>> from hydpy import dummies >>> from hydpy import Open >>> with Open(): ... dummies.aux.save( ... parameterstep='1d', ... simulationstep='12h') ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ test_directory/file1.py ----------------------------------- # -*- coding: utf-8 -*- <BLANKLINE> from hydpy.models.lland_v1 import * <BLANKLINE> simulationstep('12h') parameterstep('1d') <BLANKLINE> eqd1(200.0) <BLANKLINE> ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ test_directory/file2.py ----------------------------------- # -*- coding: utf-8 -*- <BLANKLINE> from hydpy.models.lland_v2 import * <BLANKLINE> simulationstep('12h') parameterstep('1d') <BLANKLINE> eqd1(200.0) eqd2(100.0) <BLANKLINE> ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def save(self, parameterstep=None, simulationstep=None): """Save all defined auxiliary control files. The target path is taken from the |ControlManager| object stored in module |pub|. Hence we initialize one and override its |property| `currentpath` with a simple |str| object defining the test target path: >>> from hydpy import pub >>> pub.projectname = 'test' >>> from hydpy.core.filetools import ControlManager >>> class Test(ControlManager): ... currentpath = 'test_directory' >>> pub.controlmanager = Test() Normally, the control files would be written to disk, of course. 
But to show (and test) the results in the following doctest, file writing is temporarily redirected via |Open|: >>> from hydpy import dummies >>> from hydpy import Open >>> with Open(): ... dummies.aux.save( ... parameterstep='1d', ... simulationstep='12h') ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ test_directory/file1.py ----------------------------------- # -*- coding: utf-8 -*- <BLANKLINE> from hydpy.models.lland_v1 import * <BLANKLINE> simulationstep('12h') parameterstep('1d') <BLANKLINE> eqd1(200.0) <BLANKLINE> ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ test_directory/file2.py ----------------------------------- # -*- coding: utf-8 -*- <BLANKLINE> from hydpy.models.lland_v2 import * <BLANKLINE> simulationstep('12h') parameterstep('1d') <BLANKLINE> eqd1(200.0) eqd2(100.0) <BLANKLINE> ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ """ par = parametertools.Parameter for (modelname, var2aux) in self: for filename in var2aux.filenames: with par.parameterstep(parameterstep), \ par.simulationstep(simulationstep): lines = [parametertools.get_controlfileheader( modelname, parameterstep, simulationstep)] for par in getattr(var2aux, filename): lines.append(repr(par) + '\n') hydpy.pub.controlmanager.save_file(filename, ''.join(lines))
Remove the defined variables. The variables to be removed can be selected in two ways. But the first example shows that passing nothing or an empty iterable to method |Variable2Auxfile.remove| does not remove any variable: >>> from hydpy import dummies >>> v2af = dummies.v2af >>> v2af.remove() >>> v2af.remove([]) >>> from hydpy import print_values >>> print_values(v2af.filenames) file1, file2 >>> print_values(v2af.variables, width=30) eqb(5000.0), eqb(10000.0), eqd1(100.0), eqd2(50.0), eqi1(2000.0), eqi2(1000.0) The first option is to pass auxiliary file names: >>> v2af.remove('file1') >>> print_values(v2af.filenames) file2 >>> print_values(v2af.variables) eqb(10000.0), eqd1(100.0), eqd2(50.0) The second option is, to pass variables of the correct type and value: >>> v2af = dummies.v2af >>> v2af.remove(v2af.eqb[0]) >>> print_values(v2af.filenames) file1, file2 >>> print_values(v2af.variables) eqb(10000.0), eqd1(100.0), eqd2(50.0), eqi1(2000.0), eqi2(1000.0) One can pass multiple variables or iterables containing variables at once: >>> v2af = dummies.v2af >>> v2af.remove(v2af.eqb, v2af.eqd1, v2af.eqd2) >>> print_values(v2af.filenames) file1 >>> print_values(v2af.variables) eqi1(2000.0), eqi2(1000.0) Passing an argument that equals neither a registered file name or a registered variable results in the following exception: >>> v2af.remove('test') Traceback (most recent call last): ... ValueError: While trying to remove the given object `test` of type \ `str` from the actual Variable2AuxFile object, the following error occurred: \ `'test'` is neither a registered filename nor a registered variable. def remove(self, *values): """Remove the defined variables. The variables to be removed can be selected in two ways. 
But the first example shows that passing nothing or an empty iterable to method |Variable2Auxfile.remove| does not remove any variable: >>> from hydpy import dummies >>> v2af = dummies.v2af >>> v2af.remove() >>> v2af.remove([]) >>> from hydpy import print_values >>> print_values(v2af.filenames) file1, file2 >>> print_values(v2af.variables, width=30) eqb(5000.0), eqb(10000.0), eqd1(100.0), eqd2(50.0), eqi1(2000.0), eqi2(1000.0) The first option is to pass auxiliary file names: >>> v2af.remove('file1') >>> print_values(v2af.filenames) file2 >>> print_values(v2af.variables) eqb(10000.0), eqd1(100.0), eqd2(50.0) The second option is, to pass variables of the correct type and value: >>> v2af = dummies.v2af >>> v2af.remove(v2af.eqb[0]) >>> print_values(v2af.filenames) file1, file2 >>> print_values(v2af.variables) eqb(10000.0), eqd1(100.0), eqd2(50.0), eqi1(2000.0), eqi2(1000.0) One can pass multiple variables or iterables containing variables at once: >>> v2af = dummies.v2af >>> v2af.remove(v2af.eqb, v2af.eqd1, v2af.eqd2) >>> print_values(v2af.filenames) file1 >>> print_values(v2af.variables) eqi1(2000.0), eqi2(1000.0) Passing an argument that equals neither a registered file name or a registered variable results in the following exception: >>> v2af.remove('test') Traceback (most recent call last): ... ValueError: While trying to remove the given object `test` of type \ `str` from the actual Variable2AuxFile object, the following error occurred: \ `'test'` is neither a registered filename nor a registered variable. 
""" for value in objecttools.extract(values, (str, variabletools.Variable)): try: deleted_something = False for fn2var in list(self._type2filename2variable.values()): for fn_, var in list(fn2var.items()): if value in (fn_, var): del fn2var[fn_] deleted_something = True if not deleted_something: raise ValueError( f'`{repr(value)}` is neither a registered ' f'filename nor a registered variable.') except BaseException: objecttools.augment_excmessage( f'While trying to remove the given object `{value}` ' f'of type `{objecttools.classname(value)}` from the ' f'actual Variable2AuxFile object')
A list of all handled auxiliary file names. >>> from hydpy import dummies >>> dummies.v2af.filenames ['file1', 'file2'] def filenames(self): """A list of all handled auxiliary file names. >>> from hydpy import dummies >>> dummies.v2af.filenames ['file1', 'file2'] """ fns = set() for fn2var in self._type2filename2variable.values(): fns.update(fn2var.keys()) return sorted(fns)
Return the auxiliary file name the given variable is allocated to or |None| if the given variable is not allocated to any auxiliary file name. >>> from hydpy import dummies >>> eqb = dummies.v2af.eqb[0] >>> dummies.v2af.get_filename(eqb) 'file1' >>> eqb += 500.0 >>> dummies.v2af.get_filename(eqb) def get_filename(self, variable): """Return the auxiliary file name the given variable is allocated to or |None| if the given variable is not allocated to any auxiliary file name. >>> from hydpy import dummies >>> eqb = dummies.v2af.eqb[0] >>> dummies.v2af.get_filename(eqb) 'file1' >>> eqb += 500.0 >>> dummies.v2af.get_filename(eqb) """ fn2var = self._type2filename2variable.get(type(variable), {}) for (fn_, var) in fn2var.items(): if var == variable: return fn_ return None
Calculate the smoothing parameter values. The following example is explained in some detail in module |smoothtools|: >>> from hydpy import pub >>> pub.timegrids = '2000.01.01', '2000.01.03', '1d' >>> from hydpy.models.dam import * >>> parameterstep() >>> remotedischargesafety(0.0) >>> remotedischargesafety.values[1] = 2.5 >>> derived.remotedischargesmoothpar.update() >>> from hydpy.cythons.smoothutils import smooth_logistic1 >>> from hydpy import round_ >>> round_(smooth_logistic1(0.1, derived.remotedischargesmoothpar[0])) 1.0 >>> round_(smooth_logistic1(2.5, derived.remotedischargesmoothpar[1])) 0.99 def update(self): """Calculate the smoothing parameter values. The following example is explained in some detail in module |smoothtools|: >>> from hydpy import pub >>> pub.timegrids = '2000.01.01', '2000.01.03', '1d' >>> from hydpy.models.dam import * >>> parameterstep() >>> remotedischargesafety(0.0) >>> remotedischargesafety.values[1] = 2.5 >>> derived.remotedischargesmoothpar.update() >>> from hydpy.cythons.smoothutils import smooth_logistic1 >>> from hydpy import round_ >>> round_(smooth_logistic1(0.1, derived.remotedischargesmoothpar[0])) 1.0 >>> round_(smooth_logistic1(2.5, derived.remotedischargesmoothpar[1])) 0.99 """ metapar = self.subpars.pars.control.remotedischargesafety self.shape = metapar.shape self(tuple(smoothtools.calc_smoothpar_logistic1(mp) for mp in metapar.values))
Calculate the smoothing parameter value. The following example is explained in some detail in module |smoothtools|: >>> from hydpy.models.dam import * >>> parameterstep() >>> waterlevelminimumremotetolerance(0.0) >>> derived.waterlevelminimumremotesmoothpar.update() >>> from hydpy.cythons.smoothutils import smooth_logistic1 >>> from hydpy import round_ >>> round_(smooth_logistic1(0.1, ... derived.waterlevelminimumremotesmoothpar)) 1.0 >>> waterlevelminimumremotetolerance(2.5) >>> derived.waterlevelminimumremotesmoothpar.update() >>> round_(smooth_logistic1(2.5, ... derived.waterlevelminimumremotesmoothpar)) 0.99 def update(self): """Calculate the smoothing parameter value. The following example is explained in some detail in module |smoothtools|: >>> from hydpy.models.dam import * >>> parameterstep() >>> waterlevelminimumremotetolerance(0.0) >>> derived.waterlevelminimumremotesmoothpar.update() >>> from hydpy.cythons.smoothutils import smooth_logistic1 >>> from hydpy import round_ >>> round_(smooth_logistic1(0.1, ... derived.waterlevelminimumremotesmoothpar)) 1.0 >>> waterlevelminimumremotetolerance(2.5) >>> derived.waterlevelminimumremotesmoothpar.update() >>> round_(smooth_logistic1(2.5, ... derived.waterlevelminimumremotesmoothpar)) 0.99 """ metapar = self.subpars.pars.control.waterlevelminimumremotetolerance self(smoothtools.calc_smoothpar_logistic1(metapar))
Calculate the smoothing parameter value. The following example is explained in some detail in module |smoothtools|: >>> from hydpy.models.dam import * >>> parameterstep() >>> highestremotedischarge(1.0) >>> highestremotetolerance(0.0) >>> derived.highestremotesmoothpar.update() >>> from hydpy.cythons.smoothutils import smooth_min1 >>> from hydpy import round_ >>> round_(smooth_min1(-4.0, 1.5, derived.highestremotesmoothpar)) -4.0 >>> highestremotetolerance(2.5) >>> derived.highestremotesmoothpar.update() >>> round_(smooth_min1(-4.0, -1.5, derived.highestremotesmoothpar)) -4.01 Note that the example above corresponds to the example on function |calc_smoothpar_min1|, due to the value of parameter |HighestRemoteDischarge| being 1 m³/s. Doubling the value of |HighestRemoteDischarge| also doubles the value of |HighestRemoteSmoothPar| proportional. This leads to the following result: >>> highestremotedischarge(2.0) >>> derived.highestremotesmoothpar.update() >>> round_(smooth_min1(-4.0, 1.0, derived.highestremotesmoothpar)) -4.02 This relationship between |HighestRemoteDischarge| and |HighestRemoteSmoothPar| prevents from any smoothing when the value of |HighestRemoteDischarge| is zero: >>> highestremotedischarge(0.0) >>> derived.highestremotesmoothpar.update() >>> round_(smooth_min1(1.0, 1.0, derived.highestremotesmoothpar)) 1.0 In addition, |HighestRemoteSmoothPar| is set to zero if |HighestRemoteDischarge| is infinity (because no actual value will ever come in the vicinit of infinity), which is why no value would be changed through smoothing anyway): >>> highestremotedischarge(inf) >>> derived.highestremotesmoothpar.update() >>> round_(smooth_min1(1.0, 1.0, derived.highestremotesmoothpar)) 1.0 def update(self): """Calculate the smoothing parameter value. 
The following example is explained in some detail in module |smoothtools|: >>> from hydpy.models.dam import * >>> parameterstep() >>> highestremotedischarge(1.0) >>> highestremotetolerance(0.0) >>> derived.highestremotesmoothpar.update() >>> from hydpy.cythons.smoothutils import smooth_min1 >>> from hydpy import round_ >>> round_(smooth_min1(-4.0, 1.5, derived.highestremotesmoothpar)) -4.0 >>> highestremotetolerance(2.5) >>> derived.highestremotesmoothpar.update() >>> round_(smooth_min1(-4.0, -1.5, derived.highestremotesmoothpar)) -4.01 Note that the example above corresponds to the example on function |calc_smoothpar_min1|, due to the value of parameter |HighestRemoteDischarge| being 1 m³/s. Doubling the value of |HighestRemoteDischarge| also doubles the value of |HighestRemoteSmoothPar| proportional. This leads to the following result: >>> highestremotedischarge(2.0) >>> derived.highestremotesmoothpar.update() >>> round_(smooth_min1(-4.0, 1.0, derived.highestremotesmoothpar)) -4.02 This relationship between |HighestRemoteDischarge| and |HighestRemoteSmoothPar| prevents from any smoothing when the value of |HighestRemoteDischarge| is zero: >>> highestremotedischarge(0.0) >>> derived.highestremotesmoothpar.update() >>> round_(smooth_min1(1.0, 1.0, derived.highestremotesmoothpar)) 1.0 In addition, |HighestRemoteSmoothPar| is set to zero if |HighestRemoteDischarge| is infinity (because no actual value will ever come in the vicinit of infinity), which is why no value would be changed through smoothing anyway): >>> highestremotedischarge(inf) >>> derived.highestremotesmoothpar.update() >>> round_(smooth_min1(1.0, 1.0, derived.highestremotesmoothpar)) 1.0 """ control = self.subpars.pars.control if numpy.isinf(control.highestremotedischarge): self(0.0) else: self(control.highestremotedischarge * smoothtools.calc_smoothpar_min1(control.highestremotetolerance) )
def run_subprocess(command: str, verbose: bool = True, blocking: bool = True) \
        -> Optional[subprocess.Popen]:
    """Execute the given command in a new process.

    Only when both `verbose` and `blocking` are |True|, |run_subprocess|
    prints all responses to the current value of |sys.stdout|:

    >>> from hydpy import run_subprocess
    >>> import platform
    >>> esc = '' if 'windows' in platform.platform().lower() else '\\\\'
    >>> run_subprocess(f'python -c print{esc}(1+1{esc})')
    2

    With verbose being |False|, |run_subprocess| does never print out
    anything:

    >>> run_subprocess(f'python -c print{esc}(1+1{esc})', verbose=False)

    >>> process = run_subprocess('python', blocking=False, verbose=False)
    >>> process.kill()
    >>> _ = process.communicate()

    When `verbose` is |True| and `blocking` is |False|, |run_subprocess|
    prints all responses to the console ("invisible" for doctests):

    >>> process = run_subprocess('python', blocking=False)
    >>> process.kill()
    >>> _ = process.communicate()
    """
    if not blocking:
        # Detached execution: hand back the Popen object; discard all
        # output when not verbose.
        target = None if verbose else subprocess.DEVNULL
        return subprocess.Popen(
            command,
            stdout=target,
            stderr=target,
            encoding='utf-8',
            shell=True)
    completed = subprocess.run(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        encoding='utf-8',
        shell=True)
    if verbose:   # due to doctest replacing sys.stdout
        for text in (completed.stdout, completed.stderr):
            stripped = text.strip()
            if stripped:
                print(stripped)
    return None
def exec_commands(commands: str, **parameters: Any) -> None:
    """Execute the given Python commands.

    Function |exec_commands| is thought for testing purposes only (see
    the main documentation on module |hyd|).  Seperate individual
    commands by semicolons and replaced whitespaces with underscores:

    >>> from hydpy.exe.commandtools import exec_commands
    >>> import sys
    >>> exec_commands("x_=_1+1;print(x)")
    Start to execute the commands ['x_=_1+1', 'print(x)'] for testing purposes.
    2

    |exec_commands| interprets double underscores as a single underscores:

    >>> exec_commands("x_=_1;print(x.____class____)")
    Start to execute the commands ['x_=_1', 'print(x.____class____)'] \
for testing purposes.
    <class 'int'>

    |exec_commands| evaluates additional keyword arguments before it
    executes the given commands:

    >>> exec_commands("e=x==y;print(e)", x=1, y=2)
    Start to execute the commands ['e=x==y', 'print(e)'] for testing purposes.
    False
    """
    cmds = commands.split(';')
    print(f'Start to execute the commands {cmds} for testing purposes.')
    # Bind the keyword arguments as local names so the commands can
    # refer to them.
    for name, value in parameters.items():
        exec(f'{name} = {value}')
    for command in cmds:
        # Double underscores encode literal underscores; single
        # underscores encode spaces.
        command = (command
                   .replace('__', 'temptemptemp')
                   .replace('_', ' ')
                   .replace('temptemptemp', '_'))
        exec(command)
def prepare_logfile(filename: str) -> str:
    """Prepare an empty log file eventually and return its absolute path.

    When passing the "filename" `stdout`, |prepare_logfile| does not
    prepare any file and just returns `stdout`:

    >>> from hydpy.exe.commandtools import prepare_logfile
    >>> prepare_logfile('stdout')
    'stdout'

    When passing the "filename" `default`, |prepare_logfile| generates a
    filename containing the actual date and time, prepares an empty file
    on disk, and returns its path:

    >>> from hydpy import repr_, TestIO
    >>> from hydpy.core.testtools import mock_datetime_now
    >>> from datetime import datetime
    >>> with TestIO():
    ...     with mock_datetime_now(datetime(2000, 1, 1, 12, 30, 0)):
    ...         filepath = prepare_logfile('default')
    >>> import os
    >>> os.path.exists(filepath)
    True
    >>> repr_(filepath)    # doctest: +ELLIPSIS
    '...hydpy/tests/iotesting/hydpy_2000-01-01_12-30-00.log'

    For all other strings, |prepare_logfile| does not add any date or
    time information to the filename:

    >>> with TestIO():
    ...     with mock_datetime_now(datetime(2000, 1, 1, 12, 30, 0)):
    ...         filepath = prepare_logfile('my_log_file.txt')
    >>> os.path.exists(filepath)
    True
    >>> repr_(filepath)    # doctest: +ELLIPSIS
    '...hydpy/tests/iotesting/my_log_file.txt'
    """
    # `stdout` is a pseudo file name meaning "no log file at all".
    if filename == 'stdout':
        return 'stdout'
    if filename == 'default':
        # Generate a time-stamped file name in the current directory.
        now = datetime.datetime.now()
        filename = now.strftime('hydpy_%Y-%m-%d_%H-%M-%S.log')
    # Truncate (or create) the file so logging starts from scratch.
    with open(filename, 'w'):
        pass
    return os.path.abspath(filename)
def execute_scriptfunction() -> None:
    """Execute a HydPy script function.

    Function |execute_scriptfunction| is indirectly applied and
    explained in the documentation on module |hyd|.
    """
    # Define the logging targets before entering the `try` block.
    # Previously, an exception raised before their assignment (for
    # example, when `prepare_logfile` fails) triggered a `NameError`
    # inside the exception handler, hiding the original problem.
    logfilepath = 'stdout'
    logstyle = 'plain'
    try:
        # Separate the command line strings into positional and keyword
        # arguments.  Strings shorter than three characters cannot form
        # a keyword argument (`k=v` needs at least three characters).
        args_given = []
        kwargs_given = {}
        for arg in sys.argv[1:]:
            if len(arg) < 3:
                args_given.append(arg)
            else:
                try:
                    key, value = parse_argument(arg)
                    kwargs_given[key] = value
                except ValueError:
                    # `parse_argument` returned a plain string, which
                    # cannot be unpacked into two items -> positional.
                    args_given.append(arg)
        logfilepath = prepare_logfile(kwargs_given.pop('logfile', 'stdout'))
        logstyle = kwargs_given.pop('logstyle', 'plain')
        try:
            funcname = str(args_given.pop(0))
        except IndexError:
            raise ValueError(
                'The first positional argument defining the function '
                'to be called is missing.')
        try:
            func = hydpy.pub.scriptfunctions[funcname]
        except KeyError:
            available_funcs = objecttools.enumeration(
                sorted(hydpy.pub.scriptfunctions.keys()))
            raise ValueError(
                f'There is no `{funcname}` function callable by `hyd.py`. '
                f'Choose one of the following instead: {available_funcs}.')
        # Verify that the number of given positional arguments agrees
        # with the signature of the selected script function.
        args_required = inspect.getfullargspec(func).args
        nmb_args_required = len(args_required)
        nmb_args_given = len(args_given)
        if nmb_args_given != nmb_args_required:
            enum_args_given = ''
            if nmb_args_given:
                enum_args_given = (
                    f' ({objecttools.enumeration(args_given)})')
            enum_args_required = ''
            if nmb_args_required:
                enum_args_required = (
                    f' ({objecttools.enumeration(args_required)})')
            raise ValueError(
                f'Function `{funcname}` requires `{nmb_args_required:d}` '
                f'positional arguments{enum_args_required}, but '
                f'`{nmb_args_given:d}` are given{enum_args_given}.')
        with _activate_logfile(logfilepath, logstyle, 'info', 'warning'):
            func(*args_given, **kwargs_given)
    except BaseException as exc:
        # Fall back to the plain style when the requested one is
        # unknown to `LogFileInterface`, so error reporting never fails.
        if logstyle not in LogFileInterface.style2infotype2string:
            logstyle = 'plain'
        with _activate_logfile(
                logfilepath, logstyle, 'exception', 'exception'):
            arguments = ', '.join(sys.argv)
            print(f'Invoking hyd.py with arguments `{arguments}` '
                  f'resulted in the following error:\n{str(exc)}\n\n'
                  f'See the following stack traceback for debugging:\n',
                  file=sys.stderr)
            traceback.print_tb(sys.exc_info()[2])
def parse_argument(string: str) -> Union[str, Tuple[str, str]]:
    """Return a single value for a string understood as a positional
    argument or a |tuple| containing a keyword and its value for a
    string understood as a keyword argument.

    |parse_argument| is intended to be used as a helper function for
    function |execute_scriptfunction| only.  See the following
    examples to see which types of keyword arguments
    |execute_scriptfunction| covers:

    >>> from hydpy.exe.commandtools import parse_argument
    >>> parse_argument('x=3')
    ('x', '3')
    >>> parse_argument('"x=3"')
    '"x=3"'
    >>> parse_argument("'x=3'")
    "'x=3'"
    >>> parse_argument('x="3==3"')
    ('x', '"3==3"')
    >>> parse_argument("x='3==3'")
    ('x', "'3==3'")
    """
    idx_assign = string.find('=')
    if idx_assign == -1:
        # No assignment character at all -> positional argument.
        return string
    # Determine the position of the first quotation mark, if any.
    quote_positions = [
        idx for idx in (string.find('"'), string.find("'")) if idx != -1]
    first_quote = min(quote_positions) if quote_positions else len(string)
    if first_quote < idx_assign:
        # The `=` lies within a quoted section -> positional argument.
        return string
    # Split at the first (unquoted) `=` into keyword and value.
    return string[:idx_assign], string[idx_assign+1:]
def print_textandtime(text: str) -> None:
    """Print the given string and the current date and time with high
    precision for logging purposes.

    >>> from hydpy.exe.commandtools import print_textandtime
    >>> from hydpy.core.testtools import mock_datetime_now
    >>> from datetime import datetime
    >>> with mock_datetime_now(datetime(2000, 1, 1, 12, 30, 0, 123456)):
    ...     print_textandtime('something happens')
    something happens (2000-01-01 12:30:00.123456).
    """
    now = datetime.datetime.now()
    stamp = now.strftime('%Y-%m-%d %H:%M:%S.%f')
    print(f'{text} ({stamp}).')
def write(self, string: str) -> None:
    """Write the given string as explained in the main documentation
    on class |LogFileInterface|."""
    # Prefix every non-empty line with the configured marker string;
    # empty lines stay empty so blank separators remain unprefixed.
    prefixed = []
    for line in string.split('\n'):
        if line:
            prefixed.append(f'{self._string}{line}')
        else:
            prefixed.append('')
    self.logfile.write('\n'.join(prefixed))
def solve_dv_dt_v1(self):
    """Solve the differential equation of HydPy-L.

    At the moment, HydPy-L only implements a simple numerical solution
    of its underlying ordinary differential equation.  To increase the
    accuracy (or sometimes even to prevent instability) of this
    approximation, one can set the value of parameter |MaxDT| to a
    value smaller than the actual simulation step size.  Method
    |solve_dv_dt_v1| then applies the methods related to the numerical
    approximation multiple times and aggregates the results.

    Note that the order of convergence is one only.  It is hard to
    tell how short the internal simulation step needs to be to ensure
    a certain degree of accuracy.  In most cases one hour or very
    often even one day should be sufficient to gain acceptable
    results.  However, this strongly depends on the given water
    stage-volume-discharge relationship.  Hence it seems advisable to
    always define a few test waves and apply the llake model with
    different |MaxDT| values.  Afterwards, select a |MaxDT| value
    lower than one which results in acceptable approximations for all
    test waves.  The computation time of the llake mode per substep is
    rather small, so always include a safety factor.

    Of course, an adaptive step size determination would be much more
    convenient...

    Required derived parameter:
      |NmbSubsteps|

    Used aide sequence:
      |llake_aides.V|
      |llake_aides.QA|

    Updated state sequence:
      |llake_states.V|

    Calculated flux sequence:
      |llake_fluxes.QA|

    Note that method |solve_dv_dt_v1| calls the versions of `calc_vq`,
    `interp_qa` and `calc_v_qa` selected by the respective application
    model.  Hence, also their parameter and sequence specifications
    need to be considered.

    Basic equation:
      :math:`\\frac{dV}{dt}= QZ - QA(V)`
    """
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    old = self.sequences.states.fastaccess_old
    new = self.sequences.states.fastaccess_new
    aid = self.sequences.aides.fastaccess
    # Accumulate the outflow of all substeps in `flu.qa` and start the
    # volume iteration with the volume of the previous time step.
    flu.qa = 0.
    aid.v = old.v
    for _ in range(der.nmbsubsteps):
        # One first-order substep: calculate the auxiliary term,
        # interpolate the outflow, and update the water volume.
        self.calc_vq()
        self.interp_qa()
        self.calc_v_qa()
        flu.qa += aid.qa
    # Average the accumulated substep outflows and hand the final
    # volume over to the "new" state.
    flu.qa /= der.nmbsubsteps
    new.v = aid.v
def calc_vq_v1(self):
    """Calculate the auxiliary term.

    Required derived parameters:
      |Seconds|
      |NmbSubsteps|

    Required flux sequence:
      |QZ|

    Required aide sequence:
      |llake_aides.V|

    Calculated aide sequence:
      |llake_aides.VQ|

    Basic equation:
      :math:`VQ = 2 \\cdot V + \\frac{Seconds}{NmbSubsteps} \\cdot QZ`

    Example:

        The following example shows that the auxiliary term `vq` does
        not depend on the (outer) simulation step size but on the
        (inner) calculation step size defined by parameter `maxdt`:

        >>> from hydpy.models.llake import *
        >>> parameterstep('1d')
        >>> simulationstep('12h')
        >>> maxdt('6h')
        >>> derived.seconds.update()
        >>> derived.nmbsubsteps.update()
        >>> fluxes.qz = 2.
        >>> aides.v = 1e5
        >>> model.calc_vq_v1()
        >>> aides.vq
        vq(243200.0)
    """
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    aid = self.sequences.aides.fastaccess
    # `seconds/nmbsubsteps` is the length of one internal calculation
    # step, so `vq` relates to the substep, not the simulation step.
    aid.vq = 2.*aid.v+der.seconds/der.nmbsubsteps*flu.qz
def interp_qa_v1(self):
    """Calculate the lake outflow based on linear interpolation.

    Required control parameters:
      |N|
      |llake_control.Q|

    Required derived parameters:
      |llake_derived.TOY|
      |llake_derived.VQ|

    Required aide sequence:
      |llake_aides.VQ|

    Calculated aide sequence:
      |llake_aides.QA|

    Examples:

        In preparation for the following examples, define a short
        simulation time period with a simulation step size of 12 hours
        and initialize the required model object:

        >>> from hydpy import pub
        >>> pub.timegrids = '2000.01.01','2000.01.04', '12h'
        >>> from hydpy.models.llake import *
        >>> parameterstep()

        Next, for the sake of brevity, define a test function:

        >>> def test(*vqs):
        ...     for vq in vqs:
        ...         aides.vq(vq)
        ...         model.interp_qa_v1()
        ...         print(repr(aides.vq), repr(aides.qa))

        The following three relationships between the auxiliary term
        `vq` and the tabulated discharge `q` are taken as examples.
        Each one is valid for one of the first three days in January
        and is defined via five nodes:

        >>> n(5)
        >>> derived.toy.update()
        >>> derived.vq(_1_1_6=[0., 1., 2., 2., 3.],
        ...            _1_2_6=[0., 1., 2., 2., 3.],
        ...            _1_3_6=[0., 1., 2., 3., 4.])
        >>> q(_1_1_6=[0., 0., 0., 0., 0.],
        ...   _1_2_6=[0., 2., 5., 6., 9.],
        ...   _1_3_6=[0., 2., 1., 3., 2.])

        In the first example, discharge does not depend on the actual
        value of the auxiliary term and is always zero:

        >>> model.idx_sim = pub.timegrids.init['2000.01.01']
        >>> test(0., .75, 1., 4./3., 2., 7./3., 3., 10./3.)
        vq(0.0) qa(0.0)
        vq(0.75) qa(0.0)
        vq(1.0) qa(0.0)
        vq(1.333333) qa(0.0)
        vq(2.0) qa(0.0)
        vq(2.333333) qa(0.0)
        vq(3.0) qa(0.0)
        vq(3.333333) qa(0.0)

        The seconds example demonstrates that relationships are
        allowed to contain jumps, which is the case for the (`vq`,`q`)
        pairs (2,6) and (2,7).  Also it demonstrates that when the
        highest `vq` value is exceeded linear extrapolation based on
        the two highest (`vq`,`q`) pairs is performed:

        >>> model.idx_sim = pub.timegrids.init['2000.01.02']
        >>> test(0., .75, 1., 4./3., 2., 7./3., 3., 10./3.)
        vq(0.0) qa(0.0)
        vq(0.75) qa(1.5)
        vq(1.0) qa(2.0)
        vq(1.333333) qa(3.0)
        vq(2.0) qa(5.0)
        vq(2.333333) qa(7.0)
        vq(3.0) qa(9.0)
        vq(3.333333) qa(10.0)

        The third example shows that the relationships do not need to
        be arranged monotonously increasing.  Particualarly for the
        extrapolation range, this could result in negative values of
        `qa`, which is avoided by setting it to zero in such cases:

        >>> model.idx_sim = pub.timegrids.init['2000.01.03']
        >>> test(.5, 1.5, 2.5, 3.5, 4.5, 10.)
        vq(0.5) qa(1.0)
        vq(1.5) qa(1.5)
        vq(2.5) qa(2.0)
        vq(3.5) qa(2.5)
        vq(4.5) qa(1.5)
        vq(10.0) qa(0.0)
    """
    con = self.parameters.control.fastaccess
    der = self.parameters.derived.fastaccess
    aid = self.sequences.aides.fastaccess
    # Select the relationship valid for the current time of the year.
    idx = der.toy[self.idx_sim]
    # Find the first node whose `vq` value is not smaller than the
    # actual one; if none exists, `jdx` points to the last node, which
    # results in linear extrapolation below.
    for jdx in range(1, con.n):
        if der.vq[idx, jdx] >= aid.vq:
            break
    # Linear interpolation (or extrapolation) between the nodes
    # `jdx-1` and `jdx`.
    aid.qa = ((aid.vq-der.vq[idx, jdx-1]) *
              (con.q[idx, jdx]-con.q[idx, jdx-1]) /
              (der.vq[idx, jdx]-der.vq[idx, jdx-1]) +
              con.q[idx, jdx-1])
    # Negative outflow values (possible in the extrapolation range)
    # are not allowed.
    aid.qa = max(aid.qa, 0.)
def calc_v_qa_v1(self):
    """Update the stored water volume based on the equation of
    continuity.

    Note that for too high outflow values, which would result in
    overdraining the lake, the outflow is trimmed.

    Required derived parameters:
      |Seconds|
      |NmbSubsteps|

    Required flux sequence:
      |QZ|

    Updated aide sequences:
      |llake_aides.QA|
      |llake_aides.V|

    Basic Equation:
      :math:`\\frac{dV}{dt}= QZ - QA`

    Examples:

        Prepare a lake model with an initial storage of 100.000 m³ and
        an inflow of 2 m³/s and a (potential) outflow of 6 m³/s:

        >>> from hydpy.models.llake import *
        >>> parameterstep()
        >>> simulationstep('12h')
        >>> maxdt('6h')
        >>> derived.seconds.update()
        >>> derived.nmbsubsteps.update()
        >>> aides.v = 1e5
        >>> fluxes.qz = 2.
        >>> aides.qa = 6.

        Through calling method `calc_v_qa_v1` three times with the
        same inflow and outflow values, the storage is emptied after
        the second step and outflow is equal to inflow after the third
        step:

        >>> model.calc_v_qa_v1()
        >>> aides.v
        v(13600.0)
        >>> aides.qa
        qa(6.0)
        >>> model.new2old()
        >>> model.calc_v_qa_v1()
        >>> aides.v
        v(0.0)
        >>> aides.qa
        qa(2.62963)
        >>> model.new2old()
        >>> model.calc_v_qa_v1()
        >>> aides.v
        v(0.0)
        >>> aides.qa
        qa(2.0)

        Note that the results of method |calc_v_qa_v1| do not depend
        on the (outer) simulation step size but on the (inner)
        calculation step size defined by parameter `maxdt`.
    """
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    aid = self.sequences.aides.fastaccess
    # Trim the outflow to the value that would drain the currently
    # stored volume completely within one internal calculation step.
    aid.qa = min(aid.qa, flu.qz+der.nmbsubsteps/der.seconds*aid.v)
    # Update the volume; the `max` call guards against tiny negative
    # values due to rounding errors.
    aid.v = max(aid.v+der.seconds/der.nmbsubsteps*(flu.qz-aid.qa), 0.)
def interp_w_v1(self):
    """Calculate the actual water stage based on linear interpolation.

    Required control parameters:
      |N|
      |llake_control.V|
      |llake_control.W|

    Required state sequence:
      |llake_states.V|

    Calculated state sequence:
      |llake_states.W|

    Examples:

        Prepare a model object:

        >>> from hydpy.models.llake import *
        >>> parameterstep('1d')
        >>> simulationstep('12h')

        For the sake of brevity, define a test function:

        >>> def test(*vs):
        ...     for v in vs:
        ...         states.v.new = v
        ...         model.interp_w_v1()
        ...         print(repr(states.v), repr(states.w))

        Define a simple `w`-`v` relationship consisting of three nodes
        and calculate the water stages for different volumes:

        >>> n(3)
        >>> v(0., 2., 4.)
        >>> w(-1., 1., 2.)

        Perform the interpolation for a few test points:

        >>> test(0., .5, 2., 3., 4., 5.)
        v(0.0) w(-1.0)
        v(0.5) w(-0.5)
        v(2.0) w(1.0)
        v(3.0) w(1.5)
        v(4.0) w(2.0)
        v(5.0) w(2.5)

        The reference water stage of the relationship can be selected
        arbitrarily.  Even negative water stages are returned, as is
        demonstrated by the first two calculations.  For volumes
        outside the range of the (`v`,`w`) pairs, the outer two
        highest pairs are used for linear extrapolation.
    """
    con = self.parameters.control.fastaccess
    new = self.sequences.states.fastaccess_new
    # Find the first node whose volume is not smaller than the actual
    # volume; if none exists, `jdx` points to the last node, which
    # results in linear extrapolation below.
    for jdx in range(1, con.n):
        if con.v[jdx] >= new.v:
            break
    # Linear interpolation (or extrapolation) between the nodes
    # `jdx-1` and `jdx`.
    new.w = ((new.v-con.v[jdx-1]) *
             (con.w[jdx]-con.w[jdx-1]) /
             (con.v[jdx]-con.v[jdx-1]) +
             con.w[jdx-1])
def corr_dw_v1(self):
    """Adjust the water stage drop to the highest value allowed and
    correct the associated fluxes.

    Note that method |corr_dw_v1| calls the method `interp_v` of the
    respective application model.  Hence the requirements of the
    actual `interp_v` need to be considered additionally.

    Required control parameter:
      |MaxDW|

    Required derived parameters:
      |llake_derived.TOY|
      |Seconds|

    Required flux sequence:
      |QZ|

    Updated flux sequence:
      |llake_fluxes.QA|

    Updated state sequences:
      |llake_states.W|
      |llake_states.V|

    Basic Restriction:
      :math:`W_{old} - W_{new} \\leq MaxDW`

    Examples:

        In preparation for the following examples, define a short
        simulation time period with a simulation step size of 12 hours
        and initialize the required model object:

        >>> from hydpy import pub
        >>> pub.timegrids = '2000.01.01', '2000.01.04', '12h'
        >>> from hydpy.models.llake import *
        >>> parameterstep('1d')
        >>> derived.toy.update()
        >>> derived.seconds.update()

        Select the first half of the second day of January as the
        simulation step relevant for the following examples:

        >>> model.idx_sim = pub.timegrids.init['2000.01.02']

        The following tests are based on method |interp_v_v1| for the
        interpolation of the stored water volume based on the
        corrected water stage:

        >>> model.interp_v = model.interp_v_v1

        For the sake of simplicity, the underlying `w`-`v`
        relationship is assumed to be linear:

        >>> n(2.)
        >>> w(0., 1.)
        >>> v(0., 1e6)

        The maximum drop in water stage for the first half of the
        second day of January is set to 0.4 m/d.  Note that, due to
        the difference between the parameter step size and the
        simulation step size, the actual value used for calculation
        is 0.2 m/12h:

        >>> maxdw(_1_1_18=.1,
        ...       _1_2_6=.4,
        ...       _1_2_18=.1)
        >>> maxdw
        maxdw(toy_1_1_18_0_0=0.1, toy_1_2_6_0_0=0.4, toy_1_2_18_0_0=0.1)
        >>> from hydpy import round_
        >>> round_(maxdw.value[2])
        0.2

        Define old and new water stages and volumes in agreement with
        the given linear relationship:

        >>> states.w.old = 1.
        >>> states.v.old = 1e6
        >>> states.w.new = .9
        >>> states.v.new = 9e5

        Also define an inflow and an outflow value.  Note the that the
        latter is set to zero, which is inconsistent with the actual
        water stage drop defined above, but done for didactic reasons:

        >>> fluxes.qz = 1.
        >>> fluxes.qa = 0.

        Calling the |corr_dw_v1| method does not change the values of
        either of following sequences, as the actual drop (0.1 m/12h)
        is smaller than the allowed drop (0.2 m/12h):

        >>> model.corr_dw_v1()
        >>> states.w
        w(0.9)
        >>> states.v
        v(900000.0)
        >>> fluxes.qa
        qa(0.0)

        Note that the values given above are not recalculated, which
        can clearly be seen for the lake outflow, which is still zero.

        Through setting the new value of the water stage to 0.6 m, the
        actual drop (0.4 m/12h) exceeds the allowed drop (0.2 m/12h).
        Hence the water stage is trimmed and the other values are
        recalculated:

        >>> states.w.new = .6
        >>> model.corr_dw_v1()
        >>> states.w
        w(0.8)
        >>> states.v
        v(800000.0)
        >>> fluxes.qa
        qa(5.62963)

        Through setting the maximum water stage drop to zero, method
        |corr_dw_v1| is effectively disabled.  Regardless of the
        actual change in water stage, no trimming or recalculating is
        performed:

        >>> maxdw.toy_01_02_06 = 0.
        >>> states.w.new = .6
        >>> model.corr_dw_v1()
        >>> states.w
        w(0.6)
        >>> states.v
        v(800000.0)
        >>> fluxes.qa
        qa(5.62963)
    """
    con = self.parameters.control.fastaccess
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    old = self.sequences.states.fastaccess_old
    new = self.sequences.states.fastaccess_new
    # Select the maximum drop valid for the current time of the year.
    idx = der.toy[self.idx_sim]
    # A maximum drop of zero disables the correction entirely;
    # otherwise trim only when the actual drop exceeds the allowed one.
    if (con.maxdw[idx] > 0.) and ((old.w-new.w) > con.maxdw[idx]):
        new.w = old.w-con.maxdw[idx]
        # Recalculate the volume from the trimmed stage and derive the
        # consistent outflow from the continuity equation.
        self.interp_v()
        flu.qa = flu.qz+(old.v-new.v)/der.seconds
def modify_qa_v1(self):
    """Add water to or remove water from the calculated lake outflow.

    Required control parameter:
      |Verzw|

    Required derived parameter:
      |llake_derived.TOY|

    Updated flux sequence:
      |llake_fluxes.QA|

    Basic Equation:
      :math:`QA = QA* - Verzw`

    Examples:

        In preparation for the following examples, define a short
        simulation time period with a simulation step size of 12 hours
        and initialize the required model object:

        >>> from hydpy import pub
        >>> pub.timegrids = '2000.01.01', '2000.01.04', '12h'
        >>> from hydpy.models.llake import *
        >>> parameterstep('1d')
        >>> derived.toy.update()

        Select the first half of the second day of January as the
        simulation step relevant for the following examples:

        >>> model.idx_sim = pub.timegrids.init['2000.01.02']

        Assume that, in accordance with previous calculations, the
        original outflow value is 3 m³/s:

        >>> fluxes.qa = 3.

        Prepare the shape of parameter `verzw` (usually, this is done
        automatically when calling parameter `n`):

        >>> verzw.shape = (None,)

        Set the value of the abstraction on the first half of the
        second day of January to 2 m³/s:

        >>> verzw(_1_1_18=0.,
        ...       _1_2_6=2.,
        ...       _1_2_18=0.)

        In the first example `verzw` is simply subtracted from `qa`:

        >>> model.modify_qa_v1()
        >>> fluxes.qa
        qa(1.0)

        In the second example `verzw` exceeds `qa`, resulting in a
        zero outflow value:

        >>> model.modify_qa_v1()
        >>> fluxes.qa
        qa(0.0)

        The last example demonstrates, that "negative abstractions"
        are allowed, resulting in an increase in simulated outflow:

        >>> verzw.toy_1_2_6 = -2.
        >>> model.modify_qa_v1()
        >>> fluxes.qa
        qa(2.0)
    """
    con = self.parameters.control.fastaccess
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    # Select the abstraction valid for the current time of the year.
    idx = der.toy[self.idx_sim]
    # Subtract the (possibly negative) abstraction, but never let the
    # resulting outflow become negative.
    flu.qa = max(flu.qa-con.verzw[idx], 0.)
def pass_q_v1(self):
    """Update the outlet link sequence."""
    flu = self.sequences.fluxes.fastaccess
    out = self.sequences.outlets.fastaccess
    # The outflow is added (not assigned) to the outlet sequence;
    # presumably the linked node accumulates contributions from
    # several sources -- NOTE(review): confirm against the node logic.
    out.q[0] += flu.qa
def thresholds(self):
    """Threshold values of the response functions."""
    # Convert each coefficient key to its threshold value, then return
    # them in ascending order as a float array.
    values = [self._key2float(key) for key in self._coefs]
    values.sort()
    return numpy.array(values, dtype=float)
def prepare_arrays(sim=None, obs=None, node=None, skip_nan=False):
    """Prepare and return two |numpy| arrays based on the given arguments.

    Note that many functions provided by module |statstools| apply
    function |prepare_arrays| internally (e.g. |nse|).  But you can
    also apply it manually, as shown in the following examples.

    Function |prepare_arrays| can extract time series data from |Node|
    objects.  To set up an example for this, we define a initialization
    time period and prepare a |Node| object:

    >>> from hydpy import pub, Node, round_, nan
    >>> pub.timegrids = '01.01.2000', '07.01.2000', '1d'
    >>> node = Node('test')

    Next, we assign values the `simulation` and the `observation`
    sequences (to do so for the `observation` sequence requires a
    little trick, as its values are normally supposed to be read from
    a file):

    >>> node.prepare_simseries()
    >>> with pub.options.checkseries(False):
    ...     node.sequences.sim.series = 1.0, nan, nan, nan, 2.0, 3.0
    ...     node.sequences.obs.ramflag = True
    ...     node.sequences.obs.series = 4.0, 5.0, nan, nan, nan, 6.0

    Now we can pass the node object to function |prepare_arrays| and
    get the (unmodified) time series data:

    >>> from hydpy import prepare_arrays
    >>> arrays = prepare_arrays(node=node)
    >>> round_(arrays[0])
    1.0, nan, nan, nan, 2.0, 3.0
    >>> round_(arrays[1])
    4.0, 5.0, nan, nan, nan, 6.0

    Alternatively, we can pass directly any iterables (e.g. |list| and
    |tuple| objects) containing the `simulated` and `observed` data:

    >>> arrays = prepare_arrays(sim=list(node.sequences.sim.series),
    ...                         obs=tuple(node.sequences.obs.series))
    >>> round_(arrays[0])
    1.0, nan, nan, nan, 2.0, 3.0
    >>> round_(arrays[1])
    4.0, 5.0, nan, nan, nan, 6.0

    The optional `skip_nan` flag allows to skip all values, which are
    no numbers.  Note that only those pairs of `simulated` and
    `observed` values are returned which do not contain any `nan`:

    >>> arrays = prepare_arrays(node=node, skip_nan=True)
    >>> round_(arrays[0])
    1.0, 3.0
    >>> round_(arrays[1])
    4.0, 6.0

    The final examples show the error messages returned in case of
    invalid combinations of input arguments:

    >>> prepare_arrays()
    Traceback (most recent call last):
    ...
    ValueError: Neither a `Node` object is passed to argument `node` nor \
are arrays passed to arguments `sim` and `obs`.

    >>> prepare_arrays(sim=node.sequences.sim.series, node=node)
    Traceback (most recent call last):
    ...
    ValueError: Values are passed to both arguments `sim` and `node`, \
which is not allowed.

    >>> prepare_arrays(obs=node.sequences.obs.series, node=node)
    Traceback (most recent call last):
    ...
    ValueError: Values are passed to both arguments `obs` and `node`, \
which is not allowed.

    >>> prepare_arrays(sim=node.sequences.sim.series)
    Traceback (most recent call last):
    ...
    ValueError: A value is passed to argument `sim` but \
no value is passed to argument `obs`.

    >>> prepare_arrays(obs=node.sequences.obs.series)
    Traceback (most recent call last):
    ...
    ValueError: A value is passed to argument `obs` but \
no value is passed to argument `sim`.
    """
    # Use an identity check instead of relying on the truthiness of the
    # (project-defined) `Node` object, which could define `__bool__`
    # or `__len__` in surprising ways.
    if node is not None:
        if sim is not None:
            raise ValueError(
                'Values are passed to both arguments `sim` and `node`, '
                'which is not allowed.')
        if obs is not None:
            raise ValueError(
                'Values are passed to both arguments `obs` and `node`, '
                'which is not allowed.')
        sim = node.sequences.sim.series
        obs = node.sequences.obs.series
    elif (sim is not None) and (obs is None):
        raise ValueError(
            'A value is passed to argument `sim` '
            'but no value is passed to argument `obs`.')
    elif (obs is not None) and (sim is None):
        raise ValueError(
            'A value is passed to argument `obs` '
            'but no value is passed to argument `sim`.')
    elif (sim is None) and (obs is None):
        raise ValueError(
            'Neither a `Node` object is passed to argument `node` nor '
            'are arrays passed to arguments `sim` and `obs`.')
    sim = numpy.asarray(sim)
    obs = numpy.asarray(obs)
    if skip_nan:
        # Keep only those index positions where both series contain
        # real numbers (idiomatic boolean `&` instead of `*`).
        idxs = ~numpy.isnan(sim) & ~numpy.isnan(obs)
        sim = sim[idxs]
        obs = obs[idxs]
    return sim, obs
def nse(sim=None, obs=None, node=None, skip_nan=False):
    """Calculate the efficiency criteria after Nash & Sutcliffe.

    If the simulated values predict the observed values as well
    as the average observed value (regarding the the mean square
    error), the NSE value is zero:

    >>> from hydpy import nse
    >>> nse(sim=[2.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0])
    0.0
    >>> nse(sim=[0.0, 2.0, 4.0], obs=[1.0, 2.0, 3.0])
    0.0

    For worse and better simulated values the NSE is negative
    or positive, respectively:

    >>> nse(sim=[3.0, 2.0, 1.0], obs=[1.0, 2.0, 3.0])
    -3.0
    >>> nse(sim=[1.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0])
    0.5

    The highest possible value is one:

    >>> nse(sim=[1.0, 2.0, 3.0], obs=[1.0, 2.0, 3.0])
    1.0

    See the documentation on function |prepare_arrays| for some
    additional instructions for use of function |nse|.
    """
    sim, obs = prepare_arrays(sim, obs, node, skip_nan)
    # Sum of squared residuals versus total variability of the
    # observations around their mean.
    squared_errors = numpy.sum((sim-obs)**2)
    obs_variability = numpy.sum((obs-numpy.mean(obs))**2)
    return 1.-squared_errors/obs_variability
def bias_abs(sim=None, obs=None, node=None, skip_nan=False):
    """Calculate the absolute difference between the means of the
    simulated and the observed values.

    >>> from hydpy import round_
    >>> from hydpy import bias_abs
    >>> round_(bias_abs(sim=[2.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0]))
    0.0
    >>> round_(bias_abs(sim=[5.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0]))
    1.0
    >>> round_(bias_abs(sim=[1.0, 1.0, 1.0], obs=[1.0, 2.0, 3.0]))
    -1.0

    See the documentation on function |prepare_arrays| for some
    additional instructions for use of function |bias_abs|.
    """
    sim, obs = prepare_arrays(sim, obs, node, skip_nan)
    differences = sim-obs
    return numpy.mean(differences)
def std_ratio(sim=None, obs=None, node=None, skip_nan=False):
    """Calculate the ratio between the standard deviation of the
    simulated and the observed values, reduced by one (so that a
    perfect agreement of both standard deviations results in zero).

    >>> from hydpy import round_
    >>> from hydpy import std_ratio
    >>> round_(std_ratio(sim=[1.0, 2.0, 3.0], obs=[1.0, 2.0, 3.0]))
    0.0
    >>> round_(std_ratio(sim=[1.0, 1.0, 1.0], obs=[1.0, 2.0, 3.0]))
    -1.0
    >>> round_(std_ratio(sim=[0.0, 3.0, 6.0], obs=[1.0, 2.0, 3.0]))
    2.0

    See the documentation on function |prepare_arrays| for some
    additional instructions for use of function |std_ratio|.
    """
    sim, obs = prepare_arrays(sim, obs, node, skip_nan)
    std_sim = numpy.std(sim)
    std_obs = numpy.std(obs)
    return std_sim/std_obs-1.
def corr(sim=None, obs=None, node=None, skip_nan=False):
    """Calculate the product-moment correlation coefficient after Pearson.

    >>> from hydpy import round_
    >>> from hydpy import corr
    >>> round_(corr(sim=[0.5, 1.0, 1.5], obs=[1.0, 2.0, 3.0]))
    1.0
    >>> round_(corr(sim=[4.0, 2.0, 0.0], obs=[1.0, 2.0, 3.0]))
    -1.0
    >>> round_(corr(sim=[1.0, 2.0, 1.0], obs=[1.0, 2.0, 3.0]))
    0.0

    See the documentation on function |prepare_arrays| for some
    additional instructions for use of function |corr|.
    """
    sim, obs = prepare_arrays(sim, obs, node, skip_nan)
    # The off-diagonal entry of the 2x2 correlation matrix holds the
    # correlation between both series.
    matrix = numpy.corrcoef(sim, obs)
    return matrix[0, 1]
def hsepd_pdf(sigma1, sigma2, xi, beta,
              sim=None, obs=None, node=None, skip_nan=False):
    """Calculate the probability densities based on the
    heteroskedastic skewed exponential power distribution.

    For convenience, the required parameters of the probability density
    function as well as the simulated and observed values are stored
    in a dictonary:

    >>> import numpy
    >>> from hydpy import round_
    >>> from hydpy import hsepd_pdf
    >>> general = {'sigma1': 0.2,
    ...            'sigma2': 0.0,
    ...            'xi': 1.0,
    ...            'beta': 0.0,
    ...            'sim': numpy.arange(10.0, 41.0),
    ...            'obs': numpy.full(31, 25.0)}

    The following test function allows the variation of one parameter
    and prints some and plots all of probability density values
    corresponding to different simulated values:

    >>> def test(**kwargs):
    ...     from matplotlib import pyplot
    ...     special = general.copy()
    ...     name, values = list(kwargs.items())[0]
    ...     results = numpy.zeros((len(general['sim']), len(values)+1))
    ...     results[:, 0] = general['sim']
    ...     for jdx, value in enumerate(values):
    ...         special[name] = value
    ...         results[:, jdx+1] = hsepd_pdf(**special)
    ...         pyplot.plot(results[:, 0], results[:, jdx+1],
    ...                     label='%s=%.1f' % (name, value))
    ...     pyplot.legend()
    ...     for idx, result in enumerate(results):
    ...         if not (idx % 5):
    ...             round_(result)

    When varying parameter `beta`, the resulting probabilities correspond
    to the Laplace distribution (1.0), normal distribution (0.0), and the
    uniform distribution (-1.0), respectively.  Note that we use -0.99
    instead of -1.0 for approximating the uniform distribution to prevent
    from running into numerical problems, which are not solved yet:

    >>> test(beta=[1.0, 0.0, -0.99])
    10.0, 0.002032, 0.000886, 0.0
    15.0, 0.008359, 0.010798, 0.0
    20.0, 0.034382, 0.048394, 0.057739
    25.0, 0.141421, 0.079788, 0.057739
    30.0, 0.034382, 0.048394, 0.057739
    35.0, 0.008359, 0.010798, 0.0
    40.0, 0.002032, 0.000886, 0.0

    .. testsetup::

        >>> from matplotlib import pyplot
        >>> pyplot.close()

    When varying parameter `xi`, the resulting density is negatively
    skewed (0.2), symmetric (1.0), and positively skewed (5.0),
    respectively:

    >>> test(xi=[0.2, 1.0, 5.0])
    10.0, 0.0, 0.000886, 0.003175
    15.0, 0.0, 0.010798, 0.012957
    20.0, 0.092845, 0.048394, 0.036341
    25.0, 0.070063, 0.079788, 0.070063
    30.0, 0.036341, 0.048394, 0.092845
    35.0, 0.012957, 0.010798, 0.0
    40.0, 0.003175, 0.000886, 0.0

    .. testsetup::

        >>> from matplotlib import pyplot
        >>> pyplot.close()

    In the above examples, the actual `sigma` (5.0) is calculated by
    multiplying `sigma1` (0.2) with the mean simulated value (25.0),
    internally.  This can be done for modelling homoscedastic errors.
    Instead, `sigma2` is multiplied with the individual simulated values
    to account for heteroscedastic errors.  With increasing values of
    `sigma2`, the resulting densities are modified as follows:

    >>> test(sigma2=[0.0, 0.1, 0.2])
    10.0, 0.000886, 0.002921, 0.005737
    15.0, 0.010798, 0.018795, 0.022831
    20.0, 0.048394, 0.044159, 0.037988
    25.0, 0.079788, 0.053192, 0.039894
    30.0, 0.048394, 0.04102, 0.032708
    35.0, 0.010798, 0.023493, 0.023493
    40.0, 0.000886, 0.011053, 0.015771

    .. testsetup::

        >>> from matplotlib import pyplot
        >>> pyplot.close()
    """
    sim, obs = prepare_arrays(sim, obs, node, skip_nan)
    # Heteroscedastic standard deviations (one per simulated value).
    sigmas = _pars_h(sigma1, sigma2, sim)
    # Moments and normalization constants of the SEP distribution.
    mu_xi, sigma_xi, w_beta, c_beta = _pars_sepd(xi, beta)
    # Standardized residuals.
    a = (obs-sim)/sigmas
    zs = mu_xi+sigma_xi*a
    negatives = zs < 0.
    # Apply the skewness parameter `xi` on the negative and the
    # positive branch separately.
    a_xi = numpy.empty(a.shape)
    a_xi[negatives] = numpy.absolute(xi*zs[negatives])
    a_xi[~negatives] = numpy.absolute(1./xi*zs[~negatives])
    ps = (2.*sigma_xi/(xi+1./xi)*w_beta *
          numpy.exp(-c_beta*a_xi**(2./(1.+beta))))/sigmas
    return ps
def hsepd_manual(sigma1, sigma2, xi, beta,
                 sim=None, obs=None, node=None, skip_nan=False):
    """Calculate the mean of the logarithmised probability densities of
    the 'heteroskedastic skewed exponential power distribution.

    The following examples are taken from the documentation of function
    |hsepd_pdf|, which is used by function |hsepd_manual|.  The first
    one deals with a heteroscedastic normal distribution:

    >>> from hydpy import round_
    >>> from hydpy import hsepd_manual
    >>> round_(hsepd_manual(sigma1=0.2, sigma2=0.2,
    ...                     xi=1.0, beta=0.0,
    ...                     sim=numpy.arange(10.0, 41.0),
    ...                     obs=numpy.full(31, 25.0)))
    -3.682842

    The second one is supposed to show to small zero probability density
    values are set to 1e-200 before calculating their logarithm (which
    means that the lowest possible value returned by function
    |hsepd_manual| is approximately -460):

    >>> round_(hsepd_manual(sigma1=0.2, sigma2=0.0,
    ...                     xi=1.0, beta=-0.99,
    ...                     sim=numpy.arange(10.0, 41.0),
    ...                     obs=numpy.full(31, 25.0)))
    -209.539335
    """
    arrays = prepare_arrays(sim, obs, node, skip_nan)
    return _hsepd_manual(sigma1, sigma2, xi, beta, *arrays)
def hsepd(sim=None, obs=None, node=None, skip_nan=False,
          inits=None, return_pars=False, silent=True):
    """Calculate the mean of the logarithmised probability densities of
    the 'heteroskedastic skewed exponential power distribution.

    Function |hsepd| serves the same purpose as function |hsepd_manual|,
    but tries to estimate the parameters of the heteroscedastic skewed
    exponential distribution via an optimization algorithm.  This
    is shown by generating a random sample.  1000 simulated values
    are scattered around the observed (true) value of 10.0 with a
    standard deviation of 2.0:

    >>> import numpy
    >>> numpy.random.seed(0)
    >>> sim = numpy.random.normal(10.0, 2.0, 1000)
    >>> obs = numpy.full(1000, 10.0)

    First, as a reference, we calculate the "true" value based on
    function |hsepd_manual| and the correct distribution parameters:

    >>> from hydpy import round_
    >>> from hydpy import hsepd, hsepd_manual
    >>> round_(hsepd_manual(sigma1=0.2, sigma2=0.0,
    ...                     xi=1.0, beta=0.0,
    ...                     sim=sim, obs=obs))
    -2.100093

    When using function |hsepd|, the returned value is even a
    little "better":

    >>> round_(hsepd(sim=sim, obs=obs))
    -2.09983

    This is due to the deviation from the random sample to its
    theoretical distribution.  This is reflected by small differences
    between the estimated values and the theoretical values of
    `sigma1` (0.2), , `sigma2` (0.0), `xi` (1.0), and `beta` (0.0).
    The estimated values are returned in the mentioned order through
    enabling the `return_pars` option:

    >>> value, pars = hsepd(sim=sim, obs=obs, return_pars=True)
    >>> round_(pars, decimals=5)
    0.19966, 0.0, 0.96836, 0.0188

    There is no guarantee that the optimization numerical optimization
    algorithm underlying function |hsepd| will always find the parameters
    resulting in the largest value returned by function |hsepd_manual|.
    You can increase its robustness (and decrease computation time) by
    supplying good initial parameter values:

    >>> value, pars = hsepd(sim=sim, obs=obs, return_pars=True,
    ...                     inits=(0.2, 0.0, 1.0, 0.0))
    >>> round_(pars, decimals=5)
    0.19966, 0.0, 0.96836, 0.0188

    However, the following example shows a case when this strategie
    results in worse results:

    >>> value, pars = hsepd(sim=sim, obs=obs, return_pars=True,
    ...                     inits=(0.0, 0.2, 1.0, 0.0))
    >>> round_(value)
    -2.174492
    >>> round_(pars)
    0.0, 0.213179, 1.705485, 0.505112
    """

    def constrain(sigma1, sigma2, xi, beta):
        """Clip the given parameter estimates to their feasible ranges."""
        return (numpy.clip(sigma1, 0.0, None),
                numpy.clip(sigma2, 0.0, None),
                numpy.clip(xi, 0.1, 10.0),
                numpy.clip(beta, -0.99, 5.0))

    def transform(pars):
        """Negate the mean log density of the constrained parameters,
        turning the maximisation task into a minimisation task."""
        sigma1, sigma2, xi, beta = constrain(*pars)
        return -_hsepd_manual(sigma1, sigma2, xi, beta, sim, obs)

    sim, obs = prepare_arrays(sim, obs, node, skip_nan)
    if not inits:
        inits = [0.1, 0.2, 3.0, 1.0]
    estimates = optimize.fmin(transform, inits,
                              ftol=1e-12, xtol=1e-12,
                              disp=not silent)
    estimates = constrain(*estimates)
    result = _hsepd_manual(*estimates, sim=sim, obs=obs)
    if return_pars:
        return result, estimates
    return result
def calc_mean_time(timepoints, weights):
    """Return the weighted mean of the given timepoints.

    With equal given weights, the result is simply the mean of the
    given time points:

    >>> from hydpy import calc_mean_time
    >>> calc_mean_time(timepoints=[3., 7.],
    ...                weights=[2., 2.])
    5.0

    With different weights, the resulting mean time is shifted to
    the larger ones:

    >>> calc_mean_time(timepoints=[3., 7.],
    ...                weights=[1., 3.])
    6.0

    Or, in the most extreme case:

    >>> calc_mean_time(timepoints=[3., 7.],
    ...                weights=[0., 4.])
    7.0

    There will be some checks for input plausibility perfomed, e.g.:

    >>> calc_mean_time(timepoints=[3., 7.],
    ...                weights=[-2., 2.])
    Traceback (most recent call last):
    ...
    ValueError: While trying to calculate the weighted mean time, \
the following error occurred: For the following objects, at least \
one value is negative: weights.
    """
    timepoints = numpy.array(timepoints)
    weights = numpy.array(weights)
    # Shapes must agree and all weights must be non-negative.
    validtools.test_equal_shape(timepoints=timepoints, weights=weights)
    validtools.test_non_negative(weights=weights)
    weighted_sum = numpy.dot(timepoints, weights)
    return weighted_sum/numpy.sum(weights)
def calc_mean_time_deviation(timepoints, weights, mean_time=None):
    """Return the weighted deviation of the given timepoints from
    their mean time.

    With equal given weights, the is simply the standard deviation
    of the given time points:

    >>> from hydpy import calc_mean_time_deviation
    >>> calc_mean_time_deviation(timepoints=[3., 7.],
    ...                          weights=[2., 2.])
    2.0

    One can pass a precalculated or alternate mean time:

    >>> from hydpy import round_
    >>> round_(calc_mean_time_deviation(timepoints=[3., 7.],
    ...                                 weights=[2., 2.],
    ...                                 mean_time=4.))
    2.236068

    >>> round_(calc_mean_time_deviation(timepoints=[3., 7.],
    ...                                 weights=[1., 3.]))
    1.732051

    Or, in the most extreme case:

    >>> calc_mean_time_deviation(timepoints=[3., 7.],
    ...                          weights=[0., 4.])
    0.0

    There will be some checks for input plausibility perfomed, e.g.:

    >>> calc_mean_time_deviation(timepoints=[3., 7.],
    ...                          weights=[-2., 2.])
    Traceback (most recent call last):
    ...
    ValueError: While trying to calculate the weighted time deviation \
from mean time, the following error occurred: For the following objects, \
at least one value is negative: weights.
    """
    timepoints = numpy.array(timepoints)
    weights = numpy.array(weights)
    # Shapes must agree and all weights must be non-negative.
    validtools.test_equal_shape(timepoints=timepoints, weights=weights)
    validtools.test_non_negative(weights=weights)
    if mean_time is None:
        mean_time = calc_mean_time(timepoints, weights)
    # Weighted variance of the time points around the mean time.
    variance = (numpy.dot(weights, (timepoints-mean_time)**2) /
                numpy.sum(weights))
    return numpy.sqrt(variance)
def evaluationtable(nodes, criteria, nodenames=None,
                    critnames=None, skip_nan=False):
    """Return a table containing the results of the given evaluation
    criteria for the given |Node| objects.

    First, we define two nodes with different simulation and
    observation data (see function |prepare_arrays| for some
    explanations):

    >>> from hydpy import pub, Node, nan
    >>> pub.timegrids = '01.01.2000', '04.01.2000', '1d'
    >>> nodes = Node('test1'), Node('test2')
    >>> for node in nodes:
    ...     node.prepare_simseries()
    ...     node.sequences.sim.series = 1.0, 2.0, 3.0
    ...     node.sequences.obs.ramflag = True
    ...     node.sequences.obs.series = 4.0, 5.0, 6.0
    >>> nodes[0].sequences.sim.series = 1.0, 2.0, 3.0
    >>> nodes[0].sequences.obs.series = 4.0, 5.0, 6.0
    >>> nodes[1].sequences.sim.series = 1.0, 2.0, 3.0
    >>> with pub.options.checkseries(False):
    ...     nodes[1].sequences.obs.series = 3.0, nan, 1.0

    Selecting functions |corr| and |bias_abs| as evaluation criteria,
    function |evaluationtable| returns the following table (which is
    actually a pandas data frame):

    >>> from hydpy import evaluationtable, corr, bias_abs
    >>> evaluationtable(nodes, (corr, bias_abs))
           corr  bias_abs
    test1   1.0      -3.0
    test2   NaN       NaN

    One can pass alternative names for both the node objects and the
    criteria functions.  Also, `nan` values can be skipped:

    >>> evaluationtable(nodes, (corr, bias_abs),
    ...                 nodenames=('first node', 'second node'),
    ...                 critnames=('corrcoef', 'bias'),
    ...                 skip_nan=True)
                 corrcoef  bias
    first node        1.0  -3.0
    second node      -1.0   0.0

    The number of assigned node objects and criteria functions must
    match the number of givern alternative names:

    >>> evaluationtable(nodes, (corr, bias_abs),
    ...                 nodenames=('first node',))
    Traceback (most recent call last):
    ...
    ValueError: While trying to evaluate the simulation results of some \
node objects, the following error occurred: 2 node objects are given \
which does not match with number of given alternative names beeing 1.

    >>> evaluationtable(nodes, (corr, bias_abs),
    ...                 critnames=('corrcoef',))
    Traceback (most recent call last):
    ...
    ValueError: While trying to evaluate the simulation results of some \
node objects, the following error occurred: 2 criteria functions are given \
which does not match with number of given alternative names beeing 1.
    """
    if nodenames:
        if len(nodes) != len(nodenames):
            raise ValueError(
                '%d node objects are given which does not match with '
                'number of given alternative names beeing %s.'
                % (len(nodes), len(nodenames)))
    else:
        nodenames = [node.name for node in nodes]
    if critnames:
        if len(criteria) != len(critnames):
            raise ValueError(
                '%d criteria functions are given which does not match '
                'with number of given alternative names beeing %s.'
                % (len(criteria), len(critnames)))
    else:
        critnames = [crit.__name__ for crit in criteria]
    # Evaluate each criterion once per node on the prepared arrays.
    data = numpy.empty((len(nodes), len(criteria)), dtype=float)
    for row, node in enumerate(nodes):
        sim, obs = prepare_arrays(None, None, node, skip_nan)
        for col, criterion in enumerate(criteria):
            data[row, col] = criterion(sim, obs)
    return pandas.DataFrame(data=data, index=nodenames, columns=critnames)
def set_primary_parameters(self, **kwargs):
    """Set all primary parameters at once.

    All primary parameter values must be passed together as keyword
    arguments in a single call; passing only a subset (or unknown
    names) raises a |ValueError|.
    """
    given = sorted(kwargs.keys())
    required = sorted(self._PRIMARY_PARAMETERS)
    if given == required:
        for (key, value) in kwargs.items():
            setattr(self, key, value)
    else:
        # Fixed message: removed the duplicated "to" ("has to to
        # define") and added the missing closing backtick after
        # `set_primary_parameters`.
        raise ValueError(
            'When passing primary parameter values as initialization '
            'arguments of the instantaneous unit hydrograph class `%s`, '
            'or when using method `set_primary_parameters`, one has to '
            'define all values at once via keyword arguments. '
            'But instead of the primary parameter names `%s` the '
            'following keywords were given: %s.'
            % (objecttools.classname(self),
               ', '.join(required), ', '.join(given)))
def primary_parameters_complete(self):
    """True/False flag that indicates whether the values of all primary
    parameters are defined or not."""
    # Complete exactly when no primary parameter descriptor yields
    # |None|; `all` short-circuits just like the original loop did.
    return all(
        descriptor.__get__(self) is not None
        for descriptor in self._PRIMARY_PARAMETERS.values())