code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from PIL import Image
# Scaling modes accepted by ImageScale.adjust_size().
CROP = 1 # Fit smallest side to screen (cuts off parts of image)
FIT = 2 # Fit largest side (leaves black spaces)
FILL = 3 # Adjust scale to fill (may distort)
PAD = 4 # leave some space around image (use fit with different params)
class ImageScale(object):
    """Compute placement rectangles for scaling the image at *path*."""

    def __init__(self, path):
        # Filename of the image; opened lazily in adjust_size().
        self.path = path

    def adjust_size(self, dst_x, dst_y, mode=FIT):
        """
        Given the width and height of the destination, determine the ratio
        and return an (x, y, w, h) placement for the output image.

        ``mode`` is one of the module constants CROP, FIT, FILL or PAD.
        """
        # Get the source image size.
        image = Image.open(self.path)
        width, height = image.size
        # BUG FIX: FIT previously dispatched to adjust_crop, and the other
        # modes silently returned None.  Each mode now maps to its matching
        # module-level helper.
        if mode == CROP:
            return adjust_crop(dst_x, dst_y, width, height)
        if mode == FIT:
            return adjust_fit(dst_x, dst_y, width, height)
        if mode == FILL:
            return adjust_fill(dst_x, dst_y, width, height)
        if mode == PAD:
            return adjust_pad(dst_x, dst_y, width, height)
        raise ValueError("unknown scaling mode: %r" % (mode,))
def adjust_crop(dst_w, dst_h, img_w, img_h):
    """
    Return an (x, y, w, h) placement that completely covers the
    dst_w x dst_h area: the image is scaled so one axis matches the
    destination and the other overflows (x or y may be negative).

    >>> adjust_crop(4,3,5,5)
    (0, -0.5, 4.0, 4.0)
    >>> adjust_crop(8,6,5,5)
    (0, -1.0, 8.0, 8.0)
    >>> adjust_crop(4,3,5,2)
    (-1.75, 0, 7.5, 3.0)
    >>> adjust_crop(8,6,5,2)
    (-3.5, 0, 15.0, 6.0)
    """
    dst_w, dst_h = float(dst_w), float(dst_h)
    img_w, img_h = float(img_w), float(img_h)
    if dst_w / dst_h > img_w / img_h:
        # Destination is relatively wider: match widths, overflow vertically.
        ratio = dst_w / img_w
        width, height = dst_w, img_h * ratio
        left, top = 0, (dst_h - height) / 2
    else:
        # Destination is relatively narrower: match heights, overflow horizontally.
        ratio = dst_h / img_h
        width, height = img_w * ratio, img_h * ratio
        left, top = (dst_w - width) / 2, 0
    return left, top, width, height
def adjust_pad(dst_w, dst_h, img_w, img_h, amount=.1):
    """
    Fit the image into a destination shrunk by *amount* on every side,
    then shift the result so the padding is centred around it.
    """
    shrink = 1 - 2 * amount
    x, y, w, h = adjust_fit(dst_w * shrink, dst_h * shrink, img_w, img_h)
    return x + dst_w * amount, y + dst_h * amount, w, h
def adjust_fit(dst_w, dst_h, img_w, img_h):
    """
    Given the width and height of the destination, return an (x, y, w, h)
    that fits the whole image inside the destination, centred along the
    unused axis (black bars appear on that axis).

    >>> adjust_fit(4,3,5,5)
    (0.5, 0, 3.0, 3.0)
    >>> adjust_fit(8,6,5,5)
    (1.0, 0, 6.0, 6.0)
    >>> adjust_fit(4,3,5,2)
    (0, 0.7, 4.0, 1.6)
    >>> adjust_fit(8,6,5,2)
    (0, 1.4, 8.0, 3.2)
    """
    # NOTE: doctest expectations updated to Python 3 float reprs
    # (e.g. 0.7 instead of 0.69999999999999996) -- the old Python 2
    # reprs made the doctests fail under Python 3.
    dst_w = float(dst_w)
    dst_h = float(dst_h)
    img_w = float(img_w)
    img_h = float(img_h)
    dst_ratio = dst_w / dst_h
    img_ratio = img_w / img_h
    if dst_ratio > img_ratio:
        # Image is relatively narrower than the destination: use full height.
        y = 0
        h = dst_h
        w = h * img_ratio
        x = dst_w / 2 - w / 2
    else:
        # Image is relatively wider: use full width.
        # (Removed an unused `scale` computation left over from adjust_crop.)
        x = 0
        w = dst_w
        h = w / img_ratio
        y = dst_h / 2 - h / 2
    return x, y, w, h
def adjust_fill(dst_w, dst_h, img_w, img_h):
    """Stretch the image to cover the whole destination (may distort)."""
    return (0, 0, dst_w, dst_h)
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    # (Removed dataset-extraction residue that was fused onto the last line.)
    import doctest
    doctest.testmod()
# Module metadata for rst_simplespreadsheet.
__author__ = "Patrick Fournier"
__version__ = "0.1.1"
__license__ = "MIT License"
import re
import docutils
import docutils.nodes
import docutils.parsers
import docutils.parsers.rst
import rst2pdf
import rst2pdf.genelements
class SpreadSheet:
    """
    Simple spreadsheet adapted from
    http://code.activestate.com/recipes/355045-spreadsheet/

    Cells hold Python expressions as strings; reading a cell evaluates its
    expression, with other cells reachable by their "a1"-style names.
    """

    def __init__(self):
        # BUG FIX: `_cells` and `tools` used to be *class* attributes, so
        # every instance shared one cell dict and `tools['sum']` was rebound
        # to the most recently created instance.  Both are now per-instance.
        self._cells = {}
        self.tools = {'sum': self.sum}

    def __setitem__(self, key, formula):
        # Store the raw formula string; evaluation happens on read.
        self._cells[key] = formula

    def getformula(self, key):
        """Return the raw (unevaluated) formula stored at *key*."""
        return self._cells[key]

    def __getitem__(self, key):
        # Evaluate the formula: `tools` acts as globals (helper functions),
        # the sheet itself as locals so a name like "a1" resolves to the
        # value of another cell via this very method.
        return eval(self._cells[key], self.tools, self)

    @staticmethod
    def col_coords(col):
        """Return the alphabetical (base-26) label for 0-based column *col*."""
        s = ''
        while col >= 0:
            digit = col % 26
            s = chr(ord('a') + digit) + s
            # BUG FIX: was `col / 26 - 1`, which produces a float under
            # Python 3; use integer division.
            col = col // 26 - 1
        return s

    @staticmethod
    def row_coords(row):
        """Return the 1-based row label for 0-based row index *row*."""
        return str(row + 1)

    @staticmethod
    def coords(col, row):
        """
        Utility function to return spreadsheet index (1-based) from
        row and column index (zero-based).
        """
        return SpreadSheet.col_coords(col) + SpreadSheet.row_coords(row)

    @staticmethod
    def inv_coords(c):
        """
        Utility function to return row and column index (0-based) from
        spreadsheet index (1-based).
        """
        r = re.search('([a-z]+)([0-9]+)', c)
        col_str = r.group(1)
        row = int(r.group(2)) - 1
        col = 0
        m = 1
        # Decode the base-26 column label from right to left.
        while col_str != '':
            col = col + (ord(col_str[-1]) - ord('a') + 1) * m
            col_str = col_str[:-1]
            m = m * 26
        col = col - 1
        return (col, row)

    def sum(self, cells):
        """
        Return the sum of the cells in the range given by *cells*.

        Example:
            Spreadsheet.sum('a1:c5')

        Range limits are included in the sum.
        """
        (first, last) = cells.split(':')
        (c1, l1) = SpreadSheet.inv_coords(first)
        (c2, l2) = SpreadSheet.inv_coords(last)
        s = 0
        # BUG FIX: xrange (Python 2 only) replaced with range.
        for c in range(c1, c2 + 1):
            for l in range(l1, l2 + 1):
                ax = SpreadSheet.coords(c, l)
                s += self[ax]
        return s
class SpreadsheetNode(docutils.nodes.General, docutils.nodes.Element):
    # Marker node whose first child is a table that may contain ={...} formulae.
    pass
class SpreadsheetDirective(docutils.parsers.rst.Directive):
    """Directive to insert spreadsheet markup."""

    required_arguments = 0
    optional_arguments = 0
    option_spec = {}
    has_content = True

    def run(self):
        """Parse the directive content into a SpreadsheetNode and resolve formulae."""
        self.assert_has_content()
        node = SpreadsheetNode()
        self.state.nested_parse(self.content, self.content_offset, node)
        self.resolve(node)
        return [node]

    def resolve(self, ssn):
        """
        Resolve formulae in SpreadsheetNode ssn, replacing them by a
        computed value. The SpreadsheetNode must contain a table node
        as its first child.

        Formulae are pieces of text enclosed in ={...}. Only one formula
        per cell is supported.

        A simple formula is simply a number, like this:

            ={4}

        Some more complex formulae:

            ={a1 * a2}
            ={b# * f#}
            ={@1 + @2}
            ={sum("a1:a20")}

        Special characters @ and # are replaced by current column and row,
        respectively.

        A cell has value 0 (zero) unless it contains a formula. In
        this case, the cell takes the value of the formula's result.
        """
        # Find table body.
        # ssn[0] is the table node
        tgroup_idx = ssn[0].first_child_matching_class(docutils.nodes.tgroup)
        tbody_idx = ssn[0][tgroup_idx].first_child_matching_class(docutils.nodes.tbody)
        body = ssn[0][tgroup_idx][tbody_idx]
        s = SpreadSheet()
        # First pass: copy values and formulae from table to spreadsheet.
        # BUG FIX: xrange (Python 2 only) replaced by range throughout.
        for row in range(len(body)):
            offset = 0
            if type(body[row]) == docutils.nodes.row:
                for col in range(len(body[row])):
                    if type(body[row][col]) == docutils.nodes.entry:
                        ax = s.coords(col + offset, row)
                        text = self.parse_entry(body[row][col], '0')
                        # @ / # expand to the current column / row label.
                        text = text.replace('@', s.col_coords(col + offset))
                        text = text.replace('#', s.row_coords(row))
                        s[ax] = text
                        # Increment column address if cell spans more
                        # than one column.
                        if body[row][col].hasattr('morecols'):
                            offset += body[row][col].get('morecols')
        # Second pass: copy computed values from spreadsheet back to table.
        for row in range(len(body)):
            offset = 0
            if type(body[row]) == docutils.nodes.row:
                for col in range(len(body[row])):
                    if type(body[row][col]) == docutils.nodes.entry:
                        ax = s.coords(col + offset, row)
                        self.replace_value(ssn[0][tgroup_idx][tbody_idx][row][col], s[ax])
                        # Increment column address if cell spans more
                        # than one column.
                        if body[row][col].hasattr('morecols'):
                            offset += body[row][col].get('morecols')

    def parse_entry(self, entry, default_text):
        """
        Extract formula from table cell *entry*.

        If the cell does not contain any formula, return
        *default_text*.
        """
        for i in range(len(entry)):
            if type(entry[i]) == docutils.nodes.Text:
                r = re.search('={(.*)}', entry[i].astext())
                if r:
                    default_text = r.group(1)
            else:
                # Recurse into child nodes (e.g. paragraphs inside the cell).
                default_text = self.parse_entry(entry[i], default_text)
        return default_text

    def replace_value(self, entry, value):
        """Replace formulae with their computed *value* in the node tree."""
        for i in range(len(entry)):
            if type(entry[i]) == docutils.nodes.Text:
                (new_text, subs_count) = re.subn('={.*}', str(value), entry[i].astext())
                if subs_count > 0:
                    new_node = docutils.nodes.Text(new_text)
                    entry.replace(entry[i], new_node)
            else:
                self.replace_value(entry[i], value)
class SpreadsheetHandler(rst2pdf.genelements.NodeHandler, SpreadsheetNode):
    # rst2pdf node handler registered for SpreadsheetNode; no extra behaviour
    # is needed here beyond the NodeHandler registration machinery.
    pass
# Make the directive available to reST documents.
# (Removed dataset-extraction residue that was fused onto this line.)
docutils.parsers.rst.directives.register_directive("simplespreadsheet", SpreadsheetDirective)
from argparse import ArgumentParser, ArgumentTypeError, Namespace
from importlib.metadata import version
from pathlib import Path
from rstats_logreader.reader import RStatsParser
def parse_args(arg_list):
    """
    Read arguments from *arg_list*, validating and performing any necessary
    conversions (None reads from sys.argv).

    :returns: (Namespace) Tool arguments
    :raises ArgumentTypeError: for an invalid resolution string or a --write
                               request without an output file
    """

    def norm_resolution(inp):
        # Map a string of resolution letters ("dwm") to a Namespace of flags,
        # e.g. "dw" -> Namespace(daily=True, weekly=True, monthly=False).
        resolutions = {"d": "daily", "w": "weekly", "m": "monthly"}

        if inp:
            if any(res not in resolutions for res in inp):
                raise ArgumentTypeError("Invalid resolution for logs")

            return Namespace(**{v: k in inp for (k, v) in resolutions.items()})

    def norm_day(day):
        # Convert a weekday abbreviation to its 0-based index (Mon == 0).
        days = {"Mon": 0, "Tue": 1, "Wed": 2, "Thu": 3, "Fri": 4, "Sat": 5, "Sun": 6}

        if day:
            return days[day]

    main_parser = ArgumentParser(
        prog="rstats-reader",
        description="Displays statistics in RStats logfiles, with optional conversion",
        epilog="Released under Prosperity 3.0.0, (C) 2016, 2019-20, 2022 Jeremy Brown",
    )

    main_parser.add_argument(
        "path",
        type=Path,
        help="gzipped RStats logfile",
    )

    main_parser.add_argument(
        "--print",
        type=norm_resolution,
        dest="print_freq",
        metavar="{dwm}",
        help="Print daily, weekly or monthly statistics to the console",
    )

    main_parser.add_argument(
        "-w",
        "--week-start",
        type=norm_day,
        default="Mon",
        metavar="{Mon - Sun}",
        choices=range(7),
        help="Day of the week statistics should reset",
    )

    main_parser.add_argument(
        "-m",
        "--month-start",
        type=int,
        default=1,
        choices=range(1, 32),
        metavar="{1 - 31}",
        help="Day of the month statistics should reset",
    )

    main_parser.add_argument(
        "-u",
        "--units",
        default="MiB",
        metavar="{B - TiB}",
        choices=["B", "KiB", "MiB", "GiB", "TiB"],
        help="Units statistics will be displayed in",
    )

    main_parser.add_argument(
        "--version",
        action="version",
        version=f"%(prog)s {version('rstats-logreader')}",
    )

    write_group = main_parser.add_argument_group("write")

    write_group.add_argument(
        "--write",
        type=norm_resolution,
        dest="write_freq",
        metavar="{dwm}",
        help="Write daily, weekly or monthly statistics to a file",
    )

    write_group.add_argument(
        "-o",
        "--outfile",
        type=Path,
        metavar="outfile.dat",
        help="File to write statistics to",
    )

    write_group.add_argument(
        "-f",
        "--format",
        default="csv",
        choices=["csv", "json"],
        help="Format to write statistics in",
    )

    args = main_parser.parse_args(arg_list)

    # --write without -o/--outfile gives nowhere to put the statistics.
    # (write_freq always exists on the namespace, so the previous getattr
    # with a default was dead code; a dead `args = None` was removed too.)
    if args.write_freq is not None and args.outfile is None:
        raise ArgumentTypeError("Missing output filename")

    return args
def main(args=None):
    """
    Tool entry point.

    :param args: optional raw argument list (None reads from sys.argv)
    """
    # (Removed dataset-extraction residue that was fused onto the last line.)
    args = parse_args(args)
    parser = RStatsParser(args.week_start, args.month_start, args.units, args.path)

    if args.print_freq is not None:
        for line in parser.get_console_stats(**vars(args.print_freq)):
            print(line)

    if args.write_freq is not None:
        parser.write_stats(args.outfile, args.format, **vars(args.write_freq))
import pathlib
import typing as t
from . import _compat as _t
SourceFileOrString = t.Union[pathlib.Path, _t.Literal["<string>"], _t.Literal["<stdin>"]]
"""Path to source file or if it is a string then '<string>' or '<stdin>'."""


class LintError(_t.TypedDict):
    """Dict with information about a linting error."""

    source_origin: SourceFileOrString  # file (or "<string>"/"<stdin>") the error came from
    line_number: int  # line the error was reported on
    message: str  # human-readable error message


YieldedLintError = t.Generator[LintError, None, None]
"""Yielded version of type :py:class:`LintError`."""


class IgnoreDict(_t.TypedDict):
    """Dict with ignore information."""

    # NOTE: Pattern type-arg errors pydanic: https://github.com/samuelcolvin/pydantic/issues/2636
    messages: t.Optional[t.Pattern]  # type: ignore[type-arg]
    languages: t.List[str]  # languages to ignore
    directives: t.List[str]  # directive names to ignore
    roles: t.List[str]  # role names to ignore
    substitutions: t.List[str]  # substitution names to ignore
def construct_ignore_dict(
    # NOTE: Pattern type-arg errors pydanic: https://github.com/samuelcolvin/pydantic/issues/2636
    messages: t.Optional[t.Pattern] = None,  # type: ignore[type-arg]
    languages: t.Optional[t.List[str]] = None,
    directives: t.Optional[t.List[str]] = None,
    roles: t.Optional[t.List[str]] = None,
    substitutions: t.Optional[t.List[str]] = None,
) -> IgnoreDict:
    """Create an :py:class:`IgnoreDict` with passed values or defaults.

    Every list argument left as :py:obj:`None` becomes an empty list;
    ``messages`` is stored as given.

    :param messages: Value for :py:attr:`IgnoreDict.messages`; defaults to :py:obj:`None`
    :param languages: Value for :py:attr:`IgnoreDict.languages`; defaults to :py:obj:`None`
    :param directives: Value for :py:attr:`IgnoreDict.directives`; defaults to :py:obj:`None`
    :param roles: Value for :py:attr:`IgnoreDict.roles`; defaults to :py:obj:`None`
    :param substitutions: Value for :py:attr:`IgnoreDict.substitutions`; defaults to :py:obj:`None`
    :return: :py:class:`IgnoreDict` with passed values or defaults
    """

    def _or_empty(value: t.Optional[t.List[str]]) -> t.List[str]:
        # None means "not given", which defaults to an empty list.
        return [] if value is None else value

    return IgnoreDict(
        messages=messages,
        languages=_or_empty(languages),
        directives=_or_empty(directives),
        roles=_or_empty(roles),
        substitutions=_or_empty(substitutions),
    )
CheckerRunFunction = t.Callable[..., YieldedLintError]
"""Function to run checks.

Returned by :py:meth:`rstcheck_core.checker.CodeBlockChecker.create_checker`.
"""


class InlineConfig(_t.TypedDict):
    """Dict with a config key and config value coming from an inline config comment."""

    key: str  # name of the config option
    value: str  # raw (unparsed) option value
class InlineFlowControl(_t.TypedDict):
    """Dict with a flow control value and line number coming from an inline config comment."""

    # (Removed dataset-extraction residue that was fused onto the last line.)
    value: str  # raw flow-control value
    line_number: int  # line the inline comment was found on
import importlib
import logging
import typing as t
import docutils.nodes
import docutils.parsers.rst.directives
import docutils.parsers.rst.roles
import docutils.writers
from . import _extras
logger = logging.getLogger(__name__)
class IgnoredDirective(docutils.parsers.rst.Directive):  # pragma: no cover
    """Stub for unknown directives."""

    has_content = True  # accept (and discard) any directive body

    def run(self) -> t.List:  # type: ignore[type-arg]
        """Do nothing.

        :return: Empty list -- the directive produces no nodes
        """
        return []
def ignore_role(
    name: str,
    rawtext: str,
    text: str,
    lineno: int,
    inliner: docutils.parsers.rst.states.Inliner,
    options: t.Optional[t.Dict[str, t.Any]] = None,
    content: t.Optional[t.List[str]] = None,
) -> t.Tuple[t.List, t.List]:  # type: ignore[type-arg]  # pragma: no cover
    """Stub for unknown roles.

    Matches the docutils role-function signature but produces no nodes
    and no system messages.

    :return: Tuple of (nodes, system messages), both empty
    """
    # pylint: disable=unused-argument,too-many-arguments
    return ([], [])
def clean_docutils_directives_and_roles_cache() -> None:  # pragma: no cover
    """Clean docutils' directives and roles cache by reloading their modules.

    Reloads:

    - :py:mod:`docutils.parsers.rst.directives`
    - :py:mod:`docutils.parsers.rst.roles`
    """
    logger.info("Reload module docutils.parsers.rst.directives/roles")
    for module in (docutils.parsers.rst.directives, docutils.parsers.rst.roles):
        importlib.reload(module)
def ignore_directives_and_roles(directives: t.List[str], roles: t.List[str]) -> None:
    """Register stub handlers so docutils silently accepts the given names.

    :param directives: Directives to ignore
    :param roles: Roles to ignore
    """
    register_directive = docutils.parsers.rst.directives.register_directive
    for directive_name in directives:
        register_directive(directive_name, IgnoredDirective)

    register_role = docutils.parsers.rst.roles.register_local_role
    for role_name in roles:
        register_role(role_name, ignore_role)
class CodeBlockDirective(docutils.parsers.rst.Directive):
    """Code block directive."""

    has_content = True
    optional_arguments = 1

    def run(self) -> t.List[docutils.nodes.literal_block]:
        """Build a literal block from the directive content.

        :return: Literal block
        """
        # The single optional argument is the language; default to "".
        language = self.arguments[0] if self.arguments else ""
        code = "\n".join(self.content)
        literal = docutils.nodes.literal_block(code, code)
        literal["classes"].append("code-block")
        literal["language"] = language
        return [literal]
def register_code_directive(
    *,
    ignore_code_directive: bool = False,
    ignore_codeblock_directive: bool = False,
    ignore_sourcecode_directive: bool = False,
) -> None:
    """Optionally register code directives.

    Only registers when sphinx is not installed.

    :param ignore_code_directive: If "code" directive should be ignored,
        so that the code block will not be checked; defaults to :py:obj:`False`
    :param ignore_codeblock_directive: If "code-block" directive should be ignored,
        so that the code block will not be checked; defaults to :py:obj:`False`
    :param ignore_sourcecode_directive: If "sourcecode" directive should be ignored,
        so that the code block will not be checked; defaults to :py:obj:`False`
    """
    # (Removed dataset-extraction residue that was fused onto the last line,
    # and collapsed three identical branches into one loop.)
    if _extras.SPHINX_INSTALLED:
        return

    # NOTE: docutils maps `code-block` and `sourcecode` to `code`
    directive_flags = (
        ("code", ignore_code_directive),
        ("code-block", ignore_codeblock_directive),
        ("sourcecode", ignore_sourcecode_directive),
    )
    for name, ignore in directive_flags:
        if ignore is False:
            logger.debug(f"Register custom directive for '{name}'.")
            docutils.parsers.rst.directives.register_directive(name, CodeBlockDirective)
import importlib
import logging
import typing as t
from . import _compat as _t
logger = logging.getLogger(__name__)
ExtraDependencies = _t.Literal["sphinx", "tomli"]
"""List of all dependencies installable through extras."""


class DependencyInfos(_t.TypedDict):
    """Information about a dependency."""

    min_version: t.Tuple[int, ...]  # minimum supported version, e.g. (2, 0)
    extra: str  # name of the package extra that installs this dependency


ExtraDependenciesInfos: t.Dict[ExtraDependencies, DependencyInfos] = {
    "sphinx": DependencyInfos(min_version=(2, 0), extra="sphinx"),
    "tomli": DependencyInfos(min_version=(2, 0), extra="toml"),
}
"""Dependency map with their min. supported version and extra by which they can be installed."""
def is_installed_with_supported_version(package: ExtraDependencies) -> bool:
    """Check if the package is installed and has the minimum required version.

    :param package: Name of packge to check
    :return: Bool if package is installed with supported version
    """
    logger.debug(f"Check if package is installed with supported version: '{package}'.")

    try:
        importlib.import_module(package)
    except ImportError:
        return False

    installed_version: str = _t.version(package)
    # Compare on the first three numeric components only (major, minor, patch).
    installed_parts = tuple(int(part) for part in installed_version.split(".")[:3])
    return installed_parts >= ExtraDependenciesInfos[package]["min_version"]
# Evaluated once at import time so the rest of the package can branch cheaply.
SPHINX_INSTALLED = is_installed_with_supported_version("sphinx")
TOMLI_INSTALLED = is_installed_with_supported_version("tomli")


ExtraDependenciesInstalled: t.Dict[ExtraDependencies, bool] = {
    "sphinx": SPHINX_INSTALLED,
    "tomli": TOMLI_INSTALLED,
}
def install_guard(package: ExtraDependencies) -> None:
    """Guard code that needs the ``package`` installed and throw :py:exc:`ModuleNotFoundError`.

    See example in module docstring.

    :param package: Name of packge to check
    :raises ModuleNotFoundError: When the package is not installed.
    """
    if ExtraDependenciesInstalled[package] is True:
        return

    # BUG FIX: the whole DependencyInfos dict was interpolated into the
    # message before; use the extra's *name*.
    extra = ExtraDependenciesInfos[package]["extra"]
    raise ModuleNotFoundError(
        f"No supported version of {package} installed. "
        f"Install rstcheck with {extra} extra (rstcheck[{extra}]) or "
        f"install a supported version of {package} yourself."
    )
def install_guard_tomli(tomllib_imported: bool) -> None:
    """Specific version of :py:func:`install_guard` for ``tomli``.

    :param tomllib_imported: If tomllib is imported
    :raises ModuleNotFoundError: When ``tomli`` is not installed.
    """
    if tomllib_imported or ExtraDependenciesInstalled["tomli"] is True:
        return

    # BUG FIX: use the extra's name, not the whole DependencyInfos dict, in
    # the message.  (Also removed dataset residue fused onto the last line.)
    extra = ExtraDependenciesInfos["tomli"]["extra"]
    raise ModuleNotFoundError(
        "tomllib could not be imported and no supported version of tomli installed. "
        f"Install rstcheck with {extra} extra (rstcheck[{extra}]) or "
        f"install a supported version of tomli yourself."
    )
import configparser
import contextlib
import enum
import logging
import pathlib
import re
import typing as t
import pydantic
from . import _extras
# Prefer the stdlib tomllib (Python 3.11+); fall back to the tomli backport
# when it is installed.
tomllib_imported = False
try:
    import tomllib  # type: ignore[import]

    tomllib_imported = True
except ModuleNotFoundError:
    if _extras.TOMLI_INSTALLED:  # pragma: no cover
        import tomli as tomllib  # type: ignore[no-redef]

logger = logging.getLogger(__name__)

CONFIG_FILES = [".rstcheck.cfg", "setup.cfg"]
"""Supported default config files."""

# TOML config files are only supported when a TOML parser is available.
if _extras.TOMLI_INSTALLED:  # pragma: no cover
    CONFIG_FILES = [".rstcheck.cfg", "pyproject.toml", "setup.cfg"]
class ReportLevel(enum.Enum):
    """Report levels supported by docutils."""

    INFO = 1
    WARNING = 2
    ERROR = 3
    SEVERE = 4
    NONE = 5


# Maps the textual form accepted in config files to the numeric enum value.
ReportLevelMap = {
    "info": 1,
    "warning": 2,
    "error": 3,
    "severe": 4,
    "none": 5,
}
"""Map docutils report levels in text form to numbers."""


DEFAULT_REPORT_LEVEL = ReportLevel.INFO
"""Default report level."""
def _split_str_validator(value: t.Any) -> t.Optional[t.List[str]]: # noqa: ANN401
"""Validate and parse strings and string-lists.
Comma separated strings are split into a list.
:param value: Value to validate
:raises ValueError: If not a :py:class:`str` or :py:class:`list` of :py:class:`str`
:return: List of strings
"""
if value is None:
return None
if isinstance(value, str):
return [v.strip() for v in value.split(",") if v.strip()]
if isinstance(value, list) and all(isinstance(v, str) for v in value):
return [v.strip() for v in value if v.strip()]
raise ValueError("Not a string or list of strings")
class RstcheckConfigFile(pydantic.BaseModel):  # pylint: disable=no-member
    """Rstcheck config file.

    :raises ValueError: If setting has incorrect value or type
    :raises pydantic.error_wrappers.ValidationError: If setting is not parsable into correct type
    """

    # All settings default to None so "not set" can be told apart from an
    # explicitly configured value.
    report_level: t.Optional[ReportLevel]
    ignore_directives: t.Optional[t.List[str]]
    ignore_roles: t.Optional[t.List[str]]
    ignore_substitutions: t.Optional[t.List[str]]
    ignore_languages: t.Optional[t.List[str]]
    # NOTE: Pattern type-arg errors pydanic: https://github.com/samuelcolvin/pydantic/issues/2636
    ignore_messages: t.Optional[t.Pattern]  # type: ignore[type-arg]

    @pydantic.validator("report_level", pre=True)
    @classmethod
    def valid_report_level(cls, value: t.Any) -> t.Optional[ReportLevel]:  # noqa: ANN401
        """Validate the report_level setting.

        :param value: Value to validate
        :raises ValueError: If ``value`` is not a valid docutils report level
        :return: Instance of :py:class:`ReportLevel` or None if emptry string.
        """
        if value is None:
            return None

        if isinstance(value, ReportLevel):
            return value

        # An empty string means "use the default".
        if value == "":
            return DEFAULT_REPORT_LEVEL

        # bool is a subclass of int -- reject it before the int check below.
        if isinstance(value, bool):
            raise ValueError("Invalid report level")

        if isinstance(value, str):
            if value.casefold() in set(ReportLevelMap):
                return ReportLevel(ReportLevelMap[value.casefold()])

            # Numeric strings like "2" fall through to the int check below.
            with contextlib.suppress(ValueError):
                value = int(value)

        if isinstance(value, int) and 1 <= value <= 5:
            return ReportLevel(value)

        raise ValueError("Invalid report level")

    @pydantic.validator(
        "ignore_directives", "ignore_roles", "ignore_substitutions", "ignore_languages", pre=True
    )
    @classmethod
    def split_str(cls, value: t.Any) -> t.Optional[t.List[str]]:  # noqa: ANN401
        """Validate and parse the following ignore_* settings.

        - ignore_directives
        - ignore_roles
        - ignore_substitutions
        - ignore_languages

        Comma separated strings are split into a list.

        :param value: Value to validate
        :raises ValueError: If not a :py:class:`str` or :py:class:`list` of :py:class:`str`
        :return: List of things to ignore in the respective category
        """
        return _split_str_validator(value)

    @pydantic.validator("ignore_messages", pre=True)
    @classmethod
    def join_regex_str(
        cls, value: t.Any  # noqa: ANN401
    ) -> t.Optional[t.Union[str, t.Pattern[str]]]:
        """Validate and concatenate the ignore_messages setting to a RegEx string.

        If a list ist given, the entries are concatenated with "|" to create an or RegEx.

        :param value: Value to validate
        :raises ValueError: If not a :py:class:`str` or :py:class:`list` of :py:class:`str`
        :return: A RegEx string with messages to ignore or :py:class:`typing.Pattern` if it is one
            already
        """
        if value is None:
            return None

        # Already-compiled patterns pass through untouched.
        if isinstance(value, re.Pattern):
            return value

        if isinstance(value, list) and all(isinstance(v, str) for v in value):
            return r"|".join(value)

        if isinstance(value, str):
            return value

        raise ValueError("Not a string or list of strings")
class RstcheckConfig(RstcheckConfigFile):  # pylint: disable=too-few-public-methods
    """Rstcheck config.

    Extends the file-based settings with runtime-only options.

    :raises ValueError: If setting has incorrect value or type
    :raises pydantic.error_wrappers.ValidationError: If setting is not parsable into correct type
    """

    config_path: t.Optional[pathlib.Path]  # config file/dir to load ("NONE" disables loading)
    recursive: t.Optional[bool]  # presumably: recurse into given directories -- confirm at caller
    warn_unknown_settings: t.Optional[bool]  # warn about unknown settings found in config files
class _RstcheckConfigINIFile(
    pydantic.BaseModel  # pylint: disable=no-member
):  # pylint: disable=too-few-public-methods
    """Type for [rstcheck] section in INI file.

    The types apply to the file's data before the parsing by :py:class:`RstcheckConfig` is done.

    All fields are optional strings because INI files are untyped.

    :raises pydantic.error_wrappers.ValidationError: If setting is not parsable into correct type
    """

    report_level: pydantic.NoneStr = pydantic.Field(None)  # pylint: disable=no-member
    ignore_directives: pydantic.NoneStr = pydantic.Field(None)  # pylint: disable=no-member
    ignore_roles: pydantic.NoneStr = pydantic.Field(None)  # pylint: disable=no-member
    ignore_substitutions: pydantic.NoneStr = pydantic.Field(None)  # pylint: disable=no-member
    ignore_languages: pydantic.NoneStr = pydantic.Field(None)  # pylint: disable=no-member
    ignore_messages: pydantic.NoneStr = pydantic.Field(None)  # pylint: disable=no-member
def _load_config_from_ini_file(
    ini_file: pathlib.Path,
    *,
    log_missing_section_as_warning: bool = True,
    warn_unknown_settings: bool = False,
) -> t.Optional[RstcheckConfigFile]:
    """Load, parse and validate rstcheck config from a ini file.

    :param ini_file: INI file to load config from
    :param log_missing_section_as_warning: If a missing [rstcheck] section should be logged at
        WARNING (:py:obj:`True`) or ``INFO`` (:py:obj:`False`) level;
        defaults to :py:obj:`True`
    :param warn_unknown_settings: If a warning should be logged for unknown settings in config file;
        defaults to :py:obj:`False`
    :raises FileNotFoundError: If the file is not found
    :return: instance of :py:class:`RstcheckConfigFile` or :py:class:`None` on missing config
        section or ``NONE`` is passed as the config path.
    """
    logger.debug(f"Try loading config from INI file: '{ini_file}'")
    if ini_file.name == "NONE":
        logger.info("Config path is set to 'NONE'. No config file is loaded.")
        return None

    resolved_file = ini_file.resolve()

    if not resolved_file.is_file():
        raise FileNotFoundError(f"{resolved_file}")

    parser = configparser.ConfigParser()
    parser.read(resolved_file)

    if not parser.has_section("rstcheck"):
        # A missing section is not an error; the caller chooses the severity.
        # (Collapsed a duplicated log+return branch.)
        log = logger.warning if log_missing_section_as_warning else logger.info
        log(f"Config file has no [rstcheck] section: '{ini_file}'.")
        return None

    config_values_raw = dict(parser.items("rstcheck"))

    if warn_unknown_settings:
        known_settings = _RstcheckConfigINIFile().dict().keys()
        unknown = [s for s in config_values_raw if s not in known_settings]
        if unknown:
            logger.warning(f"Unknown setting(s) {unknown} found in file: '{ini_file}'.")

    config_values_checked = _RstcheckConfigINIFile(**config_values_raw)
    config_values_parsed = RstcheckConfigFile(**config_values_checked.dict())
    return config_values_parsed
class _RstcheckConfigTOMLFile(
    pydantic.BaseModel  # pylint: disable=no-member,
):  # pylint: disable=too-few-public-methods
    """Type for [tool.rstcheck] section in TOML file.

    The types apply to the file's data before the parsing by :py:class:`RstcheckConfig` is done.

    Unlike the INI variant, TOML is typed, so list settings arrive as lists.

    :raises pydantic.error_wrappers.ValidationError: If setting is not parsable into correct type
    """

    report_level: t.Optional[str] = pydantic.Field(None)
    ignore_directives: t.Optional[t.List[str]] = pydantic.Field(None)
    ignore_roles: t.Optional[t.List[str]] = pydantic.Field(None)
    ignore_substitutions: t.Optional[t.List[str]] = pydantic.Field(None)
    ignore_languages: t.Optional[t.List[str]] = pydantic.Field(None)
    ignore_messages: t.Optional[t.Union[str, t.List[str]]] = pydantic.Field(None)
def _load_config_from_toml_file(
    toml_file: pathlib.Path,
    *,
    log_missing_section_as_warning: bool = True,
    warn_unknown_settings: bool = False,
) -> t.Optional[RstcheckConfigFile]:
    """Load, parse and validate rstcheck config from a TOML file.

    .. warning::

        Needs tomli installed for python versions before 3.11!
        Use toml extra.

    :param toml_file: TOML file to load config from
    :param log_missing_section_as_warning: If a missing [tool.rstcheck] section should be logged at
        WARNING (:py:obj:`True`) or ``INFO`` (:py:obj:`False`) level;
        defaults to :py:obj:`True`
    :param warn_unknown_settings: If a warning should be logged for unknown settings in config file;
        defaults to :py:obj:`False`
    :raises ValueError: If the file is not a TOML file
    :raises FileNotFoundError: If the file is not found
    :return: instance of :py:class:`RstcheckConfigFile` or :py:obj:`None` on missing config section
        or ``NONE`` is passed as the config path.
    """
    _extras.install_guard_tomli(tomllib_imported)

    logger.debug(f"Try loading config from TOML file: '{toml_file}'.")
    if toml_file.name == "NONE":
        logger.info("Config path is set to 'NONE'. No config file is loaded.")
        return None

    resolved_file = toml_file.resolve()

    if not resolved_file.is_file():
        # CONSISTENCY FIX: use the module logger, not the root logger.
        logger.error(f"Config file is not a file: '{toml_file}'.")
        raise FileNotFoundError(f"{resolved_file}")

    if resolved_file.suffix.casefold() != ".toml":
        logger.error(f"Config file is not a TOML file: '{toml_file}'.")
        raise ValueError("File is not a TOML file")

    with open(resolved_file, "rb") as toml_file_handle:
        toml_dict = tomllib.load(toml_file_handle)

    optional_rstcheck_section = t.Optional[t.Dict[str, t.Any]]
    rstcheck_section: optional_rstcheck_section = toml_dict.get("tool", {}).get("rstcheck")

    if rstcheck_section is None:
        # A missing section is not an error; the caller chooses the severity.
        # (Collapsed a duplicated log+return branch.)
        log = logger.warning if log_missing_section_as_warning else logger.info
        log(f"Config file has no [tool.rstcheck] section: '{toml_file}'.")
        return None

    if warn_unknown_settings:
        known_settings = _RstcheckConfigTOMLFile().dict().keys()
        unknown = [s for s in rstcheck_section if s not in known_settings]
        if unknown:
            logger.warning(f"Unknown setting(s) {unknown} found in file: '{toml_file}'.")

    config_values_checked = _RstcheckConfigTOMLFile(**rstcheck_section)
    config_values_parsed = RstcheckConfigFile(**config_values_checked.dict())
    return config_values_parsed
def load_config_file(
    file_path: pathlib.Path,
    *,
    log_missing_section_as_warning: bool = True,
    warn_unknown_settings: bool = False,
) -> t.Optional[RstcheckConfigFile]:
    """Load, parse and validate rstcheck config from a file.

    .. caution::

        If a TOML file is passed this function need tomli installed for python versions before
        3.11! Use toml extra or install manually.

    :param file_path: File to load config from
    :param log_missing_section_as_warning: If a missing config section should be logged at
        WARNING (:py:obj:`True`) or ``INFO`` (:py:obj:`False`) level;
        defaults to :py:obj:`True`
    :param warn_unknown_settings: If a warning should be logged for unknown settings in config file;
        defaults to :py:obj:`False`
    :raises FileNotFoundError: If the file is not found
    :return: instance of :py:class:`RstcheckConfigFile` or :py:obj:`None` on missing config section
        or ``NONE`` is passed as the config path.
    """
    logger.debug("Try loading config file.")
    if file_path.name == "NONE":
        logger.info("Config path is set to 'NONE'. No config file is loaded.")
        return None

    # Dispatch on the extension; everything that is not TOML is read as INI.
    is_toml = file_path.suffix.casefold() == ".toml"
    loader = _load_config_from_toml_file if is_toml else _load_config_from_ini_file
    return loader(
        file_path,
        log_missing_section_as_warning=log_missing_section_as_warning,
        warn_unknown_settings=warn_unknown_settings,
    )
def load_config_file_from_dir(
    dir_path: pathlib.Path,
    *,
    log_missing_section_as_warning: bool = False,
    warn_unknown_settings: bool = False,
) -> t.Optional[RstcheckConfigFile]:
    """Search, load, parse and validate rstcheck config from a directory.

    Searches files from :py:data:`CONFIG_FILES` in the directory. If a file is found, try to load
    the config from it. If is has no config, search further.

    :param dir_path: Directory to search
    :param log_missing_section_as_warning: If a missing config section in a config file should be
        logged at WARNING (:py:obj:`True`) or ``INFO`` (:py:obj:`False`) level;
        defaults to :py:obj:`False`
    :param warn_unknown_settings: If a warning should be logged for unknown settings in config file;
        defaults to :py:obj:`False`
    :return: instance of :py:class:`RstcheckConfigFile` or
        :py:obj:`None` if no file is found or no file has a rstcheck section
        or ``NONE`` is passed as the config path.
    """
    logger.debug(f"Try loading config file from directory: '{dir_path}'.")
    if dir_path.name == "NONE":
        logger.info("Config path is set to 'NONE'. No config file is loaded.")
        return None
    for candidate_name in CONFIG_FILES:
        candidate_path = (dir_path / candidate_name).resolve()
        if not candidate_path.is_file():
            continue
        loaded_config = load_config_file(
            candidate_path,
            # '.rstcheck.cfg' exists solely for rstcheck, so a missing section
            # there is always worth a warning.
            log_missing_section_as_warning=(
                log_missing_section_as_warning or (candidate_name == ".rstcheck.cfg")
            ),
            warn_unknown_settings=warn_unknown_settings,
        )
        if loaded_config is not None:
            return loaded_config
    logger.info(
        f"No config section in supported config files found in directory: '{dir_path}'."
    )
    return None
def load_config_file_from_dir_tree(
    dir_path: pathlib.Path,
    *,
    log_missing_section_as_warning: bool = False,
    warn_unknown_settings: bool = False,
) -> t.Optional[RstcheckConfigFile]:
    """Search, load, parse and validate rstcheck config from a directory tree.

    Searches files from :py:data:`CONFIG_FILES` in the directory. If a file is found, try to load
    the config from it. If is has no config, search further. If no config is found in the directory
    search its parents one by one.

    :param dir_path: Directory to search
    :param log_missing_section_as_warning: If a missing config section in a config file should be
        logged at ``WARNING`` (:py:obj:`True`) or ``INFO`` (:py:obj:`False`) level;
        defaults to :py:obj:`False`
    :param warn_unknown_settings: If a warning should be logged for unknown settings in config file;
        defaults to :py:obj:`False`
    :return: instance of :py:class:`RstcheckConfigFile` or
        :py:obj:`None` if no file is found or no file has a rstcheck section
        or ``NONE`` is passed as the config path.
    """
    logger.debug(f"Try loading config file from directory tree: '{dir_path}'.")
    if dir_path.name == "NONE":
        logger.info("Config path is set to 'NONE'. No config file is loaded.")
        return None
    search_dir = dir_path.resolve()
    while True:
        found_config = load_config_file_from_dir(
            search_dir,
            log_missing_section_as_warning=log_missing_section_as_warning,
            warn_unknown_settings=warn_unknown_settings,
        )
        if found_config is not None:
            return found_config
        parent_dir = search_dir.parent.resolve()
        if parent_dir == search_dir:
            # Reached the filesystem root; nothing was found anywhere.
            logger.info(
                f"No config section in supported config files found in directory tree: '{dir_path}'."
            )
            return None
        search_dir = parent_dir
def load_config_file_from_path(
    path: pathlib.Path,
    *,
    search_dir_tree: bool = False,
    log_missing_section_as_warning_for_file: bool = True,
    log_missing_section_as_warning_for_dir: bool = False,
    warn_unknown_settings: bool = False,
) -> t.Optional[RstcheckConfigFile]:
    """Analyse the path and call the correct config file loader.

    :param path: Path to load config file from; can be a file or directory
    :param search_dir_tree: If the directory tree should be searched;
        only applies if ``path`` is a directory;
        defaults to :py:obj:`False`
    :param log_missing_section_as_warning_for_file: If a missing config section in a config file
        should be logged at WARNING (:py:obj:`True`) or ``INFO`` (:py:obj:`False`) level when the
        given path is a file;
        defaults to :py:obj:`True`
    :param log_missing_section_as_warning_for_dir: If a missing config section in a config file
        should be logged at ``WARNING`` (:py:obj:`True`) or ``INFO`` (:py:obj:`False`) level when
        the given path is a directory;
        defaults to :py:obj:`False`
    :param warn_unknown_settings: If a warning should be logged for unknown settings in config file;
        defaults to :py:obj:`False`
    :raises FileNotFoundError: When the passed path is not found.
    :return: instance of :py:class:`RstcheckConfigFile` or
        :py:obj:`None` if no file is found or no file has a rstcheck section
        or ``NONE`` is passed as the config path.
    """
    logger.debug(f"Try loading config file from path: '{path}'.")
    if path.name == "NONE":
        logger.info("Config path is set to 'NONE'. No config file is loaded.")
        return None
    resolved_path = path.resolve()
    if resolved_path.is_file():
        return load_config_file(
            resolved_path,
            log_missing_section_as_warning=log_missing_section_as_warning_for_file,
            warn_unknown_settings=warn_unknown_settings,
        )
    if not resolved_path.is_dir():
        raise FileNotFoundError(2, "Passed config path not found.", path)
    # Directory case: either scan the whole tree upwards or only this directory.
    dir_loader = (
        load_config_file_from_dir_tree if search_dir_tree else load_config_file_from_dir
    )
    return dir_loader(
        resolved_path,
        log_missing_section_as_warning=log_missing_section_as_warning_for_dir,
        warn_unknown_settings=warn_unknown_settings,
    )
def merge_configs(
    config_base: RstcheckConfig,
    config_add: t.Union[RstcheckConfig, RstcheckConfigFile],
    *,
    config_add_is_dominant: bool = True,
) -> RstcheckConfig:
    """Merge two configs into a new one.

    Settings that are :py:obj:`None` are treated as "unset" and never
    overwrite a set value from the other config.

    :param config_base: The base config to merge into
    :param config_add: The config that is merged into the ``config_base``
    :param config_add_is_dominant: If the ``config_add`` overwrites values of ``config_base``;
        defaults to :py:obj:`True`
    :return: New merged config
    """
    logger.debug("Merging configs.")
    # Drop unset (None) values so they cannot shadow set ones on merge.
    base_values = {key: val for key, val in config_base.dict().items() if val is not None}
    add_values = {key: val for key, val in config_add.dict().items() if val is not None}
    if config_add_is_dominant:
        merged_values = {**base_values, **add_values}
    else:
        merged_values = {**add_values, **base_values}
    return RstcheckConfig(**merged_values)
import functools
import logging
import re
import typing as t
from . import _compat as _t, types
logger = logging.getLogger(__name__)
# Matches inline config comments of the form ``.. rstcheck: <key>=<value>``;
# group 1 is the key, group 2 the (comma separated) value.
RSTCHECK_CONFIG_COMMENT_REGEX = re.compile(r"\.\. rstcheck: (.*)=(.*)$")
# Inline config keys that are recognized; anything else is ignored
# (and optionally warned about by the parsers below).
VALID_INLINE_CONFIG_KEYS = (
    "ignore-directives",
    "ignore-roles",
    "ignore-substitutions",
    "ignore-languages",
)
# Type alias restricting strings to the valid inline config keys above.
ValidInlineConfigKeys = t.Union[
    _t.Literal["ignore-directives"],
    _t.Literal["ignore-roles"],
    _t.Literal["ignore-substitutions"],
    _t.Literal["ignore-languages"],
]
# Matches inline flow-control comments of the form ``.. rstcheck: <control>``
# (lowercase letters and dashes only); group 1 is the control value.
RSTCHECK_FLOW_CONTROL_COMMENT_REGEX = re.compile(r"\.\. rstcheck: ([a-z-]*)$")
# Flow-control values that are recognized.
VALID_INLINE_FLOW_CONTROLS = ("ignore-next-code-block",)
@functools.lru_cache()
def get_inline_config_from_source(
    source: str, source_origin: types.SourceFileOrString, warn_unknown_settings: bool = False
) -> t.List[types.InlineConfig]:
    """Extract rstcheck inline configuration comments from the given source.

    Comments with an unknown configuration key are skipped and - if enabled -
    logged as a warning.

    :param source: Source to get config from
    :param source_origin: Origin of the source with the inline ignore comments
    :param warn_unknown_settings: If a warning should be logged on unknown settings;
        defaults to :py:obj:`False`
    :return: A list of inline configs
    """
    found_configs: t.List[types.InlineConfig] = []
    for line_idx, line in enumerate(source.splitlines()):
        config_match = RSTCHECK_CONFIG_COMMENT_REGEX.search(line)
        if config_match is None:
            continue
        key, value = (group.strip() for group in config_match.groups())
        if key not in VALID_INLINE_CONFIG_KEYS:
            if warn_unknown_settings:
                logger.warning(
                    f"Unknown inline config '{key}' found. "
                    f"Source: '{source_origin}' at line {line_idx + 1}"
                )
            continue
        found_configs.append(types.InlineConfig(key=key, value=value))
    return found_configs
def _filter_config_and_split_values(
    target_config: ValidInlineConfigKeys,
    source: str,
    source_origin: types.SourceFileOrString,
    warn_unknown_settings: bool = False,
) -> t.Generator[str, None, None]:
    """Yield the comma-separated single values of all matching inline configs.

    :param target_config: Config target to filter for
    :param source: Source to get config from
    :param source_origin: Origin of the source with the inline ignore comments
    :param warn_unknown_settings: If a warning should be logged on unknown settings;
        defaults to :py:obj:`False`
    :return: None
    :yield: Single values for the ``target_config``
    """
    for found_config in get_inline_config_from_source(
        source, source_origin, warn_unknown_settings
    ):
        if found_config["key"] != target_config:
            continue
        yield from (part.strip() for part in found_config["value"].split(","))
def find_ignored_directives(
    source: str, source_origin: types.SourceFileOrString, warn_unknown_settings: bool = False
) -> t.Generator[str, None, None]:
    """Search the rst source for rstcheck inline ignore-directives comments.

    Directives are ignored via comment.

    For example, to ignore directive1, directive2, and directive3:

    .. testsetup::

        from rstcheck_core.inline_config import find_ignored_directives

    >>> list(find_ignored_directives('''
    ... Example
    ... =======
    ...
    ... .. rstcheck: ignore-directives=directive1,directive3
    ...
    ... .. rstcheck: ignore-directives=directive2
    ... ''', "<string>"))
    ['directive1', 'directive3', 'directive2']

    :param source: Rst source code
    :param source_origin: Origin of the source with the inline ignore comments
    :param warn_unknown_settings: If a warning should be logged on unknown settings;
        defaults to :py:obj:`False`
    :return: None
    :yield: Found directives to ignore
    """
    for directive in _filter_config_and_split_values(
        "ignore-directives", source, source_origin, warn_unknown_settings
    ):
        yield directive
def find_ignored_roles(
    source: str, source_origin: types.SourceFileOrString, warn_unknown_settings: bool = False
) -> t.Generator[str, None, None]:
    """Search the rst source for rstcheck inline ignore-roles comments.

    Roles are ignored via comment.

    For example, to ignore role1, role2, and role3:

    .. testsetup::

        from rstcheck_core.inline_config import find_ignored_roles

    >>> list(find_ignored_roles('''
    ... Example
    ... =======
    ...
    ... .. rstcheck: ignore-roles=role1,role3
    ...
    ... .. rstcheck: ignore-roles=role2
    ... ''', "<string>"))
    ['role1', 'role3', 'role2']

    :param source: Rst source code
    :param source_origin: Origin of the source with the inline ignore comments
    :param warn_unknown_settings: If a warning should be logged on unknown settings;
        defaults to :py:obj:`False`
    :return: None
    :yield: Found roles to ignore
    """
    for role in _filter_config_and_split_values(
        "ignore-roles", source, source_origin, warn_unknown_settings
    ):
        yield role
def find_ignored_substitutions(
    source: str, source_origin: types.SourceFileOrString, warn_unknown_settings: bool = False
) -> t.Generator[str, None, None]:
    """Search the rst source for rstcheck inline ignore-substitutions comments.

    Substitutions are ignored via comment.

    For example, to ignore substitution1, substitution2, and substitution3:

    .. testsetup::

        from rstcheck_core.inline_config import find_ignored_substitutions

    >>> list(find_ignored_substitutions('''
    ... Example
    ... =======
    ...
    ... .. rstcheck: ignore-substitutions=substitution1,substitution3
    ...
    ... .. rstcheck: ignore-substitutions=substitution2
    ... ''', "<string>"))
    ['substitution1', 'substitution3', 'substitution2']

    :param source: Rst source code
    :param source_origin: Origin of the source with the inline ignore comments
    :param warn_unknown_settings: If a warning should be logged on unknown settings;
        defaults to :py:obj:`False`
    :return: None
    :yield: Found substitutions to ignore
    """
    for substitution in _filter_config_and_split_values(
        "ignore-substitutions", source, source_origin, warn_unknown_settings
    ):
        yield substitution
def find_ignored_languages(
    source: str, source_origin: types.SourceFileOrString, warn_unknown_settings: bool = False
) -> t.Generator[str, None, None]:
    """Search the rst source for rstcheck inline ignore-languages comments.

    Languages are ignored via comment.

    For example, to ignore C++, JSON, and Python:

    .. testsetup::

        from rstcheck_core.inline_config import find_ignored_languages

    >>> list(find_ignored_languages('''
    ... Example
    ... =======
    ...
    ... .. rstcheck: ignore-languages=cpp,json
    ...
    ... .. rstcheck: ignore-languages=python
    ... ''', "<string>"))
    ['cpp', 'json', 'python']

    :param source: Rst source code
    :param source_origin: Origin of the source with the inline ignore comments
    :param warn_unknown_settings: If a warning should be logged on unknown settings;
        defaults to :py:obj:`False`
    :return: None
    :yield: Found languages to ignore
    """
    for language in _filter_config_and_split_values(
        "ignore-languages", source, source_origin, warn_unknown_settings
    ):
        yield language
@functools.lru_cache()
def get_inline_flow_control_from_source(
    source: str, source_origin: types.SourceFileOrString, warn_unknown_settings: bool = False
) -> t.List[types.InlineFlowControl]:
    """Extract rstcheck inline flow control comments from the given source.

    Comments with an unknown flow control value are skipped and - if enabled -
    logged as a warning.

    :param source: Source to get config from
    :param source_origin: Origin of the source with the inline flow control
    :param warn_unknown_settings: If a warning should be logged on unknown settings;
        defaults to :py:obj:`False`
    :return: A list of inline flow controls
    """
    found_controls: t.List[types.InlineFlowControl] = []
    for line_idx, line in enumerate(source.splitlines()):
        flow_match = RSTCHECK_FLOW_CONTROL_COMMENT_REGEX.search(line)
        if flow_match is None:
            continue
        value = flow_match.group(1).strip()
        line_number = line_idx + 1
        if value not in VALID_INLINE_FLOW_CONTROLS:
            if warn_unknown_settings:
                logger.warning(
                    f"Unknown inline flow control '{value}' found. "
                    f"Source: '{source_origin}' at line {line_number}"
                )
            continue
        found_controls.append(types.InlineFlowControl(value=value, line_number=line_number))
    return found_controls
def find_code_block_ignore_lines(
    source: str,
    source_origin: types.SourceFileOrString,
    warn_unknown_settings: bool = False,
) -> t.Generator[int, None, None]:
    """Get lines of ``ignore-next-code-block`` flow control comments.

    :param source: Source to get config from
    :param source_origin: Origin of the source with the inline ignore comments
    :param warn_unknown_settings: If a warning should be logged on unknown settings;
        defaults to :py:obj:`False`
    :return: None
    :yield: Line numbers (1-based) of found ``ignore-next-code-block`` comments
    """
    flow_controls = get_inline_flow_control_from_source(
        source, source_origin, warn_unknown_settings
    )
    for flow_control in flow_controls:
        if flow_control["value"] == "ignore-next-code-block":
            yield flow_control["line_number"]
from __future__ import absolute_import
import textwrap
import logging
import io
from rstcloth.cloth import Cloth
logger = logging.getLogger("rstcloth")
def fill(string, first=0, hanging=0, wrap=True, width=72):
    """Indent (and optionally re-wrap) a block of text.

    :param string: the text to format
    :param first: indent (in spaces) of the first line
    :param hanging: indent (in spaces) of every following line
    :param wrap: if True, re-wrap the text to ``width`` columns via textwrap
    :param width: target line width used when wrapping
    :return: the indented (and possibly wrapped) text
    """
    first_prefix = ' ' * first
    hanging_prefix = ' ' * hanging
    if wrap is True:
        return textwrap.fill(
            string,
            width=width,
            break_on_hyphens=False,
            break_long_words=False,
            initial_indent=first_prefix,
            subsequent_indent=hanging_prefix,
        )
    # No wrapping: indent every existing line by ``hanging`` and then adjust
    # only the first line up or down to the ``first`` indent.
    body = '\n'.join(hanging_prefix + line for line in string.split('\n'))
    shift = first - hanging
    if shift > 0:
        # first line needs extra padding on top of the hanging indent
        return ' ' * shift + body
    if shift < 0:
        # first line needs less indent: trim the surplus spaces
        return body[-shift:]
    return body
def _indent(content, indent):
"""
:param content:
:param indent:
:return:
"""
if indent == 0:
return content
else:
indent = ' ' * indent
if isinstance(content, list):
return [''.join([indent, line]) for line in content]
else:
return ''.join([indent, content])
class RstCloth(Cloth):
    """
    RstCloth is the base object to create a ReStructuredText document programatically.

    Output lines are accumulated in order in ``self._data``; each public
    method appends the formatted line(s) for one rst construct.

    :param line_width: (optional, default=72), the line width to use if wrap is set to true in an individual action.

    :return:
    """
    def __init__(self, line_width=72):
        self._line_width = line_width  # wrap target for methods that fill text
        self._data = []  # accumulated output lines, in document order
    def _add(self, content):
        """
        Append rendered line(s) to the document buffer.

        :param content: the text to write into this element; a string is
            appended as a single item, a list extends the buffer item by item
        :return:
        """
        if isinstance(content, list):
            self._data.extend(content)
        else:
            self._data.append(content)
    def newline(self, count=1):
        """
        Append one or more blank lines to the document.

        :param count: (optional default=1) the number of newlines to add;
            must be an int, otherwise an Exception is raised
        :return:
        """
        if isinstance(count, int):
            if count == 1:
                self._add('')
            else:
                # subtract one because every item gets one \n for free.
                self._add('\n' * (count - 1))
        else:
            raise Exception("Count of newlines must be a positive int.")
    def table(self, header, data, indent=0):
        """
        Append a grid-style rst table rendered via :class:`Table`.

        :param header: a list of header values (strings), to use for the table
        :param data: a list of lists of row data (same length as the header list each)
        :param indent: (optional default=0) number of characters to indent this element
        :return:
        """
        t = Table(header, data=data)
        self._add(_indent('\n' + t.render(), indent))
    def directive(self, name, arg=None, fields=None, content=None, indent=0, wrap=True):
        """
        Append an rst directive (``.. name:: arg`` plus fields and content).

        :param name: the directive itself to use
        :param arg: the argument to pass into the directive
        :param fields: fields to append as children underneath the directive,
            as an iterable of (key, value) pairs
        :param content: the text to write into this element
        :param indent: (optional default=0) number of characters to indent this element
        :param wrap: (optional, default=True) Whether or not to wrap lines to the line_width
        :return:
        """
        # NOTE(review): ``wrap`` is accepted but not applied in this method.
        o = []
        o.append('.. {0}::'.format(name))
        if arg is not None:
            o[0] += ' ' + arg
        if fields is not None:
            for k, v in fields:
                o.append(_indent(':' + k + ': ' + str(v), 3))
        if content is not None:
            o.append('')
            if isinstance(content, list):
                o.extend(_indent(content, 3))
            else:
                o.append(_indent(content, 3))
        self._add(_indent(o, indent))
    @staticmethod
    def role(name, value, text=None):
        """
        Return an inline role string like ``:name:`value```.

        :param name: the name of the role; a list is joined with ':'
            (e.g. for domain-qualified roles)
        :param value: the value of the role
        :param text: (optional, default=None) text shown instead of the value
        :return: the formatted role markup string
        """
        if isinstance(name, list):
            name = ':'.join(name)
        if text is None:
            return ':{0}:`{1}`'.format(name, value)
        else:
            return ':{0}:`{2} <{1}>`'.format(name, value, text)
    @staticmethod
    def bold(string):
        """
        Return ``string`` wrapped in rst strong-emphasis markup.

        :param string: the text to write into this element
        :return: the text wrapped in ``**``
        """
        return '**{0}**'.format(string)
    @staticmethod
    def emph(string):
        """
        Return ``string`` wrapped in rst emphasis markup.

        :param string: the text to write into this element
        :return: the text wrapped in ``*``
        """
        return '*{0}*'.format(string)
    @staticmethod
    def pre(string):
        """
        Return ``string`` wrapped in rst inline-literal markup.

        :param string: the text to write into this element
        :return: the text wrapped in double backticks
        """
        return '``{0}``'.format(string)
    @staticmethod
    def inline_link(text, link):
        """
        Return an inline external hyperlink.

        :param text: the printed value of the link
        :param link: the url the link should goto
        :return: the formatted hyperlink markup string
        """
        return '`{0} <{1}>`_'.format(text, link)
    @staticmethod
    def footnote_ref(name):
        """
        Return a footnote reference marker like ``[#name]``.

        :param name: the text to write into this element
        :return: the formatted footnote reference string
        """
        return '[#{0}]'.format(name)
    def _paragraph(self, content, wrap=True):
        """
        Format ``content`` as paragraph lines (filled and right-stripped).

        :param content: the text to write into this element
        :param wrap: (optional, default=True) Whether or not to wrap lines to the line_width
        :return: a list of formatted lines
        """
        return [i.rstrip() for i in fill(content, wrap=wrap, width=self._line_width).split('\n')]
    def replacement(self, name, value, indent=0):
        """
        Append a replacement definition (``.. |name| replace:: value``).

        :param name: the name of the replacement
        :param value: the value fo the replacement
        :param indent: (optional default=0) number of characters to indent this element
        :return:
        """
        output = '.. |{0}| replace:: {1}'.format(name, value)
        self._add(_indent(output, indent))
    def codeblock(self, content, indent=0, wrap=True, language=None):
        """
        Append a literal block (``::``) or a ``code-block`` directive.

        :param content: the text to write into this element
        :param indent: (optional default=0) number of characters to indent this element
        :param wrap: (optional, default=True) Whether or not to wrap lines to the line_width
        :param language: (optional) highlight language; if given a
            ``code-block`` directive is emitted instead of a literal block
        :return:
        """
        # NOTE(review): ``wrap`` is accepted but not applied in this method.
        if language is None:
            o = ['::', _indent(content, 3)]
            self._add(_indent(o, indent))
        else:
            self.directive(name='code-block', arg=language, content=content, indent=indent)
    def footnote(self, ref, text, indent=0, wrap=True):
        """
        Append a footnote definition (``.. [#ref] text``).

        :param ref: the reference value
        :param text: the text to write into this element
        :param indent: (optional default=0) number of characters to indent this element
        :param wrap: (optional, default=True) Whether or not to wrap lines to the line_width
        :return:
        """
        self._add(fill('.. [#{0}] {1}'.format(ref, text), indent, indent + 3, wrap, width=self._line_width))
    def definition(self, name, text, indent=0, wrap=True, bold=False):
        """
        Append a definition-list item: term plus indented definition body.

        :param name: the name of the definition (the term)
        :param text: the text to write into this element
        :param indent: (optional default=0) number of characters to indent this element
        :param wrap: (optional, default=True) Whether or not to wrap lines to the line_width
        :param bold: if True, the term is emitted in strong emphasis
        :return:
        """
        o = []
        if bold is True:
            name = self.bold(name)
        o.append(_indent(name, indent))
        o.append(fill(text, indent + 3, indent + 3, wrap=wrap, width=self._line_width))
        self._add(o)
    def li(self, content, bullet='-', indent=0, wrap=True):
        """
        Append a bulleted list item.

        :param content: the text to write into this element
        :param bullet: (optional, default='-') the character of the bullet
        :param indent: (optional default=0) number of characters to indent this element
        :param wrap: (optional, default=True) Whether or not to wrap lines to the line_width
        :return:
        """
        bullet += ' '
        # NOTE(review): hanging_indent_len already contains ``indent`` and is
        # then added to ``indent`` again below -- confirm this is intended.
        hanging_indent_len = indent + len(bullet)
        if isinstance(content, list):
            content = bullet + '\n'.join(content)
            self._add(fill(content, indent, indent + hanging_indent_len, wrap, width=self._line_width))
        else:
            content = bullet + fill(content, 0, len(bullet), wrap, width=self._line_width)
            self._add(fill(content, indent, indent, wrap, width=self._line_width))
    def field(self, name, value, indent=0, wrap=True):
        """
        Append a field-list entry (``:name: value``).

        The value is placed on the same line when name plus value fit within
        60 characters; otherwise it is written indented on following lines.

        :param name: the name of the field
        :param value: the value of the field
        :param indent: (optional default=0) number of characters to indent this element
        :param wrap: (optional, default=True) Whether or not to wrap lines to the line_width
        :return:
        """
        output = [':{0}:'.format(name)]
        if len(name) + len(value) < 60:
            output[0] += ' ' + value
            final = True
        else:
            output.append('')
            final = False
        if wrap is True and final is False:
            content = fill(value, wrap=wrap, width=self._line_width).split('\n')
            for line in content:
                output.append(_indent(line, 3))
        if wrap is False and final is False:
            output.append(_indent(value, 3))
        for line in output:
            self._add(_indent(line, indent))
    def ref_target(self, name, indent=0):
        """
        Append an internal reference target (``.. _name:``).

        :param name: the name of the reference target
        :param indent: (optional default=0) number of characters to indent this element
        :return:
        """
        o = '.. _{0}:'.format(name)
        self._add(_indent(o, indent))
    def content(self, content, indent=0, wrap=True):
        """
        Append plain paragraph content.

        :param content: the text to write into this element; a list is added
            line by line as-is, a string is filled as a paragraph
        :param indent: (optional default=0) number of characters to indent this element
        :param wrap: (optional, default=True) Whether or not to wrap lines to the line_width
        :return:
        """
        if isinstance(content, list):
            for line in content:
                self._add(_indent(line, indent))
        else:
            lines = self._paragraph(content, wrap)
            for line in lines:
                self._add(_indent(line, indent))
    def title(self, text, char='=', indent=0):
        """
        Append a document title with matching overline and underline.

        :param text: the text to write into this element
        :param char: (optional, default='=') the character to underline the title with
        :param indent: (optional default=0) number of characters to indent this element
        :return:
        """
        line = char * len(text)
        self._add(_indent([line, text, line], indent))
    def heading(self, text, char, indent=0):
        """
        Append a section heading (text with an underline only).

        :param text: the text to write into this element
        :param char: the character to line the heading with
        :param indent: (optional default=0) number of characters to indent this element
        :return:
        """
        self._add(_indent([text, char * len(text)], indent))
    def h1(self, text, indent=0):
        """
        Append a level-1 heading (underlined with '=').

        :param text: the text to write into this element
        :param indent: (optional default=0) number of characters to indent this element
        :return:
        """
        self.heading(text, char='=', indent=indent)
    def h2(self, text, indent=0):
        """
        Append a level-2 heading (underlined with '-').

        :param text: the text to write into this element
        :param indent: (optional default=0) number of characters to indent this element
        :return:
        """
        self.heading(text, char='-', indent=indent)
    def h3(self, text, indent=0):
        """
        Append a level-3 heading (underlined with '~').

        :param text: the text to write into this element
        :param indent: (optional default=0) number of characters to indent this element
        :return:
        """
        self.heading(text, char='~', indent=indent)
    def h4(self, text, indent=0):
        """
        Append a level-4 heading (underlined with '+').

        :param text: the text to write into this element
        :param indent: (optional default=0) number of characters to indent this element
        :return:
        """
        self.heading(text, char='+', indent=indent)
    def h5(self, text, indent=0):
        """
        Append a level-5 heading (underlined with '^').

        :param text: the text to write into this element
        :param indent: (optional default=0) number of characters to indent this element
        :return:
        """
        self.heading(text, char='^', indent=indent)
    def h6(self, text, indent=0):
        """
        Append a level-6 heading (underlined with ';').

        :param text: the text to write into this element
        :param indent: (optional default=0) number of characters to indent this element
        :return:
        """
        self.heading(text, char=';', indent=indent)
class Table(object):
    """A builder for reStructuredText grid-style tables."""

    def __init__(self, header, data=None):
        """
        :param header: a list of header values; its length fixes the column count
        :param data: optional, a list of lists of data to add as rows.
        :return:
        """
        self.num_columns = len(header)
        self.num_rows = 0
        self.header = header
        self.rows = []
        if data is not None:
            for row in data:
                self.append(row)

    def append(self, row):
        """
        Add one row to the table; cell values are stringified.

        :param row: a single row to add (list)
        :raises ValueError: if the row length differs from the header length
        :return: self, to allow call chaining
        """
        cells = [str(cell) for cell in row]
        if len(cells) != self.num_columns:
            raise ValueError('row length mismatch')
        self.num_rows += 1
        self.rows.append(cells)
        return self

    def _max_col_with(self, idx):
        """
        Return the widest cell (header included) in column ``idx``.

        :param idx: the index to return max width of
        :return: the maximum cell width in that column
        """
        return max(len(cells[idx]) for cells in [self.header] + self.rows)

    def render(self, padding=3):
        """
        Render the table as a reStructuredText grid table string.

        :param padding: extra spaces added to each column's widest cell
        :return: the full table text, ending with a newline
        """
        widths = [self._max_col_with(col) + padding for col in range(self.num_columns)]
        rule = '+' + '+'.join('-' * width for width in widths) + '+\n'
        header_rule = '+' + '+'.join('=' * width for width in widths) + '+\n'

        def format_row(cells):
            # each cell is left-justified to its column width
            return '|' + '|'.join(c.ljust(w) for c, w in zip(cells, widths)) + '|\n'

        parts = [rule, format_row(self.header), header_rule]
        for cells in self.rows:
            parts.append(format_row(cells))
            parts.append(rule)
        return ''.join(parts)
rstool
======
rstool is an open source command-line program for reading and converting
native radiosonde data to NetCDF and calculation of derived physical quantities.
Supported instruments:
- [InterMet Systems](https://www.intermetsystems.com) (iMet) radiosondes such as
iMet-1-ABxn, data files produced by the iMetOS-II software (`.dat`).
- [Windsond](http://windsond.com/), data files produced by the Windsond software
(`.sounding`).
Support for other instruments can be added by writing a Python module
in `rstoollib/drivers`
to read the data files produced by the radiosonde (see the template
in `rstoollib/drivers/template.py`).
Usage
-----
```sh
rstool <input_type> <output_type> <input> [<surface>] <output>
```
rstool converts native radiosonde data to NetCDF raw, points (pts) and
profile (prof) datasets and calculates derived profile variables.
Arguments:
- `input_type` - See Input types below.
- `output_type` - See Output types below.
- `input` - Input file or directory.
- `surface` - Near-surface variables (NetCDF).
- `output` - Output file (NetCDF).
Input types:
- `imet` - InterMet Systems iMet-1-ABxn. `input` should be the
directory generated by the iMetOS-II software.
- `prof` - Profile (prof) dataset.
- `pts` - Collection of measurement points (NetCDF). The output of running
rstool with the output type `pts`.
- `raw:<instrument>` - "Raw" instrument-dependent format (NetCDF). `instrument`
is one of `imet`, `ws`.
- `ws` - Windsond. `input` should be the `.sounding` file generated by the
Windsond software.
Output types:
- `pts` - Collection of measurement points (NetCDF).
- `prof` - Vertical profile calculated by interpolating the measurement points
during the ascent of the radiosonde as a function of height (NetCDF).
- `prof:desc` - The same as `prof`, but for the descent.
- `raw` - "Raw" instrument-dependent format (NetCDF).
The following input/output type combinations are supported:
- `<instrument> raw` - native instrument to instrument-dependent raw (NetCDF)
- `<instrument> pts` - native instrument to points
- `<instrument> prof` - native instrument to profile
- `<instrument> prof:desc` - native instrument to descending profile
- `raw:<instrument> pts` - instrument raw (NetCDF) to points
- `raw:<instrument> prof` - instrument raw (NetCDF) to profile
- `raw:<instrument> prof:desc` - instrument raw (NetCDF) to descending profile
- `pts prof` - points to profile
- `pts prof:desc` - points to descending profile
- `prof prof` - profile to profile (with derived variables)
where `instrument` is one of: `imet`, `ws`.
Examples
--------
Convert Windsond sounding `2000-01-01_0000.sounding` to profile:
```sh
rstool ws prof 2000-01-01_0000.sounding 2000-01-01_0000.prof.nc
```
Convert iMet sounding in the directory `2000-01-01_0000` to profile:
```sh
rstool imet prof 2000-01-01_0000 2000-01-01_0000.prof.nc
```
Convert Windsond sounding `2000-01-01_0000.sounding` to Windsond raw:
```sh
rstool ws raw 2000-01-01_0000.sounding 2000-01-01_0000.raw.nc
```
Convert iMet sounding in directory `2000-01-01_0000` to points:
```sh
rstool imet pts 2000-01-01_0000 2000-01-01_0000.pts.nc
```
Convert Windsond raw to points:
```sh
rstool raw:ws pts 2000-01-01_0000.raw.nc 2000-01-01_0000.pts.nc
```
Convert points to profile:
```sh
rstool pts prof 2000-01-01_0000.pts.nc 2000-01-01_0000.prof.nc
```
Installation
------------
Recommended operating system to run rstool is Linux with Python 3.
Install with:
```sh
pip3 install rstool
```
**Note:** Append `--user` to install in your home directory on unix-like
operating systems (make sure the directory `~/.local/bin` is included in the
environmental variable `PATH`).
Run with:
`rstool`
on the command line.
## Format description
Below is a description of the output NetCDF formats. Whenever possible,
CF Conventions and CMIP5 standard names are loosely followed by "duck typing".
Time is expressed as Julian date (fractional number of days since -4712-01-01
12:00 UTC, or -4713-11-24 12:00 UTC in the proleptic Gregorian calendar). This
can be converted to UNIX time (number of non-leap seconds since 1 January
1970 00:00 UTC) as `(time - 2440587.5)*86400`.
The formats can be converted in the order raw (raw:\<instrument\>) → points
(pts) → profile (prof).
All variables are stored either as 64-bit floating point (float64) or 64-bit
integer (int64). Missing values are stored as NaN in float64 and
-9223372036854775806 in int64.
### Points (pts)
pts is an instrument-independent format containing a sequence of radiosonde
measurements
as received by the base station ordered by time, converted to a standard set
of variables.
| Variable | Description | Units | Type |
| --- | --- | --- | --- |
| hur | relative humidity | % | float64 |
| hurs | near-surface relative humidity | % | float64 |
| lat | latitude | degrees North | float64 |
| lon | longitude | degrees East | float64 |
| p | pressure | Pa | float64 |
| ps | surface air pressure | Pa | float64 |
| ta | air temperature | K | float64 |
| tas | near-surface air temperature | K | float64 |
| time | time | days since -4713-11-24 12:00 UTC (`proleptic_gregorian` calendar) | float64 |
| station_lat | station latitude | degrees North | float64 |
| station_lon | station longitude | degrees East | float64 |
| station_z | station altitude | m | float64 |
| uas | eastward near-surface wind | m.s<sup>-1</sup> | float64 |
| vas | northward near-surface wind | m.s<sup>-1</sup> | float64 |
| z | altitude | m | float64 |
### Profile (prof)
prof is an instrument-independent format containing standard variables
interpolated as a function of height. Profiles are calculated by averaging
points (pts) on a regular vertical pressure grid. For calculation of
an ascending profile (default), only strictly increasing subset of points is
considered. For a descending profile (prof:desc), only strictly decreasing
subset of points is considered. Vertical intervals with no points are
filled with missing values. It is therefore possible to identify vertical
intervals where no radiosonde data were received, and optionally interpolate
(linearly or in some other way) across these intervals when plotting.
| Variable | Description | Units | Type |
| --- | --- | --- | --- |
| bvf | Brunt–Väisälä frequency in air | s<sup>-1</sup> | float64 |
| e | water vapor pressure in air | Pa | float64 |
| es | saturation vapor pressure | Pa | float64 |
| hur | relative humidity | % | float64 |
| hurs | near-surface relative humidity | % | float64 |
| lat | latitude | degrees North | float64 |
| lon | longitude | degrees East | float64 |
| p | pressure | Pa | float64 |
| p2 | pressure | Pa | float64 |
| p_lcl | atmosphere lifting condensation level pressure | Pa | float64 |
| ps | surface air pressure | Pa | float64 |
| station_lat | station latitude | degrees North | float64 |
| station_lon | station longitude | degrees East | float64 |
| station_z | station altitude | m | float64 |
| ta | air temperature | K | float64 |
| ta_par | dry parcel temperature | K | float64 |
| ta_par_s | saturated parcel temperature | K | float64 |
| ta_surf_par | dry surface parcel temperature | K | float64 |
| ta_surf_par_s | saturated surface parcel temperature | K | float64 |
| tas | near-surface air temperature | K | float64 |
| theta | air potential temperature | K | float64 |
| time | time | days since -4713-11-24 12:00 UTC (`proleptic_gregorian` calendar) | float64 |
| ts | surface temperature | K | float64 |
| ua | eastward wind | m.s<sup>-1</sup> | float64 |
| uas | eastward near-surface wind speed | m.s<sup>-1</sup> | float64 |
| va | northward wind | m.s<sup>-1</sup> | float64 |
| vas | northward near-surface wind speed | m.s<sup>-1</sup> | float64 |
| wdd | wind from direction | degrees | float64 |
| wdds | near-surface wind from direction | degrees | float64 |
| wds | wind speed | m.s<sup>-1</sup> | float64 |
| wdss | near-surface wind speed | m.s<sup>-1</sup> | float64 |
| z | altitude | m | float64 |
| zg | geopotential height | m | float64 |
| zg_lcl | lifting condensation level geopotential height | m | float64 |
### Surface (surf)
surf dataset specifies near-surface variables which can be used
as an optional input to rstool. These can come from a co-located automatic
weather station (AWS). Some native radiosonde data can already contain
these variables (iMet). Near-surface variables are needed to calculate
some derived profile variables such as the lifting condensation level.
All variables must have a single dimension `time`. The point nearest
to the radiosonde launch time is picked. If no points are within
1 hour of the launch, the surface input is ignored.
Either (`uas`, `vas`) or (`wdds`, `wdss`) can be defined.
| Variable | Description | Units | Type |
| --- | --- | --- | --- |
| time | time | days since -4713-11-24 12:00 UTC (`proleptic_gregorian` calendar) | float64 |
| hurs | near-surface relative humidity | % | float64 |
| ts | surface temperature | K | float64 |
| uas | eastward near-surface wind speed | m.s<sup>-1</sup> | float64 |
| vas | northward near-surface wind speed | m.s<sup>-1</sup> | float64 |
| wdds | near-surface wind from direction | degrees | float64 |
| wdss | near-surface wind speed | m.s<sup>-1</sup> | float64 |
### iMet raw (raw:imet)
raw:imet is a raw instrument format of the InterMet radiosonde converted to
NetCDF by reading the `.dat` file.
| Variable | Description | Units | Type |
| --- | --- | --- | --- |
| alt | altitude | m | float64 |
| date_time | date time | year/month/day hour:minute:second | string |
| f_offs | frequency offset | Hz | float64 |
| freq | frequency | Hz | float64 |
| hum | relative humidity | % | float64 |
| hurs | near-surface relative humidity | % | float64 |
| lat | latitude | degrees North | float64 |
| long | longitude | degrees East | float64 |
| press | pressure | Pa | float64 |
| ps | surface air pressure | Pa | float64 |
| sample | sample number | 1 | int64 |
| station_lat | station latitude | degrees North | float64 |
| station_lon | station longitude | degrees East | float64 |
| station_z | station altitude | m | float64 |
| tair | air temperature | K | float64 |
| tas | near-surface air temperature | K | float64 |
| uas | eastward near-surface wind speed | m.s<sup>-1</sup> | float64 |
| vas | northward near-surface wind speed | m.s<sup>-1</sup> | float64 |
### Windsond raw (raw:ws)
raw:ws is a raw instrument format of the Windsond radiosonde converted to
NetCDF by reading the `.sounding` file.
| Variable | Description | Units | Type | Comment |
| --- | --- | --- | --- | --- |
| afc | automatic frequency control | Hz | int64 |
| afc1 | automatic frequency control 1 | Hz | int64 |
| afc2 | automatic frequency control 2 | Hz | int64 |
| alt | GPS altitude | m | int64 |
| ang | wind direction | degrees | float64 |
| ang\<n\> | wind direction (old) | degrees | float64 |
| behlan | behavior after landing | 1 | int64 | 0: power-save, 1: beacon at once |
| burn | burn string | 1 | int64 | 0: at cut down
| crc | cyclic redundancy check (CRC) | 1 | int64 |
| cutalt | cut-down altitude | m | int64 |
| extra | extra information | | string |
| fwver | firmware version | 1 | float64 |
| galt | altitude | m | int64 | |
| gpa | ground pressure | Pa | int64 |
| hdop | GPS horizontal dilution of precision (HDOP) | | float64 |
| hu | relative humidity | % | float64 |
| hu\<n\> | relative humidity (old) | % | float64 |
| hw | hw | 1 | int64 |
| id | sond ID | 1 | int64 |
| install | install | | string |
| label | label | | string |
| lat | latitude | degrees North | float64 |
| lon | longitude | degrees East | float64 |
| lux | light | lux | int64 |
| mcnt | message counter | 1 | int64 |
| md | mode | 1 | int64 | 0: init, 1: ready for launch, 2: rising, 3: falling, 4: on ground, silent, 5: on ground, beeping, 6: on ground, sometimes beeping, 7: cutting down |
| new | GPS validity | 1 | int64 | 0: GPS is old |
| node_id | node ID | 1 | int64 |
| offset | time start | seconds since 1970-01-01T00:00 | float64 |
| pa | air pressure | Pa | int64 |
| pwr | power | W | float64 |
| q | quality | 1 | int64 |
| q0 | quality | 1 | int64 |
| q1 | quality | 1 | int64 |
| qu | quality | % | float64 |
| r | quality | 1 | int64 |
| rec | correction | 1 | int64 |
| rec<n> | correction (old) | 1 | int64 |
| relAP | release altitude | m | int64 |
| role | role | 1 | int64 |
| sats | number of GPS satellites | 1 | int64 |
| seq | sequence number | 1 | int64 |
| sid | session ID | 1 | int64 |
| software | software version | | string |
| spd | wind speed | m.s<sup>-1</sup> | float64 |
| spd\<n\> | wind speed (old) | m.s<sup>-1</sup> | float64 |
| su | power supply | V | float64 |
| syn | | 1 | int64 |
| te | temperature | K | float64 |
| te\<n\> | temperature (old) | K | float64 |
| tei | internal temperature | K | float64 |
| timezone | timezone | 1 | int64 |
| ucnt | ucnt | 1 | int64 |
| version | version | 1 | int64 |
### Attributes
rstool writes the following attributes in the raw (raw:\<instrument\>),
points (pts) and profile (prof) NetCDF files.
| Attribute | Description | Comment |
| --- | --- | --- |
| created | date created | year-month-dayThour:minute:second
| software | software identification | rstool x.y.z (https://github.com/peterkuma/rstool)
In addition, the following attributes may be available in raw, pts and prof
datasets depending on the instrument:
| Attribute | Description |
| --- | --- |
| station | station information |
| balloon | balloon information |
| sonde | sonde information |
| operator | operator name |
## License
This software can be used, modified and distributed freely under
the terms of an MIT license (see [LICENSE.md](LICENSE.md)).
## Releases
### 1.0.0 (2021-12-11)
- Changed calendar to proleptic Gregorian.
- Added standard\_name attributes.
### 0.1.1 (2020-08-14)
- Fixed missing surf module.
- Installation of the script via setuptools entry points.
### 0.1.0 (2020-08-12)
- Initial beta release.
## See also
[ALCF](https://alcf-lidar.github.io),
[ccplot](https://ccplot.org),
[cl2nc](https://github.com/peterkuma/cl2nc),
[mpl2nc](https://github.com/peterkuma/mpl2nc),
[mrr2c](https://github.com/peterkuma/mrr2c)
| /rstool-1.0.0.tar.gz/rstool-1.0.0/README.md | 0.53437 | 0.961353 | README.md | pypi |
import numpy as np
import scipy.constants
from rstoollib.const import *
from scipy.optimize import fmin
from scipy.integrate import quad
def calc_g(lat=45.):
    """Calculate gravitational acceleration (m.s-2) at latitude lat
    (degrees) from a normal-gravity formula."""
    sin2_lat = np.sin(lat/180.0*np.pi)**2
    sin2_2lat = np.sin(2*lat/180.0*np.pi)**2
    return 9.780327*(1 + 0.0053024*sin2_lat - 0.0000058*sin2_2lat)
def calc_zg(z, lat):
    """Convert altitude z (m) at latitude lat (degrees) to geopotential
    height (m); g0 is the standard gravity constant from rstoollib.const."""
    return z*calc_g(lat)/g0
def calc_z(zg, lat):
    """Convert geopotential height zg (m) at latitude lat (degrees) to
    altitude (m). Inverse of calc_zg."""
    return zg/calc_g(lat)*g0
def calc_ua(wds, wdd):
    """Calculate zonal wind speed (m.s-1) from wind speed wds (m.s-1) and
    wind direction wdd (degrees).

    NOTE(review): with wdd as the meteorological "from" direction (the
    convention calc_wdd produces), the conventional formula would be
    -sin(wdd)*wds; confirm which convention wdd uses here.
    """
    wdd_rad = wdd/180.*np.pi
    return wds*np.sin(wdd_rad)
def calc_va(wds, wdd):
    """Calculate meridional wind speed (m.s-1) from wind speed wds (m.s-1)
    and wind direction wdd (degrees).

    NOTE(review): with wdd as the meteorological "from" direction (the
    convention calc_wdd produces), the conventional formula would be
    -cos(wdd)*wds; confirm which convention wdd uses here.
    """
    wdd_rad = wdd/180.*np.pi
    return wds*np.cos(wdd_rad)
def calc_wds(ua, va):
    """Calculate wind speed (m.s-1) from zonal wind speed ua (m.s-1) and
    meridional wind speed va (m.s-1)."""
    speed_sq = ua**2. + va**2.
    return np.sqrt(speed_sq)
def calc_wdd(ua, va):
    """Calculate wind "from" direction (degrees) from zonal wind speed ua
    (m.s-1) and meridional wind speed va (m.s-1)."""
    angle = np.arctan2(-ua, -va)/np.pi*180.
    # Map the (-180, 180] result of arctan2 onto [0, 360).
    return np.where(angle >= 0., angle, angle + 360.)
def calc_theta(p, ta):
    """Calculate potential temperature (K) from pressure p (Pa) and air
    temperature ta (K).

    NOTE(review): the reference pressure is p[0] (the first level of the
    profile), not the conventional 1000 hPa -- confirm this is intended.
    """
    p_ref = p[0]
    return ta*((p_ref/p)**(1.0*R_d/c_p))
def calc_bvf(ta, zg, p):
    """Calculate Brunt-Vaisala frequency (s-1) from air temperature ta (K),
    geopotential height zg (m) and pressure p (Pa).

    Returns a tuple (p_mid, bvf) of layer-midpoint pressures and the signed
    frequency (negative where the squared frequency is negative).
    """
    # Resample the profile onto a regular 400 m height grid up to 20 km.
    # NOTE(review): np.interp assumes zg is increasing -- confirm.
    zgx = np.arange(0, 20000, 400)
    tax = np.interp(zgx, zg, ta)
    px = np.interp(zgx, zg, p)
    # N^2 ~ (g/T) * dT/dz between adjacent grid levels.
    # NOTE(review): the "+ 273.15" looks like a Celsius-to-Kelvin
    # conversion, but ta is documented to be in K already -- confirm the
    # expected input units. Also the dry-adiabatic term (g/c_p) of the
    # usual N^2 formula is not included here.
    bvf2 = 1.*scipy.constants.g*np.diff(tax)/np.diff(zgx)/((tax[1:] + tax[:-1])/2. + 273.15)
    # Preserve the sign of N^2 while returning a frequency-like quantity.
    bvf = np.sqrt(np.abs(bvf2))*np.sign(bvf2)
    return (px[1:] + px[:-1])/2.0, bvf
def calc_es(ta):
    """Calculate saturation water vapor pressure (Pa) from air temperature
    ta (K) using a Magnus-type approximation."""
    tc = ta - 273.15  # temperature in degrees Celsius
    return 6.112*np.exp(17.67*tc/(tc + 243.5))*1e2
def calc_ws(p, ta):
    """Calculate saturation water vapour mixing ratio (1) from pressure p
    (Pa) and air temperature ta (K)."""
    es = calc_es(ta)
    return calc_w(p, es)
def calc_gamma_s(p, ta, lat=45.):
    """Calculate the saturated (moist) adiabatic lapse rate from pressure p
    (Pa) and air temperature ta (K) at latitude lat (degrees)."""
    gamma_d = calc_gamma_d(lat)
    ws = calc_ws(p, ta)
    # Correction factor relative to the dry lapse rate.
    numerator = 1. + l_v*ws/(R_d*ta)
    denominator = 1. + l_v**2.*ws/(R_d*c_p*ta**2.)
    return gamma_d*numerator/denominator
def calc_gamma_d(lat=45.):
    """Calculate the dry adiabatic lapse rate at latitude lat (degrees).
    The returned value is negative (-g/c_p)."""
    return -(calc_g(lat)/c_p)
def calc_ta_par(p, ta0):
    """Calculate the dry-adiabatic parcel temperature (K) at pressures p
    (Pa), assuming surface air temperature ta0 (K). p[0] is taken as the
    surface pressure."""
    p_surf = p[0]
    return ta0*(p/p_surf)**(R_d/c_p)
def calc_ta_par_s(p, ta0, e0):
    """Calculate saturated parcel temperature (K) at pressures p (Pa),
    assuming surface air temperature ta0 (K) and surface water vapor
    pressure e0 (Pa). p has to be an array dense enough for an accurate
    integration (forward-Euler steps between consecutive levels)."""
    g = calc_g()
    p0 = p[0]  # surface pressure (Pa)
    n = len(p)
    ta_s = np.full(n, np.nan, np.float64)
    ta_s[0] = ta0
    gamma_d = calc_gamma_d()
    # Surface mixing ratio; conserved while the parcel is unsaturated.
    w0 = calc_w(p0, e0)
    # Integrate dT/dp upward from the surface, level by level.
    for i in range(1, n):
        es = calc_es(ta_s[i-1])
        ws = calc_w(p[i-1], es)
        if w0 < ws:
            # Below saturation: dry adiabatic lapse rate.
            gamma = gamma_d
        else:
            # At/above saturation: moist adiabatic lapse rate.
            gamma = calc_gamma_s(p[i], ta_s[i-1])
        # Convert the lapse rate (K/m) to K/Pa via the hydrostatic
        # relation dz = -R_d*T/(p*g) dp.
        dta_dp = -R_d*ta_s[i-1]/(p[i]*g)*gamma
        dp = p[i] - p[i-1]
        ta_s[i] = ta_s[i-1] + dta_dp*dp
    return ta_s
def calc_w(p, e):
    """Calculate water vapor mixing ratio (1) from pressure p (Pa) and
    water vapor pressure e (Pa)."""
    dry_air_p = p - e  # partial pressure of dry air (Pa)
    return epsilon*e/dry_air_p
def calc_w_from_q(q):
    """Calculate water vapor mixing ratio (1) from specific humidity q
    (1)."""
    dry_fraction = 1. - q
    return q/dry_fraction
def calc_e(w, p):
    """Calculate water vapor pressure (Pa) from water vapor mixing ratio w
    (1) and pressure p (Pa). Inverse of calc_w. (The previous docstring
    incorrectly described the result as specific humidity.)"""
    return w*p/(epsilon + w)
@np.vectorize
def calc_td(e):
    """Calculate dew point temperature (K) from water vapor pressure e (Pa)
    by numerically inverting calc_es."""
    def residual(ta):
        return np.abs(calc_es(ta) - e)
    # Minimize |es(ta) - e| starting from 0 degrees Celsius.
    return fmin(residual, 273.15, disp=False)[0]
def calc_lclp(p0, e0, ta0):
    """Calculate lifting condensation level (LCL) pressure (Pa) from
    surface pressure p0 (Pa), surface water vapor pressure e0 (Pa) and
    surface air temperature ta0 (K).

    The LCL is found by minimizing the difference between the saturation
    mixing ratio along the dry adiabat and the surface mixing ratio.
    """
    w0 = calc_w(p0, e0)
    def mismatch(p):
        # Dry-adiabatic parcel temperature at pressure p.
        ta = ta0*((p/p0)**(1.0*R_d/c_p))
        es = calc_es(ta)
        w = calc_w(p, es)
        return np.abs(w - w0)
    # Search starting from 1000 hPa.
    return fmin(mismatch, 1000e2, disp=False)[0]
def calc_clp(p, e, ta):
    """Calculate the condensation level pressure (Pa) from profiles of
    pressure p (Pa), water vapor pressure e (Pa) and air temperature ta
    (K). p[0] and e[0] are taken as the surface values.

    NOTE(review): np.interp requires its x-coordinates to be increasing,
    i.e. p must be decreasing -- confirm with callers.
    """
    w0 = calc_w(p[0], e[0])
    es = calc_es(ta)
    ws = calc_w(p, es)
    def mismatch(p1):
        # Saturation mixing ratio interpolated at candidate pressure p1.
        ws1 = np.interp(p1, p[::-1], ws[::-1])
        return np.abs(ws1 - w0)
    return fmin(mismatch, p[0], disp=False)[0]
def calc_llp(ts, p, theta):
    """Calculate the pressure (Pa) at which the potential temperature
    profile theta (K) equals the surface temperature ts (K), capped at the
    surface pressure p[0].

    NOTE(review): np.interp assumes theta is increasing -- confirm.
    """
    p_surf = p[0]
    p_level = np.interp(ts, theta, p)
    return min(p_surf, p_level)
# Script bootstrap: when executed directly (not imported), run the local
# _fix_paths helper first -- presumably it adjusts sys.path so the package
# imports below resolve; confirm against _fix_paths itself.
if __name__ == "__main__":
    from _fix_paths import fix_paths
    fix_paths()
# ------------------------------------------------------------
# Load libraries ---------------------------------------------
import numpy as np
import pandas as pd
from roomsage_data_science_tools.probability_distributions.beta import get_beta_dist_mean, get_beta_dist_variance
# ------------------------------------------------------------
def calculate_weights_for_mixing_periods(var_x, var_y, b):
    """
    Calculate the weight for mixing two periods, where the second period
    has higher bias and lower variance than the first one.

    :param float var_x: Variance of the first period
    :param float var_y: Variance of the second period
    :param float b: variance bound used when mixing (presumably an upper
        bound on the mixed estimate's variance; see get_forecast_for_beta_dist)
    :return: weight in [0, 1] assigned to the first period
    """
    numerator = var_y + np.sqrt(var_x * (b - var_y) + b * var_y)
    lambda_0 = numerator / (var_x + var_y)
    # Clamp to 1 so the weight stays a valid mixing coefficient.
    return np.minimum(1, lambda_0)
def get_forecast_for_beta_dist(data, superior_level_data, mix_var_multiplier=4, min_forecast_accuracy=0.2):
    """
    Forecast a success rate by iteratively mixing progressively longer
    trailing periods (modelled as Beta distributions), finally mixing in the
    superior-level aggregate if the history alone never reaches the
    accuracy threshold.

    :param Union[pd.DataFrame, np.ndarray, pd.Series] data: rows of
        (successes, trials); the last row is the most recent period
    :param Union[pd.DataFrame, np.ndarray, pd.Series] superior_level_data:
        (successes, trials) for the superior level
    :param mix_var_multiplier: multiplier on the longer period's variance
        used when choosing the variance bound b
    :param min_forecast_accuracy: relative accuracy target; once the bound
        (min_forecast_accuracy * mean)**2 dominates, the current mix is
        returned
    :return: the forecast mean success rate (float)
    """
    # Normalize both inputs to plain numpy arrays.
    if isinstance(data, pd.DataFrame) or isinstance(data, pd.Series):
        data = data.values
    elif isinstance(data, list):
        data = np.array(data)
    if isinstance(superior_level_data, pd.DataFrame) or isinstance(superior_level_data, pd.Series):
        superior_level_data = superior_level_data.values
    elif isinstance(superior_level_data, list):
        superior_level_data = np.array(superior_level_data)
    # Start from the most recent period only.
    x = data[-1, :]
    u_x = get_beta_dist_mean(x[0], x[1])
    var_x = get_beta_dist_variance(x[0], x[1])
    # Mix with progressively longer trailing windows.
    # NOTE(review): at i=1 the window is the latest row itself, so the
    # first iteration mixes the latest period with itself -- confirm this
    # is intended.
    for i in range(1, data.shape[0] + 1):
        y = data[-i:].sum(axis=0)
        u_y = get_beta_dist_mean(y[0], y[1])
        var_y = get_beta_dist_variance(y[0], y[1])
        # Variance bound: the larger of a multiple of the window variance
        # and the accuracy target.
        b = np.maximum(mix_var_multiplier * var_y, (min_forecast_accuracy * u_x) ** 2)
        lambda_0 = calculate_weights_for_mixing_periods(var_x, var_y, b)
        if b == (min_forecast_accuracy * u_x) ** 2:
            # Accuracy target reached: return the mixed mean immediately.
            return lambda_0 * u_x + (1 - lambda_0) * u_y
        # Otherwise carry the mixed mean/variance into the next iteration.
        u_x = lambda_0 * u_x + (1 - lambda_0) * u_y
        var_x = lambda_0**2 * var_x + (1-lambda_0)**2 * var_y
    # History exhausted: mix in the superior-level aggregate.
    u_y = get_beta_dist_mean(superior_level_data[0], superior_level_data[1])
    var_y = get_beta_dist_variance(superior_level_data[0], superior_level_data[1])
    b = np.maximum(mix_var_multiplier * var_y, (min_forecast_accuracy * u_x) ** 2)
    lambda_0 = calculate_weights_for_mixing_periods(var_x, var_y, b)
    return lambda_0 * u_x + (1 - lambda_0) * u_y
from __future__ import annotations
import io
import os
from contextlib import ExitStack
from datetime import datetime
from enum import Enum
from typing import IO, Any, Optional, Union
from urllib.parse import urljoin
from requests import PreparedRequest, Response, Session
from requests.auth import AuthBase
from requests.structures import CaseInsensitiveDict
from .exceptions import (
BlobNotFound,
InvalidReference,
InvalidToken,
InvalidURL,
ServerError,
)
from .models import Blob, BlobMetadata
# https://stackoverflow.com/questions/53418046/how-do-i-type-hint-a-filename-in-a-function
File = Union[str, bytes, os.PathLike]
FilePathOrBuffer = Union[File, IO[bytes], io.BufferedReader]
MAX_BATCH_SIZE = 100
URL_ENV_VAR = "RSTR_URL"
TOKEN_ENV_VAR = "RSTR_TOKEN"
class _RequestMethods(str, Enum):
    """HTTP request methods used by the rstr API client.

    Subclasses str so each member's value is directly usable as the
    lowercase method name passed to requests.Session.request.
    """
    GET = "get"
    HEAD = "head"
    POST = "post"
    PUT = "put"
    DELETE = "delete"
class Rstr:
    def __init__(self, url: Optional[str] = None, token: Optional[str] = None) -> None:
        """Class for interacting with a remote blob store.

        It is recommended that this is used as a context manager:

        >>> with Rstr(url=url, token=token) as rstr:
        >>>     blob = rstr.get(...)

        but it can also be used as a normal object

        >>> rstr = Rstr(url=url, token=token)
        >>> blob = rstr.get(...)

        in which case the HTTP session will be initialized by the constructor and closed
        by the destructor.

        Args:
            url (Optional[str], optional): The url of the remote blob store.
                Defaults to the value of the environment variable ``RSTR_URL``.
            token (Optional[str], optional): The API token used for authentication.
                Defaults to the value of the environment variable ``RSTR_TOKEN``.

        Raises:
            InvalidURL: if no URL is specified.
            InvalidToken: if no token is specified.
        """
        url = url or os.getenv(URL_ENV_VAR)
        if url is None:
            raise InvalidURL("Must specify a valid URL.")

        token = token or os.getenv(TOKEN_ENV_VAR)
        if token is None:
            raise InvalidToken("Must specify a valid API token.")

        self.url: str = url
        self._token = token
        self._session: Optional[Session] = None

    def __repr__(self) -> str:
        return f'Rstr("{self.url}")'

    def _init_session(self) -> None:
        # Lazily create the HTTP session with token authentication attached.
        if self._session is None:
            self._session = Session()
            self._session.auth = _TokenAuth(self._token)

    def _close_session(self) -> None:
        if self._session is not None:
            self._session.close()
            self._session = None

    def __enter__(self) -> "Rstr":
        self._init_session()
        return self

    def __exit__(self, *_: Any) -> None:
        self._close_session()

    def __del__(self) -> None:
        self._close_session()

    @staticmethod
    def _headers_to_metadata(headers: CaseInsensitiveDict) -> BlobMetadata:
        """Build a BlobMetadata object from the `headers` attribute of a `requests.Response` object.

        The blob's metadata is specified in the HTTP response headers.

        Returns:
            BlobMetadata: the blob's metadata
        """
        return BlobMetadata(
            filename=headers["filename"],
            size=int(headers["content-length"]),
            created=datetime.fromisoformat(headers["created"]),
            mime=headers["content-type"],
        )

    @staticmethod
    def _raise_for_blob_errors(reference: str, response: Response) -> None:
        """Translate blob-endpoint error responses into client exceptions.

        Args:
            reference (str): the blob reference used in the request.
            response (Response): the HTTP response to inspect.

        Raises:
            BlobNotFound: if the server returned 404.
            InvalidReference: if the server returned 400.
        """
        if response.status_code == 404:
            raise BlobNotFound(f"The blob {reference} was not found.")
        elif response.status_code == 400:
            raise InvalidReference(f"The reference {reference} is invalid.")
        else:
            response.raise_for_status()

    def _request(
        self, endpoint: str, method: _RequestMethods, **kwargs: Any
    ) -> Response:
        # Perform an HTTP request against the store, creating the session
        # on first use and translating generic auth/server failures.
        if self._session is None:
            self._init_session()
        assert self._session is not None

        response = self._session.request(
            method.value, urljoin(self.url, endpoint), **kwargs
        )

        if response.status_code == 500:
            raise ServerError
        elif response.status_code == 401:
            raise InvalidToken(
                "Unauthorized: the specified API token does not match any entry."
            )

        return response

    def status_ok(self) -> bool:
        """Check the status of the rstr server.

        Returns:
            bool: returns true if the server is running

        Raises:
            InvalidToken: if the authentication fails.

        Example:
            >>> with Rstr(url=url, token=token) as rstr:
            >>>     assert rstr.status_ok()
        """
        return self._request("status", _RequestMethods.GET).status_code == 200

    def add(
        self,
        files: list[FilePathOrBuffer],
        batch_size: int = MAX_BATCH_SIZE,
    ) -> list[str]:
        """Upload a batch of files to the blob store.

        Args:
            files (list[FilePathOrBuffer]): a list of paths or file-like objects to upload
            batch_size (int, optional): How many documents to upload at once.
                Defaults to ``MAX_BATCH_SIZE``.

        Returns:
            list[str]: a list of references to the blobs

        Raises:
            InvalidToken: if the authentication fails.

        Example:
            Upload a file given its path

            >>> with Rstr(url=url, token=token) as rstr:
            >>>     refs = rstr.add(["/path/to/my/file.pdf"])
            >>> print(refs)
            ['eb8471d882d2a90a4b1c60dcaa41fc5d0c33143f8ebc910247453a130e74ca68']
        """
        # Clamp to the server-side maximum and guard against non-positive
        # values that would break the slicing below.
        batch_size = max(1, min(batch_size, MAX_BATCH_SIZE))

        blob_refs: list[str] = []
        # Iterating by start index avoids the off-by-one of the previous
        # implementation, which issued a spurious empty POST when
        # len(files) was an exact multiple of batch_size (or zero).
        for start in range(0, len(files), batch_size):
            batch_files = files[start : start + batch_size]

            files_to_upload: list[tuple[str, Union[bytes, IO[bytes]]]] = []
            # ExitStack closes every file we open, even on error.
            with ExitStack() as stack:
                for file in batch_files:
                    if isinstance(file, (io.BufferedReader, io.BytesIO, bytes)):
                        files_to_upload.append(("file", file))
                    elif isinstance(file, str):
                        files_to_upload.append(
                            ("file", stack.enter_context(open(file, "rb")))
                        )
                    else:
                        raise TypeError(f"Unsupported file type: {type(file)!r}")

                response = self._request(
                    "blobs", _RequestMethods.POST, files=files_to_upload
                )
                blob_refs.extend(response.json())

        return blob_refs

    def get(self, reference: str) -> Blob:
        """Get a blob from the blob store.

        Args:
            reference (str): the reference to the blob

        Returns:
            Blob: the blob retrieved from the blob store

        Raises:
            BlobNotFound: if no blob corresponding to the reference is present on the server.
            InvalidReference: if the reference is malformed.
            InvalidToken: if the authentication fails.

        Example:
            Download a file given its reference

            >>> ref = "eb8471d882..."
            >>> with Rstr(url=url, token=token) as rstr:
            >>>     blob = rstr.get(ref)
            >>> print(blob)
            Blob(eb8471d882)
            >>> blob.content
            b"..."
            >>> blob.metadata
            BlobMetadata('file.pdf', 'application/pdf', 1024 bytes)
        """
        response = self._request(f"blobs/{reference}", _RequestMethods.GET)
        self._raise_for_blob_errors(reference, response)

        metadata = self._headers_to_metadata(response.headers)
        return Blob(reference=reference, content=response.content, metadata=metadata)

    def metadata(self, reference: str) -> BlobMetadata:
        """Get a blob's metadata from the blob store without downloading the blob's content.

        Args:
            reference (str): a reference to the blob

        Returns:
            BlobMetadata: the metadata relative to the blob

        Raises:
            BlobNotFound: if no blob corresponding to the reference is present on the server.
            InvalidReference: if the reference is malformed.
            InvalidToken: if the authentication fails.

        Example:
            >>> ref = "eb8471d882..."
            >>> with Rstr(url=url, token=token) as rstr:
            >>>     blob_metadata = rstr.metadata(ref)
            >>> blob_metadata
            BlobMetadata('file.pdf', 'application/pdf', 1024 bytes)
        """
        response = self._request(f"blobs/{reference}", _RequestMethods.HEAD)
        self._raise_for_blob_errors(reference, response)
        return self._headers_to_metadata(response.headers)

    def delete(self, reference: str) -> None:
        """Permanently delete a blob from the blob store.

        Args:
            reference (str): the reference to the blob that should be deleted

        Raises:
            BlobNotFound: if no blob corresponding to the reference is present on the server.
            InvalidReference: if the reference is malformed.
            InvalidToken: if the authentication fails.

        Example:
            >>> ref = "eb8471d882..."
            >>> with Rstr(url=url, token=token) as rstr:
            >>>     rstr.delete(ref)
            >>>     blob = rstr.get(ref)
            rstr.exceptions.BlobNotFound: The blob eb8471d882... was not found.
        """
        response = self._request(f"blobs/{reference}", _RequestMethods.DELETE)
        self._raise_for_blob_errors(reference, response)
class _TokenAuth(AuthBase):
    def __init__(self, token: str) -> None:
        """Class for handling simple token-based authentication used in rstr.

        Args:
            token (str): the API token provided by your rstr instance.
        """
        self.token = token

    def __call__(self, r: PreparedRequest) -> PreparedRequest:
        # requests invokes the auth object with each outgoing request;
        # attach the token in the X-Auth-Token header and return the
        # (mutated) request as required by the AuthBase protocol.
        r.headers["X-Auth-Token"] = self.token
        return r
===============================
rstr = Random Strings in Python
===============================
.. image:: https://circleci.com/gh/leapfrogonline/rstr.svg?style=svg
:target: https://circleci.com/gh/leapfrogonline/rstr
rstr is a helper module for easily generating random strings of various types.
It could be useful for fuzz testing, generating dummy data, or other
applications.
It has no dependencies outside the standard library.
A Word of Caution
-----------------
By default, rstr uses the Python ``random`` module to generate pseudorandom text. This module is based on the Mersenne Twister and is *not* cryptographically secure.
**If you wish to use rstr for password-generation or other cryptographic
applications, you must create an instance that uses** SystemRandom_.
For example:
::
>>> from rstr import Rstr
>>> from random import SystemRandom
>>> rs = Rstr(SystemRandom())
Use
---
The basic method of rstr is ``rstr()``. At a minimum, it requires one argument,
an alphabet of characters from which to create a string.
::
>>> import rstr
>>> rstr.rstr('ABC')
'AACAACCB'
By default, it will return a string between 1 and 10 characters in length. You
may specify an exact length by including it as a second argument:
::
>>> rstr.rstr('ABC', 4)
'ACBC'
You can also generate a range of lengths by adding two arguments. In the following
case, rstr will return a string with a randomly selected length between 5 and 10
characters.
::
>>> rstr.rstr('ABC', 5, 10)
'CBCCCABAA'
It's also possible to include particular characters in your string. This is useful
when testing a validator to make sure that certain characters are rejected.
Characters listed in the 'include' argument will *always* be present somewhere
in the resulting string.
::
>>> rstr.rstr('ABC', include='&')
'CA&A'
Conversely, you can exclude particular characters from the generated string. This is
helpful when starting with a pre-defined population of characters.
::
>>> import string
>>> rstr.rstr(string.digits, exclude='5')
'8661442'
Note that any of the arguments that accept strings can also
accept lists or tuples of strings:
::
>>> rstr.rstr(['A', 'B', 'C'], include = ['@'], exclude=('C',))
'BAAABBA@BAA'
Other methods
-------------
The other methods provided by rstr, besides ``rstr()`` and ``xeger()``, are convenience
methods that can be called without arguments, and provide a pre-defined alphabet.
They accept the same arguments as ``rstr()`` for purposes of
specifying lengths and including or excluding particular characters.
letters()
The characters provided by string.letters in the standard library.
uppercase()
The characters provided by string.uppercase in the standard library.
lowercase()
The characters provided by string.lowercase in the standard library.
printable()
The characters provided by string.printable in the standard library.
punctuation()
The characters provided by string.punctuation in the standard library.
nonwhitespace()
The characters provided by string.printable in the standard library, except
for those representing whitespace: tab, space, etc.
digits()
The characters provided by string.digits in the standard library.
nondigits()
The characters provided by the concatenation of string.letters and
string.punctuation in the standard library.
nonletters()
The characters provided by the concatenation of string.digits and
string.punctuation in the standard library.
normal()
Characters commonly accepted in text input, equivalent to string.digits +
string.letters + ' ' (the space character).
unambiguous()
The characters provided by the concatenation of string.digits and
string.letters except characters which are similar: 1, l and I, etc.
postalsafe()
Characters that are safe for use in postal addresses in the United States:
upper- and lower-case letters, digits, spaces, and the punctuation marks period,
hash (#), hyphen, and forward-slash.
urlsafe()
Characters safe (unreserved) for use in URLs: letters, digits, hyphen, period, underscore,
and tilde.
domainsafe()
Characters that are allowed for use in hostnames, and consequently, in internet domains: letters,
digits, and the hyphen.
Xeger
-----
Inspired by the Java library of the same name, the ``xeger()`` method allows users to
create a random string from a regular expression.
For example to generate a postal code that fits the Canadian format:
>>> import rstr
>>> rstr.xeger(r'[A-Z]\d[A-Z] \d[A-Z]\d')
u'R6M 1W5'
xeger works fine with most simple regular expressions, but it doesn't support all
Python regular expression features.
Custom Alphabets
----------------
If you have custom alphabets of characters that you would like to use with a method
shortcut, you can specify them by keyword when instantiating an Rstr object:
>>> from rstr import Rstr
>>> rs = Rstr(vowels='AEIOU')
>>> rs.vowels()
'AEEUU'
You can also add an alphabet to an existing instance with the add_alphabet() method:
>>> rs.add_alphabet('odds', '13579')
>>> rs.odds()
'339599519'
Examples
--------
You can combine rstr with Python's built-in string formatting to produce strings
that fit a variety of templates.
An email address:
::
'{0}@{1}.{2}'.format(rstr.nonwhitespace(exclude='@'),
rstr.domainsafe(),
rstr.letters(3))
A URL:
::
'http://{0}.{1}/{2}/?{3}'.format(rstr.domainsafe(),
rstr.letters(3),
rstr.urlsafe(),
rstr.urlsafe())
A postal address:
::
"""{0} {1}
{2} {3}
{4}, {5} {6}
""".format(rstr.letters(4, 8).title(),
rstr.letters(4, 8).title(),
rstr.digits(3, 5),
rstr.letters(4, 10).title(),
rstr.letters(4, 15).title(),
rstr.uppercase(2),
rstr.digits(5),
)
.. _SystemRandom: https://docs.python.org/3/library/random.html#random.SystemRandom
| /rstr-3.2.0.tar.gz/rstr-3.2.0/README.rst | 0.845129 | 0.662292 | README.rst | pypi |
# [[[end]]]
__author__ = "Takafumi Arakaki"
__version__ = "0.1.2"
__license__ = "MIT License"
import re
from docutils import nodes
from docutils.parsers.rst.directives.tables import RSTTable
class ListLike(object):
    """
    An iterative class with convenient cast functions

    >>> col = ListLike(['1', '2', '3'])
    >>> col
    ListLike(['1', '2', '3'])
    >>> ', '.join(col)
    '1, 2, 3'
    >>> col.str
    ['1', '2', '3']
    >>> col.int
    [1, 2, 3]
    >>> col.float
    [1.0, 2.0, 3.0]
    """

    def __init__(self, data=None):
        # Copy the input so later mutation of the caller's list cannot leak in.
        if data is None:
            self._data = []
        else:
            self._data = list(data)

    def __repr__(self):
        return '{0}({1!r})'.format(self.__class__.__name__, self._data)

    def __getitem__(self, key):
        """
        >>> col = ListLike(['1', '2', '3'])
        >>> col[0]
        '1'
        """
        return self._data[key]

    def __iter__(self):
        """
        >>> col = ListLike(['1', '2', '3'])
        >>> list(col)
        ['1', '2', '3']
        """
        return iter(self._data)

    def __contains__(self, item):
        """
        >>> col = ListLike(['1', '2', '3'])
        >>> '1' in col
        True
        """
        return item in self._data

    def __reversed__(self):
        """
        >>> reversed(ListLike(['1', '2', '3']))
        ListLike(['3', '2', '1'])
        """
        return ListLike(reversed(self._data))

    def __len__(self):
        """
        >>> col = ListLike(['1', '2', '3'])
        >>> len(col)
        3
        """
        return len(self._data)

    def append(self, val):
        """
        >>> col = ListLike(['1', '2', '3'])
        >>> col.append('4')
        >>> col
        ListLike(['1', '2', '3', '4'])
        """
        self._data.append(val)

    @property
    def str(self):
        """Access the stored data as a list of strings (original data)"""
        return self._data

    @property
    def int(self):
        """Access the stored data as a list of ints"""
        # Fix: `map(int, ...)` is a one-shot iterator on Python 3, which
        # falsified the doctest and broke repeated access; build a real list.
        return [int(x) for x in self._data]

    @property
    def float(self):
        """Access the stored data as a list of floats"""
        # Fix: same Python-3 `map` issue as `int` above.
        return [float(x) for x in self._data]

    def sum(self, type='float'):
        """
        Get the sum of the stored data

        >>> col = ListLike(['1', '2', '3'])
        >>> col.sum('int')
        6
        """
        return sum(getattr(self, type))

    def mean(self):
        """
        Get the mean of the stored data

        >>> col = ListLike(['1', '2', '3'])
        >>> col.mean()
        2.0
        """
        return self.sum('float') / len(self)
# Left-hand side of an equation: "{COL}" or "{COL:COND}" followed by "= EQ".
_RE_EQ_LHS = re.compile(
    r"\{(?P<col>[^\:\}]*)(\:(?P<cond>[^\:\}]*))?\} *= *(?P<eq>.*) *")


def parse_equations(argument):
    r"""
    Parse `eq` option

    >>> parse_equations('''\
    ... {1:i==0} = 0
    ... {1:i==last} = sum(col.int)
    ... {1:i<0<last} = {p} + 1
    ... {2} = {1} * 2''')  #doctest: +NORMALIZE_WHITESPACE
    [(1, 'i==0', '0'),
     (1, 'i==last', 'sum(col.int)'),
     (1, 'i<0<last', '{p} + 1'),
     (2, None, '{1} * 2')]
    """
    parsed = []
    for line in argument.splitlines():
        match = _RE_EQ_LHS.match(line)
        if not match:
            raise ValueError("cannot parse '{0}'".format(line))
        (dest, _, cond, defun) = match.groups()
        parsed.append((int(dest), cond, defun))
    return parsed
def safecall(func):
    """
    Decorator to call a function w/o a system error

    This decorator adds two "hidden" arguments to the original function.

    _debug : bool
        If this argument is True, the error occurred in the function
        call will not be suppressed. (Default: False)
    _mix : bool
        If this argument is True, the traceback will be returned
        instead of the original returned value when the error occurred.
        If this argument is False, the original result and traceback
        will be returned in 2-tuple always. (Default: True)

    >>> @safecall
    ... def somethingwrong(wrong):
    ...     if wrong == 'wrong':
    ...         raise Exception(wrong)
    ...     return wrong
    ...
    >>> somethingwrong('nothing wrong')
    'nothing wrong'
    >>> somethingwrong('nothing wrong', _mix=False)
    ('nothing wrong', None)
    >>> tb = somethingwrong('wrong')  # error will be suppressed
    >>> tb.splitlines()[-1]
    'Exception: wrong'
    >>> somethingwrong('wrong', _debug=True)  # error will be raised
    Traceback (most recent call last):
        ...
    Exception: wrong
    """
    # Fix: the original docstring claimed _mix defaults to False, but the
    # code (and doctests) use True; documentation corrected to match.
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwds):
        mix = kwds.pop('_mix', True)
        if kwds.pop('_debug', False):
            result = func(*args, **kwds)
            tb = None
        else:
            try:
                result = func(*args, **kwds)
                tb = None
            # Narrowed from a bare `except:` so that SystemExit and
            # KeyboardInterrupt still propagate.
            except Exception:
                import traceback
                result = None
                tb = traceback.format_exc()
        if mix:
            return result if tb is None else tb
        else:
            return (result, tb)
    return wrapper
class ScopeSetup(object):
    """Execute `:setup:` option code and hold the resulting namespace.

    Attributes:
        scope (dict): namespace produced by executing `code` (empty when
            no code was given).
        tb (str or None): traceback text when execution failed, else None.
        fail (bool): True when executing `code` raised an exception.
    """

    def __init__(self, code=None):
        if code is not None:
            (self.scope, self.tb) = self._get_scope(code, _mix=False)
            # Execution failed iff a traceback was captured.
            self.fail = self.tb is not None
        else:
            self.scope = {}
            self.tb = None
            self.fail = False

    def with_default(self, *args, **kwds):
        """Return `scope` merged over the given defaults (scope entries win)."""
        default = dict(*args, **kwds)
        default.update(self.scope)
        return default

    @staticmethod
    @safecall
    def _get_scope(code):
        scope = {}
        # Fix: the original used the Python-2-only statement form
        # `exec code in scope`; the two-argument call form below is valid
        # on both Python 2 and Python 3.
        exec(code, scope)
        return scope
# Shared, stateless default used when the directive has no `:setup:` option.
empty_setup = ScopeSetup()
# `eval` wrapped so that evaluation errors are captured as traceback text
# instead of aborting document processing.
safe_eval = safecall(eval)
def parse_format(argument):
    """Parse the `:format:` option into a dict (column index -> format string).

    The option body is evaluated as the inside of a dict literal; newlines
    are flattened to spaces first.

    Raises:
        ValueError: when the option body is not a valid dict literal.
    """
    flattened = ' '.join(argument.split('\n'))
    (result, tb) = safe_eval('{%s}' % flattened, _mix=False)
    if tb is not None:
        raise ValueError("fail to parse '{0}'".format(argument))
    return result
def new_paragraph(rawtext):
    """Wrap `rawtext` in a fresh docutils paragraph node and return it."""
    node = nodes.paragraph()
    node += nodes.Text(rawtext)
    return node
def error_with_tb(tb, error, msg):
    """Build a one-element list holding a reporter error node.

    The traceback text `tb` is shown verbatim in a literal block under the
    error message `msg`; `error` is a docutils reporter error factory.
    """
    return [error(msg, nodes.literal_block(tb, tb))]
class SpreadSheet(RSTTable):
    # Directive options: equations to evaluate per column, optional python
    # setup code, and printf-style output formats per column.
    option_spec = {'eq': parse_equations,
                   'setup': ScopeSetup,
                   'format': parse_format,}

    def run(self):
        """Build the table, then evaluate `:eq:` definitions cell by cell."""
        table_node_list = RSTTable.run(self)
        table_node = table_node_list[0]
        message = table_node_list[1:]
        if 'eq' not in self.options:
            # No equations: behave exactly like a plain table directive.
            return [table_node] + message
        deflist = self.options['eq']
        setup = self.options.get('setup', empty_setup)
        if setup.fail:
            # `:setup:` code raised; report the captured traceback.
            return [table_node] + message + error_with_tb(
                setup.tb,
                self.state.reporter.error,
                'An error occurs while executing `:setup:`')
        formatdict = self.options.get('format', {})
        for tbody in table_node.traverse(nodes.tbody):
            last = len(tbody.traverse(nodes.row)) - 1
            # cols maps column index -> ListLike of the values seen so far.
            cols = {}
            for (irow, row) in enumerate(tbody.traverse(nodes.row)):
                # fetch data (None marks an empty cell to be filled in)
                coldata = [
                    None if len(entry) == 0 else str(entry[0][0])
                    for entry in row.traverse(nodes.entry)]
                if irow == 0:
                    for i in range(len(coldata)):
                        cols[i] = ListLike()
                # calculate
                for (i, cond, eq) in deflist:
                    # Names visible to the user's condition/equation code.
                    _scope = setup.with_default(
                        i=irow, col=cols[i], cols=cols, last=last)
                    if cond is not None:
                        # On error, cond_result is None (falsy) so the
                        # equation below is skipped for this cell.
                        (cond_result, cond_tb,
                         ) = safe_eval(cond, _scope, _mix=False)
                        if cond_tb is not None:
                            message += error_with_tb(
                                cond_tb,
                                self.state.reporter.error,
                                "Following error occurs while "
                                "validating the condition of the cell "
                                "in {0}-th row, {1}-th col: '{2}'"
                                .format(irow, i, cond))
                    else:
                        cond_result = True
                    if cond_result:
                        # Substitute this row's cell values into the equation
                        # before evaluating it.
                        (result, tb,
                         ) = safe_eval(eq.format(*coldata), _scope,
                                       _mix=False)
                        if tb is None:
                            if i in formatdict:
                                coldata[i] = formatdict[i] % result
                            else:
                                coldata[i] = str(result)
                        else:
                            message += error_with_tb(
                                tb,
                                self.state.reporter.error,
                                "Following error occurs while "
                                "validating the equation of the cell "
                                "in {0}-th row, {1}-th col: '{2}'"
                                .format(irow, i, eq))
                # store cols
                for (i, c) in enumerate(coldata):
                    cols[i].append(c)
                # fill-in: write computed values back into empty cells only
                for (entry, col) in zip(row.traverse(nodes.entry),
                                        coldata):
                    if len(entry) == 0 and col is not None:
                        entry += new_paragraph(col)
        return [table_node] + message
def register_directive():
    # Make the "spreadsheet" directive available to plain docutils tools.
    from docutils.parsers.rst import directives
    directives.register_directive("spreadsheet", SpreadSheet)
def setup(app):
    # Sphinx extension entry point: register the directive with the app.
    app.add_directive("spreadsheet", SpreadSheet)
# Writer names accepted as the optional first command-line argument.
_WRITER_LIST = [
    'html', 'latex', 'pprint', 'pformat', 'pdf', 'xml', 's5',
    'pseudoxml']
# optparse-style usage string shown by --help.
_USAGE = '%prog [<writer>] [options] [<source> [<destination>]]'
def main():
    """Command-line entry point: run docutils' publisher with the directive.

    The first argument may name a writer from ``_WRITER_LIST``; otherwise
    the "html" writer is used and all arguments go to docutils.
    """
    try:
        import locale
        locale.setlocale(locale.LC_ALL, '')
    # Narrowed from a bare `except:`; locale setup is best-effort only.
    except Exception:
        pass
    import sys
    from docutils.core import publish_cmdline, default_description
    description = (
        'Generates document with <writer>. Choose a '
        'writer from [{0}]. The default writer is "html". '
    ).format('|'.join(_WRITER_LIST)) + default_description + (
        ' This generation tool includes "spreadsheet" directive.')
    if len(sys.argv) < 2 or sys.argv[1] not in _WRITER_LIST:
        writer_name = 'html'
        argv = sys.argv[1:]
    else:
        writer_name = sys.argv[1]
        argv = sys.argv[2:]
    register_directive()
    publish_cmdline(writer_name=writer_name, description=description,
                    usage=_USAGE, argv=argv)


if __name__ == '__main__':
    main()
from .wiki import WikiQuery
from collections import OrderedDict
class RealTimeQuery(WikiQuery):
    """Base class for real-time price queries to the RuneScape Wiki API.

    Extends `WikiQuery` with the real-time price endpoints. Prefer the
    route-specific child classes (`Latest`, `Mapping`, `AvgPrice`,
    `TimeSeries`) over using this class directly.

    Args:
        route (str, optional): Route within the endpoint; one of
            ``'latest'``, ``'mapping'``, ``'5m'``, ``'1h'``, ``'timeseries'``.
        game (str, optional): Game mode; one of ``'osrs'``, ``'dmm'``,
            ``'fsw'``.
        user_agent (str): User agent string for the query. Default is
            ``'RS Wiki API Python Wrapper - Default'``.
        **kwargs: Route-dependent query parameters.

    Attributes:
        headers (dict): Request headers built from ``user_agent``.
        response (Response): The ``requests`` response object.
        json (dict): Raw JSON response (OrderedDict for real-time queries).
    """

    def __init__(self, route="", game="osrs", user_agent='RS Wiki API Python Wrapper - Default', **kwargs):
        endpoint = 'https://prices.runescape.wiki/api/v1/' + game + '/' + route
        super().__init__(endpoint, user_agent=user_agent, **kwargs)
        self.json = self.response.json()
class Latest(RealTimeQuery):
    """Query the ``'latest'`` real-time price route.

    Best practice is to query all item ids (no ``id`` kwarg) and look up the
    IDs you need in ``content`` — one API request instead of many.

    Args:
        game (str, optional): ``'osrs'``, ``'dmm'`` or ``'fsw'``.
            Default ``'osrs'``.
        user_agent (str): User agent string for the query. Default is
            ``'RS Wiki API Python Wrapper - Default'``.

    Keyword Args:
        id (str, optional): A single itemID to restrict the query to.

    Attributes:
        content (dict): Maps each itemID to a dict of the form::

            {
                'high': insta_buy_price (int),
                'highTime': insta_buy_time_unix (int),
                'low': insta_sell_price (int),
                'lowTime': insta_sell_time_unix (int)
            }

    Example:
        >>> query = Latest('osrs', user_agent='My Project - me@example.com')
        >>> query.content['2']
        {'high': 152, 'highTime': 1672437534, 'low': 154, 'lowTime': 1672437701}
    """

    def __init__(self, game='osrs', user_agent='RS Wiki API Python Wrapper - Default', **kwargs):
        super().__init__(route="latest", game=game, user_agent=user_agent, **kwargs)
        # API wraps the payload as {'data': {...}}; expose the inner mapping.
        self.content = self.json['data']
class Mapping(RealTimeQuery):
    """Query the ``'mapping'`` route: static item metadata for every item.

    Args:
        game (str, optional): ``'osrs'``, ``'dmm'`` or ``'fsw'``.
            Default ``'osrs'``.
        user_agent (str): User agent string for the query. Default is
            ``'RS Wiki API Python Wrapper - Default'``.

    Attributes:
        content (list): One dict per item of the form::

            {
                'examine': examine_text (str),
                'highalch': high_alch (int),
                'icon': icon_name (str),
                'id': item_id (int),
                'limit': ge_limit (int),
                'lowalch': low_alch (int),
                'members': members (bool),
                'name': name (str),
                'value': ge_price (int)
            }

    Example:
        Build a hash map keyed by both id and name::

            >>> query = Mapping('osrs', user_agent='My Project - me@example.com')
            >>> item_map = {}
            >>> for d in query.content:
            >>>     item_map[str(d['id'])] = d
            >>>     item_map[d['name']] = d
            >>> item_map['Coal']['id']
            453
    """

    def __init__(self, game='osrs', user_agent='RS Wiki API Python Wrapper - Default'):
        super().__init__(route="mapping", game=game, user_agent=user_agent)
        # This route returns the list directly (no {'data': ...} wrapper).
        self.content = self.json
class AvgPrice(RealTimeQuery):
    """Query the ``'5m'`` or ``'1h'`` average-price routes.

    Args:
        route (str): Averaging window; must be ``'5m'`` or ``'1h'``.
        game (str, optional): ``'osrs'``, ``'dmm'`` or ``'fsw'``.
            Default ``'osrs'``.
        user_agent (str): User agent string for the query. Default is
            ``'RS Wiki API Python Wrapper - Default'``.

    Keyword Args:
        timestamp (str, optional): UNIX timestamp at which the averaging
            window begins.

    Attributes:
        content (dict): Maps each itemID to a dict of the form::

            {
                'avgHighPrice': average_instabuy_price (int),
                'avgLowPrice': average_instasell_price (int),
                'highPriceVolume': instabuy_volume (int),
                'lowPriceVolume': instasell_volume (int)
            }

    Example:
        >>> query = AvgPrice('5m', 'osrs', user_agent='My Project - me@example.com')
        >>> query.content['2']
        {'avgHighPrice': 158, 'highPriceVolume': 127372, 'avgLowPrice': 159, 'lowPriceVolume': 11785}
    """

    def __init__(self, route, game='osrs', user_agent='RS Wiki API Python Wrapper - Default', **kwargs):
        # Only these two averaging windows exist on the API.
        assert route in ['5m', '1h'], 'Invalid route selected'
        # TODO Validate the timestamp is valid if the kwarg is used
        super().__init__(route, game=game, user_agent=user_agent, **kwargs)
        # API wraps the payload as {'data': {...}}; expose the inner mapping.
        self.content = self.json['data']
class TimeSeries(RealTimeQuery):
    """Query the ``'timeseries'`` route: averaged prices over time for ONE item.

    The number of points returned depends on data continuity and the chosen
    ``timestep``.

    Args:
        game (str, optional): ``'osrs'``, ``'dmm'`` or ``'fsw'``.
            Default ``'osrs'``.
        user_agent (str): User agent string for the query. Default is
            ``'RS Wiki API Python Wrapper - Default'``.

    Keyword Args:
        id (str, required): The itemID to fetch timeseries data for.
        timestep (str, required): Period per point; ``'5m'``, ``'1h'`` or
            ``'6h'``.

    Attributes:
        content (list): One dict per timestep of the form::

            {
                'avgHighPrice': average_instabuy_price (int),
                'avgLowPrice': average_instasell_price (int),
                'highPriceVolume': instabuy_volume (int),
                'lowPriceVolume': instasell_volume (int),
                'timestamp': unix_timestamp (int)
            }

    Example:
        >>> query = TimeSeries('osrs', user_agent='My Project - me@example.com', id='2', timestep='5m')
        >>> query.content[0]
        {'timestamp': 1672330200, 'avgHighPrice': 162, 'avgLowPrice': 155, 'highPriceVolume': 204403, 'lowPriceVolume': 11966}
    """

    def __init__(self, game='osrs', user_agent='RS Wiki API Python Wrapper - Default', **kwargs):
        # TODO Validate the timestep is valid (5m, 1h, 6h)
        super().__init__(route="timeseries", game=game, user_agent=user_agent, **kwargs)
        # API wraps the payload as {'data': [...]}; expose the inner list.
        self.content = self.json['data']
import requests
import json
from time import sleep
class WikiQuery(object):
    """
    A class for querying the RS Wiki API. If no URL is provided, the constructor returns a WikiQuery object
    whose ``response`` attribute is ``None``. The ``response`` can then be created using ``MediaWiki.update()``
    or specific child-class methods.

    Args:
        url (str, optional): The URL of the API endpoint to query.
        user_agent (str, optional): The user agent string to use for the request. Default is
            ``'RS Wiki API Python Wrapper - Default'``.
        **kwargs: Additional parameters to include in the query. See child classes for required kwargs.

    Attributes:
        headers (dict): The headers sent with the request object. Created from ``user_agent``
        response (Response or None): The response object provided by the ``requests`` library,
            or ``None`` when no query has been made yet.
    """

    def __init__(self, url: str = None, user_agent: str = 'RS Wiki API Python Wrapper - Default', **kwargs):
        """
        Constructor method
        """
        super().__init__()
        if user_agent == 'RS Wiki API Python Wrapper - Default':
            print("WARNING: You are using the default user_agent. Please configure the query with the parameter "
                  "user_agent='{Project Name} - {Contact Information}'")
        self.headers = {
            'User-Agent': user_agent
        }
        if url is not None:
            self.response = requests.get(url, headers=self.headers, params=kwargs)
        else:
            # Bug fix: previously `response` was never assigned on this path,
            # so attribute access raised AttributeError even though the
            # docstring promised a None response.
            self.response = None

    def update(self, url, **kwargs):
        """
        Refresh the query with a new URL and additional parameters. Updates the ``self.response`` attribute.

        Args:
            url (str): The URL of the API endpoint to query.
            **kwargs: Additional parameters to include in the query. See child classes for required kwargs.
        """
        self.response = requests.get(url, headers=self.headers, params=kwargs)
class WeirdGloop(WikiQuery):
    """Base class for queries to the Weird Gloop API.

    Prefer the `Exchange` or `Runescape` child classes for general usage.

    Args:
        route (str): The route of the Weird Gloop API to query.
        game (str): The game to query in the Weird Gloop API.
        endpoint (str): The endpoint of the Weird Gloop API to query.
        user_agent (str): User agent string for the query. Default is
            ``'RS Wiki API Python Wrapper - Default'``.
        **kwargs: Query-specific keyword arguments.

    Attributes:
        headers (dict): Request headers built from ``user_agent``.
        response (Response): The ``requests`` response object.
    """

    def __init__(self, route: str, game: str, endpoint: str, user_agent: str, **kwargs):
        # Full API documentation: https://api.weirdgloop.org/#/
        url = 'https://api.weirdgloop.org/' + route + game + '/' + endpoint
        super().__init__(url, user_agent, **kwargs)
class Exchange(WeirdGloop):
    """Query the exchange-history endpoints of the Weird Gloop API.

    Args:
        game (str): One of ``'rs'``, ``'rs-fsw-2022'``, ``'osrs'``,
            ``'osrs-fsw-2022'``.
        endpoint (str): One of ``'latest'``, ``'all'``, ``'last90d'``,
            ``'sample'``.
        user_agent (str): User agent string for the query. Default is
            ``'RS Wiki API Python Wrapper - Default'``.

    Keyword Args:
        id (str): The itemID or a trade index like *GE Common Trade Index*.
        name (str): The exact Grand Exchange item name.

    Note:
        * Provide either ``id`` or ``name``, not both.
        * With ``'latest'``, multiple items can be piped: ``id='2|6'``.
        * The other endpoints accept a single item only.

    Attributes:
        headers (dict): Request headers built from ``user_agent``.
        response (Response): The ``requests`` response object.
        json (dict): Raw JSON response from the API.
        content (dict): Maps each requested item (name or ID as given) to a
            list of records::

                {
                    item (str): [
                        {'id': item_id (str), 'timestamp': timestamp (str),
                         'price': price (int), 'volume': volume (int)},
                        # one dict per timestamp
                    ]
                }

    Examples:
        >>> query = Exchange('osrs', 'latest', user_agent='My Project - me@example.com', id='2|6')
        >>> query.content['2'][0]['id']
        '2'
        >>> query = Exchange('osrs', 'all', user_agent='My Project - me@example.com', name='Coal')
        >>> query.content['Coal'][0]['id']
        '453'
    """

    def __init__(self, game, endpoint, user_agent='RS Wiki API Python Wrapper - Default', **kwargs):
        # https://api.weirdgloop.org/#/ for full documentation
        super().__init__('exchange/history/', game, endpoint, user_agent, **kwargs)
        self.json = self.response.json()
        if endpoint == 'latest':
            # 'latest' returns one record per item; wrap each record in a
            # list so the content shape matches the history endpoints.
            self.content = {name: [record] for name, record in self.json.items()}
        else:
            self.content = self.json
class Runescape(WeirdGloop):
    """Query the general Runescape endpoints of the Weird Gloop API.

    Args:
        endpoint (str): One of ``'vos'``, ``'vos/history'``, ``'social'``,
            ``'social/last'``, ``'tms/current'``, ``'tms/next'``,
            ``'tms/search'``.
        user_agent (str): User agent string for the query. Default is
            ``'RS Wiki API Python Wrapper - Default'``.

    Keyword Args:
        lang (str): Required for ``'tms/current'``/``'tms/next'``, optional
            for ``'tms/search'``; one of ``'en'``, ``'pt'``, ``'id'``,
            ``'full'``.
        page (str): Required for ``'vos/history'`` and ``'social'``.
        start (str): Required for ``'tms/search'``; date string or ``'today'``.
        id (str): Optional for ``'tms/search'``; item ID to search for.
        name (str): Optional for ``'tms/search'``; item name to search for.
        number (str): Optional for ``'tms/search'``; number of results.
        end (str): Optional for ``'tms/search'``; date string or ``'today'``.

    Note:
        For ``'tms/search'``, exactly one of ``id``/``name`` and exactly one
        of ``end``/``number`` must be supplied.

    Attributes:
        headers (dict): Request headers built from ``user_agent``.
        response (Response): The ``requests`` response object.
        json (dict): Raw JSON response from the API.
        content: Parsed content; a list of results or a single dict,
            depending on the endpoint.
    """

    def __init__(self, endpoint, user_agent='RS Wiki API Python Wrapper - Default', **kwargs):
        # 'tms/search' is the only endpoint with mutually-exclusive kwargs;
        # reject bad combinations before touching the network.
        if endpoint == 'tms/search' and not self._check_kwargs(**kwargs):
            print('Keyword Arguments did not pass check, see documentation for allowable args')
            self.json = None
            self.content = None
            return
        super().__init__('runescape/', game="", endpoint=endpoint, user_agent=user_agent, **kwargs)
        self.json = self.response.json()
        # tms data can be a list or dict, depending on the `lang` kwarg used.
        if isinstance(self.json, list):
            self.content = self.json
        elif 'data' in self.json:
            self.content = self.json['data']
        else:
            self.content = self.json

    @staticmethod
    def _check_kwargs(**kwargs):
        """Validate ``'tms/search'`` kwargs.

        Returns:
            bool: True when at least one required argument is present and no
            conflicting pair (``end``/``number``, ``name``/``id``) is used
            together.
        """
        if not any(key in kwargs for key in ('start', 'number', 'name', 'id')):
            return False
        for pair in (('end', 'number'), ('name', 'id')):
            if all(key in kwargs for key in pair):
                return False
        return True
class MediaWiki(WikiQuery):
    """Access the RuneScape MediaWiki API for pulling information from the Wiki.

    An empty instance can be created for using the various helper methods built
    into this class, or the query can be made directly from this interface
    using the appropriate kwargs.

    Args:
        game (str): The game RSWiki should refer to. Valid options are
            ``'osrs'`` or ``'rs3'``.
        user_agent (str): The user agent string to use in the query. Default is
            ``'RS Wiki API Python Wrapper - Default'``.

    Attributes:
        base_url (str): The ``api.php`` endpoint of the selected game's wiki.
        headers (dict): The headers sent with the request object. Created from
            ``user_agent``.
        response (:obj:`Response`): The response object provided by the
            ``requests`` library.
        json (dict): The raw JSON formatted response from the API.
        content: The parsed content of the request. If created via the
            constructor it is identical to the json content. If created via the
            built-in methods it is reshaped so the requested data needs minimal
            wrangling.
    """

    # Map each supported game to its api.php endpoint.
    _GAME_URLS = {
        'osrs': 'https://oldschool.runescape.wiki/api.php',
        'rs3': 'https://runescape.wiki/api.php',
    }

    # Mapping of built-in Semantic MediaWiki property names to readable names.
    _PROPERTY_MAP = {
        "_INST": "Category",
        "_MDAT": "Modification Date",
        "_SKEY": "Name",
        "_SOBJ": "Subobject"
    }

    def __init__(self, game, user_agent='RS Wiki API Python Wrapper - Default', **kwargs):
        assert game in self._GAME_URLS, 'Invalid game; choose osrs or rs3'
        self.base_url = self._GAME_URLS[game]
        if kwargs:
            # Direct query: forward everything to the underlying WikiQuery.
            super().__init__(self.base_url, user_agent=user_agent, **kwargs)
            self.json = self.response.json()
            self.content = self.json
        else:
            # Empty instance for use with the helper methods below.
            super().__init__(user_agent=user_agent)
            self.json = None
            self.content = None

    @staticmethod
    def _parse_property_value(raw: str):
        """Parse a cleaned dataitem string into a Python object.

        Tries strict JSON first, then falls back to ``ast.literal_eval`` for
        Python-literal formatted payloads. This replaces the previous use of
        ``eval`` on API-supplied text, which could execute arbitrary code.
        """
        import ast
        try:
            return json.loads(raw)
        except (ValueError, TypeError):
            return ast.literal_eval(raw)

    # Use the ASK route
    def ask(self, result_format: str = 'json', conditions: list[str] = None, printouts: list[str] = None,
            offset: str = None, **kwargs):
        """Send an ASK query to the MediaWiki API.

        Args:
            result_format (str, optional): The format in which the response is
                returned. Default is ``'json'``.
            conditions (list[str]): The conditions to match in the ASK query.
            printouts (list[str]): The printouts (results) to provide from the
                ASK query.
            offset (str, optional): The offset in results. Typical ASK queries
                provide 50 results, so ``offset='50'`` gives results 51-100.

        Raises:
            ValueError: If ``conditions`` is not a list.

        Note:
            This route only updates the ``.json`` attribute due to the variety
            of possible printouts. To get the ``.content``, first try the
            ``.get_ask_content()`` method which parses the conditions and
            printouts.

        Hint:
            If trying to access the Production JSON or Exchange JSON
            information, use the included ``ask_production`` and
            ``ask_exchange`` methods, which make result navigation simpler.
        """
        kwargs['action'] = 'ask'
        kwargs['format'] = result_format
        if not isinstance(conditions, list):
            # Previously fell through to an opaque NameError on 'query'.
            raise ValueError('conditions must be a list of ASK condition strings')
        # Join the conditions with ']][[' and strip brackets from each element.
        query = '[[' + ']][['.join(x.replace('[', '').replace(']', '') for x in conditions) + ']]'
        if isinstance(printouts, list):
            # Join the printouts with '|?' and strip '?' and '|' from each element.
            query += '|?' + '|?'.join(x.replace('?', '').replace('|', '') for x in printouts)
        # Append the result offset (if any) so paging past 50 results works.
        query_mod = '' if offset is None else f'|offset={offset}'
        kwargs['query'] = query + query_mod
        # Send the ASK query to the API and parse the response.
        self.update(self.base_url, **kwargs)
        self.json = self.response.json()

    def get_ask_content(self, conditions: list[str], printouts: list[str], get_all: bool = False) -> None:
        """Parse the results of an ASK query into ``.content``.

        Args:
            conditions (list[str]): The conditions to match in the ASK query.
            printouts (list[str]): The printouts (results) to provide from the
                ASK query.
            get_all (bool, optional): Whether to retrieve all results from the
                ASK query recursively.

        Warning:
            Using ``get_all`` will recursively retrieve all results of the
            query, which can take a long time; the wrapper limits itself to
            1 query/second while following results to reduce load on the API.
        """
        # Iterate over the results of the query, collecting each printout
        # value as parsed JSON, keyed by result name.
        for the_name, prods in self.json['query']['results'].items():
            self.content[the_name] = []
            for printout in printouts:
                for prod in prods['printouts'][printout]:
                    self.content[the_name].append(json.loads(prod))
        # If requested, follow the continuation offset to fetch further pages.
        if get_all and self.json.get('query-continue-offset') is not None:
            # Sleep 1s to limit hits to the API.
            sleep(1)
            self.ask(conditions=conditions, printouts=printouts, offset=self.json.get('query-continue-offset'))
            self.get_ask_content(conditions, printouts, get_all)

    def ask_production(self, item: str = None, get_all: bool = False):
        """Retrieve production data for an item or category of items.

        Args:
            item (str, optional): The item name to search Production
                Information. Can also be a category (``'Category:X'``). If no
                name is provided, all items with a valid Production JSON will
                be returned.
            get_all (bool, optional): To recursively search for all matching
                items, or only provide the first page of results (50 by RSWiki
                convention).

        Returns:
            None. Updates ``.content`` as a dict keyed by item name; each value
            is a list with one dict per production method containing ``ticks``,
            ``materials``, ``facilities``, ``skills``, ``members`` and
            ``output`` entries as provided by the wiki's Production JSON.

        Example:
            >>> query = MediaWiki('osrs', user_agent='My Project - me@example.com')
            >>> query.ask_production('Cake')
            >>> query.content['Cake'][0]['skills']
            [{'experience': '180', 'level': '40', 'name': 'Cooking', 'boostable': 'Yes'}]

        Warning:
            Using ``get_all`` may result in many queries; the wrapper limits
            itself to 1 query/second while following results.
        """
        if item is None:
            conditions = ['Production JSON::+']
        else:
            conditions = [item, 'Production JSON::+']
        printouts = ['Production JSON']
        self.content = {}
        self.ask(conditions=conditions, printouts=printouts)
        self.get_ask_content(conditions, printouts, get_all)

    def ask_exchange(self, item: str = None, get_all: bool = False):
        """Retrieve exchange data for the specified item or all items.

        Args:
            item (str, optional): The item name to search Exchange Information.
                If no name is provided, all items with a valid Exchange JSON
                will be returned.
            get_all (bool, optional): To recursively search for all matching
                items, or only provide the first page of results (50 by RSWiki
                convention).

        Warning:
            Unlike ``ask_production``, a category can not be specified, because
            the Exchange JSON lives on ``Exchange:Item`` pages and
            ``[[Exchange:Category:X]]`` is not a valid condition.

        Returns:
            None. Updates ``.content`` as a dict keyed by ``'Exchange:Item'``;
            each value is a list of dicts with keys such as ``historical``,
            ``id``, ``lowalch``, ``limit``, ``isalchable``, ``value``, ``info``,
            ``name`` and ``highalch``.

        Example:
            >>> query = MediaWiki('osrs', user_agent='My Project - me@example.com')
            >>> query.ask_exchange('Cake')
            >>> query.content['Exchange:Cake'][0]['info']
            'Module:Exchange/Cake'
        """
        if item is None:
            conditions = ['Exchange JSON::+']
        else:
            conditions = ['Exchange:' + item, 'Exchange JSON::+']
        printouts = ['Exchange JSON']
        self.content = {}
        self.ask(conditions=conditions, printouts=printouts)
        self.get_ask_content(conditions, printouts, get_all)

    def browse(self, result_format: str = 'json', format_version: str = 'latest', **kwargs) -> None:
        """Browse Semantic MediaWiki data via the ``smwbrowse`` action.

        Args:
            result_format (str, optional): The format in which the response is
                returned. Default is ``'json'``.
            format_version (str, optional): The version of the chosen format to
                use. Default is ``'latest'``.
            ``**kwargs``: Additional keyword arguments to pass as params.

        Note:
            This route only updates the ``.json`` attribute; to get the
            ``.content`` custom parsing is required. For page properties
            (``Special:Browse``) use ``browse_properties()``.
        """
        # Add required kwargs for this endpoint.
        kwargs['action'] = 'smwbrowse'
        kwargs['format'] = result_format
        kwargs['formatversion'] = format_version
        # Update the class and parse the json.
        self.update(self.base_url, **kwargs)
        self.json = self.response.json()

    def _clean_properties(self):
        """Rename built-in SMW property keys in ``content`` to readable names."""
        for raw, readable in self._PROPERTY_MAP.items():
            if raw in self.content:
                self.content[readable] = self.content.pop(raw)

    def _dirty_properties(self):
        """Revert the property keys in ``content`` to their original SMW names."""
        for raw, readable in self._PROPERTY_MAP.items():
            if readable in self.content:
                self.content[raw] = self.content.pop(readable)

    def browse_properties(self, item: str):
        """Retrieve property values for a given subject page.

        Args:
            item (str): The page name to search properties for.

        Returns:
            None. Creates the ``.content`` attribute as a dict with keys of
            each property and values of the value of each property. Properties
            holding a single value are stored as a scalar; multi-valued
            properties are stored as a list. Use ``_clean_properties()`` to
            rename the built-in ``_X`` keys to human-readable ones.

        Example:
            >>> query = MediaWiki('osrs', user_agent='My Project - me@example.com')
            >>> query.browse_properties('Cake')
            >>> 'All_Item_ID' in query.content
            True
        """
        # Format the subject payload for the API request.
        browse_subject = '{"subject":"' + item.replace(" ", "_") + '","ns":0,"iw":"","subobject":"","options":{' \
                         '"dir":null,"lang":"en-gb","group":null,' \
                         '"printable":null,"offset":null,"including":false,' \
                         '"showInverse":false,"showAll":true,' \
                         '"showGroup":true,"showSort":false,"api":true,' \
                         '"valuelistlimit.out":"30",' \
                         '"valuelistlimit.in":"20"}} '
        self.content = {}
        # Make the API request and update the `self.json` attribute.
        self.browse(browse='subject', params=browse_subject)
        # Reshape the retrieved data items into `self.content`.
        for prop in self.json['query']['data']:
            # Strip the SMW type-prefix markers from every data item.
            cleaned = [di['item'].replace("#6##", "").replace("#14##", "").replace("#0##", "")
                       for di in prop['dataitem']]
            # Parse any JSON-looking payloads into dicts. (Previously this
            # used eval(), and the multi-value branch tested `"{" in list`,
            # which checks list membership and never parsed anything.)
            parsed = [self._parse_property_value(c) if "{" in c else c for c in cleaned]
            # Single values are stored as scalars, multiple values as a list.
            self.content[prop['property']] = parsed[0] if len(parsed) == 1 else parsed
from __future__ import absolute_import, division, print_function

# `errno` is a top-level stdlib module; `from os import errno` relied on an
# accidental re-export that was removed in Python 3.6.
import errno
import struct

import serial
__version__ = '0.1.2'
__author__ = 'Eberhard Fahle'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Eberhard Fahle'
#Constants for the commands a wavtrigger understands
# Every command is framed as: 0xF0 0xAA <length> <command code> [payload...] 0x55.
# The 0x00 placeholder payload bytes are patched in (LSB first) before sending.
# Reading data back from a WavTrigger
# Firmware version
_WT_GET_VERSION = bytearray([0xF0,0xAA,0x05,0x01,0x55])
# Number of polyphonic voices and number of tracks on sd-card
_WT_GET_SYS_INFO = bytearray([0xF0,0xAA,0x05,0x02,0x55])
# List of currently playing tracks
_WT_GET_STATUS = bytearray([0xF0,0xAA,0x05,0x07,0x55])
# Timeout when waiting for the data from the Get-Status command
_WT_GET_STATUS_TIMEOUT = 0.25
# Playing individual tracks (bytes 5-6 hold the track number, LSB first)
_WT_TRACK_SOLO = bytearray([0xF0,0xAA,0x08,0x03,0x00,0x00,0x00,0x55])
_WT_TRACK_PLAY = bytearray([0xF0,0xAA,0x08,0x03,0x01,0x00,0x00,0x55])
_WT_TRACK_PAUSE = bytearray([0xF0,0xAA,0x08,0x03,0x02,0x00,0x00,0x55])
_WT_TRACK_RESUME = bytearray([0xF0,0xAA,0x08,0x03,0x03,0x00,0x00,0x55])
_WT_TRACK_STOP = bytearray([0xF0,0xAA,0x08,0x03,0x04,0x00,0x00,0x55])
_WT_TRACK_LOOP_ON = bytearray([0xF0,0xAA,0x08,0x03,0x05,0x00,0x00,0x55])
_WT_TRACK_LOOP_OFF = bytearray([0xF0,0xAA,0x08,0x03,0x06,0x00,0x00,0x55])
_WT_TRACK_LOAD = bytearray([0xF0,0xAA,0x08,0x03,0x07,0x00,0x00,0x55])
# Stopping and resuming several tracks at once
_WT_STOP_ALL = bytearray([0xF0,0xAA,0x05,0x04,0x55])
_WT_RESUME_ALL = bytearray([0xF0,0xAA,0x05,0x0B,0x55])
# Mixer settings and fader
_WT_VOLUME = bytearray([0xF0,0xAA,0x07,0x05,0x00,0x00,0x55])
_WT_TRACK_VOLUME = bytearray([0xF0,0xAA,0x09,0x08,0x00,0x00,0x00,0x00,0x55])
_WT_FADE = bytearray([0xF0,0xAA,0x0C,0x0A,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x55])
# Pitch bending
_WT_SAMPLERATE = bytearray([0xF0,0xAA,0x07,0x0C,0x00,0x00,0x55])
# Switching the Power amp on or off (not implemented!)
_WT_AMP_POWER = bytearray([0xF0,0xAA,0x06,0x09,0x00,0x55])
class WavTrigger(object):
    """A controller for a RobertSonics WavTrigger connected over a serial port.

    Opens the port on construction and reads the firmware version plus the
    number of voices and tracks from the device.
    """

    def __init__(self, device, baud=57600, timeout=5.0):
        """Open a serial port to the device and read the hardware version
        and info from the WavTrigger.

        :param device: The serial port where the WavTrigger is listening.
        :type device: str
        :param baud: The baudrate to be used on the port. The value must match
            the baudrate set in the init file of the WavTrigger. The default
            value (57600) seems to be fast enough for all purposes.
        :type baud: int
        :param timeout: A timeout for reading and writing on the port.
            The default (5.0 seconds) is plenty; if this limit is reached you
            can be quite sure to have lost the connection.
        :type timeout: float
        """
        self._wt = serial.Serial(port=device, baudrate=baud, timeout=timeout)
        if self._wt.isOpen():
            self._version = self._getVersion()
            self._voices, self._tracks = self._getSysInfo()

    def close(self):
        """Close the port to the WavTrigger. Does not stop playing tracks."""
        self._wt.close()

    def isOpen(self):
        """Test if a serial connection to the WavTrigger is established.

        :returns: bool -- True if the device is open, False otherwise
        """
        return self._wt.isOpen()

    @property
    def version(self):
        """str -- The firmware version string reported by the WavTrigger."""
        return self._version

    @property
    def voices(self):
        """int -- The number of polyphonic voices playable simultaneously."""
        return self._voices

    @property
    def tracks(self):
        """int -- The total number of tracks found on the SD-Card."""
        return self._tracks

    def play(self, track):
        """Play a track.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        """
        self._sendTrackCommand(_WT_TRACK_PLAY, track)

    def solo(self, track):
        """Play a track solo: stop all currently playing tracks first.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        """
        self._sendTrackCommand(_WT_TRACK_SOLO, track)

    def stop(self, track):
        """Stop a playing track.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        """
        self._sendTrackCommand(_WT_TRACK_STOP, track)

    def pause(self, track):
        """Pause a track until :meth:`resume` is called for it.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        """
        self._sendTrackCommand(_WT_TRACK_PAUSE, track)

    def resume(self, track):
        """Resume playing a track that has been paused previously.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        """
        self._sendTrackCommand(_WT_TRACK_RESUME, track)

    def load(self, track):
        """Load a track into device memory and pause it.

        The track can then be played using :meth:`resume` or :meth:`resumeAll`.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        """
        self._sendTrackCommand(_WT_TRACK_LOAD, track)

    def loop(self, track):
        """Set the loop flag for a track.

        Once started the track plays in a loop until stopped; stopping does
        not clear the flag. Use :meth:`unLoop` to clear it.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        """
        self._sendTrackCommand(_WT_TRACK_LOOP_ON, track)

    def unLoop(self, track):
        """Clear the loop flag for a track. See :meth:`loop`.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        """
        self._sendTrackCommand(_WT_TRACK_LOOP_OFF, track)

    def stopAll(self):
        """Stop all playing tracks."""
        self._wt.write(_WT_STOP_ALL)

    def resumeAll(self):
        """Restart all paused tracks."""
        self._wt.write(_WT_RESUME_ALL)

    def masterGain(self, gain):
        """Set the gain for the WavTrigger output.

        :param gain: Gain for the output (valid range -70..+10).
        :type gain: int
        :raises ValueError: If ``gain`` is outside the valid range.
        """
        if gain < -70 or gain > 10:
            raise ValueError('Gain argument range is from -70 to +10')
        # Patch a copy so the module-level command template stays pristine.
        g = bytearray(_WT_VOLUME)
        g[4], g[5] = self._intToLsb(gain)
        self._wt.write(g)

    def trackGain(self, track, gain):
        """Set the gain for a specific track.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        :param gain: Gain for the track (valid range -70..+10).
        :type gain: int
        :raises ValueError: If ``gain`` is outside the valid range.
        """
        if gain < -70 or gain > 10:
            raise ValueError('Gain argument range is from -70 to +10')
        g = bytearray(_WT_TRACK_VOLUME)
        g[4], g[5] = self._intToLsb(track)
        g[6], g[7] = self._intToLsb(gain)
        self._wt.write(g)

    def masterVolume(self, volume):
        """Set the volume for the WavTrigger output.

        Unlike :meth:`masterGain` this never amplifies the signal (gain > 0),
        which prevents distortion, and uses a familiar 0..100 scale.

        :param volume: Volume for the output (valid range 0..100).
        :type volume: int
        """
        vol = bytearray(_WT_VOLUME)
        vol[4], vol[5] = self._intToLsb(self._volumeToDb(volume))
        self._wt.write(vol)

    def trackVolume(self, track, volume):
        """Set the volume for a track.

        Unlike :meth:`trackGain` this never amplifies the signal (gain > 0),
        which prevents distortion, and uses a familiar 0..100 scale.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        :param volume: Volume for the track (valid range 0..100).
        :type volume: int
        """
        tvol = bytearray(_WT_TRACK_VOLUME)
        tvol[4], tvol[5] = self._intToLsb(track)
        tvol[6], tvol[7] = self._intToLsb(self._volumeToDb(volume))
        self._wt.write(tvol)

    def pitch(self, offset):
        """Set an offset for the samplerate that the WavTrigger uses.

        A negative offset lowers the tone, a positive offset raises it.

        :param offset: Offset to the samplerate (valid range -32767..+32767);
            out-of-range values are clamped.
        :type offset: int
        """
        # Clamp to the device's valid range. (A typo previously assigned the
        # lower clamp to a misspelled name, so it was never applied.)
        offset = max(-32767, min(32767, offset))
        pitch = bytearray(_WT_SAMPLERATE)
        pitch[4], pitch[5] = self._intToLsb(offset)
        self._wt.write(pitch)

    def fade(self, track, volume, time):
        """Fade a track from its current volume to a new target volume.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        :param volume: The target volume for the track (valid range 0..100).
        :type volume: int
        :param time: Duration of the fade in milliseconds.
        :type time: int
        """
        self._wt.write(self._buildFade(track, volume, time, stop=False))

    def fadeOut(self, track, time):
        """Fade a track to silence, then stop it.

        :param track: Number of the track (valid range 1..999).
        :type track: int
        :param time: Duration of the fade-out in milliseconds.
        :type time: int
        """
        self._wt.write(self._buildFade(track, 0, time, stop=True))

    def playing(self):
        """Get a list of the currently playing tracks on the WavTrigger.

        :returns: list -- The track numbers currently playing. An empty list
            means no tracks are playing. ``None`` is returned when the status
            response from the WavTrigger cannot be parsed.
        """
        self._wt.write(_WT_GET_STATUS)
        header = self._readFromPort(4)
        # Validate the frame start bytes and the status response code (0x83).
        if header[:2] != b'\xF0\xAA' or header[3:4] != b'\x83':
            self._wt.flushInput()
            return None
        # Byte 2 is the total frame length; subtract the 4 header bytes.
        trackLen = ord(header[2:3]) - 4
        t = self._readFromPort(trackLen)
        if t[-1:] != b'\x55':
            return None
        # Drop the terminator and decode each 2-byte little-endian track id.
        t = t[:-1]
        return [self._lsbToInt(t[i:i + 2]) for i in range(0, len(t), 2)]

    def amplifierOn(self):
        """Switch the on-board amplifier on."""
        data = bytearray(_WT_AMP_POWER)
        data[4] = 0x01
        self._wt.write(data)

    def amplifierOff(self):
        """Switch the on-board amplifier off."""
        data = bytearray(_WT_AMP_POWER)
        data[4] = 0x00
        self._wt.write(data)

    def _isValidTrackNumber(self, track):
        """Simple test for valid track numbers."""
        return track > 0

    def _lsbToInt(self, lsbValue):
        """Convert a track number from 2 bytes in LSB order to an int."""
        return struct.unpack('<h', lsbValue)[0]

    def _intToLsb(self, value):
        """Convert an int value to a 2-byte tuple in LSB order."""
        return (value & 0xFF, (value >> 8) & 0xFF)

    def _setTrackForCommand(self, cmd, track):
        """Return a copy of *cmd* with the track number patched into bytes 5-6."""
        out = bytearray(cmd)
        out[5], out[6] = self._intToLsb(track)
        return out

    def _sendTrackCommand(self, cmd, track):
        """Validate *track*, patch it into *cmd* and send it to the device."""
        if self._isValidTrackNumber(track):
            self._wt.write(self._setTrackForCommand(cmd, track))

    def _buildFade(self, track, volume, time, stop):
        """Build a fade command frame; *stop* selects stop-at-end behaviour."""
        f = bytearray(_WT_FADE)
        f[4], f[5] = self._intToLsb(track)
        f[6], f[7] = self._intToLsb(self._volumeToDb(volume))
        f[8], f[9] = self._intToLsb(time)
        f[10] = 0x01 if stop else 0x00
        return f

    def _volumeToDb(self, vol):
        """Map a volume level of 0..100 to the -70..0 dB gain scale used by
        the WavTrigger.

        :raises ValueError: If ``vol`` is outside 0..100.
        """
        if vol < 0 or vol > 100:
            raise ValueError('Volume argument range is from 0 to 100')
        return -70 + int(vol / 1.428)

    def _getVersion(self):
        """Read the firmware version string from the device."""
        if self._wt.write(_WT_GET_VERSION) != len(_WT_GET_VERSION):
            return ''
        v = self._readFromPort(25)
        if v[:4] != b'\xF0\xAA\x19\x81' or v[-1:] != b'\x55':
            return ''
        return v[4:-1].decode('utf8').strip()

    def _getSysInfo(self):
        """Read system info from the device.

        The current firmware reports the number of polyphonic voices and the
        number of tracks found on the SD-card.

        :returns: tuple -- ``(voices, tracks)``; ``(0, 0)`` on failure.
        """
        if self._wt.write(_WT_GET_SYS_INFO) != len(_WT_GET_SYS_INFO):
            return (0, 0)
        v = self._readFromPort(8)
        if v[:4] != b'\xF0\xAA\x08\x82' or v[-1:] != b'\x55':
            return (0, 0)
        return (ord(v[4:5]), self._lsbToInt(v[5:7]))

    def _readFromPort(self, size):
        """Read *size* bytes from the serial port.

        :raises OSError: With ``errno.ETIMEDOUT`` when fewer than *size* bytes
            arrive, i.e. the read timed out.
        """
        result = self._wt.read(size)
        if len(result) != size:
            raise OSError(errno.ETIMEDOUT, "Connection timed out")
        return result

    def __del__(self):
        """Close the serial port when the instance is garbage collected.

        Note: this was previously named ``__delete__``, which is part of the
        descriptor protocol and was never invoked at destruction time.
        """
        try:
            self.close()
        except Exception:
            # The port may never have been opened (constructor failure) or the
            # interpreter may be shutting down; ignore cleanup errors.
            pass
from __future__ import annotations
import math
import csv
import os
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
from time import sleep
# Make sure this module doesn't crash if these aren't installed.
try:
import psutil
except ImportError:
psutil = None
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
from src.logging.logger import Logger
# https://medium.com/survata-engineering-blog/monitoring-memory-usage-of-a-running-python-program-49f027e3d1ba
class ProcStats():
    """A snapshot of CPU and memory statistics for a process and its children.

    Raw memory arguments are given in bytes and stored in MiB.

    Args:
        cpu_percent (float): CPU utilisation percentage of the process.
        rss_raw (int): Resident set size of the process, in bytes.
        vms_raw (int): Virtual memory size of the process, in bytes.
        children (int): Number of child processes.
        children_rss_raw (int): Combined resident set size of the children, in bytes.
        children_vms_raw (int): Combined virtual memory size of the children, in bytes.
    """

    # Column order used by row() and by MemoryMonitor's CSV log.
    headers = ['datetime', 'cpu_percent', 'resident_memory', 'virtual_memory', 'children', 'children_resident', 'children_virtual']

    # Conversion factor from bytes to MiB.
    _BYTES_PER_MIB = float(2 ** 20)

    def __init__(self, cpu_percent, rss_raw: int, vms_raw: int, children: int, children_rss_raw: int, children_vms_raw: int):
        self.datetime = datetime.now()
        self.cpu_percent = cpu_percent
        self.children = children
        self.rss = rss_raw / self._BYTES_PER_MIB
        self.vms = vms_raw / self._BYTES_PER_MIB
        self.children_rss = children_rss_raw / self._BYTES_PER_MIB
        self.children_vms = children_vms_raw / self._BYTES_PER_MIB

    def row(self):
        """Return the snapshot as a CSV row matching ``headers``.

        Bug fix: the children columns previously repeated the parent's
        resident/virtual memory instead of the children's values.

        Returns:
            list: Timestamp string, ceiled CPU percent, and memory values
            formatted to two decimal places (MiB).
        """
        return [
            self.datetime.strftime('%Y-%m-%d %H:%M:%S'),
            math.ceil(self.cpu_percent),
            f"{self.rss:.2f}",
            f"{self.vms:.2f}",
            f"{self.children}",
            f"{self.children_rss:.2f}",
            f"{self.children_vms:.2f}"
        ]

    def toString(self) -> str:
        """Return a human-readable one-line summary of the snapshot.

        Bug fix: the children fields previously reported the parent's
        resident/virtual memory instead of the children's values.
        """
        return (
            f"datetime: {self.datetime.strftime('%Y-%m-%d %H:%M:%S')}, "
            f"cpu_percent: {math.ceil(self.cpu_percent)}, "
            f"mem_resident: {self.rss:.2f}Mb, "
            f"mem_virtual: {self.vms:.2f}Mb, "
            f"num_children: {self.children}, "
            f"mem_children_resident: {self.children_rss:.2f}Mb, "
            f"mem_children_virtual: {self.children_vms:.2f}Mb"
        )

    def max(self, tick: ProcStats):
        """Update this snapshot in place to the element-wise maximum of itself
        and *tick*. The timestamp is left untouched.

        Args:
            tick (ProcStats): Another snapshot to merge in.
        """
        self.cpu_percent = max(self.cpu_percent, tick.cpu_percent)
        self.children = max(self.children, tick.children)
        self.rss = max(self.rss, tick.rss)
        self.vms = max(self.vms, tick.vms)
        self.children_rss = max(self.children_rss, tick.children_rss)
        self.children_vms = max(self.children_vms, tick.children_vms)
class MemoryMonitor:
    """Samples the current process with psutil and logs the stats to a CSV.

    Intended to run on a worker thread (see ``thread_run``): call
    ``measure_usage()`` on the thread and set ``keep_measuring = False`` from
    the caller to stop the sampling loop.

    Args:
        logfile (str): Path of the CSV file to write samples to.
        loop_delay (int): Seconds to sleep between samples.
    """

    def __init__(self, logfile: str, loop_delay=1):
        # Initialise every attribute up front so a half-built instance cannot
        # raise AttributeError later (previously __init__ returned early when
        # psutil was missing, leaving the object without any attributes).
        self.keep_measuring = psutil is not None
        self.filepath = logfile
        self.loop_delay = loop_delay
        self.process = psutil.Process(os.getpid()) if psutil else None
        self.max_stats = ProcStats(0, 0, 0, 0, 0, 0)

    def write_line(self, arr, mode='a'):
        """Append (or write, depending on *mode*) one CSV row to the log file.

        Args:
            arr (list): The row values to write.
            mode (str, optional): File open mode; ``'w'`` truncates first.
        """
        with open(self.filepath, mode, newline='', encoding='utf-8') as csvfile:
            csv.writer(csvfile).writerow(arr)

    def getstats(self) -> ProcStats:
        """Take one snapshot of the process and its children.

        Also folds the snapshot into ``max_stats`` so the peak values are
        available after monitoring stops.

        Returns:
            ProcStats: The snapshot just taken.
        """
        cpu_percent = self.process.cpu_percent()
        mem_info = self.process.memory_info()
        # Aggregate memory across all direct children.
        children = 0
        children_rss = 0
        children_vms = 0
        for child in self.process.children():
            child_mem = child.memory_info()
            children_rss += child_mem.rss
            children_vms += child_mem.vms
            children += 1
        stats = ProcStats(cpu_percent, mem_info.rss, mem_info.vms, children, children_rss, children_vms)
        self.max_stats.max(stats)
        return stats

    def measure_usage(self):
        """Sampling loop: write the CSV header, then one row per interval
        until ``keep_measuring`` is cleared.

        Returns:
            ProcStats: The element-wise maximum over all samples taken.
        """
        self.write_line(ProcStats.headers, 'w')
        while self.keep_measuring:
            self.write_line(self.getstats().row())
            sleep(self.loop_delay)
        return self.max_stats

    def write_plot(self, imgpath: str):
        """Render the logged CSV as a PNG chart (memory on the left axis,
        CPU percent and child count on the right axis).

        Args:
            imgpath (str): Path of the PNG file to write.
        """
        log = Logger('write_plot')
        if not plt:
            log.error('You need Matplotlib to run write_plot')
            return
        # Re-read the CSV into one x series (timestamps) and one y series per column.
        x = []
        data = {}
        with open(self.filepath, encoding='utf8') as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                for key, raw in row.items():
                    if key == 'datetime':
                        x.append(raw)
                        continue
                    # Counts are ints; memory columns are floats (MiB).
                    val = int(raw) if key in ['children', 'cpu_percent'] else float(raw)
                    data.setdefault(key, []).append(val)
        plt.clf()
        fig, plt_ax = plt.subplots()
        plt_ax.title.set_text('Process stats')
        ax2 = plt_ax.twinx()
        # Memory series go on the primary (left) axis.
        for key in ['children_resident', 'children_virtual', 'resident_memory', 'virtual_memory']:
            if key in data:
                plt_ax.plot(x, data[key], label=key)
                # Keep the twin axis colour cycle in step with the primary axis.
                ax2._get_lines.get_next_color()
        plt_ax.set_xlabel('time')
        plt_ax.set_ylabel('Mb')
        # Count series go on the secondary (right) axis.
        for key in ['cpu_percent', 'children']:
            if key in data:
                ax2.plot(x, data[key], label=key)
        plt_ax.legend(loc='lower left')
        ax2.legend(loc='lower right')
        # Thin the x tick labels to roughly ten across the chart.
        freq = max(1, math.floor(len(x) / 10))
        plt_ax.set_xticks(x[::freq])
        plt_ax.set_xticklabels(x[::freq], rotation=45)
        plt_ax.grid(True)
        fig.set_size_inches(8, 6)
        fig.savefig(imgpath, format='png', dpi=300)
def thread_run(callback, memlogfile: str, *args, **kwargs):
    """Run ``callback`` while sampling its memory/CPU usage in a worker thread.

    Starts a MemoryMonitor in one thread, executes
    ``callback(*args, **kwargs)`` in another, then stops the monitor and
    writes a PNG plot next to ``memlogfile``.

    Args:
        callback (function): The code to execute and measure.
        memlogfile (str): Path of the CSV file the monitor writes to.

    Returns:
        tuple: ``(result, max_stats_str)`` where ``result`` is the callback's
        return value (or None on failure). Returns None (no tuple) early if
        psutil is not installed.
    """
    log = Logger('Debug')
    if not psutil:
        log.error('You need "psutil" to run the debug tools')
        return
    memmon = MemoryMonitor(memlogfile, 1)
    result = None
    max_obj = None
    try:
        with ThreadPoolExecutor() as executor:
            mem_thread = executor.submit(memmon.measure_usage)
            try:
                fn_thread = executor.submit(callback, *args, **kwargs)
                result = fn_thread.result()
            except Exception as err_in:
                log.error(f"Error executing code: {err_in}")
            finally:
                # Stop the monitor loop so mem_thread can finish; otherwise
                # the executor would block forever on shutdown.
                memmon.keep_measuring = False
                max_obj = mem_thread.result()
                log.debug(f'MaxStats: {max_obj}')
    except Exception as err_out:
        # Make sure we always return so that we don't have to debug our debugger
        log.error(err_out)
    try:
        memmon.write_plot(os.path.splitext(memlogfile)[0] + '.png')
    except Exception as err:
        log.error(f'Error Writing memory plot: {err}')
    ret_val = max_obj.toString() if max_obj is not None else "process no longer exists"
    return result, ret_val
from __future__ import annotations
import csv
from typing import Dict, List, Tuple
from .timer import Timer
class TimerBucketsBorg:
    """Borg base class: every instance shares one ``__dict__``.

    All instances created anywhere in the process see (and mutate) the same
    state, which lets :class:`TimerBuckets` behave like a singleton without
    restricting instantiation.
    """
    _shared_state = {}

    def __init__(self):
        self.__dict__ = self._shared_state
class TimerBuckets(TimerBucketsBorg):
    """Accumulate wall-clock time into named buckets (a Borg singleton).

    Useful for when you want to time things into buckets. Includes a
    :func:`tick` function for use within loops::

        with TimerBuckets("bucketname"):
            ...  # EVERYTHING IN HERE GETS COUNTED IN THIS BUCKET

    Rows frozen by :func:`tick` can be written out afterwards with
    :func:`write_csv` or :func:`write_sqlite`.
    """

    # Shared (Borg) state; values are assigned in __init__ the first time
    # (or again when reset=True).
    timers: Dict[str, float]  # bucket name -> seconds accumulated this tick
    ticks: TickType           # frozen rows, one per completed tick

    class ColTypes:
        """SQLite column type names used when generating the output table."""
        UNKNOWN = 'UNKNOWN'
        INT = 'INTEGER'
        REAL = 'REAL'
        TEXT = 'TEXT'

    class Tick:
        """One frozen row: the bucket timers, their total, and row metadata."""

        def __init__(self, timers: Dict[str, float], total: float, meta: Dict):
            self.timers = timers
            self.total = total
            self.meta = meta

    def __init__(self, key: str = None, table_name: str = None, csv_path: str = None, meta: Dict = None, active: bool = True, reset: bool = False):
        """Attach to (or initialize) the shared timer state.

        Args:
            key (str, optional): When using the "with TimerBuckets(key='MyKey')".
            table_name (str, optional): If you want sqlite writes you need this set to a string.
            csv_path (str, optional): Optional. Can be passed during write_csv as well.
            meta (Dict, optional): Metadata key=value pairs dictionary.
            active (bool, optional): if active=False this class won't do anything.
            reset (bool, optional): resets the borg singleton so we can use this in another loop.
        """
        TimerBucketsBorg.__init__(self)
        if "timers" not in self.__dict__ or reset is True:
            # First construction (or explicit reset): build fresh shared state.
            self.active = active
            self.table_name = table_name if table_name is not None else 'DEBUG'
            self.timers = {}
            self.meta = {}
            self.total = 0
            self.tick_total = 0
            self.ticks = []
            self.timer = Timer()
            # Default so write_csv() can fall back gracefully instead of
            # raising AttributeError when no csv_path was ever supplied.
            self.csv_path = None
        if self.active is False:
            return
        if csv_path is not None:
            self.csv_path = csv_path
        self.key = key
        if meta is not None:
            self.meta = meta

    def tick(self, meta: Dict = None):
        """For "for" loops you can call this to freeze these timers to a row.

        Args:
            meta (Dict, optional): metadata key=value pairs for the *next* row.
        """
        if self.active is False:
            return
        # If we only tick when there's something to do then we can tick at the
        # top or the bottom of the loop; this is helpful to catch errors
        # partway through the loop.
        if self.tick_total > 0 or len(self.timers.keys()) > 0:
            self.ticks.append(TimerBuckets.Tick(self.timers, self.tick_total, self.meta))
        self.meta = meta if meta is not None else {}
        self.timers = {}
        self.tick_total = 0

    def __enter__(self):
        """Behaviour on open when using the "with TimerBuckets():" syntax."""
        if self.active is False:
            return
        if self.key is not None:
            if self.key not in self.timers:
                self.timers[self.key] = 0
        self.timer.reset()

    def __exit__(self, _type, _value, _traceback):
        """Behaviour on close when using the "with TimerBuckets():" syntax."""
        if self.active is False:
            return
        if self.key is not None:
            # Read the timer once so all three accumulators get the exact
            # same value (repeated elapsed() calls would drift slightly).
            elapsed = self.timer.elapsed()
            self.timers[self.key] += elapsed
            self.total += elapsed
            self.tick_total += elapsed

    def generate_table(self) -> Tuple[List[Tuple[str, str]], List[list]]:
        """Return something we can either write to a CSV or to a SQLite DB.

        Returns:
            Tuple: ``columns`` is a list of ``(name, sqlite_type)`` pairs and
            ``values`` is a list of row value lists aligned with ``columns``.
        """
        if self.active is False:
            return [], []
        meta_columns = {"tick": 'INTEGER'}
        tmr_columns = {}
        csv_arr = []
        # First pass: collect rows and infer a SQLite type for every meta key.
        for idx, row in enumerate(self.ticks):
            csv_row = {"tick": idx}
            for k in row.meta.keys():
                if isinstance(row.meta[k], type(None)):
                    if k not in meta_columns:
                        # Type unknown until a non-null value shows up.
                        meta_columns[k] = None
                elif isinstance(row.meta[k], int):
                    if k not in meta_columns or meta_columns[k] is None:
                        meta_columns[k] = TimerBuckets.ColTypes.INT
                elif isinstance(row.meta[k], float):
                    if k not in meta_columns or meta_columns[k] is None:
                        meta_columns[k] = TimerBuckets.ColTypes.REAL
                # Just default back to text for everything else
                else:
                    if k not in meta_columns or meta_columns[k] != TimerBuckets.ColTypes.TEXT:
                        meta_columns[k] = TimerBuckets.ColTypes.TEXT
                csv_row[k] = row.meta[k]
            # Timers get appended as REAL
            for k in row.timers.keys():
                kname = f'tmr_{k}'
                if kname not in tmr_columns:
                    tmr_columns[kname] = 'REAL'
                csv_row[kname] = round(row.timers[k], 1)
            csv_row["timer_total"] = round(row.total, 1)
            csv_arr.append(csv_row)
        columns = [(k, v) for k, v in meta_columns.items()] + \
            [(k, v) for k, v in tmr_columns.items()] + \
            [('timer_total', TimerBuckets.ColTypes.REAL)]
        # Second pass: flatten the dict rows into lists aligned with columns.
        values = []
        for row in csv_arr:
            row_arr = []
            for col_name, col_type in columns:
                cell_val = row[col_name] if col_name in row else None
                if col_type == TimerBuckets.ColTypes.TEXT and cell_val is not None and not isinstance(cell_val, str):
                    cell_val = str(cell_val)
                row_arr.append(cell_val)
            values.append(row_arr)
        return (columns, values)

    def write_csv(self, csv_file_path: str = None):
        """Write all our Timer ticks to a CSV file.

        Args:
            csv_file_path (str, optional): destination path; falls back to the
                ``csv_path`` given to the constructor. Silently does nothing
                when neither is set.
        """
        if self.active is False:
            return
        final_path = csv_file_path if csv_file_path is not None else self.csv_path
        if final_path is None:
            return
        columns, csv_arr = self.generate_table()
        # newline='' is required by the csv module to avoid blank rows on Windows.
        with open(final_path, 'w', newline='', encoding='utf8') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow([k for k, v in columns])  # header row
            for row in csv_arr:
                writer.writerow(row)

    def write_sqlite(self, conn):
        """Write the data to a sqlite file (or geopackage).

        Args:
            conn: an open sqlite3 (or geopackage) connection.
        """
        if self.active is False or self.table_name is None:
            return
        curs = conn.cursor()
        # Geopackages require every table to be registered in gpkg_contents.
        is_gpkg = curs.execute("SELECT name FROM sqlite_master WHERE type='table' and name='gpkg_contents';").fetchone()
        if is_gpkg:
            has_row = curs.execute("SELECT * from gpkg_contents WHERE table_name=?;", [self.table_name, ]).fetchone()
            if has_row is None:
                curs.execute("""
                    INSERT INTO gpkg_contents
                    (table_name, data_type, identifier, description, last_change, min_x, min_y, max_x, max_y, srs_id)
                    VALUES
                    (:table_name, 'attributes', :table_name, '', '2022-11-12T06:14:22.287Z', null, null, null, null, null);
                """, {"table_name": self.table_name})
        columns, csv_arr = self.generate_table()
        # Blow away the old one
        curs.execute(f'DROP TABLE IF EXISTS {self.table_name}')
        create_string = f"""
            CREATE TABLE {self.table_name}
            (
                tick INTEGER not null primary key autoincrement,
                {", ".join([f"{cname} {ctype}" for cname, ctype in columns if cname != 'tick'])}
            )
        """
        curs.execute(create_string)
        value_sub = ','.join(['?' for x in columns])
        curs.executemany(f'INSERT INTO {self.table_name} VALUES({value_sub});', csv_arr)
        conn.commit()
TickType = List[TimerBuckets.Tick] | /rsxml-0.0.1a0.tar.gz/rsxml-0.0.1a0/src/debug/timer_buckets.py | 0.861786 | 0.230909 | timer_buckets.py | pypi |
# R# - RSX
An interpreted statically typed multi paradigm general purpose programming language designed for cross platform applications.
# RSX Logo

# RSX Icon

# Requirements
- Python 3.10 or higher
# Getting Started
## How to install
### Windows (Compiles RSX source files and places them in C:\RSX, also installs the RSX python library)
```
.\install.bat
```
### Linux (Compiles RSX source files and installs the RSX python library)
```
./install.sh
```
### Difference between rsxpy and rsx
- rsx only works on Windows
- rsxpy requires python but rsx doesn't
## How to [run/build] a RSX program
```
[rsxpy/rsx] main.rsx [run/build]
```
## How to [run/build] a RSX bytecode
```
[rsxpy/rsx] main.rsxc [run/build]
```
## How to make an RSX library with Python
### Using RSX Tools
```python
from rsxpy.tools import *
create_library("library")
@create_function("VOID", {"message": "STRING"})
def log(environment):
print(environment["args"]["message"], flush = True)
library = pack_library()
```
### Using RSXLib
```python
from rsxpy import rsxlib
rsxlib.begin()
def log(message: str) -> None:
    print(message, flush = True)
rsxlib.end()
```
## How to make an RSX library with RSX header files
### library.rsxh
```c++
include "rsxio" : *;
void log(string message) {
std::rout(message + std::endl());
}
```
### main.rsx
```c++
include "library.rsxh";
int main(string[] args) {
library::log("Hello, World!");
}
```
## How to add an include folder
```
[rsxpy/rsx] main.rsx run -Imy-include-folder
```
# Command line arguments
- -I[dir]
- -rmI[dir]
- -timeit=[true/false]
- -gettok=[true/false]
- -getast=[true/false]
- -bytecode=[true/false]
- -noconsole=[true/false]
- -console=[true/false]
- run
- build
# Examples
## Hello, World!
```c++
include "rsxio" : *;
// using namespace std;
int main(string[] args) {
std::rout("Hello, World!" + std::endl());
return 0;
}
```
## Builder
```c++
include "rsxbuild", "rsxsys", "rsxio" : *;
int main(string[] args) {
std::rout("file name > ");
std::build_program(
std::rin(),
{std::getdir() + "/include/"},
true, std::getdir() + "/icon.ico"
); return 0;
}
```
## Web Server
```c++
include "rsxsocket", "rsxio" : *;
int main(string[] args) {
auto server = std::socket(std::AF_INET, std::SOCK_STREAM);
std::bind(server, "localhost", 5656);
std::listen(server);
string index = "HTTP/1.1 200 OK\n\n<p>Hello, World!</p>";
while (true) {
auto connection = std::accept(server);
string response = std::recv(connection, 1024);
std::rout(response + std::endl());
std::send(connection, index);
std::close(connection);
}
return 0;
}
```
## Raylib
```c++
include "rsxraylib" : *;
int main(string[] args) {
InitWindow(1200, 600, "R#");
// SetTargetFPS(60);
while (!WindowShouldClose()) {
BeginDrawing();
ClearBackground(RAYWHITE);
DrawFPS(10, 10);
EndDrawing();
}
CloseWindow();
return 0;
}
```
## 3D Projection Using Raylib
```c++
include "rsxio", "rsxraylib" : *;
float[] project_vertices(float[] vertices, float[] position, float focal_length) {
float[(int) (vertices.length() / 3 * 2)] projected_vertices;
int index = 0;
for (int i = 0; i < vertices.length(); i += 3) {
if ((focal_length + (vertices[i + 2] + position[2])) == 0 || (focal_length * (vertices[i] + position[0])) == 0 || (focal_length * (vertices[i + 1] + position[1])) == 0)
continue;
float x_projected = (focal_length * (vertices[i] + position[0])) / (focal_length + (vertices[i + 2] + position[2]));
float y_projected = (focal_length * (vertices[i + 1] + position[1])) / (focal_length + (vertices[i + 2] + position[2]));
projected_vertices[index++] = x_projected; projected_vertices[index++] = y_projected;
}
return projected_vertices;
}
float[] scale_vertices(float[] vertices, float scale) {
float[vertices.length()] new;
for (int i = 0; i < vertices.length(); i++) {
new[i] = vertices[i] * scale;
}
return new;
}
int main() {
const int width = 1200, height = 600;
InitWindow(width, height, "RSX");
SetTargetFPS(60);
float[] vertices = scale_vertices({
-0.5f, -0.5f, 0.0f,
-0.5f, 0.5f, 0.0f,
0.5f, 0.5f, 0.0f,
0.5f, -0.5f, 0.0f,
-0.5f, -0.5f, -0.5f,
-0.5f, 0.5f, -0.5f,
0.5f, 0.5f, -0.5f,
0.5f, -0.5f, -0.5f,
}, 100);
int[] indices = {
0, 1,
1, 2,
2, 3,
3, 0,
4, 5,
5, 6,
6, 7,
7, 4,
0, 4,
1, 5,
2, 6,
3, 7
};
int[] offset = {width / 2, height / 2};
float[] position = {0.0f, 0.0f, 0.0f};
float speed = 0.0f;
float focal_length = 100.0f;
while (!WindowShouldClose()) {
BeginDrawing();
ClearBackground(BLACK);
DrawText((string) GetFPS() + " FPS", 10, 10, 20, RAYWHITE);
float[] projected_vertices = project_vertices(vertices, position, focal_length);
if (IsKeyDown(KEY_W)) position[2] -= speed;
if (IsKeyDown(KEY_S)) position[2] += speed;
if (IsKeyDown(KEY_A)) position[0] += speed;
if (IsKeyDown(KEY_D)) position[0] -= speed;
if (IsKeyDown(KEY_SPACE)) position[1] += speed;
if (IsKeyDown(KEY_LEFT_SHIFT)) position[1] -= speed;
if (IsKeyDown(KEY_LEFT_CONTROL)) speed = 3.0f;
else speed = 2.0f;
for (int i = 0; i < projected_vertices.length(); i += 2)
DrawCircle(projected_vertices[i] + offset[0], projected_vertices[i + 1] + offset[1], 5, RAYWHITE);
for (int i = 0; i < indices.length(); i += 2) {
if (projected_vertices.length() >= indices[i] * 2) {
DrawLine(projected_vertices[indices[i] * 2] + offset[0], projected_vertices[indices[i] * 2 + 1] + offset[1],
projected_vertices[indices[i + 1] * 2] + offset[0], projected_vertices[indices[i + 1] * 2 + 1] + offset[1], RAYWHITE);
}
}
EndDrawing();
}
CloseWindow();
return 0;
}
```
# The RSX Package Manager: Raid
A package manager for RSX
# Raid Logo

## How to install
Raid comes with RSX no need to install anything
### Difference between raidpy and raid
- raid only works on Windows
- raidpy requires python but raid doesn't
## How to create a new Raid project
```
[raidpy/raid] new console my_console_project
```
## How to [run/build] a Raid project
```
[raidpy/raid] [run/build] my_console_project
```
# Libraries
- rsxbuild
- rsxthread
- rsxio
- rsxf
- rsxgui
- rsxmath
- rsxmixer
- rsxrand
- rsxraylib
- rsxstr
- rsxstd
- rsxsys
- rsxterm
- rsxtime
- rsxos
- rsxsocket
- rsxsdl2
# Standard library modules.
import logging
import os
import re
# External dependencies.
from humanfriendly import compact
from property_manager import (
PropertyManager,
mutable_property,
required_property,
set_property,
)
# Modules included in our package.
from rsync_system_backup.exceptions import (
InvalidDestinationError,
ParentDirectoryUnavailable,
)
RSYNCD_PORT = 873
"""
The default port of the `rsync daemon`_ (an integer).
.. _rsync daemon: https://manpages.debian.org/rsyncd.conf
"""
LOCAL_DESTINATION = re.compile('^(?P<directory>.+)$')
"""
A compiled regular expression pattern to parse local destinations,
used as a fall back because it matches any nonempty string.
"""
SSH_DESTINATION = re.compile('''
^ ( (?P<username> [^@]+ ) @ )? # optional username
(?P<hostname> [^:]+ ) : # mandatory host name
(?P<directory> .* ) # optional pathname
''', re.VERBOSE)
"""
A compiled regular expression pattern to parse remote destinations
of the form ``[USER@]HOST:DEST`` (using an SSH connection).
"""
SIMPLE_DAEMON_DESTINATION = re.compile('''
^ ( (?P<username> [^@]+ ) @ )? # optional username
(?P<hostname> [^:]+ ) :: # mandatory host name
(?P<module> [^/]+ ) # mandatory module name
( / (?P<directory> .* ) )? $ # optional pathname (without leading slash)
''', re.VERBOSE)
"""
A compiled regular expression pattern to parse remote destinations of the
form ``[USER@]HOST::MODULE[/DIRECTORY]`` (using an rsync daemon connection).
"""
ADVANCED_DAEMON_DESTINATION = re.compile(r'''
^ rsync:// # static prefix
( (?P<username>[^@]+) @ )? # optional username
(?P<hostname> [^:/]+ ) # mandatory host name
( : (?P<port_number> \d+ ) )? # optional port number
/ (?P<module> [^/]+ ) # mandatory module name
( / (?P<directory> .* ) )? $ # optional pathname (without leading slash)
''', re.VERBOSE)
"""
A compiled regular expression pattern to parse remote destinations of the form
``rsync://[USER@]HOST[:PORT]/MODULE[/DIRECTORY]`` (using an rsync daemon
connection).
"""
DESTINATION_PATTERNS = [
ADVANCED_DAEMON_DESTINATION,
SIMPLE_DAEMON_DESTINATION,
SSH_DESTINATION,
LOCAL_DESTINATION,
]
"""
A list of compiled regular expression patterns to match destination
expressions. The patterns are ordered by decreasing specificity.
"""
# Public identifiers that require documentation.
__all__ = (
'logger',
'RSYNCD_PORT',
'LOCAL_DESTINATION',
'SSH_DESTINATION',
'SIMPLE_DAEMON_DESTINATION',
'ADVANCED_DAEMON_DESTINATION',
'DESTINATION_PATTERNS',
'Destination',
)
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
class Destination(PropertyManager):

    """
    The :class:`Destination` class represents a location where backups are stored.

    The :attr:`expression` property is a required property whose value is
    parsed to populate the values of the :attr:`username`, :attr:`hostname`,
    :attr:`port_number`, :attr:`module` and :attr:`directory` properties.

    When you read the value of the :attr:`expression` property you get back a
    computed value based on the values of the previously mentioned properties.
    This makes it possible to manipulate the destination before passing it on
    to rsync.
    """

    @required_property
    def expression(self):
        """
        The destination in rsync's command line syntax (a string).

        :raises: :exc:`.InvalidDestinationError` when you try to set
                 this property to a value that cannot be parsed.
        """
        if not (self.hostname or self.directory):
            # This is a bit tricky: Returning None here ensures that a
            # TypeError will be raised when a Destination object is
            # created without specifying a value for `expression'.
            return None
        # Rebuild the expression from the parsed components, choosing the
        # rsync daemon URL form whenever a module name is set.
        value = 'rsync://' if self.module else ''
        if self.hostname:
            if self.username:
                value += self.username + '@'
            value += self.hostname
            if self.module:
                if self.port_number:
                    value += ':%s' % self.port_number
                value += '/' + self.module
            else:
                value += ':'
        if self.directory:
            value += self.directory
        return value

    @expression.setter
    def expression(self, value):
        """Automatically parse expression strings."""
        # Patterns are ordered most-specific first; the catch-all local
        # pattern guarantees a match for any non-empty string.
        for pattern in DESTINATION_PATTERNS:
            match = pattern.match(value)
            if match:
                captures = match.groupdict()
                non_empty = dict((n, c) for n, c in captures.items() if c)
                self.set_properties(**non_empty)
                break
        else:
            msg = "Failed to parse expression! (%s)"
            raise InvalidDestinationError(msg % value)

    @mutable_property
    def directory(self):
        """The pathname of the directory where the backup should be written (a string)."""
        return ''

    @mutable_property
    def hostname(self):
        """The host name or IP address of a remote system (a string)."""
        return ''

    @mutable_property
    def module(self):
        """The name of a module exported by an `rsync daemon`_ (a string)."""
        return ''

    @mutable_property
    def parent_directory(self):
        """
        The pathname of the parent directory of the backup directory (a string).

        :raises: :exc:`.ParentDirectoryUnavailable` when the parent directory
                 can't be determined because :attr:`directory` is empty or '/'.
        """
        directory = os.path.dirname(self.directory.rstrip('/'))
        if not directory:
            raise ParentDirectoryUnavailable(compact("""
                Failed to determine the parent directory of the destination
                directory! This makes it impossible to create and rotate
                snapshots for the destination {dest}.
            """, dest=self.expression))
        return directory

    @mutable_property
    def port_number(self):
        """
        The port number of a remote `rsync daemon`_ (a number).

        When :attr:`ssh_tunnel` is set the value of :attr:`port_number`
        defaults to :attr:`executor.ssh.client.SecureTunnel.local_port`,
        otherwise it defaults to :data:`RSYNCD_PORT`.
        """
        return self.ssh_tunnel.local_port if self.ssh_tunnel is not None else RSYNCD_PORT

    @port_number.setter
    def port_number(self, value):
        """Automatically coerce port numbers to integers."""
        set_property(self, 'port_number', int(value))

    @mutable_property
    def ssh_tunnel(self):
        """A :class:`~executor.ssh.client.SecureTunnel` object or :data:`None` (defaults to :data:`None`)."""

    @mutable_property
    def username(self):
        """The username for connecting to a remote system (a string)."""
        return ''

    def __enter__(self):
        """Automatically open :attr:`ssh_tunnel` when required."""
        if self.ssh_tunnel:
            self.ssh_tunnel.__enter__()
        return self

    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        """Automatically close :attr:`ssh_tunnel` when required."""
        if self.ssh_tunnel:
            self.ssh_tunnel.__exit__(exc_type, exc_value, traceback)
# Standard library modules.
import logging
import os
import time
# External dependencies.
from executor import quote
from executor.contexts import LocalContext, create_context
from humanfriendly import Timer, compact, concatenate
from linux_utils.crypttab import parse_crypttab
from linux_utils.luks import cryptdisks_start, cryptdisks_stop
from proc.notify import notify_desktop
from property_manager import (
PropertyManager,
cached_property,
clear_property,
lazy_property,
mutable_property,
required_property,
set_property,
)
from rotate_backups import Location, RotateBackups
# Modules included in our package.
from rsync_system_backup.destinations import Destination
from rsync_system_backup.exceptions import (
DestinationContextUnavailable,
FailedToMountError,
FailedToUnlockError,
InvalidDestinationDirectory,
MissingBackupDiskError,
UnsupportedPlatformError,
)
# Semi-standard module versioning.
__version__ = '1.1'

# Public identifiers that require documentation.
# NOTE(review): `ensure_trailing_slash` is expected to be defined further
# down in this module — confirm it exists before relying on the export.
__all__ = (
    'DEFAULT_ROTATION_SCHEME',
    'RsyncSystemBackup',
    '__version__',
    'ensure_trailing_slash',
    'logger',
)

# Initialize a logger for this module.
logger = logging.getLogger(__name__)

DEFAULT_ROTATION_SCHEME = dict(hourly=24, daily=7, weekly=4, monthly='always')
"""The default rotation scheme for system backup snapshots (a dictionary)."""
class RsyncSystemBackup(PropertyManager):
"""
Python API for the ``rsync-system-backup`` program.
The :func:`execute()` method is the main entry point.
If you're looking for finer grained control refer to
:func:`unlock_device()`, :func:`mount_filesystem()`,
:func:`transfer_changes()`, :func:`create_snapshot()`
and :func:`rotate_snapshots()`.
"""
    @mutable_property
    def backup_enabled(self):
        """:data:`True` to enable :func:`transfer_changes()`, :data:`False` otherwise."""
        return True

    @mutable_property
    def crypto_device(self):
        """The name of the encrypted filesystem to use (a string or :data:`None`)."""

    @property
    def crypto_device_available(self):
        """
        :data:`True` if the encrypted filesystem is available, :data:`False` otherwise.

        This property is an alias for the
        :attr:`~linux_utils.crypttab.EncryptedFileSystemEntry.is_available`
        property of :attr:`crypttab_entry`.
        """
        # False (not an error) when the device has no /etc/crypttab entry.
        return self.crypttab_entry.is_available if self.crypttab_entry else False

    @property
    def crypto_device_unlocked(self):
        """
        :data:`True` if the encrypted filesystem is unlocked, :data:`False` otherwise.

        This property is an alias for the
        :attr:`~linux_utils.crypttab.EncryptedFileSystemEntry.is_unlocked`
        property of :attr:`crypttab_entry`.
        """
        return self.crypttab_entry.is_unlocked if self.crypttab_entry else False

    @cached_property
    def crypttab_entry(self):
        """
        The entry in ``/etc/crypttab`` corresponding to :attr:`crypto_device`.

        The value of this property is computed automatically by parsing
        ``/etc/crypttab`` and looking for an entry whose `target` (the
        first of the four fields) matches :attr:`crypto_device`.

        When an entry is found an
        :class:`~linux_utils.crypttab.EncryptedFileSystemEntry` object is
        constructed, otherwise the result is :data:`None`.
        """
        if self.crypto_device:
            logger.debug("Parsing /etc/crypttab to determine device file of encrypted filesystem %r ..",
                         self.crypto_device)
            for entry in parse_crypttab(context=self.destination_context):
                if entry.target == self.crypto_device:
                    return entry
        # Implicitly returns None when crypto_device is unset or unmatched.
    @required_property
    def destination(self):
        """
        The destination where backups are stored (a :class:`.Destination` object).

        The value of :attr:`destination` defaults to the value of the
        environment variable ``$RSYNC_MODULE_PATH`` which is set by the `rsync
        daemon`_ before it runs the ``post-xfer exec`` command.
        """
        rsync_module_path = os.environ.get('RSYNC_MODULE_PATH')
        return (Destination(expression=rsync_module_path)
                if rsync_module_path else None)

    @destination.setter
    def destination(self, value):
        """Automatically coerce strings to :class:`.Destination` objects."""
        if not isinstance(value, Destination):
            value = Destination(expression=value)
        set_property(self, 'destination', value)
        # The cached context depends on the destination, so invalidate it.
        clear_property(self, 'destination_context')

    @cached_property
    def destination_context(self):
        """
        The execution context of the system that stores the backup (the destination).

        This is an execution context created by :mod:`executor.contexts`.

        :raises: :exc:`.DestinationContextUnavailable` when the destination is
                 an rsync daemon module (which doesn't allow arbitrary command
                 execution).
        """
        if self.destination.module:
            raise DestinationContextUnavailable(compact("""
                Error: The execution context of the backup destination isn't
                available because the destination ({dest}) is an rsync daemon
                module! (tip: reconsider your command line options)
            """, dest=self.destination.expression))
        else:
            context_opts = dict(sudo=self.sudo_enabled)
            if self.destination.hostname:
                context_opts['ssh_alias'] = self.destination.hostname
                context_opts['ssh_user'] = self.destination.username
            return create_context(**context_opts)
    @mutable_property
    def dry_run(self):
        """:data:`True` to simulate the backup without writing any files, :data:`False` otherwise."""
        return False

    @mutable_property
    def multi_fs(self):
        """
        :data:`True` to allow rsync to cross filesystem boundaries, :data:`False` otherwise.

        This property has the opposite effect of the rsync command line
        option ``--one-file-system`` because :attr:`multi_fs` defaults to
        :data:`False` which means rsync is run with ``--one-file-system``.
        You can set :attr:`multi_fs` to :data:`True` to omit
        ``--one-file-system`` from the rsync command line.
        """
        return False

    @lazy_property(writable=True)
    def exclude_list(self):
        """
        A list of patterns (strings) that are excluded from the system backup.

        The patterns in :attr:`exclude_list` are passed on to rsync using
        the ``--exclude`` option.
        """
        return []

    @lazy_property(writable=True)
    def excluded_roots(self):
        """
        A list of patterns (strings) that are excluded from the system backup.

        All of the patterns in this list will be rooted to the top of the
        filesystem hierarchy when they're given to rsync, to avoid
        unintentionally excluding deeply nested directories that happen to
        match names in this list. This is done using the ``--filter=-/
        PATTERN`` option.
        """
        # Virtual/volatile filesystems and cache directories that never
        # belong in a system backup.
        return [
            '/dev/',
            '/home/*/.cache/',
            '/media/',
            '/mnt/',
            '/proc/',
            '/run/',
            '/sys/',
            '/tmp/',
            '/var/cache/',
            '/var/tmp/',
        ]

    @mutable_property
    def force(self):
        """:data:`True` to run `rsync-system-backup` on unsupported platforms, :data:`False` otherwise."""
        return False

    @mutable_property
    def ionice(self):
        """
        The I/O scheduling class for rsync (a string or :data:`None`).

        When this property is set ionice_ will be used to set the I/O
        scheduling class for rsync. This can be useful to reduce the
        impact of backups on the rest of the system.

        The value of this property is expected to be one of
        the strings 'idle', 'best-effort' or 'realtime'.

        .. _ionice: https://manpages.debian.org/ionice
        """

    @mutable_property
    def mount_point(self):
        """The pathname of the mount point to use (a string or :data:`None`)."""

    @property
    def mount_point_active(self):
        """:data:`True` if :attr:`mount_point` is mounted already, :data:`False` otherwise."""
        return (self.destination_context.test('mountpoint', self.mount_point)
                if self.mount_point else False)

    @mutable_property
    def notifications_enabled(self):
        """
        Whether desktop notifications are used (a boolean).

        By default desktop notifications are enabled when a real backup is
        being made but disabled during dry runs.
        """
        return not self.dry_run

    @mutable_property
    def rotation_scheme(self):
        """The rotation scheme for snapshots (a dictionary, defaults to :data:`DEFAULT_ROTATION_SCHEME`)."""
        return DEFAULT_ROTATION_SCHEME

    @mutable_property
    def snapshot_enabled(self):
        """:data:`True` to enable :func:`create_snapshot()`, :data:`False` otherwise."""
        return True

    @mutable_property
    def source(self):
        """The pathname of the directory to backup (a string, defaults to '/')."""
        return '/'

    @lazy_property(writable=True)
    def source_context(self):
        """
        The execution context of the system that is being backed up (the source).

        This is expected to be an execution context created by
        :mod:`executor.contexts`. It defaults to
        :class:`executor.contexts.LocalContext`.
        """
        return LocalContext()

    @mutable_property
    def rotate_enabled(self):
        """:data:`True` to enable :func:`rotate_snapshots()`, :data:`False` otherwise."""
        return True

    @mutable_property
    def sudo_enabled(self):
        """:data:`True` to run ``rsync`` and snapshot creation with superuser privileges, :data:`False` otherwise."""
        return True
    def execute(self):
        """
        Execute the requested actions (backup, snapshot and/or rotate).

        The :func:`execute()` method defines the high level control flow
        of the backup / snapshot / rotation process according to
        the caller's requested configuration:

        1. When :attr:`backup_enabled` is set :func:`notify_starting()` shows a
           desktop notification to give the user a heads up that a system
           backup is about to start (because the backup may have a noticeable
           impact on system performance).
        2. When :attr:`crypto_device` is set :func:`unlock_device()` ensures
           that the configured encrypted device is unlocked.
        3. When :attr:`mount_point` is set :func:`mount_filesystem()` ensures
           that the configured filesystem is mounted.
        4. When :attr:`backup_enabled` is set :func:`transfer_changes()`
           creates or updates the system backup on :attr:`destination`
           using rsync.
        5. When :attr:`snapshot_enabled` is set :func:`create_snapshot()`
           creates a snapshot of the :attr:`destination` directory.
        6. When :attr:`rotate_enabled` is set :func:`rotate_snapshots()`
           rotates snapshots.
        7. When :attr:`backup_enabled` is set :func:`notify_finished()` shows
           a desktop notification to give the user a heads up that the
           system backup has finished (or failed).
        """
        self.ensure_supported_platform()
        try:
            # We use a `with' statement to enable cleanup commands that
            # are run before this method returns. The unlock_device()
            # and mount_filesystem() methods depend on this.
            with self.destination_context:
                self.execute_helper()
        except DestinationContextUnavailable:
            # When the destination is an rsync daemon module we can't just
            # assume that the same server is also accessible over SSH, so in
            # this case no destination context is available.
            self.execute_helper()
def ensure_supported_platform(self):
"""
Make sure we're running on a supported platform.
:raises: :exc:`.UnsupportedPlatformError` when the output of the
``uname`` command doesn't include the word 'Linux' and
:attr:`force` is :data:`False`.
When :attr:`force` is :data:`True` this method logs a warning message
instead of raising an exception.
"""
uname_output = self.source_context.capture('uname', capture=True, check=False, shell=False)
if 'linux' not in uname_output.lower():
if self.force:
logger.warning(compact("""
It looks like you aren't running Linux (which is the only
platform supported by rsync-system-backup) however the -f,
--force option was given so I will continue anyway. Please
note that you are on your own if things break!
"""))
else:
raise UnsupportedPlatformError(compact("""
It looks like you aren't running Linux, which is the only
platform supported by rsync-system-backup! You can use the
-f, --force option to override this sanity check. Please
note that you are on your own if things break.
"""))
    def execute_helper(self):
        """
        Helper for :func:`execute()`.

        Runs the unlock / mount / transfer / snapshot / rotate sequence and
        emits desktop notifications about the outcome. Exceptions raised by
        the individual steps propagate to the caller (after a failure
        notification has been shown).
        """
        timer = Timer()
        # Human readable descriptions of the actions performed, used for the
        # summary log message at the end.
        actions = []
        # Fail early when the encrypted backup disk isn't connected.
        if self.crypto_device and not self.crypto_device_available:
            msg = "Encrypted filesystem %s isn't available! (the device file %s doesn't exist)"
            raise MissingBackupDiskError(msg % (self.crypto_device, self.crypttab_entry.source_device))
        if self.backup_enabled:
            self.notify_starting()
        self.unlock_device()
        try:
            self.mount_filesystem()
            if self.backup_enabled:
                self.transfer_changes()
                actions.append('create backup')
            if self.snapshot_enabled:
                self.create_snapshot()
                actions.append('create snapshot')
            if self.rotate_enabled:
                self.rotate_snapshots()
                actions.append('rotate old snapshots')
        except Exception:
            # Notify about the failure before propagating the exception.
            self.notify_failed(timer)
            raise
        else:
            if self.backup_enabled:
                self.notify_finished(timer)
        if actions:
            logger.info("Took %s to %s.", timer, concatenate(actions))
def notify_starting(self):
"""Notify the desktop environment that a system backup is starting."""
if self.notifications_enabled:
body = "Starting dry-run" if self.dry_run else "Starting backup"
notify_desktop(summary="System backups", body=body)
def notify_finished(self, timer):
"""Notify the desktop environment that a system backup has finished."""
if self.notifications_enabled:
body = "Finished backup in %s." % timer
notify_desktop(summary="System backups", body=body)
def notify_failed(self, timer):
"""Notify the desktop environment that a system backup has failed."""
if self.notifications_enabled:
body = "Backup failed after %s! Review the system logs for details." % timer
notify_desktop(summary="System backups", body=body, urgency='critical')
    def unlock_device(self):
        """
        Automatically unlock the encrypted filesystem to which backups are written.

        :raises: The following exceptions can be raised:

                 - :exc:`.DestinationContextUnavailable`, refer
                   to :attr:`destination_context` for details.
                 - :exc:`.FailedToUnlockError` when the device still isn't
                   unlocked after running cryptdisks_start_.
                 - :exc:`~executor.ExternalCommandFailed` when the
                   cryptdisks_start_ command reports an error.

        When :attr:`crypto_device` is set this method uses
        :func:`~linux_utils.luks.cryptdisks_start()` to unlock the encrypted
        filesystem to which backups are written before the backup starts. When
        :func:`~linux_utils.luks.cryptdisks_start()` was called before the
        backup started, :func:`~linux_utils.luks.cryptdisks_stop()` will be
        called when the backup finishes.

        To enable the use of :func:`~linux_utils.luks.cryptdisks_start()` and
        :func:`~linux_utils.luks.cryptdisks_stop()` you need to create an
        `/etc/crypttab`_ entry that maps your physical device to a symbolic
        name. If you want this process to run fully unattended you can
        configure a key file in `/etc/crypttab`_, otherwise you will be asked
        for the password when the encrypted filesystem is unlocked.

        .. _/etc/crypttab: https://manpages.debian.org/crypttab
        .. _cryptdisks_start: https://manpages.debian.org/cryptdisks_start
        """
        if self.crypto_device:
            if self.crypto_device_unlocked:
                # Already unlocked (by the user or a previous run): leave it
                # alone and don't schedule a cleanup that would lock it.
                logger.info("Encrypted filesystem is already unlocked (%s) ..", self.crypto_device)
            else:
                cryptdisks_start(
                    context=self.destination_context,
                    target=self.crypto_device,
                )
                # cryptdisks_start() returning isn't proof that the mapped
                # device actually appeared, so double check.
                if not self.crypto_device_unlocked:
                    msg = "Failed to unlock encrypted filesystem! (%s)"
                    raise FailedToUnlockError(msg % self.crypto_device)
                # Schedule cryptdisks_stop() to run when the destination
                # context unwinds: we only re-lock what we unlocked ourselves.
                self.destination_context.cleanup(
                    cryptdisks_stop,
                    context=self.destination_context,
                    target=self.crypto_device,
                )
def mount_filesystem(self):
"""
Automatically mount the filesystem to which backups are written.
:raises: The following exceptions can be raised:
- :exc:`.DestinationContextUnavailable`, refer
to :attr:`destination_context` for details.
- :exc:`~executor.ExternalCommandFailed` when
the mount_ command reports an error.
When :attr:`mount_point` is set this method uses the mount_ command to
mount the filesystem to which backups are written before the backup
starts. When mount_ was called before the backup started, umount_ will
be called when the backup finishes. An entry for the mount point needs
to be defined in `/etc/fstab`_.
.. _mount: https://manpages.debian.org/mount
.. _umount: https://manpages.debian.org/umount
.. _/etc/fstab: https://manpages.debian.org/fstab
"""
if self.mount_point:
if self.mount_point_active:
logger.info("Filesystem is already mounted (%s) ..", self.mount_point)
else:
logger.info("Mounting filesystem (%s) ..", self.mount_point)
self.destination_context.execute('mount', self.mount_point, sudo=True)
if not self.mount_point_active:
msg = "Failed to mount filesystem! (%s)"
raise FailedToMountError(msg % self.crypto_device)
self.destination_context.cleanup('umount', self.mount_point, sudo=True)
    def transfer_changes(self):
        """
        Use rsync to synchronize the files on the local system to the backup destination.

        :raises: :exc:`.InvalidDestinationDirectory` when :attr:`mount_point`
                 is set and :attr:`destination` is a local directory that is
                 not located under :attr:`mount_point`.

        The rsync exit codes 23 and 24 (partial transfers) are tolerated
        with a warning because they are expected when backing up a running
        system; any other nonzero exit status raises an exception.
        """
        # Attempt to ensure that the destination directory is located under the
        # mount point to prevent the user from shooting themselves in the foot.
        if self.mount_point and not self.destination.hostname:
            mount_point = os.path.abspath(self.mount_point)
            destination = os.path.abspath(self.destination.directory)
            common_prefix = os.path.commonprefix([mount_point, destination])
            if os.path.abspath(common_prefix) != mount_point:
                msg = "Destination directory (%s) not located under mount point (%s)!"
                raise InvalidDestinationDirectory(msg % (destination, mount_point))
        # The following `with' statement enables rsync daemon connections
        # tunneled over SSH. For this use case we spawn a local SSH client with
        # port forwarding configured, wait for the forwarded port to become
        # connected, have rsync connect through the tunnel and shut down the
        # SSH client after rsync is finished.
        with self.destination:
            rsync_command = ['rsync']
            if self.dry_run:
                rsync_command.append('--dry-run')
                rsync_command.append('--verbose')
            # The following rsync options delete files in the backup
            # destination that no longer exist on the local system.
            # Due to snapshotting this won't cause data loss.
            rsync_command.append('--delete')
            rsync_command.append('--delete-excluded')
            # The following rsync options are intended to preserve
            # as much filesystem metadata as possible.
            rsync_command.append('--acls')
            rsync_command.append('--archive')
            rsync_command.append('--hard-links')
            rsync_command.append('--numeric-ids')
            rsync_command.append('--xattrs')
            # The following rsync option avoids including mounted external
            # drives like USB sticks in system backups.
            if not self.multi_fs:
                rsync_command.append('--one-file-system')
            # The following rsync options exclude irrelevant directories (to my
            # subjective mind) from the system backup.
            for pattern in self.excluded_roots:
                rsync_command.append('--filter=-/ %s' % pattern)
            # The following rsync options allow user defined exclusion.
            for pattern in self.exclude_list:
                rsync_command.append('--exclude=%s' % pattern)
            # Source the backup from the root of the local filesystem
            # and make sure the pathname ends in a trailing slash.
            rsync_command.append(ensure_trailing_slash(self.source))
            # Target the backup at the configured destination.
            rsync_command.append(ensure_trailing_slash(self.destination.expression))
            # Automatically create missing destination directories.
            try:
                if not self.destination_context.is_directory(self.destination.directory):
                    logger.info("Creating missing destination directory: %s", self.destination.directory)
                    self.destination_context.execute('mkdir', '-p', self.destination.directory, tty=False)
            except DestinationContextUnavailable:
                # Don't fail when the destination doesn't allow for this
                # (because its an rsync daemon module).
                pass
            # Execute the rsync command.
            timer = Timer()
            logger.info("Creating system backup using rsync ..")
            cmd = self.source_context.execute(*rsync_command, **dict(
                # Don't raise an exception when rsync exits with
                # a nonzero status code. From `man rsync':
                #  - 23: Partial transfer due to error.
                #  - 24: Partial transfer due to vanished source files.
                # This can be expected on a running system
                # without proper filesystem snapshots :-).
                check=False,
                # Clear $HOME so that rsync ignores ~/.cvsignore.
                environment=dict(HOME=''),
                # Run rsync under ionice.
                ionice=self.ionice,
                # Run rsync with superuser privileges so that it has read
                # access to all files on the local filesystem?
                sudo=self.sudo_enabled,
            ))
            if cmd.returncode in (0, 23, 24):
                logger.info("Took %s to create backup.", timer)
                if cmd.returncode != 0:
                    logger.warning(
                        "Ignoring `partial transfer' warnings (rsync exited with %i).",
                        cmd.returncode,
                    )
            else:
                logger.error("Backup failed after %s! (rsync exited with %i)",
                             timer, cmd.returncode)
                raise cmd.error_type(cmd)
def create_snapshot(self):
"""
Create a snapshot of the destination directory.
:raises: The following exceptions can be raised:
- :exc:`.DestinationContextUnavailable`, refer
to :attr:`destination_context` for details.
- :exc:`.ParentDirectoryUnavailable`, refer
to :attr:`.parent_directory` for details.
- :exc:`~executor.ExternalCommandFailed` when
the ``cp`` command reports an error.
"""
# Compose the `cp' command needed to create a snapshot.
snapshot = os.path.join(self.destination.parent_directory,
time.strftime('%Y-%m-%d %H:%M:%S'))
cp_command = [
'cp', '--archive', '--link',
self.destination.directory,
snapshot,
]
# Execute the `cp' command?
if self.dry_run:
logger.info("Snapshot command: %s", quote(cp_command))
else:
timer = Timer()
logger.info("Creating snapshot: %s", snapshot)
self.destination_context.execute(*cp_command, ionice=self.ionice)
logger.info("Took %s to create snapshot.", timer)
def rotate_snapshots(self):
"""
Rotate system backup snapshots using :mod:`.rotate_backups`.
:raises: The following exceptions can be raised:
- :exc:`.DestinationContextUnavailable`, refer
to :attr:`destination_context` for details.
- :exc:`.ParentDirectoryUnavailable`, refer
to :attr:`.parent_directory` for details.
- Any exceptions raised by :mod:`.rotate_backups`.
The values of the :attr:`dry_run`, :attr:`ionice` and
:attr:`rotation_scheme` properties are passed on to the
:class:`~rotate_backups.RotateBackups` class.
"""
helper = RotateBackups(
dry_run=self.dry_run,
io_scheduling_class=self.ionice,
rotation_scheme=self.rotation_scheme,
)
helper.rotate_backups(Location(
context=self.destination_context,
directory=self.destination.parent_directory,
))
def ensure_trailing_slash(expression):
    """
    Normalize an rsync source/destination location to end in a slash.

    :param expression: The rsync source/destination expression (a string).
    :returns: The same expression with exactly one trailing slash, or the
              value unchanged when it is empty.
    """
    if not expression:
        return expression
    # Collapse any number of existing trailing slashes into exactly one.
    return expression.rstrip('/') + '/'
"""rsync-time-machine.py: A script for creating and managing time-stamped backups using rsync."""
import argparse
import asyncio
import os
import re
import signal
import sys
import time
from datetime import datetime
from types import FrameType
from typing import Callable, Dict, List, NamedTuple, Optional, Tuple
# Program name used as the prefix of every log line.
APPNAME = "rsync-time-machine.py"
# Module-level verbosity default; presumably flipped by the caller when the
# --verbose flag is given (the assignment happens outside this view).
VERBOSE = False
class SSH(NamedTuple):
    """SSH connection details."""

    src_folder_prefix: str  # "user@host:" when the source is remote, else "".
    dest_folder_prefix: str  # "user@host:" when the destination is remote, else "".
    cmd: str  # Full ssh invocation used to run commands on the remote host.
    src_folder: str  # Source path without any user@host: prefix.
    dest_folder: str  # Destination path without any user@host: prefix.
    port: str  # SSH port (kept as a string, passed to `ssh -p`).
    id_rsa: Optional[str]  # Path to the private key, or None for the default.
# ANSI escape codes for terminal colors, keyed by human-readable name.
COLORS = {
    "green": "\033[92m",
    "magenta": "\033[95m",
    "yellow": "\033[93m",
    "red": "\033[91m",
    "orange": "\033[33m",
}
def style(text: str, color: Optional[str] = None, *, bold: bool = False) -> str:
    """Wrap *text* in ANSI escape codes for the given color and boldness."""
    prefix = ""
    if bold:
        prefix += "\033[1m"
    # Unknown (or None) color names simply add no color code.
    prefix += COLORS.get(color, "")  # type: ignore[arg-type]
    return f"{prefix}{text}\033[0m"
def sanitize(s: str) -> str:
    """Round-trip *s* through UTF-8 so lone surrogates become replacement characters."""
    # See https://github.com/basnijholt/rsync-time-machine.py/issues/1
    encoded = s.encode("utf-8", "surrogateescape")
    return encoded.decode("utf-8", "replace")
def log(message: str, level: str = "info") -> None:
    """Print *message* with the program prefix; warnings/errors go to stderr."""
    prefixes = {"info": "", "warning": "[WARNING] ", "error": "[ERROR] "}
    if level in {"warning", "error"}:
        stream = sys.stderr
    else:
        stream = sys.stdout
    text = sanitize(message)
    header = style(APPNAME, bold=True)
    print(f"{header}: {prefixes[level]}{text}", file=stream)
def log_info(message: str) -> None:
    """Log an informational message to stdout."""
    log(message, level="info")
def log_warn(message: str) -> None:
    """Log a warning (styled orange) to stderr."""
    styled = style(message, "orange")
    log(styled, "warning")
def log_error(message: str) -> None:
    """Log an error (styled bold red) to stderr."""
    styled = style(message, "red", bold=True)
    log(styled, "error")
def log_info_cmd(message: str, ssh: Optional[SSH] = None) -> None:
    """Log an info message, prefixed with the SSH command when running remotely."""
    if ssh is None:
        log_info(message)
    else:
        log_info(f"{ssh.cmd} '{message}'")
def terminate_script(
    _signal_number: int,
    _frame: Optional[FrameType],
) -> None:
    """Signal handler that exits with status 1 when CTRL+C (SIGINT) is caught."""
    log_info("SIGINT caught.")
    raise SystemExit(1)
def parse_arguments() -> argparse.Namespace:  # pragma: no cover
    """Parse command-line arguments and return the parsed arguments.

    (Replaces argument parsing part in the Bash script).

    Also resolves the `exclusion_file` / `--exclude-from` alternatives and
    records (in the private ``_auto_delete_log`` attribute) whether the
    default log directory is in use.
    """
    parser = argparse.ArgumentParser(
        description="A script for creating and managing time-stamped backups using rsync.",
    )
    parser.add_argument("-p", "--port", default="22", help="SSH port.")
    parser.add_argument("-i", "--id_rsa", help="Specify the private ssh key to use.")
    parser.add_argument(
        "--rsync-get-flags",
        action="store_true",
        help="Display the default rsync flags that are used for backup. If using remote drive over SSH, --compress will be added.",
    )
    parser.add_argument(
        "--rsync-set-flags",
        help="Set the rsync flags that are going to be used for backup.",
    )
    parser.add_argument(
        "--rsync-append-flags",
        help="Append the rsync flags that are going to be used for backup.",
    )
    # Keep the default around so we can detect below whether the user overrode it.
    log_dir_default = "$HOME/.rsync-time-backup"
    parser.add_argument(
        "--log-dir",
        default=log_dir_default,
        help="Set the log file directory. If this flag is set, generated files will not be managed by the script - in particular they will not be automatically deleted. Default: $HOME/.rsync-time-backup",  # noqa: E501
    )
    parser.add_argument(
        "--strategy",
        default="1:1 30:7 365:30",
        help='Set the expiration strategy. Default: "1:1 30:7 365:30" means after one day, keep one backup per day. After 30 days, keep one backup every 7 days. After 365 days keep one backup every 30 days.',  # noqa: E501
    )
    parser.add_argument(
        "--no-auto-expire",
        action="store_true",
        help="Disable automatically deleting backups when out of space. Instead, an error is logged, and the backup is aborted.",
    )
    parser.add_argument(
        "--allow-host-only",
        action="store_true",
        help="By default, the script expects a 'USER@HOST' pattern for specifying SSH connections."
        " When this flag is used, it allows for the 'HOST' pattern without a specified user."
        " This is useful if you want to use configurations from the `.ssh/config` file or rely on the current username."
        " Note: this option will not enforce SSH usage, it only broadens the accepted input formats.",
    )
    parser.add_argument(
        "src_folder",
        help="Source folder for backup. Format: [USER@HOST:]SOURCE",
    )
    parser.add_argument(
        "dest_folder",
        help="Destination folder for backup. Format: [USER@HOST:]DESTINATION",
    )
    parser.add_argument(
        "exclusion_file",
        nargs="?",
        help="Path to the file containing exclude patterns."
        " Cannot be used together with `--exclude-from`.",
    )
    parser.add_argument(
        "--exclude-from",
        dest="exclude_from",
        help="Path to the file containing exclude patterns."
        " Alternative to the positional `exclusion_file`."
        " Not to be used with `exclusion_file`.",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Enable verbose output. This will slow down the backup process (in simple tests by 2x).",
    )
    args = parser.parse_args()
    # If both positional exclusion_file and optional --exclude-from are provided, raise an error
    if args.exclusion_file and args.exclude_from:
        parser.error(
            "Both positional `exclusion_file` and `--exclude-from` were"
            " provided. Please use only one of them.",
        )
    # If --exclude-from is provided, set exclusion_file to its value
    if args.exclude_from:
        args.exclusion_file = args.exclude_from
    # True when the default log dir is used; the caller may then auto-delete
    # the generated log files.
    args._auto_delete_log = args.log_dir == log_dir_default
    return args
def parse_ssh_pattern(
    folder: str,
    *,
    allow_host_only: bool = False,
) -> Optional[Dict[str, str]]:
    """Split a ``[USER@]HOST:PATH`` expression into its parts, or return None.

    A match without a user part is only accepted when *allow_host_only* is
    true; plain local paths never match.
    """
    pattern = r"^(?:(?P<user>[a-z0-9\._\-]+)@)?(?P<host>[A-Za-z0-9.\-]+):(?P<path>.+)$"
    match = re.match(pattern, folder)
    if match is None:
        return None
    parts = match.groupdict()
    if parts["user"] is None and not allow_host_only:
        return None
    return parts
def parse_ssh(
    src_folder: str,
    dest_folder: str,
    *,
    ssh_port: str,
    id_rsa: Optional[str],
    allow_host_only: bool,
) -> Optional[SSH]:
    """Parse the source and destination folders for SSH usage.

    Returns an :class:`SSH` tuple when either folder uses the
    ``[USER@]HOST:PATH`` form, or None when both folders are local.
    """
    ssh_src = parse_ssh_pattern(src_folder, allow_host_only=allow_host_only)
    ssh_dest = parse_ssh_pattern(dest_folder, allow_host_only=allow_host_only)
    if ssh_src or ssh_dest:
        ssh = ssh_src or ssh_dest
        assert ssh is not None
        ssh_user = ssh["user"] if ssh["user"] else ""
        ssh_host = ssh["host"]
        auth = f"{ssh_user}@{ssh_host}" if ssh_user else ssh_host
        # Bug fix: keep a trailing space after the key path so it doesn't
        # fuse with `auth` (previously this produced "ssh -p 22 -i /keyuser@host").
        # This matches the -e option built in start_backup().
        id_rsa_option = f"-i {id_rsa} " if id_rsa else ""
        ssh_cmd = f"ssh -p {ssh_port} {id_rsa_option}{auth}"
        ssh_src_folder_prefix = f"{auth}:" if ssh_src else ""
        ssh_dest_folder_prefix = f"{auth}:" if ssh_dest else ""
        ssh_src_folder = ssh_src["path"] if ssh_src else src_folder
        ssh_dest_folder = ssh_dest["path"] if ssh_dest else dest_folder
        return SSH(
            ssh_src_folder_prefix,
            ssh_dest_folder_prefix,
            ssh_cmd,
            ssh_src_folder,
            ssh_dest_folder,
            ssh_port,
            id_rsa,
        )
    return None
def parse_date_to_epoch(date_str: str) -> Optional[int]:
    """Parse a backup folder name (``YYYY-MM-DD-HHMMSS``) to a Unix epoch.

    Returns None when *date_str* does not match the expected format, so
    callers (see ``expire_backups``, which checks for None) can skip
    folders they cannot parse instead of crashing with a ValueError.
    """
    try:
        dt = datetime.strptime(date_str, "%Y-%m-%d-%H%M%S")  # noqa: DTZ007
    except ValueError:
        return None
    # Convert the naive local-time datetime object to a Unix Epoch.
    return int(time.mktime(dt.timetuple()))
def find_backups(dest_folder: str, ssh: Optional[SSH] = None) -> List[str]:
    """Return all backup folders (named YYYY-MM-DD-HHMMSS) in *dest_folder*, newest first.

    (Replaces 'fn_find_backups' in the Bash script).
    """
    find_command = f"find '{dest_folder}/' -maxdepth 1 -type d -name '????-??-??-??????' -prune | sort -r"
    result = run_cmd(find_command, ssh)
    return result.stdout.splitlines()
def expire_backup(
    backup_path: str,
    ssh: Optional[SSH],
) -> None:
    """Delete *backup_path* after verifying its parent is a backup destination."""
    parent_dir = os.path.dirname(backup_path)
    # Refuse to delete anything unless the marker file proves the parent is a
    # backup destination - guards against wiping the wrong folder.
    marker = find_backup_marker(parent_dir, ssh)
    if not marker:
        log_error(f"{backup_path} is not on a backup destination - aborting.")
        sys.exit(1)
    log_info(f"Expiring {backup_path}")
    rm_dir(backup_path, ssh)
def expire_backups(
    dest_folder: str,
    expiration_strategy: str,
    backup_to_keep: str,
    ssh: Optional[SSH],
) -> None:
    """Expire backups according to the expiration strategy.

    Walks the backups from oldest to newest and deletes the ones that fall
    inside the retention interval of the strategy token ("X:Y" = after X
    days keep one backup every Y days) that applies to them.  The oldest
    backup and *backup_to_keep* (plus everything newer) are never deleted.
    """
    current_timestamp = int(datetime.now().timestamp())
    # Sentinel far in the future so the first kept backup always resets it.
    last_kept_timestamp = 9999999999
    backups = find_backups(dest_folder, ssh)
    # We will also keep the oldest backup
    oldest_backup_to_keep = sorted(backups)[0] if backups else None
    # Process each backup dir from the oldest to the most recent
    for backup_dir in sorted(backups):
        backup_date = os.path.basename(backup_dir)
        backup_timestamp = parse_date_to_epoch(backup_date)
        # Skip if failed to parse date...
        if backup_timestamp is None:
            log_warn(f"Could not parse date: {backup_dir}")
            continue
        if backup_dir == backup_to_keep:
            # This is the latest backup requested to be kept. We can finish pruning
            break
        if backup_dir == oldest_backup_to_keep:
            # We don't want to delete the oldest backup. It becomes the first "last kept" backup
            last_kept_timestamp = backup_timestamp
            # As we keep it, we can skip processing it and go to the next oldest one in the loop
            continue
        # Find which strategy token applies to this particular backup
        # (tokens are processed from the largest X downwards).
        for strategy_token in sorted(expiration_strategy.split(), reverse=True):
            t = list(map(int, strategy_token.split(":")))
            # After which date (relative to today) this token applies (X) - we use seconds to get exact cut off time
            cut_off_timestamp = current_timestamp - t[0] * 86400
            # Every how many days should a backup be kept past the cut off date (Y) - we use days (not seconds)
            cut_off_interval_days = t[1]
            # If we've found the strategy token that applies to this backup
            if backup_timestamp <= cut_off_timestamp:
                # Special case: if Y is "0" we delete every time
                if cut_off_interval_days == 0:
                    expire_backup(backup_dir, ssh)
                    break
                # We calculate days number since the last kept backup
                last_kept_timestamp_days = last_kept_timestamp // 86400
                backup_timestamp_days = backup_timestamp // 86400
                interval_since_last_kept_days = (
                    backup_timestamp_days - last_kept_timestamp_days
                )
                # Check if the current backup is in the interval between
                # the last backup that was kept and Y
                # to determine what to keep/delete we use days difference
                if interval_since_last_kept_days < cut_off_interval_days:
                    # Yes: Delete that one
                    expire_backup(backup_dir, ssh)
                    # Backup deleted, no point to check shorter timespan strategies - go to the next backup
                    break
                # No: Keep it.
                # This is now the last kept backup
                last_kept_timestamp = backup_timestamp
                # And go to the next backup
                break
def backup_marker_path(folder: str) -> str:
    """Return the path of the ``backup.marker`` file inside *folder*."""
    marker_name = "backup.marker"
    return os.path.join(folder, marker_name)
def find_backup_marker(folder: str, ssh: Optional[SSH] = None) -> Optional[str]:
    """Return the marker path when ``backup.marker`` exists in *folder*, else None."""
    marker = backup_marker_path(folder)
    if find(marker, ssh):
        return marker
    return None
class CmdResult(NamedTuple):
    """Result of a finished (possibly remote) shell command."""

    stdout: str  # Captured standard output (newline-joined lines).
    stderr: str  # Captured standard error (newline-joined lines).
    returncode: int  # Exit status of the process.
async def async_run_cmd(
    cmd: str,
    ssh: Optional[SSH] = None,
) -> CmdResult:
    """Run a command locally or remotely.

    When *ssh* is given the command is wrapped in the stored ssh invocation
    (single-quoted) and executed on the remote host; otherwise it runs in a
    local shell.  stdout and stderr are consumed concurrently line by line
    (echoed when VERBOSE) and returned together with the exit status.
    """
    if VERBOSE:
        log_info(
            f"Running {'local' if ssh is None else 'remote'} command: {style(cmd, 'green', bold=True)}",
        )
    if ssh is not None:
        process = await asyncio.create_subprocess_shell(
            f"{ssh.cmd} '{cmd}'",
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
    else:
        process = await asyncio.create_subprocess_shell(
            cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
    # Should not be None because of asyncio.subprocess.PIPE
    assert process.stdout is not None, "Process stdout is None"
    assert process.stderr is not None, "Process stderr is None"
    # Read both pipes concurrently to avoid deadlocking when one fills up.
    stdout, stderr = await asyncio.gather(
        read_stream(process.stdout, log_info, "magenta"),
        read_stream(process.stderr, log_info, "red"),
    )
    await process.wait()
    assert process.returncode is not None, "Process has not returned"
    if VERBOSE and process.returncode != 0:
        msg = style(str(process.returncode), "red", bold=True)
        log_error(f"Command exit code: {msg}")
    return CmdResult(stdout, stderr, process.returncode)
async def read_stream(
    stream: asyncio.StreamReader,
    callback: Callable[[str], None],
    color: str,
) -> str:
    """Collect every line from *stream*; echo each via *callback* when verbose."""
    collected = []
    raw = await stream.readline()
    while raw:
        decoded = raw.decode("utf-8", "replace").rstrip()
        collected.append(decoded)
        if VERBOSE:
            callback(f"Command output: {style(decoded, color, bold=True)}")
        raw = await stream.readline()
    return "\n".join(collected)
def run_cmd(
    cmd: str,
    ssh: Optional[SSH] = None,
) -> CmdResult:
    """Run *cmd* synchronously (locally, or via *ssh* when given)."""
    coroutine = async_run_cmd(cmd, ssh)
    return asyncio.run(coroutine)
def find(path: str, ssh: Optional[SSH] = None, maxdepth: Optional[int] = None) -> str:
    """Run the ``find`` command on *path* and return its stdout."""
    parts = [f"find '{path}'"]
    if maxdepth is not None:
        parts.append(f"-maxdepth {maxdepth}")
    return run_cmd(" ".join(parts), ssh).stdout
def get_absolute_path(path: str, ssh: Optional[SSH] = None) -> str:
    """Resolve *path* by cd-ing into it and printing the working directory."""
    result = run_cmd(f"cd '{path}';pwd", ssh)
    return result.stdout
def mkdir(path: str, ssh: Optional[SSH] = None) -> None:
    """Create *path*, including missing parents, if it does not exist yet."""
    command = f"mkdir -p -- '{path}'"
    run_cmd(command, ssh)
def rm_file(path: str, ssh: Optional[SSH] = None) -> None:
    """Remove a single file (no error when it is already gone)."""
    command = f"rm -f -- '{path}'"
    run_cmd(command, ssh)
def rm_dir(path: str, ssh: Optional[SSH] = None) -> None:
    """Recursively remove a directory tree."""
    command = f"rm -rf -- '{path}'"
    run_cmd(command, ssh)
def ln(src: str, dest: str, ssh: Optional[SSH] = None) -> None:
    """Create a symbolic link pointing at *src* named *dest*."""
    command = f"ln -s -- '{src}' '{dest}'"
    run_cmd(command, ssh)
def test_file_exists_src(path: str) -> bool:
    """Return True when *path* exists on the local machine (uses ``test -e``)."""
    result = run_cmd(f"test -e '{path}'", None)
    return result.returncode == 0
def get_file_system_type(path: str, ssh: Optional[SSH] = None) -> str:
    """Return the filesystem type of *path* per ``df -T`` ("" when unavailable)."""
    output = run_cmd(f"df -T '{path}'", ssh).stdout
    lines = output.split("\n")
    if len(lines) <= 1:
        return ""
    # The second output line holds the data row; the type is its second column.
    return lines[1].split()[1]
def check_dest_is_backup_folder(
    dest_folder: str,
    ssh: Optional[SSH],
) -> None:
    """Check if the destination is a backup folder or drive.

    Exits with status 1 (after printing instructions for creating the
    ``backup.marker`` file) when the marker is missing.  The marker is a
    safety net so backups are only ever written - and old ones deleted -
    on folders the user explicitly designated.
    """
    marker_path = backup_marker_path(dest_folder)
    if not find_backup_marker(dest_folder, ssh):
        log_info(
            style(
                "Safety check failed - the destination does not appear to be a backup folder or drive (marker file not found).",
                "yellow",
            ),
        )
        log_info(
            style(
                "If it is indeed a backup folder, you may add the marker file by running the following command:",
                "yellow",
            ),
        )
        # Show the exact command (prefixed with the ssh invocation when remote).
        log_info_cmd(
            style(
                f'mkdir -p -- "{dest_folder}" ; touch "{marker_path}"',
                "green",
                bold=True,
            ),
            ssh,
        )
        sys.exit(1)
def get_link_dest_option(
    previous_dest: Optional[str],
    ssh: Optional[SSH],
) -> str:
    """Return rsync's ``--link-dest`` option, or "" when starting fresh."""
    if not previous_dest:
        log_info("No previous backup - creating new one.")
        return ""
    # Resolve the previous backup to an absolute path before handing it to rsync.
    resolved = get_absolute_path(previous_dest, ssh)
    shown = f"{ssh.dest_folder_prefix}{resolved}" if ssh else resolved
    highlighted = style(shown, bold=True)
    log_info(
        style(
            f"Previous backup found - doing incremental backup from {highlighted}",
            "yellow",
        ),
    )
    return f"--link-dest='{resolved}'"
def handle_ssh(
    src_folder: str,
    dest_folder: str,
    *,
    ssh_port: str,
    id_rsa: Optional[str],
    exclusion_file: str,
    allow_host_only: bool,
) -> Tuple[str, str, Optional[SSH]]:
    """Handle SSH-related setup for the `main` function.

    Returns the (possibly rewritten) source and destination folders with
    trailing slashes stripped, plus the SSH details (None when both
    folders are local).  Exits with status 1 when a folder is empty or
    contains a single quote - the commands built by this script quote
    paths with single quotes, so allowing them would break quoting.
    """
    ssh = parse_ssh(
        src_folder,
        dest_folder,
        ssh_port=ssh_port,
        id_rsa=id_rsa,
        allow_host_only=allow_host_only,
    )
    if ssh is not None:
        # Continue working with the bare remote paths (no user@host: prefix).
        if ssh.dest_folder:
            dest_folder = ssh.dest_folder
        if ssh.src_folder:
            src_folder = ssh.src_folder
    dest_folder = dest_folder.rstrip("/")
    src_folder = src_folder.rstrip("/")
    if not src_folder or not dest_folder:
        log_error("Source and destination folder cannot be empty.")
        sys.exit(1)
    if (
        "'" in src_folder
        or "'" in dest_folder
        or (exclusion_file and "'" in exclusion_file)
    ):
        log_error(
            "Source and destination directories may not contain single quote characters.",
        )
        sys.exit(1)
    return (
        src_folder,
        dest_folder,
        ssh,
    )
def get_rsync_flags(
    src_folder: str,
    dest_folder: str,
    rsync_set_flags: str,
    rsync_append_flags: str,
    ssh: Optional[SSH],
) -> List[str]:
    """Assemble the rsync flag list, honoring user overrides and FAT/SSH tweaks."""
    # Defaults: preserve as much metadata as possible and itemize changes.
    flags = [
        "-D",
        "--numeric-ids",
        "--links",
        "--hard-links",
        "--one-file-system",
        "--itemize-changes",
        "--times",
        "--recursive",
        "--perms",
        "--owner",
        "--group",
        "--stats",
        "--human-readable",
    ]
    if rsync_set_flags:
        # A full override replaces the defaults entirely.
        flags = rsync_set_flags.split()
    if rsync_append_flags:
        flags.extend(rsync_append_flags.split())
    if (
        get_file_system_type(src_folder).lower() == "fat"
        or get_file_system_type(dest_folder, ssh).lower() == "fat"
    ):
        log_info("File-system is a version of FAT.")
        log_info("Using the --modify-window rsync parameter with value 2.")
        flags.append("--modify-window=2")
    if ssh is not None:
        flags.append("--compress")
    return flags
def exit_if_pid_running(running_pid: str, ssh: Optional[SSH] = None) -> None:
    """Exit if another instance of this script is already running.

    *running_pid* is the PID read from the ``backup.inprogress`` file; the
    process list is grepped for that PID running this script.
    """
    if sys.platform == "cygwin":
        cmd = f"procps -wwfo cmd -p {running_pid} --no-headers | grep '{APPNAME}'"
        # NOTE(review): this branch runs the check through `ssh` while the
        # generic branch below always checks locally; the recorded PID comes
        # from the local process (os.getpid()), so the local check looks
        # right - confirm whether cygwin should really pass `ssh` here.
        running_cmd = run_cmd(cmd, ssh)
        if running_cmd.returncode == 0:
            log_error(
                f"Previous backup task is still active - aborting (command: {running_cmd.stdout}).",
            )
            sys.exit(1)
    else:
        # NetBSD's ps(1) needs extra flags to list the process.
        ps_flags = "-axp" if sys.platform.startswith("netbsd") else "-p"
        cmd = f"ps {ps_flags} {running_pid} -o 'command' | grep '{APPNAME}'"
        if run_cmd(cmd).stdout:
            log_error("Previous backup task is still active - aborting.")
            sys.exit(1)
def handle_still_running_or_failed_or_interrupted_backup(
    inprogress_file: str,
    mypid: int,
    dest: str,
    dest_folder: str,
    previous_dest: Optional[str],
    ssh: Optional[SSH],
) -> None:
    """Handle cases when backup is still running or failed or interrupted backup.

    When an ``backup.inprogress`` file exists: abort if the recorded PID is
    still running; otherwise move the interrupted backup into the new
    destination so rsync can resume it, and claim the inprogress file with
    our own PID.
    """
    if not find(inprogress_file, ssh):
        return
    running_pid = run_cmd(f"cat {inprogress_file}", ssh).stdout
    exit_if_pid_running(running_pid, ssh)
    if previous_dest:
        # - Last backup is moved to current backup folder so that it can be resumed.
        # - 2nd to last backup becomes last backup.
        ssh_dest_folder_prefix = ssh.dest_folder_prefix if ssh else ""
        log_info(
            f"{ssh_dest_folder_prefix}{inprogress_file} already exists - the previous backup failed or was interrupted. Backup will resume from there.",  # noqa: E501
        )
        run_cmd(f"mv -- {previous_dest} {dest}", ssh)
        backups = find_backups(dest_folder, ssh)
        # NOTE(review): this only rebinds the local variable; the caller's
        # `previous_dest` is unchanged - confirm whether the updated value
        # was meant to be returned to the caller.
        previous_dest = backups[1] if len(backups) > 1 else ""
    # Update PID to current process to avoid multiple concurrent resumes
    run_cmd(f"echo {mypid} > {inprogress_file}", ssh)
def deal_with_no_space_left(
    log_file: str,
    dest_folder: str,
    *,
    ssh: Optional[SSH],
    auto_expire: bool,
) -> bool:
    """Deal with no space left on device.

    Scans the rsync log for out-of-space errors.  When found and
    *auto_expire* is enabled, the oldest backup is deleted and True is
    returned so the caller retries; otherwise the script exits.
    """
    with open(log_file) as f:
        log_data = f.read()
    no_space_left = re.search(
        r"No space left on device \(28\)|Result too large \(34\)",
        log_data,
    )
    if not no_space_left:
        return False
    if not auto_expire:
        log_error(
            "No space left on device, and automatic purging of old backups is disabled.",
        )
        sys.exit(1)
    log_warn(
        "No space left on device - removing oldest backup and resuming.",
    )
    backups = find_backups(dest_folder, ssh)
    if len(backups) <= 1:
        log_error("No space left on device, and no old backup to delete.")
        sys.exit(1)
    # Bug fix: an ascending sort puts the OLDEST backup first, so delete
    # sorted(backups)[0]. The previous code expired sorted(backups)[-1],
    # i.e. the newest backup - which is the in-progress destination itself.
    # (expire_backups() uses the same sorted(...)[0] = oldest convention.)
    expire_backup(sorted(backups)[0], ssh)
    return True
def check_rsync_errors(
    log_file: str,
    auto_delete_log: bool,  # noqa: FBT001
) -> None:
    """Scan the rsync log for errors or warnings and report the outcome."""
    with open(log_file) as f:
        contents = f.read()
    if "rsync error:" in contents:
        log_error(
            f"Rsync reported an error. Run this command for more details: grep -E 'rsync:|rsync error:' '{log_file}'",
        )
    elif "rsync:" in contents:
        log_warn(
            f"Rsync reported a warning. Run this command for more details: grep -E 'rsync:|rsync error:' '{log_file}'",
        )
    else:
        log_info(style("Backup completed without errors.", "magenta"))
        # A clean run only leaves a log behind when the user manages logs.
        if auto_delete_log:
            os.remove(log_file)
def now_str() -> str:
    """Return the current local time formatted as ``YYYY-MM-DD-HHMMSS``."""
    moment = datetime.now()
    return moment.strftime("%Y-%m-%d-%H%M%S")
def start_backup(
    src_folder: str,
    dest: str,
    exclusion_file: str,
    inprogress_file: str,
    link_dest_option: str,
    rsync_flags: List[str],
    log_dir: str,
    mypid: int,
    ssh: Optional[SSH],
    now: str,
) -> str:
    """Start backup.

    Builds and runs the rsync command line, records this process' PID in
    the ``backup.inprogress`` marker (so an interrupted run can be detected
    and resumed) and returns the path of the rsync log file.
    """
    log_file = os.path.join(
        log_dir,
        f"{now}.log",
    )
    if ssh is not None:
        # rsync needs the user@host: prefixes when transferring over SSH.
        src_folder = f"{ssh.src_folder_prefix}{src_folder}"
        dest = f"{ssh.dest_folder_prefix}{dest}"
    log_info(style("Starting backup...", "yellow"))
    log_info(f"From: {style(src_folder, bold=True)}/")
    log_info(f"To: {style(dest, bold=True)}/")
    cmd = "rsync"
    if ssh is not None:
        id_rsa_option = f"-i {ssh.id_rsa} " if ssh.id_rsa else ""
        # NOTE(review): StrictHostKeyChecking=no with a null known-hosts file
        # disables SSH host key verification for the transfer.
        cmd = f"{cmd} -e 'ssh -p {ssh.port} {id_rsa_option}-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'"
    cmd = f"{cmd} {' '.join(rsync_flags)}"
    cmd = f"{cmd} --log-file '{log_file}'"
    if exclusion_file:
        cmd = f"{cmd} --exclude-from '{exclusion_file}'"
    cmd = f"{cmd} {link_dest_option}"
    cmd = f"{cmd} -- '{src_folder}/' '{dest}/'"
    log_info(style("Running command:", bold=True))
    log_info(style(cmd, "green"))
    # Claim the in-progress marker with our PID before rsync starts.
    run_cmd(f"echo {mypid} > {inprogress_file}", ssh)
    run_cmd(cmd)
    return log_file
def backup(
    src_folder: str,
    dest_folder: str,
    *,
    exclusion_file: str,
    log_dir: str,
    auto_delete_log: bool,
    expiration_strategy: str,
    auto_expire: bool,
    port: str,
    id_rsa: str,
    rsync_set_flags: str,
    rsync_append_flags: str,
    rsync_get_flags: bool,
    allow_host_only: bool,
) -> None:
    """Perform backup of src_folder to dest_folder.

    Orchestrates one full backup run: resolves optional SSH prefixes,
    validates source and destination, handles a previously interrupted
    backup, expires old backups, runs rsync (retrying when the destination
    runs out of space), checks the rsync log, and updates the "latest"
    symlink in the destination folder.
    """
    # Resolve [USER@]HOST: prefixes; ssh is None for purely local backups.
    (
        src_folder,
        dest_folder,
        ssh,
    ) = handle_ssh(
        src_folder,
        dest_folder,
        ssh_port=port,
        id_rsa=id_rsa,
        exclusion_file=exclusion_file,
        allow_host_only=allow_host_only,
    )
    if not test_file_exists_src(src_folder):
        log_error(f"Source folder '{src_folder}' does not exist - aborting.")
        sys.exit(1)
    # Safety check: the destination must be explicitly marked as a backup folder.
    check_dest_is_backup_folder(dest_folder, ssh)
    now = now_str()
    # Each backup lives in a folder named after the current timestamp.
    dest = os.path.join(dest_folder, now)
    _backups = sorted(find_backups(dest_folder, ssh), reverse=True)
    # Newest existing backup (if any); used for hard-linking and resume logic.
    previous_dest = _backups[0] if _backups else None
    inprogress_file = os.path.join(dest_folder, "backup.inprogress")
    mypid = os.getpid()
    if not os.path.exists(log_dir):
        log_info(f"Creating log folder in '{log_dir}'...")
        os.makedirs(log_dir)
    # Detect a concurrent run, or resume/clean up a failed/interrupted one.
    handle_still_running_or_failed_or_interrupted_backup(
        inprogress_file,
        mypid,
        dest,
        dest_folder,
        previous_dest,
        ssh,
    )
    rsync_flags = get_rsync_flags(
        src_folder,
        dest_folder,
        rsync_set_flags,
        rsync_append_flags,
        ssh,
    )
    if rsync_get_flags:
        # Only display the flags that would be used; do not back up.
        flags = " ".join(rsync_flags)
        log_info(f"Rsync flags:\n{style(flags, 'yellow', bold=True)}")
        sys.exit(0)
    for _ in range(100):  # max 100 retries when no space left
        link_dest_option = get_link_dest_option(
            previous_dest,
            ssh,
        )
        if not find(dest, ssh, maxdepth=0):
            _full_dest = style(f"{ssh.cmd if ssh else ''}{dest}", bold=True)
            log_info(f"Creating destination {_full_dest}")
            mkdir(dest, ssh)
        # Purge old backups according to the configured expiration strategy.
        expire_backups(
            dest_folder,
            expiration_strategy,
            previous_dest if previous_dest else dest,
            ssh,
        )
        log_file = start_backup(
            src_folder,
            dest,
            exclusion_file,
            inprogress_file,
            link_dest_option,
            rsync_flags,
            log_dir,
            mypid,
            ssh,
            now,
        )
        # May delete the oldest backup and request a retry when the
        # destination ran out of space (unless auto_expire is disabled).
        retry = deal_with_no_space_left(
            log_file,
            dest_folder,
            ssh=ssh,
            auto_expire=auto_expire,
        )
        if not retry:
            break
    check_rsync_errors(log_file, auto_delete_log)
    # Point the "latest" symlink at the backup that just completed.
    rm_file(os.path.join(dest_folder, "latest"), ssh)
    ln(
        os.path.basename(dest),
        os.path.join(dest_folder, "latest"),
        ssh,
    )
    rm_file(inprogress_file, ssh)
def main() -> None:
    """Parse the command line arguments and run the backup."""
    args = parse_arguments()
    # VERBOSE is a module-level flag; set it before any logging happens.
    global VERBOSE
    VERBOSE = args.verbose
    # Route Ctrl-C (SIGINT) through terminate_script instead of a bare
    # KeyboardInterrupt.
    signal.signal(signal.SIGINT, lambda n, f: terminate_script(n, f))
    backup(
        src_folder=args.src_folder,
        dest_folder=args.dest_folder,
        exclusion_file=args.exclusion_file,
        # Expand "~" and environment variables in the log directory path.
        log_dir=os.path.expandvars(os.path.expanduser(args.log_dir)),
        auto_delete_log=args._auto_delete_log,
        expiration_strategy=args.strategy,
        auto_expire=not args.no_auto_expire,
        port=args.port,
        id_rsa=args.id_rsa,
        rsync_set_flags=args.rsync_set_flags,
        rsync_append_flags=args.rsync_append_flags,
        rsync_get_flags=args.rsync_get_flags,
        allow_host_only=args.allow_host_only,
    )
if __name__ == "__main__":
    main()
# Rsync Time Machine 🕰️💾

[](https://codecov.io/gh/basnijholt/rsync-time-machine.py)
[](https://github.com/basnijholt/rsync-time-machine.py/stargazers)
[](https://pypi.python.org/pypi/rsync-time-machine)
[](https://github.com/basnijholt/rsync-time-machine.py/blob/main/LICENSE)
[](https://pypi.python.org/pypi/rsync-time-machine)

Introducing `rsync-time-machine.py` - a Python port of the [`rsync-time-backup`](https://github.com/laurent22/rsync-time-backup) script, offering Time Machine-style backups using rsync. It creates incremental backups of files and directories to the destination of your choice. The backups are structured in a way that makes it easy to recover any file at any point in time. 🚀
It works on Linux, macOS, and Windows (via WSL or Cygwin). The main advantage over Time Machine is flexibility, as it can back up from/to any filesystem and works on any platform. You can also back up to a Truecrypt drive without any problem. 😃
`rsync-time-machine.py` is fully tested, has no external dependencies (only Python ≥3.7 🐍), is fully compatible with [`rsync-time-backup`](https://github.com/laurent22/rsync-time-backup), offers pretty terminal output, and is fully typed! 🎉
<details><summary><b><u>[ToC]</u></b> 📚</summary>
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [:star2: Features](#star2-features)
- [:books: Usage](#books-usage)
- [:hammer_and_wrench: Installation](#hammer_and_wrench-installation)
- [:bulb: Examples](#bulb-examples)
- [:calendar: Backup Expiration Logic](#calendar-backup-expiration-logic)
- [:page_facing_up: Exclusion File](#page_facing_up-exclusion-file)
- [:lock: Built-in Lock](#lock-built-in-lock)
- [:gear: Rsync Options](#gear-rsync-options)
- [:no_entry_sign: No Automatic Backup Expiration](#no_entry_sign-no-automatic-backup-expiration)
- [:arrows_counterclockwise: How to Restore](#arrows_counterclockwise-how-to-restore)
- [:star: Featured on](#star-featured-on)
- [:heart: Support and Contributions](#heart-support-and-contributions)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
</details>
## :star2: Features
* 📁 Each backup is in its own folder named after the current timestamp.
* 🔒 Backup to/from remote destinations over SSH.
* 🔗 Files that haven't changed from one backup to the next are hard-linked to the previous backup, saving space.
* ⚠️ Safety check - the backup will only happen if the destination has explicitly been marked as a backup destination.
* 🔄 Resume feature - if a backup has failed or was interrupted, the tool will resume from there on the next backup.
* 🚫 Exclude file - support for pattern-based exclusion via the `--exclude-from` rsync parameter.
* 🧹 Automatically purge old backups based on a configurable expiration strategy.
* 🔗 "latest" symlink that points to the latest successful backup.
## :books: Usage
To use `rsync-time-machine.py`, you'll need to provide source and destination paths, along with any desired options:
```bash
rsync-time-machine --help
```
Shows the help message:
<!-- CODE:BASH:START -->
<!-- echo '```bash' -->
<!-- rsync-time-machine --help -->
<!-- echo '```' -->
<!-- CODE:END -->
<!-- OUTPUT:START -->
<!-- ⚠️ This content is auto-generated by `markdown-code-runner`. -->
```bash
usage: rsync-time-machine [-h] [-p PORT] [-i ID_RSA] [--rsync-get-flags]
[--rsync-set-flags RSYNC_SET_FLAGS]
[--rsync-append-flags RSYNC_APPEND_FLAGS]
[--log-dir LOG_DIR] [--strategy STRATEGY]
[--no-auto-expire] [--allow-host-only]
[--exclude-from EXCLUDE_FROM] [-v]
src_folder dest_folder [exclusion_file]
A script for creating and managing time-stamped backups using rsync.
positional arguments:
src_folder Source folder for backup. Format: [USER@HOST:]SOURCE
dest_folder Destination folder for backup. Format:
[USER@HOST:]DESTINATION
exclusion_file Path to the file containing exclude patterns. Cannot
be used together with `--exclude-from`.
options:
-h, --help show this help message and exit
-p PORT, --port PORT SSH port.
-i ID_RSA, --id_rsa ID_RSA
Specify the private ssh key to use.
--rsync-get-flags Display the default rsync flags that are used for
backup. If using remote drive over SSH, --compress
will be added.
--rsync-set-flags RSYNC_SET_FLAGS
Set the rsync flags that are going to be used for
backup.
--rsync-append-flags RSYNC_APPEND_FLAGS
Append the rsync flags that are going to be used for
backup.
--log-dir LOG_DIR Set the log file directory. If this flag is set,
generated files will not be managed by the script - in
particular they will not be automatically deleted.
Default: $HOME/.rsync-time-backup
--strategy STRATEGY Set the expiration strategy. Default: "1:1 30:7
365:30" means after one day, keep one backup per day.
After 30 days, keep one backup every 7 days. After 365
days keep one backup every 30 days.
--no-auto-expire Disable automatically deleting backups when out of
space. Instead, an error is logged, and the backup is
aborted.
--allow-host-only By default, the script expects a 'USER@HOST' pattern
for specifying SSH connections. When this flag is
used, it allows for the 'HOST' pattern without a
specified user. This is useful if you want to use
configurations from the `.ssh/config` file or rely on
the current username. Note: this option will not
enforce SSH usage, it only broadens the accepted input
formats.
--exclude-from EXCLUDE_FROM
Path to the file containing exclude patterns.
Alternative to the positional `exclusion_file`. Not to
be used with `exclusion_file`.
-v, --verbose Enable verbose output. This will slow down the backup
process (in simple tests by 2x).
```
<!-- OUTPUT:END -->
Please refer to the original [`rsync-time-backup`](https://github.com/laurent22/rsync-time-backup) README for a list of options, as they have been preserved in the Python port.
## :hammer_and_wrench: Installation
To install `rsync-time-machine.py`, simply clone the repository:
```bash
pip install rsync-time-machine
```
and use it like `rsync-time-machine --help`
Or just copy the script to your computer:
```bash
wget https://raw.githubusercontent.com/basnijholt/rsync-time-machine.py/main/rsync_time_machine.py
```
and use it like `./rsync_time_machine.py --help`
## :bulb: Examples
* Back up the home folder to backup_drive:
```
./rsync_time_machine.py /home /mnt/backup_drive
```
* Back up with an exclusion list:
```
./rsync_time_machine.py /home /mnt/backup_drive excluded_patterns.txt
```
For more examples and detailed usage instructions, please refer to the original [`rsync-time-backup`](https://github.com/laurent22/rsync-time-backup) README.
## :calendar: Backup Expiration Logic
Backup sets are automatically deleted following a simple expiration strategy defined with the `--strategy` flag. The default strategy is `1:1 30:7 365:30`. Please see the original README for a detailed explanation.
## :page_facing_up: Exclusion File
An optional exclude file can be provided as a third parameter, compatible with the `--exclude-from` parameter of rsync.
The `--exclude-from` option in `rsync-time-machine.py` allows you to exclude specific files or directories from the backup process. You can provide an exclusion file containing patterns for files or directories that should be excluded.
<details>
<summary>📖🔽 Click here to expand the docs on <code>--exclude-from</code> 🔽📖</summary>
Here's how to use the `--exclude-from` feature in `rsync-time-machine.py`:
1. Create a text file named `exclusion_file.txt` (or any other name you prefer) in your preferred location.
2. Add the exclusion patterns to the file, one pattern per line. Patterns can be literal strings, wildcards, or character ranges.
3. Save the file.
To use this exclusion file while performing a backup with `rsync-time-machine.py`, include it as the third positional argument in your command (or with `--exclude-from exclusion_file.txt`). For example:
```bash
rsync-time-machine.py /home /mnt/backup_drive exclusion_file.txt
```
In this example, `/home` is the source folder, `/mnt/backup_drive` is the destination folder, and `exclusion_file.txt` contains the exclude patterns.
Here's a sample `exclusion_file.txt`:
```
+ /home/.fileA
- /home/.*
- /home/junk/
```
In this example:
- `+ /home/.fileA`: Include the file `.fileA` from the `home` directory.
- `- /home/.*`: Exclude all hidden files (files starting with a dot) from the `home` directory.
- `- /home/junk/`: Exclude the entire `junk` directory from the `home` directory.
Remember that the order of patterns matters, as rsync reads the file top-down and acts on the first matching pattern it encounters.
See [this tutorial](https://web.archive.org/web/20230126121643/https://sites.google.com/site/rsync2u/home/rsync-tutorial/the-exclude-from-option) for more information.
</details>
## :lock: Built-in Lock
The script is designed so that only one backup operation can be active for a given directory, avoiding conflicts.
## :gear: Rsync Options
To display, add, or remove rsync options, use the `--rsync-get-flags`, `--rsync-append-flags`, or `--rsync-set-flags` options.
## :no_entry_sign: No Automatic Backup Expiration
Use the `--no-auto-expire` flag to disable the default behavior of purging old backups when out of space.
## :arrows_counterclockwise: How to Restore
Restoring files from the backup is simple, as the script creates a backup in a regular directory. You can easily copy the files back to the original directory using a command like:
```
rsync -aP /path/to/last/backup/ /path/to/restore/to/
```
Consider using the `--dry-run` option to check what exactly is going to be copied. If you want to delete files that exist in the destination but not in the backup, use the `--delete` option. Be extra cautious when using this option to avoid data loss.
You can also restore files using any file explorer, including Finder on macOS or the command line.
## :star: Featured on
- the Real Python podcast: [Episode 158: Building Python CI With Docker & Applying for a Hacker Initiative Grant @ 00:26:28](https://realpython.com/podcasts/rpp/158/#t=1588)
- Y Combinator Hacker News: [Python Port of 600 Line Bash Script: rsync-time-machine.py for Rsync Backups](https://news.ycombinator.com/item?id=35933238) (self-posted)
- Reddit /rpython: [Ported a popular (untested) 600+ Line Bash Script 📜 to Python 🐍: Introducing rsync-time-machine.py for Time Machine-Style Backups Using Rsync 🔄⏰](https://www.reddit.com/r/Python/comments/13gtmz2/ported_a_popular_untested_600_line_bash_script_to/) (self-posted)
## :heart: Support and Contributions
We appreciate your feedback and contributions! If you encounter any issues or have suggestions for improvements, please file an issue on the GitHub repository. We also welcome pull requests for bug fixes or new features.
Happy backing up! 💾🕰️🎉
| /rsync-time-machine-1.3.0.tar.gz/rsync-time-machine-1.3.0/README.md | 0.503662 | 0.838349 | README.md | pypi |
import os
import subprocess
from typing import List
from command_watcher import CommandWatcherError, Watch
class ChecksCollection:
    """Collect multiple check results.

    :param watch: The Watch instance whose logger receives the result of
      every check.
    :param raise_exception: Raise an exception if some checks have
      failed.
    """

    # Whether have_passed() raises CommandWatcherError on failure.
    raise_exception: bool
    # The messages of all failed checks, in the order they failed.
    _messages: List[str]
    # False as soon as any check has failed.
    passed: bool
    watch: Watch

    def __init__(self, watch: Watch, raise_exception: bool = True) -> None:
        self.watch = watch
        self.raise_exception = raise_exception
        self._messages: List[str] = []
        self.passed = True

    @property
    def messages(self) -> str:
        """
        :return: A concatenated string containing all messages of all failed
          checks.
        """
        return " ".join(self._messages)

    def _log_fail(self, message: str) -> None:
        """Record one failed check: remember its message, log a warning and
        mark the whole collection as failed."""
        self._messages.append(message)
        self.watch.log.warning(message)
        self.passed = False

    def check_file(self, file_path: str) -> None:
        """Check if a file exists.

        :param file_path: The file to check.
        """
        if not os.path.exists(file_path):
            self._log_fail(
                "--check-file: The file '{}' doesn’t exist.".format(file_path)
            )
        else:
            self.watch.log.info("--check-file: The file '{}' exists.".format(file_path))

    def check_ping(self, dest: str) -> None:
        """Check if a remote host is reachable by pinging to it.

        :param dest: A destination to ping to.
        """
        process = subprocess.run(
            ["ping", "-c", "3", dest],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
        if process.returncode != 0:
            self._log_fail("--check-ping: '{}' is not reachable.".format(dest))
        else:
            self.watch.log.info("--check-ping: '{}' is reachable.".format(dest))

    def check_ssh_login(self, ssh_host: str) -> None:
        """Check if the given host is reachable by running a command over SSH.

        :param ssh_host: A ssh host string in the form of:
          `user@hostname` or `hostname` or `alias` (as specified in
          `~/.ssh/config`)
        """
        process = subprocess.run(
            ["ssh", ssh_host, "ls"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
        if process.returncode != 0:
            self._log_fail("--check-ssh-login: '{}' is not reachable.".format(ssh_host))
        else:
            self.watch.log.info(
                "--check-ssh-login: '{}' is reachable.".format(ssh_host)
            )

    def have_passed(self) -> bool:
        """
        :return: True if all checks have passed, else False.
        :rtype: boolean"""
        if self.raise_exception and not self.passed:
            raise CommandWatcherError(self.messages)
        return self.passed
from argparse import ArgumentParser
from importlib import metadata
from typing import List, Literal, Optional
__version__: str = metadata.version("rsync_watch")
class ArgumentsDefault:
    """Typed view of the parsed command line arguments.

    Mirrors the options defined in :func:`get_argparser` so that code
    reading the parsed namespace gets attribute-level type checking.
    """
    host_name: str
    dest_user_group: Optional[str]
    exclude: Optional[List[str]]
    rsync_args: Optional[str]
    # Checks
    action_check_failed: Optional[Literal["exception", "skip"]]
    check_file: Optional[str]
    check_ping: Optional[str]
    check_ssh_login: Optional[str]
    src: str
    dest: str
def get_argparser() -> ArgumentParser:
    """Build the command line argument parser for rsync-watch.

    :return: A fully configured :class:`argparse.ArgumentParser`.
    """
    parser: ArgumentParser = ArgumentParser(
        description="A Python script to monitor the execution of a rsync task."
    )
    parser.add_argument(
        "--host-name",
        help="The hostname to submit over NSCA to the monitoring.",
    )
    parser.add_argument(
        "--dest-user-group",
        metavar="USER_GROUP_NAME",
        help="Both the user name and the group name of the destination will "
        "be set to this name.",
    )
    parser.add_argument(
        "--exclude",
        action="append",
        help="See the documentation of --exclude in the rsync manual.",
    )
    parser.add_argument(
        "--rsync-args",
        help="Rsync CLI arguments. Insert some rsync command line arguments. "
        "Wrap all arguments in one string, for example: "
        "--rsync-args '--exclude \"this folder\"'",
    )
    # checks
    checks = parser.add_argument_group(
        title="checks",
        description="Perform different checks before running the rsync task.",
    )
    checks.add_argument(
        "--action-check-failed",
        choices=("exception", "skip"),
        default="skip",
        help="Select action what to do when a check failed.",
    )
    checks.add_argument(
        "--check-file",
        metavar="FILE_PATH",
        help="Check if a file exists on the local machine.",
    )
    checks.add_argument(
        "--check-ping",
        metavar="DESTINATION",
        help="Check if a remote host is reachable by pinging. DESTINATION can "
        "be an IP address or a host name or a fully qualified host name.",
    )
    checks.add_argument(
        "--check-ssh-login",
        metavar="SSH_LOGIN",
        help="Check if a remote host is reachable over the network by SSHing "
        "into it. SSH_LOGIN: “root@192.168.1.1” "
        "or “root@example.com” or “example.com”.",
    )
    parser.add_argument(
        "-v",
        "--version",
        action="version",
        version="%(prog)s {version}".format(version=__version__),
    )
    parser.add_argument("src", help="The source ([[USER@]HOST:]SRC)")
    parser.add_argument("dest", help="The destination ([[USER@]HOST:]DEST)")
    return parser
from __future__ import annotations
from dataclasses import dataclass
from dneio.outcome import Outcome
from trio._core._run import GLOBAL_RUN_CONTEXT, WaitTaskRescheduled, CancelShieldedCheckpoint, PermanentlyDetachCoroutineObject
import abc
import enum
import functools
import logging
import outcome
import trio
import types
import typing as t
__all__ = [
'shift',
'reset',
'Continuation',
'TrioSystemWaitReadable', 'set_trio_system_wait_readable',
]
logger = logging.getLogger(__name__)
SendType = t.TypeVar('SendType')
ReturnType = t.TypeVar('ReturnType')
AnswerType = t.TypeVar('AnswerType')
YieldType = t.TypeVar('YieldType')
class AnsweringContinuation(t.Generic[AnswerType, SendType, ReturnType]):
    """Something which can be passed SendType once, and produce AnswerType or ReturnType

    This is basically a single-shot function from SendType to ReturnType, except
    that we can also get a value of AnswerType, if the computation calls `shift`.
    Presumably, that `shift` either arranges to store the resulting continuation
    somewhere, or includes it in AnswerType so that the caller can deal with it.

    In practice, we mostly use Continuation, which has None for its answer and
    return types.
    """
    # Resume the suspended computation with either a value or an error,
    # wrapped in an Outcome.
    @abc.abstractmethod
    def resume(self, value: Outcome[SendType]) -> t.Union[AnswerType, ReturnType]: ...
    # Resume with a plain value (the Value variant of resume).
    @abc.abstractmethod
    def send(self, value: SendType) -> t.Union[AnswerType, ReturnType]: ...
    # Resume by raising `exn` inside the suspended computation.
    @abc.abstractmethod
    def throw(self, exn: BaseException) -> t.Union[AnswerType, ReturnType]: ...
    # right now, everything is cancellable at all times.
    # we'll make this more customizable later.
    @abc.abstractmethod
    def is_cancelled(self) -> bool: ...
    def __call__(self, value: SendType) -> t.Union[AnswerType, ReturnType]:
        # Calling the continuation directly is shorthand for send().
        return self.send(value)
Continuation = AnsweringContinuation[None, SendType, None]
"""A Continuation whose return and answer types are both None
This is a degenerate form, which is most useful for callbacks registed on some
object, which shouldn't be returning or answering anything to the task invoking
the callback.
Really, this should be called NoneContinuation or something, and
AnswerContinuation should be called Continuation, but this is the more common
form, so it's nice for it to have the shorter name.
"""
class Runner(enum.Enum):
    """Identifies which coroutine runner is currently driving execution."""
    TRIO = "trio"
    NATIVE = "native"

# For now, we assume we start under trio
_under_coro_runner: Runner = Runner.TRIO

def is_running_directly_under_trio() -> bool:
    """Return True when the current code is being driven directly by trio."""
    return _under_coro_runner is Runner.TRIO
#### Implementation of "native" continuations
class Shift(t.Generic[AnswerType, SendType, ReturnType]):
    "The internal type we yield up to implement the shift function for native coroutines"
    # Must be a tuple: a bare string would still work (it names a single
    # slot), but only by accident of the __slots__ protocol.
    __slots__ = ('func',)
    def __init__(self, func: t.Callable[
            [AnsweringContinuation[AnswerType, SendType, ReturnType]], AnswerType]) -> None:
        # The function to invoke with the continuation of the `shift`
        # call that yielded this Shift.
        self.func = func
ShiftCoroutine = t.Coroutine[Shift[AnswerType, SendType, ReturnType], SendType, ReturnType]
"A coroutine which yields Shift. Basically, a 'native' dneio coroutine"
def reset(
    coro: ShiftCoroutine[AnswerType, SendType, ReturnType],
) -> t.Union[AnswerType, ReturnType]:
    """Run this coro such that it can call `shift` and receive its continuation.

    Drives `coro` to its first Shift (or to completion).  If it completes,
    return its return value; if it yields a Shift, wrap the suspended coroutine
    in a NativeContinuation and return whatever the Shift's function answers.
    """
    global _under_coro_runner
    try:
        previous_runner = _under_coro_runner
        _under_coro_runner = Runner.NATIVE
        yielded_value: Shift[AnswerType, SendType, ReturnType]
        yielded_value = coro.send(None) # type: ignore
        if not isinstance(yielded_value, Shift):
            # Single-argument throw: the (type, value) form is deprecated,
            # and NativeContinuation.send/throw already use this form.
            coro.throw(TypeError("no yielding non-shifts!", yielded_value))
            raise TypeError("coro", coro, "yielded something other than a Shift")
    except StopIteration as e:
        # The coroutine ran to completion without ever suspending.
        return_value: ReturnType = e.value
        return return_value
    finally:
        # Restore whichever runner was active before we entered the coroutine.
        _under_coro_runner = previous_runner
    answer_value: AnswerType = yielded_value.func(NativeContinuation(coro))
    return answer_value
@dataclass
class NativeContinuation(AnsweringContinuation[AnswerType, SendType, ReturnType]):
    """A continuation backed by a suspended native (Shift-yielding) coroutine."""
    # Must be a tuple; a bare string names a single slot only by accident.
    __slots__ = ('coro',)
    coro: ShiftCoroutine[AnswerType, SendType, ReturnType]
    def resume(self, value: Outcome[SendType]) -> t.Union[AnswerType, ReturnType]:
        """Resume the coroutine with an Outcome: send on Value, throw on Error."""
        if isinstance(value, outcome.Value):
            return self.send(value.value)
        else:
            return self.throw(value.error)
    # NativeContinuation.{send,throw} and reset are very similar; they could be
    # abstracted, but that would add stack frames, and keeping them separate is
    # an easy way to remove those stack frames, pending a C implementation.
    def send(self, value: SendType) -> t.Union[AnswerType, ReturnType]:
        """Resume the coroutine with `value` and drive it to its next Shift."""
        global _under_coro_runner
        try:
            previous_runner = _under_coro_runner
            _under_coro_runner = Runner.NATIVE
            yielded_value: Shift[AnswerType, SendType, ReturnType]
            yielded_value = self.coro.send(value)
        except StopIteration as e:
            # Coroutine finished: hand back its return value.
            return_value: ReturnType = e.value
            return return_value
        finally:
            _under_coro_runner = previous_runner
        if not isinstance(yielded_value, Shift):
            self.coro.throw(TypeError("no yielding non-shifts!", yielded_value)) # type: ignore
            raise TypeError("coro", self.coro, "yielded something other than a Shift")
        answer_value: AnswerType = yielded_value.func(NativeContinuation(self.coro))
        return answer_value
    def throw(self, exn: BaseException) -> t.Union[AnswerType, ReturnType]:
        """Resume the coroutine by raising `exn` inside it, then drive it on."""
        global _under_coro_runner
        try:
            previous_runner = _under_coro_runner
            _under_coro_runner = Runner.NATIVE
            yielded_value: Shift[AnswerType, SendType, ReturnType]
            yielded_value = self.coro.throw(exn) # type: ignore
        except StopIteration as e:
            return_value: ReturnType = e.value
            return return_value
        finally:
            _under_coro_runner = previous_runner
        if not isinstance(yielded_value, Shift):
            self.coro.throw(TypeError("no yielding non-shifts!", yielded_value)) # type: ignore
            raise TypeError("coro", self.coro, "yielded something other than a Shift")
        answer_value: AnswerType = yielded_value.func(NativeContinuation(self.coro))
        return answer_value
    def is_cancelled(self) -> bool:
        # Native continuations currently have no cancellation mechanism.
        return False
#### Implementation of trio continuations
TrioTask = t.Any
"trio doesn't expose the type of Task publicly..."
@dataclass
class TrioContinuation(Continuation[SendType]):
    # We inherit from Continuation not AnsweringContinuation because it's
    # difficult to assign a proper AnswerType to trio tasks - easier for now to just
    # flatly say "None".
    #
    # NB: __slots__ is ineffective here anyway (the base class has no
    # __slots__, so instances carry a __dict__), but keep it in sync with
    # the field list; 'on_stack' was previously missing.
    __slots__ = ('task', 'cancelled', 'saved_send', 'on_stack')
    task: TrioTask
    cancelled: bool
    saved_send: t.Optional[Outcome[None]]
    on_stack: bool
    # This essentially repeats a large part of the trio run loop. It would be
    # nicer if trio exposed a primitive for this directly.
    def resume(self, value: Outcome[SendType]) -> None:
        """Resume the suspended trio task with `value`, stepping it one yield."""
        if self.cancelled:
            # discard the result - not great, obviously...
            logger.debug("TrioContinuation(%s): resumed after cancellation", self.task)
            return
        if self.on_stack:
            logger.debug("TrioContinuation(%s): immediately resumed with %s", self.task, value)
            # This will happen if the function passed to shift immediately resumes the
            # continuation. With trio, we run the function passed to shift on the
            # coroutine that's being suspended. So we can't resume the coroutine here,
            # since it's already running. Instead we'll save the outcome, and in shift()
            # we check saved_send and just return immediately if it's set. This is not
            # normal shift/reset semantics but it's the best we can do with how trio is
            # structured.
            self.saved_send = value
            return
        resuming_task = GLOBAL_RUN_CONTEXT.task
        runner = GLOBAL_RUN_CONTEXT.runner
        logger.debug("TrioContinuation(%s): resuming with %s", self.task, value)
        global _under_coro_runner
        try:
            previous_runner = _under_coro_runner
            _under_coro_runner = Runner.TRIO
            # We have to temporarily set GLOBAL_RUN_CONTEXT.task to the task that is being
            # resumed; after all, that's the task that's really going to be running. This
            # wouldn't be necessary if we had proper dynamically scoped variables in
            # Python :(
            GLOBAL_RUN_CONTEXT.task = self.task
            # a little bit of reschedule(), before we run the task
            self.task._abort_func = None
            self.task.custom_sleep_data = None
            try:
                msg = self.task.context.run(self.task.coro.send, value)
            except StopIteration as exn:
                # The task's coroutine returned; report it as exited.
                logger.debug("TrioContinuation(%s): return %s", self.task, exn.value)
                GLOBAL_RUN_CONTEXT.runner.task_exited(self.task, outcome.Value(exn.value))
                return
            except BaseException as exn:
                # The task's coroutine raised; strip our own frame from the
                # traceback and report the error as the task's outcome.
                logger.debug("TrioContinuation(%s): raised %s", self.task, exn)
                exn = exn.with_traceback(exn.__traceback__ and exn.__traceback__.tb_next)
                GLOBAL_RUN_CONTEXT.runner.task_exited(self.task, outcome.Error(exn))
                return
            logger.debug("TrioContinuation(%s): yield %s", self.task, msg)
        finally:
            _under_coro_runner = previous_runner
            GLOBAL_RUN_CONTEXT.task = resuming_task
        # Interpret the trap the task yielded, mirroring trio's run loop.
        if msg is CancelShieldedCheckpoint:
            runner.reschedule(self.task)
        elif type(msg) is WaitTaskRescheduled:
            self.task._abort_func = msg.abort_func
            if runner.ki_pending and self.task is runner.main_task:
                self.task._attempt_delivery_of_pending_ki()
            self.task._attempt_delivery_of_any_pending_cancel()
        elif type(msg) is PermanentlyDetachCoroutineObject:
            runner.task_exited(self.task, msg.final_outcome)
        else:
            raise TypeError("bad yield from continuation", msg)
    def send(self, value: SendType) -> None:
        return self.resume(outcome.Value(value))
    def throw(self, exn: BaseException) -> None:
        return self.resume(outcome.Error(exn))
    def _abort_func(self, raise_cancel) -> trio.lowlevel.Abort:
        # Registered with trio as the abort handler while the task waits on us.
        logger.debug("TrioContinuation(%s): cancelled", self.task)
        self.cancelled = True
        return trio.lowlevel.Abort.SUCCEEDED
    def is_cancelled(self) -> bool:
        return self.cancelled
@types.coroutine
def shift(func: t.Callable[[Continuation[SendType]], AnswerType]) -> t.Generator[t.Any, t.Any, SendType]:
    """Call `func` with our current continuation and block until that continuation is resumed.

    This is a coroutine function, just implemented synchronously because this is
    the only place we actually yield from.

    `func` gets a Continuation, not a full-fledged AnsweringContinuation, because
    that's easier to support for now. But in theory `func` should be
    AnsweringContinuation[AnswerType, SendType, ReturnType]
    """
    ensure_system_trio_task_running()
    if _under_coro_runner == Runner.TRIO:
        # Suspend a trio task: wrap it in a TrioContinuation and park it
        # with WaitTaskRescheduled until the continuation is resumed.
        trio_cont = TrioContinuation[SendType](
            trio.lowlevel.current_task(), False, None,
            on_stack=True,
        )
        # There's no surrounding reset to run `func`, so we just run
        # it here and throw away the answer value.
        func(trio_cont)
        trio_cont.on_stack = False
        if trio_cont.saved_send:
            # the continuation was resumed immediately by func
            # (NOTE(review): relies on Outcome instances always being truthy
            # - confirm neither Value nor Error defines __bool__)
            return trio_cont.saved_send.unwrap()
        return (yield WaitTaskRescheduled(trio_cont._abort_func)).unwrap()
    elif _under_coro_runner == Runner.NATIVE:
        # Under the native runner, yield a Shift; reset/NativeContinuation
        # will call `func` with our continuation.
        return (yield Shift(func))
    else:
        raise Exception("running under unsupported coroutine runner")
#### Implementation of non-essential trio system task class
class TrioSystemWaitReadable:
    """Run "wait_readable" in a trio system task so we can avoid blocking trio tasks

    See rsyscall.epoller for how this is used.

    Frustratingly, we have to call .ensure_running on every call to `shift`,
    because trio has no means of autostarting a system task. There has to be a
    better way.

    Note that just calling ensure_running in .wait or .wait_cb doesn't work; a
    coroutine may have already called that in a previous call to trio.run and be
    blocked inside TSWR, waiting for the system task to resume it.

    Before settling on this approach we used a system where `shift` in a trio
    task would automatically open a nursery and make an object much like this
    one, which would automatically be passed down via dynamic scope and magic
    priority inheritance. It fell apart because Python doesn't have proper
    dynamic scope and maintaining the proper inheritance was too hard. That kind
    of implicit, untyped inheritance is against the core idea of dneio anyway;
    at least this object is very explicit and concrete; we don't use it through
    a global variable, we explicitly pass it around. We just need to find a
    better way to support it in trio.
    """
    def __init__(self, fd_number: int) -> None:
        self.fd_number = fd_number
        # we shouldn't ever get more than one waiter
        self._ops_in, self._ops_out = trio.open_memory_channel(1)
        self._run_running = False
    def ensure_running(self) -> None:
        """Spawn the system task if it isn't already running."""
        if not self._run_running:
            self._run_running = True
            trio.lowlevel.spawn_system_task(self.run)
    def wait_cb(self, cb: Continuation[None]) -> None:
        """Register `cb` to be resumed when the fd becomes readable."""
        self._ops_in.send_nowait(cb)
    async def wait(self) -> None:
        """Block until the fd is readable."""
        return await shift(self.wait_cb)
    async def run(self) -> None:
        """System task body: service waiters one at a time."""
        try:
            while True:
                cb = await self._ops_out.receive()
                try:
                    await trio.lowlevel.wait_readable(self.fd_number)
                except BaseException:
                    # Re-queue the waiter so it isn't lost, then propagate.
                    self.wait_cb(cb)
                    raise
                try:
                    cb.send(None)
                except BaseException:
                    logger.exception("TSWR.run: callback raised exception")
                    raise
        finally:
            logger.debug("TSWR.run: system task exiting")
            self._run_running = False
# Module-global TSWR instance; consulted on every call to shift.
_trio_system_wait_readable: t.Optional[TrioSystemWaitReadable] = None
def set_trio_system_wait_readable(tswr: TrioSystemWaitReadable) -> None:
    """Install `tswr` as the global TrioSystemWaitReadable used by shift."""
    global _trio_system_wait_readable
    _trio_system_wait_readable = tswr
def ensure_system_trio_task_running() -> None:
    """Called in every call to shift.

    Starts the global TSWR system task if one has been installed.
    """
    if _trio_system_wait_readable:
        _trio_system_wait_readable.ensure_running()
from __future__ import annotations
from dataclasses import dataclass
from dneio import Continuation, shift, reset
from dneio.outcome import Outcome
import functools
import logging
import outcome
import typing as t
logger = logging.getLogger(__name__)
InType = t.TypeVar('InType')
OutType = t.TypeVar('OutType')
T = t.TypeVar('T')
class RequestQueue(t.Generic[InType, OutType]):
    """Connects requesters (submitting InType) with a receiver producing OutType.

    Each request carries a continuation which the receiver resumes once the
    request has been serviced.  At most one receiver may wait at a time (see
    the assert in get_one_cb).  After close(), pending and future requests
    fail with the given exception.
    """
    def __init__(self) -> None:
        # Requests not yet picked up by a receiver.
        self._request_cbs: t.List[t.Tuple[InType, Continuation[OutType]]] = []
        # The receiver currently blocked waiting for a request, if any.
        self._receiver_cb: t.Optional[Continuation[t.Tuple[InType, Continuation[OutType]]]] = None
        # Once set, all requests fail with this exception.
        self._final_exn: t.Optional[BaseException] = None
    def close(self, final_exn: BaseException) -> None:
        """Fail all pending requests with final_exn and reject future ones.

        NOTE(review): a receiver currently blocked in get_one/get_many is not
        woken here - confirm callers never close while a receiver is waiting.
        """
        self._final_exn = final_exn
        request_cbs, self._request_cbs = self._request_cbs, []
        for _, cb in request_cbs:
            cb.throw(self._final_exn)
    def request_cb(self, val: InType, cb: Continuation[OutType]) -> None:
        """Submit `val`; `cb` is resumed with the result once serviced."""
        if self._final_exn:
            # The queue is closed: reject immediately.
            logger.debug("RequestQueue.request_cb(%s, %s): throwing final exn %s", val, cb, self._final_exn)
            cb.throw(self._final_exn)
        elif self._receiver_cb:
            # A receiver is waiting: hand the request over right away.
            logger.debug("RequestQueue.request_cb(%s, %s): waking up receiver_cb %s", val, cb, self._receiver_cb)
            receiver_cb = self._receiver_cb
            self._receiver_cb = None
            receiver_cb.send((val, cb))
        else:
            # No receiver yet: park the request.
            logger.debug("RequestQueue.request_cb(%s, %s): appending to waiting list of size %d", val, cb, len(self._request_cbs))
            self._request_cbs.append((val, cb))
    async def request(self, val: InType) -> OutType:
        """Submit `val` and suspend until the receiver answers with an OutType."""
        if self._final_exn:
            raise self._final_exn
        logger.debug("RequestQueue.request(%s): shifting into request_cb", val)
        return await shift(functools.partial(self.request_cb, val))
    def get_one_cb(self, cb: Continuation[t.Tuple[InType, Continuation[OutType]]]) -> None:
        """Register `cb` as the (single) waiting receiver."""
        assert self._receiver_cb is None
        self._receiver_cb = cb
    async def get_one(self) -> t.Tuple[InType, Continuation[OutType]]:
        """Take the oldest pending request, suspending if none is pending."""
        if self._request_cbs:
            return self._request_cbs.pop(0)
        else:
            return await shift(self.get_one_cb)
    async def get_many(self) -> t.List[t.Tuple[InType, Continuation[OutType]]]:
        """Take all pending requests, suspending for one if none is pending."""
        if self._request_cbs:
            ret, self._request_cbs = self._request_cbs, []
            return ret
        else:
            return [await shift(self.get_one_cb)]
    def fetch_any(self) -> t.List[t.Tuple[InType, Continuation[OutType]]]:
        """Take all pending requests without suspending (may return [])."""
        ret, self._request_cbs = self._request_cbs, []
        return ret
class Event:
    """A one-shot event: waiters suspend until set() (or close()) is called.

    close(exc) sets the event and makes wait() raise exc for current and
    future waiters.
    """
    def __init__(self) -> None:
        # Continuations suspended in wait(), waiting for set().
        self._waiting_cbs: t.List[Continuation[None]] = []
        self._is_set = False
        # If set via close(), wait() raises this after waking.
        self._exc: t.Optional[BaseException] = None
    async def wait(self) -> None:
        """Suspend until the event is set; raise if it was closed with an exception."""
        if not self._is_set:
            await shift(self._waiting_cbs.append)
        if self._exc:
            raise self._exc
    def set(self) -> None:
        """Set the event and wake all waiters."""
        self._is_set = True
        # Drain the waiter list before resuming anyone: releases the
        # references, and makes a redundant second set() harmless (no
        # double-resume of the same continuations).
        waiting, self._waiting_cbs = self._waiting_cbs, []
        for cb in waiting:
            cb.send(None)
    def close(self, exc: BaseException) -> None:
        """Set the event such that wait() raises exc."""
        self._exc = exc
        self.set()
@dataclass
class Future(t.Generic[T]):
    """A single-value future, fulfilled by a coroutine started in the background."""
    # Outcome of the computation, once it has finished.
    _result: t.Optional[Outcome[T]] = None
    # Continuation of a get() caller that suspended before completion.
    _result_cb: t.Optional[Continuation[T]] = None
    @staticmethod
    def start(func: t.Callable[[], t.Awaitable[T]]) -> Future[T]:
        """Begin running func in the background; return a Future for its result."""
        fut = Future[T]()
        @functools.wraps(func)
        async def runner() -> None:
            res = await outcome.acapture(func)
            fut._result = res
            waiter = fut._result_cb
            if waiter:
                waiter.resume(res)
        reset(runner())
        return fut
    def get_cb(self, cb: Continuation[T]) -> None:
        if self._result:
            cb.resume(self._result)
            return
        # Only a single get() caller may wait at a time.
        assert self._result_cb is None
        self._result_cb = cb
    async def get(self) -> T:
        """Wait for the result, returning it or re-raising the captured exception."""
        if self._result:
            return self._result.unwrap()
        return await shift(self.get_cb)
async def make_n_in_parallel(make: t.Callable[[], t.Awaitable[T]], count: int) -> t.List[T]:
    "Call `make` n times in parallel, and return all the results."
    # Start every invocation first so they run concurrently, then collect.
    futures = [Future.start(make) for _ in range(count)]
    results = []
    for fut in futures:
        results.append(await fut.get())
    return results
async def run_all(callables: t.List[t.Callable[[], t.Awaitable[T]]]) -> t.List[T]:
    "Call all the functions passed to it, and return all the results."
    # Launch everything before awaiting anything, so the calls overlap.
    started = [Future.start(thunk) for thunk in callables]
    return [await fut.get() for fut in started]
from __future__ import annotations
from contextvars import ContextVar
import abc
import inspect
import sys
import traceback
import types
import typing as t
__all__ = [
'Wish',
'WishGranter',
'wish',
'my_wish_granter',
]
# TODO should we inherit from BaseException or Exception?
T = t.TypeVar('T')
class Wish(BaseException, t.Generic[T]):
    """A request for a value of type `return_type`.

    We inherit from BaseException.  When wish is called on this value, it will
    fill in the exception fields on this value, in the same way that raise
    would fill in the exception fields of any exception.

    A user can further inherit from this class to add more information to the
    Wish, in the same way one would inherit from Exception to create a more
    specific Exception.
    """
    return_type: t.Type[T]
    def __init__(self, return_type: t.Type[T], *args) -> None:
        # Initialize the BaseException machinery (args etc.) first, then
        # record the type of value being wished for.
        super().__init__(*args)
        self.return_type = return_type
my_wish_granter: ContextVar[WishGranter] = ContextVar('my_wish_granter')
"""The WishGranter currently active for our scope.
To use a new WishGranter in some scope, set my_wish_granter to a new value, then reset it
back to the old value once you leave that scope. A contextmanager would likely be helpful
for this.
"""
# NOTE: no default is provided, so my_wish_granter.get() raises LookupError
# until a WishGranter has been bound via my_wish_granter.set(...).
class WishGranter:
    """An object capable of responding to a wish.
    A WishGranter is not active until it is bound. When wish is called, the closest bound
    WishGranter is used to satisfy the wish. The WishGranter may itself choose to call
    wish and propagate the wish up the stack. For example, a partially automated
    WishGranter might be able to handle only certain classes of Wish, and fall back to
    wishing again when it sees other classes.
    WishGranters are bound to my_wish_granter to become active. This merely means they
    will be used by `wish`; no actual change occurs to the WishGranter object.
    Since my_wish_granter is a ContextVar, it's useful to make an analogy to exception
    handlers, because the resolution process for ContextVars, and therefore WishGranters,
    works the same way.
    """
    @abc.abstractmethod
    async def wish(self, wish: Wish[T]) -> T:
        "Satisfy this wish, returning a value of the type requested by the wish"
        # Abstract: subclasses decide how to produce the requested value.
        pass
def _frames_to_traceback(frames: t.List[types.FrameType]) -> t.Optional[types.TracebackType]:
"Translate a list of frames (which can be obtained from the inspect module) to a traceback"
tb = None
for frame in frames:
tb = types.TracebackType(tb, frame, frame.f_lasti, frame.f_lineno)
return tb
# TODO should switch bool to typing_extensions.Literal[False]
async def wish(wish: Wish[T], *, from_exn: t.Union[BaseException, None, bool]=False) -> T:
    """Wish for some value, as specified by the passed Wish, and get that value

    Pass `from_exn=exn` to explicitly set the cause/context for the Wish to the exception
    `exn`; this is identical in behavior to doing `raise wish from exn` with exceptions.
    Likewise, you can pass `from_exn=None` to suppress the exception cause/context.

    We use the WishGranter currently bound to my_wish_granter in our ContextVar context to
    perform the wish. This is directly analogous to using the closest bound exception
    handler to handle an exception, and works the same way.

    Raises:
        TypeError: if the argument is not a Wish instance.
    """
    if not isinstance(wish, Wish):
        # A non-Wish argument is a programming error, not a failed wish.
        raise TypeError(f"wishes should be of type Wish, not {wish!r}")
    raising_exception = sys.exc_info()[1]
    if not isinstance(from_exn, bool):
        # Explicit cause requested (possibly None to suppress the context),
        # mirroring `raise wish from from_exn`.
        wish.__cause__ = from_exn
    elif raising_exception:
        # We're being called inside an `except` block; record the implicit
        # context, mirroring a plain `raise wish`.
        wish.__context__ = raising_exception
    # Attach a traceback for the wish, skipping this frame itself.
    wish.__traceback__ = _frames_to_traceback([record.frame for record in inspect.stack()[1:]])
    wish_granter = my_wish_granter.get()
    ret = await wish_granter.wish(wish)
    return ret
from dataclasses import dataclass
import ast
import traceback
import typeguard
import typing as t
from arepl.astcodeop import ast_compile_interactive
from arepl.aeval import (
ReturnResult, ExceptionResult, ExpressionResult, FallthroughResult, Result, eval_single,
)
from arepl.help import help_to_str
class FromREPL(Exception):
    """Wraps an exception raised by user code inside the REPL, so the REPL
    driver can distinguish it from the REPL's own internal failures.

    Attributes:
        exn: the original exception raised by the evaluated code.
    """
    def __init__(self, exn: Exception) -> None:
        # Pass exn to Exception.__init__ so args/str()/repr() are populated,
        # instead of leaving the wrapper silently message-less.
        super().__init__(exn)
        self.exn = exn
class PureREPL:
    """A REPL core with no I/O: feed it lines, get back evaluation Results."""
    def __init__(self, global_vars: t.Dict[str, t.Any]) -> None:
        # Namespace in which submitted code is evaluated.
        self.global_vars = global_vars
        # Accumulates lines of a (possibly multi-line) statement in progress.
        self.buf = ""
    async def eval_single(self, astob: ast.Interactive) -> Result:
        """Evaluate a parsed interactive AST in our namespace."""
        return await eval_single(astob, self.global_vars)
    async def add_line(self, data: str) -> t.Optional[Result]:
        """Add a single line to the REPL buffer and try to parse and evaluate the AST.

        If no AST can be parsed from the buffer, returns None.

        Make sure to pass exactly one single line to this function,
        including newline! Otherwise, you might pass multiple
        statements at once, and the parser won't like that.
        """
        self.buf += data
        source = self.buf[:-1]  # drop the trailing newline before parsing
        try:
            astob = ast_compile_interactive(source)
        except Exception:
            # Parse error: discard the bad input and let the caller report it.
            self.buf = ""
            raise
        if astob is None:
            # Statement is incomplete; keep buffering.
            return None
        self.buf = ""
        return await self.eval_single(astob)
class LineBuffer:
    "A simple character buffer to split data into lines"
    def __init__(self) -> None:
        # Characters received so far that don't yet end with a newline.
        self.buf: str = ""
    def add(self, data: str) -> t.List[str]:
        """Append data; return any complete lines, each with its newline restored."""
        pieces = (self.buf + data).split('\n')
        # The final piece is the (possibly empty) unterminated remainder.
        self.buf = pieces.pop()
        return [piece + '\n' for piece in pieces]
# TODO I should also be able to pass in a predicate function which I call on the return value.
# That way I can represent constraints on the returned value at a value level.
# (I still do the wanted_type so that mypy type checking is correct)
T = t.TypeVar('T')
async def run_repl(read: t.Callable[[], t.Awaitable[bytes]],
                   write: t.Callable[[bytes], t.Awaitable[None]],
                   global_vars: t.Dict[str, t.Any], wanted_type: t.Type[T]) -> T:
    """Drive a REPL over the given read/write byte channels until the user
    returns a value of wanted_type, which is then returned from this function.

    User-visible `print` and `help` are installed into global_vars, and `_`
    is bound to the value of the last evaluated expression.
    """
    async def print_to_user(*args) -> None:
        # print() replacement: joins args with spaces and writes them out.
        await write((" ".join([str(arg) for arg in args]) + "\n").encode())
    async def help_to_user(request) -> None:
        # help() replacement: renders help text over the write channel.
        await write(help_to_str(request).encode())
    async def print_exn(e: BaseException) -> None:
        # this call to run_repl may take place at a time where an exception is being handled; when
        # we print this internal exception, we don't want to print that outside exception context,
        # since it's irrelevant.
        e.__suppress_context__ = True # type: ignore
        await write("".join(traceback.format_exception(None, e, e.__traceback__)).encode())
    global_vars['print'] = print_to_user
    global_vars['help'] = help_to_user
    repl = PureREPL(global_vars)
    line_buf = LineBuffer()
    await write(b">")
    while True:
        raw_data = await read()
        if len(raw_data) == 0:
            # EOF from the peer: no way to continue the session.
            raise Exception("REPL hangup")
        for line in line_buf.add(raw_data.decode()):
            try:
                result = await repl.add_line(line)
            except Exception as exn:
                # Parse errors are shown to the user; the REPL keeps going.
                await print_exn(exn)
                continue
            if result is None:
                # Incomplete statement; keep reading lines.
                continue
            if isinstance(result, ReturnResult):
                try:
                    # NOTE(review): two-positional-arg check_type is the old
                    # typeguard (<3.0) API — confirm the pinned version.
                    typeguard.check_type('return value', result.value, wanted_type)
                except TypeError as e:
                    # Wrong type returned: report and continue the session.
                    await print_exn(e)
                else:
                    return result.value
            elif isinstance(result, ExceptionResult):
                if isinstance(result.exception, FromREPL):
                    # Deliberately propagated out of the REPL by user code.
                    raise result.exception
                else:
                    await print_exn(result.exception)
            elif isinstance(result, ExpressionResult):
                await print_to_user(result.value)
                global_vars['_'] = result.value
            elif isinstance(result, FallthroughResult):
                pass
            else:
                raise Exception("bad Result returned from PureREPL", result)
        await write(b">")
import posixpath
from functools import wraps
import requests
from oauthlib.oauth2 import BackendApplicationClient
from oauthlib.oauth2.rfc6749.errors import (AccessDeniedError,
InvalidClientError,
MissingTokenError)
from requests_oauthlib import OAuth2Session
from rt_api import models, constants
def authenticated(func):
    """Decorate methods that make api calls that need authentication.

    This decorator will ensure that methods that need authentication will
    only be called if the api instance is authenticated.
    Otherwise a ``NotAuthenticatedError`` will be raised.
    """
    @wraps(func)
    def auth_call(*args, **kwargs):
        # Bound methods receive the Api instance as their first argument.
        api = args[0]
        if api.user_id or api._me:
            return func(*args, **kwargs)
        raise NotAuthenticatedError
    return auth_call
class Api(object):
    """Main class of the API.

    Create an instance of this to access the api.
    """

    def __init__(self, api_key=None):
        """Create an api object.

        Args:
            api_key (str, optional): api key to use.
                If one is not supplied, a default one will be generated and used.
        """
        self.__session = requests.Session()
        self.user_id = None
        self._me = None
        if api_key:
            self.__token = api_key
        else:
            self._get_token()
        self.__session.headers.update({"Authorization": self.__token})

    @property
    def token(self):
        """Return the token currently in use by the api.

        Returns:
            str: Token currently in use by this instance.
        """
        return self.__token

    def _get_token(self):
        """Get an API token.

        Raises:
            AuthenticationError: if getting token fails.
        """
        client = BackendApplicationClient(client_id=constants.CLIENT_ID)
        oauth = OAuth2Session(client=client)
        # Retry auth if error (to get around intermittent failures)
        latest_exception = None
        for _ in range(3):
            try:
                token = oauth.fetch_token(
                    token_url=constants.AUTH_URL, client_id=constants.CLIENT_ID, client_secret=constants.CLIENT_SECRET)
                self.__token = token["access_token"]
                self.__session = oauth
                self._me = None
                return
            except (AccessDeniedError, InvalidClientError, MissingTokenError) as e:
                latest_exception = e
                continue
        raise AuthenticationError("Failed to get authentication token: {0}".format(latest_exception))

    def authenticate(self, username, password):
        """Authenticate to the API using a username and password.

        The token retrieved will be used for future API requests, and will
        persist only until the api object is destroyed. If you wish to use
        this token in future sessions, you should save the token and use it
        again when creating an api object.

        The token retrieved is linked to the credentials used to authenticate.
        Using the token in the api mean actions performed will be done as the
        user with those credentials.

        Args:
            username (str): Username of Rooster Teeth account.
            password (str): Password of Rooster Teeth account.

        Returns:
            str: Token retrieved from API on successful authentication.

        Raises:
            AuthenticationError: if authentication fails.
        """
        # TODO retry auth if error (to get around intermittent failures)
        payload = {
            "client_id": constants.CLIENT_ID,
            "client_secret": constants.CLIENT_SECRET,
            "grant_type": "password",
            "scope": "user.access",
            "username": username,
            "password": password
        }
        result = self.__session.post(constants.AUTH_URL, data=payload)
        try:
            data = result.json()
        except ValueError:
            # Body was not valid JSON; surface the status and raw text so the
            # failure can be diagnosed.  (Narrowed from a bare `except:`,
            # which also swallowed KeyboardInterrupt/SystemExit.)
            raise AuthenticationError("Failed to get authentication token: {0} {1}".format(
                result.status_code, result.text))
        if result.status_code == 401:
            error = data.get("error_message")
            raise AuthenticationError(error)
        elif result.status_code == 201:
            # Success
            self.__session = requests.Session()
            self.__token = data['access_token']
            self.__session.headers.update({"Authorization": self.__token})
            self.user_id = int(result.headers.get("X-User-Id"))
            return self.__token

    @property
    def me(self):
        """Access the :class:`~rt_api.models.User` object for the authenticated user.

        If not authenticated as a user, returns None.

        Returns:
            User: The user object corresponding to authenticated user or None.
        """
        if not self._me and self.user_id:
            self._me = self.user(self.user_id)
        elif not self._me and not self.user_id:
            # No user id, and no user, so cant find authenticated user
            return None
        return self._me

    def __get_data(self, url, params=None):
        """Get the data at the given URL, using supplied parameters.

        Args:
            url (str): The URL to retrieve data from.
            params (dict, optional): Key-value pairs to include when making the request.

        Returns:
            json: The JSON response, or None if the item does not exist or
                the body could not be parsed.
        """
        response = self.__session.get(url, params=params)
        # Check status code
        if response.status_code == 401:
            # TODO Bad api key
            response.raise_for_status()
        elif response.status_code == 404:
            # Api Item does not exist
            return None
        elif response.status_code != requests.codes.ok:
            response.raise_for_status()
        try:
            return response.json()
        except ValueError:
            # Parsing json response failed
            return None

    def __build_response(self, path, model_class):
        """Retrieve data from given path and load it into an object of given model class.

        Args:
            path (str): Path of API to send request to.
            model_class (type): The type of object to build using the response from the API.

        Returns:
            object: Instance of the specified model class.
        """
        data = self.__get_data(posixpath.join(constants.END_POINT, path))
        if not data:
            # TODO raise exception complaining that no data was retrieved from api?
            return None
        return model_class(data, self)

    def __get_multiple(self, model_class, path, key=None, **kwargs):
        """Retrieve from API endpoint that returns a list of items.

        Args:
            model_class (type): The type of object to build using the response from the API.
            path (str): The path of API to send request to.
            key (str, optional): Key to use as index into each item of data from API.
            **kwargs: Key-value pairs to include when making the request.

        Returns:
            list: A list containing items of type model_class.
        """
        url = posixpath.join(constants.END_POINT, path)
        data = self.__get_data(url, kwargs)
        if not data:
            return None
        items = []
        for json_item in data:
            item = json_item[key] if key else json_item
            items.append(model_class(item, self))
        return items

    def __pager(self, model_class, path, count=20, page=1, **kwargs):
        """Paginate an API resource.

        This is a generator that yields a single result.
        It handles retrieving new pages from the Api as needed.

        Args:
            model_class (type): The type of model that will be instantiate for api results.
            path (str): Path of API to send request to.
            count (int): Number of Api items per page (Default value = 20).
            page (int): The page to start the generator from (Default value = 1).
            **kwargs: Key-value pairs to include when making the request.

        Yields:
            object: An instance of ``model_class``.
        """
        while True:
            items = self.__get_multiple(model_class, path, page=page, count=count, **kwargs)
            if items:
                for item in items:
                    yield item
                page += 1
            else:
                break

    def episode(self, episode_id):
        """Retrieve the episode corresponding to the specified id.

        Args:
            episode_id (int): ID of the episode to retrieve.

        Returns:
            Episode: Episode instance.
        """
        return self.__build_response("episodes/{0}".format(episode_id), models.Episode)

    def episodes(self, site=None, page=1, count=20):
        # TODO add more explanation about how iterable works (see shows() doc)
        """Get latest episodes from feed.

        Args:
            site (str, optional): If specified, only episodes from this site will be returned.
            page (int): The page to start from (Default value = 1).
            count (int): Number of Episodes per page (Default value = 20).

        Returns:
            iterable: An iterable collection of :class:`Episodes <rt_api.models.Episode>`
                from 'latest' feed.
        """
        return self.__pager(models.Episode, "feed/", key="item", type="Episode", count=count, page=page, site=site)

    def season(self, season_id):
        """Retrieve the season corresponding to the specified id.

        Args:
            season_id (int): ID of the season to retrieve.

        Returns:
            Season: Season instance.
        """
        return self.__build_response("seasons/{0}".format(season_id), models.Season)

    def season_episodes(self, season_id):
        """Retrieve the episodes that belong to the season with the specified id.

        Args:
            season_id (int): ID of the season.

        Returns:
            list: A list of :class:`~rt_api.models.Episode` objects.
        """
        res = []
        for episode in self.__pager(models.Episode, "seasons/{0}/episodes".format(season_id), count=20, page=1):
            res.append(episode)
        return res

    def show_seasons(self, show_id):
        """Get the seasons belonging to show with specified ID.

        Args:
            show_id (int): ID of the show.

        Returns:
            list: A list of :class:`~rt_api.models.Season` objects.
        """
        return self.__get_multiple(models.Season, "shows/{0}/seasons/".format(show_id))

    def show(self, show_id):
        """Return show with given id.

        Args:
            show_id (int): ID of the show to retrieve.

        Returns:
            Show: Show instance.
        """
        return self.__build_response("shows/{0}".format(show_id), models.Show)

    def shows(self, site=None, page=1, count=20):
        """Return an iterable feed of :class:`Shows <rt_api.models.Show>`.

        This will return an iterable, which starts at the specified page,
        and can be iterated over to retrieve all shows onwards.
        Under the hood, as this is iterated over, new pages are fetched from the API.
        Therefore, the size of ``count`` will dictate the delay this causes.
        A larger ``count`` means larger delay, but fewer total number of
        pages will need to be fetched.

        Args:
            site (str): Only return shows from specified site, or all sites if None.
            page (int): The page to start from (Default value = 1).
            count (int): Number of Shows per page (Default value = 20).

        Returns:
            iterable: An iterable collection of :class:`Shows <rt_api.models.Show>`.

        Example::

            r = rt_api()
            show_feed = r.shows(site="theKnow")
            for show in show_feed:
                print(show)
        """
        # TODO 'site' should be an Enum?
        return self.__pager(models.Show, "shows/", count=count, page=page, site=site)

    def user(self, user_id):
        """Retrieve the User with the specified id.

        Args:
            user_id (int): ID of the user to retrieve.

        Returns:
            User: User instance.
        """
        return self.__build_response("users/{0}".format(user_id), models.User)

    @authenticated
    def update_user_details(self, user_id, **kwargs):
        """Update the details of the user with the specified id.

        You must be authenticated as the user to be updated.
        Attributes should be specified as keyword arguments.

        Possible keyword arguments:
            displayTitle,
            name,
            sex,
            location,
            occupation,
            about

        Note:
            All attributes will be updated. If an attribute is not specified,
            the remote end assumes it to be empty and sets it as such.

        Args:
            user_id (int): ID of the user to update.

        Raises:
            NotAuthenticatedError: if not currently authenticated as a user,
                or this is attempted on a user not authenticated as.
        """
        if user_id != self.user_id:
            # Attempting to update a user we are not authenticated as.
            # This will result in a 401 response, so don't bother sending request.
            raise NotAuthenticatedError
        path = "users/{0}".format(user_id)
        url = posixpath.join(constants.END_POINT, path)
        data = kwargs
        response = self.__session.put(url, data=data)
        response.raise_for_status()
        # Update 'me' user with new details
        # TODO update existing user object instead of creating new one and replacing reference
        self._me = models.User(response.json(), self)

    def user_queue(self, user_id, page=1, count=20):
        # TODO add more explanation about how iterable works (see shows() doc)
        """Retrieve the episodes in specified user's queue.

        Args:
            user_id (int): The ID of the user to get the queue of.
            page (int): The page to start from (Default value = 1).
            count (int): Number of Episodes per page (Default value = 20).

        Returns:
            iterable: Iterable of :class:`~rt_api.models.Episode` instances.
        """
        return self.__pager(models.Episode, "users/{0}/queue".format(user_id), page=page, count=count)

    @authenticated
    def add_episode_to_queue(self, episode_id):
        """Add specified episode to current user's queue.

        Args:
            episode_id (int): ID of the episode to add to user's queue.

        Returns:
            str: Success message from API or None.

        Raises:
            NotAuthenticatedError: if not currently authenticated as a user.
        """
        path = "episodes/{0}/add-to-queue".format(episode_id)
        url = posixpath.join(constants.END_POINT, path)
        response = self.__session.post(url)
        response.raise_for_status()
        # Mark user queue as needing refresh
        self.me.queue_dirty = True
        return response.headers.get("X-Message")

    @authenticated
    def remove_episode_from_queue(self, episode_id):
        """Remove specified episode from current user's queue.

        Args:
            episode_id (int): ID of the episode to remove from user's queue.

        Returns:
            str: Success message from API or None.

        Raises:
            NotAuthenticatedError: if not currently authenticated as a user.
        """
        path = "episodes/{0}/remove-from-queue".format(episode_id)
        url = posixpath.join(constants.END_POINT, path)
        response = self.__session.delete(url)
        response.raise_for_status()
        # Mark user queue as needing refresh
        self.me.queue_dirty = True
        return response.headers.get("X-Message")

    @authenticated
    def mark_episode_watched(self, episode_id):
        """Mark the specified episode as having been watched by the current user.

        Args:
            episode_id (int): ID of the episode to mark as having been watched.
        """
        path = "episodes/{0}/mark-as-watched".format(episode_id)
        url = posixpath.join(constants.END_POINT, path)
        response = self.__session.put(url)
        response.raise_for_status()

    def search(self, query, include=None):
        """Perform a search for the specified query.

        Currently only supports searching for Episodes, Shows, and Users.
        Unfortunately, the Api only returns up to 10 of each resource type.

        Args:
            query (str): The value to search for.
            include (list, optional): A list of types to include in the results (Default value = None).
                If ``include`` is specified, only objects of those types will be returned in the results.

        Example:
            Search for "funny", only in shows and episodes.

            .. code-block:: python

                search("funny", include=[rt_api.models.Show, rt_api.models.Episode])

        Returns:
            list: The search results.
        """
        url = posixpath.join(constants.END_POINT, "search/?q={0}".format(query))
        data = self.__get_data(url)
        mapping = {
            "episodes": models.Episode,
            "shows": models.Show,
            "users": models.User
        }
        items = []
        if not data:
            # Request failed or returned nothing: report no results rather
            # than raising TypeError by iterating over None.
            return items
        for result_set in data:
            # Try to find corresponding model for this result type
            model_key = None
            for result_type in mapping:
                if result_type in result_set.keys():
                    model_key = result_type
                    break
            if model_key:
                # Check if we are doing any filtering
                if include and mapping[model_key] not in include:
                    # This model is not in 'include', so skip it
                    continue
                for item in result_set[model_key]:
                    items.append(mapping[model_key](item))
        return items
class AuthenticationError(Exception):
    """Raised when an error is encountered while performing authentication."""
class NotAuthenticatedError(Exception):
    """Raised if an action requiring authentication is attempted but no account is authenticated."""
from urllib.parse import quote_plus, urlencode
from rt_client.common import utils
from rt_client import exceptions
# Valid record types
# Each name corresponds to an RT REST2 endpoint; RecordManager.__init__
# rejects anything not listed here.
RECORD_TYPES = (
    "ticket",
    "queue",
    "asset",
    "user",
    "group",
    "catalog",
    "attachment",
    "customfield",
    "customrole",
)
class RecordManager(object):
    def __init__(self, client, record_type):
        """
        Generic Record Manager.

        Args:
            client (RTClient): A valid RTClient instance.
            record_type (str): String value present in RECORD_TYPES.

        Raises:
            ValueError: If the record_type is not in RECORD_TYPES.
        """
        if record_type not in RECORD_TYPES:
            raise ValueError(f"Invalid record type: {record_type}")
        self.record_type = record_type
        self.client = client

    def create(self, attrs):
        """
        Generic record creation.

        Args:
            attrs (dict): A dictionary of attributes for the record.

        Returns:
            json dict of attributes.

        Raises:
            See Python Requests docs at
            http://docs.python-requests.org/en/master/_modules/requests/exceptions/
        """
        return self.client.post(self.record_type, attrs)

    def get(self, record_id):
        """
        Generic record retrieval.

        Args:
            record_id (str): The id code of the specific record to retrieve.

        Returns:
            json dict of attributes.

        Raises:
            See Python Requests docs at
            http://docs.python-requests.org/en/master/_modules/requests/exceptions/
        """
        return self.client.get(f"{self.record_type}/{record_id}")

    def get_all(self, fields=None, order_by=None, order="DESC", page=1, per_page=20):
        """
        Generic record archive retrieval.

        Args:
            fields (dict/list/string, optional): A value representing the fields or
                subfields wanted from the record. Expected format is the form of:
                - "FieldA"
                - ["FieldA", "FieldB", "FieldC"]
                - {"FieldA": "SubfieldA"}
                - {"FieldA": {"SubfieldA": "Sub-subfieldA"}}
                - {"FieldA": {"SubfieldA": "Sub-subfieldA"},
                   {"FieldC": {"SubfieldC": "Sub-subfieldC"}}
                - ["FieldA", {"FieldA": "SubfieldA"}]
            order_by (str, optional): A field to sort records by.
            order (str, optional): The order to sort results in. 'ASC' or 'DESC'.
                Defaults to 'DESC'
            page (int, optional): The page number, for paginated results.
                Defaults to the first (1) page.
            per_page (int, optional): Number of results per page. Defaults
                to 20 records per page, maximum value of 100.

        Returns:
            JSON dict with keys "count", "page", "per_page", "total",
            and "items" (the list of records).

        Raises:
            See Python Requests docs at
            http://docs.python-requests.org/en/master/_modules/requests/exceptions/
        """
        payload = {"page": page, "per_page": per_page}
        if fields:
            payload.update(utils.build_fields_query(fields))
        if order_by:
            payload.update({"orderby": order_by, "order": order})
        query_string = urlencode(payload, quote_via=quote_plus)
        return self.client.get(f"{self.record_type}s/all?{query_string}")

    def update(self, record_id, attrs):
        """
        Generic record update.

        Args:
            record_id (str): The id code of the specific record to update.
            attrs (dict): A dictionary of attributes with updated values.

        Returns:
            Array containing a string with confirmation of update.

        Raises:
            See Python Requests docs at
            http://docs.python-requests.org/en/master/_modules/requests/exceptions/
        """
        return self.client.put(f"{self.record_type}/{record_id}", attrs)

    def delete(self, record_id):
        """
        Generic record deletion.

        Args:
            record_id (str): The id code of the specific record to delete.

        Returns:
            Array containing a string with confirmation of deletion.

        Raises:
            See Python Requests docs at
            http://docs.python-requests.org/en/master/_modules/requests/exceptions/
        """
        return self.client.delete(f"{self.record_type}/{record_id}")

    def search(
        self,
        search_terms,
        fields=None,
        page=1,
        per_page=20,
        order_by=None,
        order="DESC",
    ):
        """
        Generic record search.

        Args:
            search_terms (array of dict): An array of dicts containing
                the keys "field", "operator" (optional), and "value."
                Example:
                    [
                        { "field": "Name",
                          "operator": "LIKE",
                          "value": "Engineering" },
                        { "field": "Lifecycle",
                          "value": "helpdesk" }
                    ]
                The caller's list is not modified.
            fields (dict/list/string, optional): A value representing the fields or
                subfields wanted from the record. Expected format is the form of:
                - "FieldA"
                - ["FieldA", "FieldB", "FieldC"]
                - {"FieldA": "SubfieldA"}
                - {"FieldA": {"SubfieldA": "Sub-subfieldA"}}
                - {"FieldA": {"SubfieldA": "Sub-subfieldA"},
                   {"FieldC": {"SubfieldC": "Sub-subfieldC"}}
                - ["FieldA", {"FieldA": "SubfieldA"}]
            page (int, optional): The page number, for paginated results.
                Defaults to the first (1) page.
            per_page (int, optional): Number of results per page. Defaults
                to 20 records per page, maximum value of 100.
            order_by (str, optional): A field to sort records by.
            order (str, optional): The order to sort results in. 'ASC' or 'DESC'.
                Defaults to 'DESC'

        Returns:
            JSON dict with keys "count", "page", "per_page", "total",
            and "items" (the list of records).

        Raises:
            See Python Requests docs at
            http://docs.python-requests.org/en/master/_modules/requests/exceptions/
        """
        # Build a new list rather than .extend()-ing the caller's list, so
        # repeated calls don't keep appending pagination terms to it.
        terms = search_terms + [
            {"field": "page", "value": page},
            {"field": "per_page", "value": per_page},
        ]
        payload = {}
        if fields:
            payload.update(utils.build_fields_query(fields))
        if order_by:
            payload.update({"orderby": order_by, "order": order})
        query_string = urlencode(payload, quote_via=quote_plus)
        return self.client.post(
            f"{self.record_type}s?{query_string}", content=terms
        )

    def history(self, record_id, fields=None, page=1, per_page=20):
        """
        Generic history retrieval.

        Args:
            record_id (str): The id code of the specific record.
            fields (dict/list/string, optional): A value representing the fields or
                subfields wanted from the record. Expected format is the form of:
                - "FieldA"
                - ["FieldA", "FieldB", "FieldC"]
                - {"FieldA": "SubfieldA"}
                - {"FieldA": {"SubfieldA": "Sub-subfieldA"}}
                - {"FieldA": {"SubfieldA": "Sub-subfieldA"},
                   {"FieldC": {"SubfieldC": "Sub-subfieldC"}}
                - ["FieldA", {"FieldA": "SubfieldA"}]
            page (int, optional): The page number, for paginated results.
                Defaults to the first (1) page.
            per_page (int, optional): Number of results per page. Defaults
                to 20 records per page, maximum value of 100.

        Returns:
            JSON dict with keys "count", "page", "per_page", "total",
            and "items" (the list of transactions).

        Raises:
            See Python Requests docs at
            http://docs.python-requests.org/en/master/_modules/requests/exceptions/
        """
        payload = {"page": page, "per_page": per_page}
        if fields:
            payload.update(utils.build_fields_query(fields))
        endpoint = f"{self.record_type}/{record_id}/history?"
        endpoint += urlencode(payload, quote_via=quote_plus)
        return self.client.get(endpoint)

    def _not_supported_msg(self, operation):
        """Build the error message used when an operation is unsupported."""
        err_message = f"{operation.title()} is not supported "
        err_message += f"for record type {self.record_type} due to RT API limitations."
        return err_message
class LimitedRecordManager(RecordManager):
    """RecordManager for record types whose RT REST2 API does not support
    the full set of operations.

    Every overridden method unconditionally raises
    exceptions.UnsupportedOperation with a standard explanatory message
    built by RecordManager._not_supported_msg.
    """

    def get_all(self, *args, **kwargs):
        # Listing all records is not available for this record type.
        raise exceptions.UnsupportedOperation(self._not_supported_msg("get all"))

    def create(self, *args, **kwargs):
        # Record creation is not available for this record type.
        raise exceptions.UnsupportedOperation(self._not_supported_msg("create"))

    def update(self, *args, **kwargs):
        # Record updates are not available for this record type.
        raise exceptions.UnsupportedOperation(self._not_supported_msg("update"))

    def delete(self, *args, **kwargs):
        # Record deletion is not available for this record type.
        raise exceptions.UnsupportedOperation(self._not_supported_msg("delete"))

    def history(self, *args, **kwargs):
        # History retrieval is not available for this record type.
        raise exceptions.UnsupportedOperation(self._not_supported_msg("history"))
import os
import json
from urllib.parse import quote_plus, urlencode
import magic
from rt_client import exceptions
from rt_client.common import utils
from rt_client.v2.client import RecordManager
# Ticket Statuses
# All lifecycle states a ticket may be in; change_status() validates the
# requested state against this tuple before issuing an update.
STATUS_TYPES = ("new", "open", "stalled", "resolved", "rejected", "deleted")
# Subset of STATUS_TYPES that mark a ticket as closed. Not referenced in this
# module; presumably intended for callers — TODO confirm.
CLOSED_STATUS = ("resolved", "rejected", "deleted")
class TicketManager(RecordManager):
    """Record manager for RT tickets.

    Adds ticket-specific operations on top of the generic RecordManager:
    correspondence (reply), internal comments, status transitions, bulk
    endpoints, and TicketSQL search.
    """

    record_type = "ticket"

    def __init__(self, client):
        # Client object providing post/put/get/post_files plus the raw
        # requests session (sess) and base URL (host).
        self.client = client

    def get_all(self, *args, **kwargs):
        """Not supported for tickets; always raises UnsupportedOperation."""
        # CONSISTENCY FIX(review): this raised exceptions.UnsupportedError,
        # but the sibling record_manager module raises
        # exceptions.UnsupportedOperation for the same purpose; the former
        # name appeared to be a typo that would fail with AttributeError.
        raise exceptions.UnsupportedOperation(self._not_supported_msg("get all"))

    def _prep_attachments(self, attachments):
        """
        Build requests-style multipart tuples for the given files.

        Args:
            attachments (array): Files to attach, as full path filenames.

        Returns:
            List of ("Attachments", (basename, file object, mime type))
            tuples, suitable for the ``files`` argument of a multipart POST.
        """
        # NOTE(review): the handles opened here are handed to requests for
        # the upload and are never explicitly closed; they are only reclaimed
        # by garbage collection.
        files = []
        for attachment in attachments:
            files.append(
                (
                    "Attachments",
                    (
                        os.path.basename(attachment),
                        open(attachment, "rb"),
                        magic.from_file(attachment, mime=True),
                    ),
                )
            )
        return files

    def create(self, attrs, attachments=None):
        """
        Ticket creation.

        Args:
            attrs (dict): A dictionary of attributes for the record.
            attachments (array, optional): Files to attach, as full path
                filenames. Defaults to None.
        Returns:
            json dict of attributes.
        Raises:
            See Python Requests docs at
            http://docs.python-requests.org/en/master/_modules/requests/exceptions/
        """
        if "ContentType" not in attrs:
            attrs["ContentType"] = "text/plain"
        if attachments:
            # With attachments, RT expects a multipart request whose "JSON"
            # part carries the ticket attributes.
            files = [("JSON", (None, json.dumps(attrs), "application/json"))]
            files += self._prep_attachments(attachments)
            return self.client.post_files(self.record_type, files)
        else:
            return self.client.post(self.record_type, attrs)

    def update(self, record_id, attrs, attachments=None):
        """
        Ticket update.

        Args:
            record_id (str): The id code of the specific record to update.
            attrs (dict): A dictionary of attributes with updated values.
            attachments (array, optional): Files to attach. Defaults to None.
        Returns:
            Array containing a string with confirmation of update.
        Raises:
            See Python Requests docs at
            http://docs.python-requests.org/en/master/_modules/requests/exceptions/
        """
        if attachments:
            files = [("JSON", (None, json.dumps(attrs), "application/json"))]
            files += self._prep_attachments(attachments)
            return self.client.post_files(f"{self.record_type}/{record_id}", files)
        else:
            return self.client.put(f"{self.record_type}/{record_id}", attrs)

    def bulk_create(self, data):
        """ For the creation of multiple tickets in a single request """
        # TODO Testing
        # NOTE(review): leading slash differs from the other endpoints
        # ("tickets?..." etc.) — confirm the client normalizes this.
        return self.client.post("/tickets/bulk", data)

    def bulk_update(self, data):
        """ For making changes to multiple tickets in a single request """
        # TODO Testing
        return self.client.put("/tickets/bulk", data)

    def reply(self, ticket_id, attrs, attachments=None):
        """
        Reply to a ticket, include email update to correspondents.

        Args:
            ticket_id (str): The id code of the specific ticket to reply.
            attrs (dict): A dictionary containing keys "Subject", "Content",
                and optionally "Cc" and "Bcc" fields.
            attachments (array, optional): Files to attach. Defaults to None.
        Returns:
            Array containing a string with confirmation of update.
        Raises:
            See Python Requests docs at
            http://docs.python-requests.org/en/master/_modules/requests/exceptions/
        """
        if "ContentType" not in attrs:
            attrs["ContentType"] = "text/plain"
        if attachments:
            files = [("JSON", (None, json.dumps(attrs), "application/json"))]
            files += self._prep_attachments(attachments)
            return self.client.post_files(f"ticket/{ticket_id}/correspond", files)
        else:
            return self.client.post(f"ticket/{ticket_id}/correspond", attrs)

    def comment(self, ticket_id, comment, attachments=None):
        """
        Add a comment to an existing ticket. Comments are for internal
        use and not visible to clients.

        Args:
            ticket_id (str): The id code of the specific ticket to reply.
            comment (str): The string content of the comment to be added.
            attachments (array, optional): Files to attach. Defaults to None.
        Returns:
            Array containing a string with confirmation of update.
        Raises:
            See Python Requests docs at
            http://docs.python-requests.org/en/master/_modules/requests/exceptions/
        """
        if attachments:
            content = {"id": ticket_id, "Action": "comment", "Text": comment}
            files = [("JSON", (None, json.dumps(content), "application/json"))]
            files += self._prep_attachments(attachments)
            return self.client.post_files(f"ticket/{ticket_id}/comment", files)
        else:
            # Because this endpoint needs a text/plain content type,
            # it calls client.sess.post directly, rather than going through
            # client.post like most other methods.
            # BUGFIX(review): previously this called self.client.post with a
            # host-prefixed URL and requests-style kwargs, contradicting the
            # comment above; elsewhere client.post is called as
            # client.post(endpoint, payload), so these kwargs match the raw
            # requests Session.post instead.
            return self.client.sess.post(
                self.client.host + f"ticket/{ticket_id}/comment",
                data=comment,
                headers={"Content-Type": "text/plain"},
            )

    def close(self, ticket_id, reject=False):
        """
        'Close' a ticket. The default status used for closing is "Resolved"
        though "Rejected" can be selected instead via an optional parameter.

        Args:
            ticket_id (str): The id code of the specific ticket to close.
            reject (bool, optional): Optionally close as "Rejected" rather
                than "Resolved."
        Returns:
            Array containing a string with confirmation of status update.
        Raises:
            See Python Requests docs at
            http://docs.python-requests.org/en/master/_modules/requests/exceptions/
        """
        closed = "rejected" if reject else "resolved"
        return self.update(ticket_id, {"Status": closed})

    def reopen(self, ticket_id):
        """
        Change a ticket's status to open.

        Args:
            ticket_id (str): The id code of the specific ticket to reopen.
        Returns:
            Array containing a string with confirmation of status update.
        Raises:
            See Python Requests docs at
            http://docs.python-requests.org/en/master/_modules/requests/exceptions/
        """
        return self.update(ticket_id, {"Status": "open"})

    def change_status(self, ticket_id, new_status):
        """
        Change a given ticket's status to specified value.

        Args:
            ticket_id (str): The id code of the specific ticket to reopen.
            new_status (str): A valid ticket state as a string. Valid states
                include: "new", "open", "stalled", "resolved", "rejected",
                and "deleted" (see STATUS_TYPES).
        Returns:
            Array containing a string with confirmation of status update.
        Raises:
            ValueError: If the status does not match a valid existing status.
            See Python Requests docs at
            http://docs.python-requests.org/en/master/_modules/requests/exceptions/
        """
        # DOCFIX(review): the docstring previously listed "blocked", which is
        # not in STATUS_TYPES, and omitted "deleted", which is.
        if new_status in STATUS_TYPES:
            return self.update(ticket_id, {"Status": new_status})
        else:
            raise ValueError(f"Invalid ticket status type {new_status}.")

    def history(
        self,
        ticket_id,
        fields="Data,Type,Creator,Created",
        page=1,
        per_page=20,
        order_by=None,
        order="DESC",
    ):
        """
        Retrieve transactions related to a specific ticket.

        Args:
            ticket_id (str): The id code of the ticket.
            fields (dict/list/string, optional): Fields or subfields wanted
                from the record. May be a single field name, a list of names,
                or a (nested) dict mapping fields to subfields.
            page (int, optional): The page number, for paginated results.
                Defaults to the first (1) page.
            per_page (int, optional): Number of results per page. Defaults
                to 20 records per page, maximum value of 100.
            order_by (str, optional): A field to sort records by.
            order (str, optional): The order to sort results in. 'ASC' or
                'DESC'. Defaults to 'DESC'
        Returns:
            JSON dict with pagination metadata ("count", "page", "per_page",
            "total") and the matching transactions under "items".
        Raises:
            See Python Requests docs at
            http://docs.python-requests.org/en/master/_modules/requests/exceptions/
        """
        payload = {"page": page, "per_page": per_page}
        if fields:
            payload.update(utils.build_fields_query(fields))
        if order_by:
            payload.update({"orderby": order_by, "order": order})
        endpoint = f"{self.record_type}/{ticket_id}/history?"
        endpoint += urlencode(payload, quote_via=quote_plus)
        return self.client.get(endpoint)

    def search(
        self,
        search_query,
        fields=None,
        simple_search=False,
        page=1,
        per_page=20,
        order_by=None,
        order="DESC",
    ):
        """
        Search for tickets using TicketSQL.

        Args:
            search_query (str): The query string in TicketSQL.
                Example: '(Status = "new" OR Status = "open") AND Queue = "General"'
                See https://rt-wiki.bestpractical.com/wiki/TicketSQL for more
                detailed information.
            fields (dict/list/string, optional): Fields or subfields wanted
                from the record. May be a single field name, a list of names,
                or a (nested) dict mapping fields to subfields.
            simple_search (bool, optional): When True use simple search
                syntax, when False use TicketSQL.
            page (int, optional): The page number, for paginated results.
                Defaults to the first (1) page.
            per_page (int, optional): Number of results per page. Defaults
                to 20 records per page, maximum value of 100.
            order_by (str, optional): A field to sort records by.
            order (str, optional): The order to sort results in. 'ASC' or
                'DESC'. Defaults to 'DESC'
        Returns:
            JSON dict with pagination metadata ("count", "page", "per_page",
            "total") and the matching tickets under "items".
        Raises:
            See Python Requests docs at
            http://docs.python-requests.org/en/master/_modules/requests/exceptions/
        """
        payload = {
            "query": search_query,
            # RT expects the flag as 0/1, not a boolean.
            "simple": 1 if simple_search else 0,
            "page": page,
            "per_page": per_page,
        }
        if fields:
            payload.update(utils.build_fields_query(fields))
        if order_by:
            payload.update({"orderby": order_by, "order": order})
        search_endpoint = "tickets?" + urlencode(payload, quote_via=quote_plus)
        return self.client.get(search_endpoint)
def _build_subfields_query(subfields):
    """Expand a dict of field -> subfield specs into RT query components.

    Returns a tuple (root_fields, subfield_payload): root_fields lists the
    top-level field names, and subfield_payload maps bracketed query keys of
    the form "fields[A][B]..." to their comma-joined leaf values.
    """
    root_fields = []
    subfield_payload = {}
    for field_name, spec in subfields.items():
        root_fields.append(field_name)
        key_chain, leaf_value = _build_subfield_query({field_name: spec})
        bracketed = "".join(f"[{key}]" for key in key_chain)
        subfield_payload[f"fields{bracketed}"] = leaf_value
    return root_fields, subfield_payload
def _build_subfield_query(subfield):
if isinstance(subfield, dict):
keys = list(subfield.keys())
if len(keys) != 1:
raise TypeError("Incorrectly formatted subfield query dict.")
recursive_val = _build_subfield_query(subfield[keys[0]])
if isinstance(recursive_val, tuple):
keys += recursive_val[0]
recursive_val = recursive_val[1]
return keys, recursive_val
elif isinstance(subfield, (list, set, tuple)):
return ",".join(subfield)
elif isinstance(subfield, (str)):
return subfield
raise TypeError(f"Unsupported type in subfield query: '{type(subfield)}'")
def build_fields_query(fields):
    """Build the fields variable into the structure RT expects.

    RT expects:
        {"field": "FieldA,FieldB,FieldC...",
         "field[FieldA]": "Subfield1...",
         "field[FieldA][Subfield1]": "Sub-subfield..."}

    Because callers shouldn't have to construct those bracketed keys
    directly, field queries may be passed as strings, lists, and nested
    dicts, which are parsed into that format.

    Example input:
        - 'FieldA'
        - ['FieldA', 'FieldB', 'FieldC']
        - {'FieldA': {'SubfieldA': 'Sub-subfieldA'}}
        - ["FieldA", "FieldB", {"FieldC": {"SubfieldC": "Sub-subfieldC"}}]
        - {'FieldA': {'SubfieldA': {'Sub-subfieldA': 'Sub-sub-subfieldA'}}}

    Args:
        fields (dict/list/set/tuple/str): Field query specification.

    Returns:
        dict: Query-string parameters ready for urlencode().
    """
    payload = {}
    if isinstance(fields, dict):
        root_fields, subfield_payload = _build_subfields_query(fields)
        # NOTE(adriant): For testing and consistency, we want the fields
        # deduplicated and sorted; a bare set would not give a stable order.
        payload["fields"] = ",".join(sorted(set(root_fields)))
        payload.update(subfield_payload)
    elif isinstance(fields, (list, set, tuple)):
        non_sub_fields = []
        for f in fields:
            if isinstance(f, dict):
                root_fields, subfield_payload = _build_subfields_query(f)
                non_sub_fields += root_fields
                payload.update(subfield_payload)
            elif isinstance(f, (list, set, tuple)):
                non_sub_fields += list(f)
            else:
                non_sub_fields.append(f)
        # Same dedupe-and-sort as the dict branch, for deterministic output.
        payload["fields"] = ",".join(sorted(set(non_sub_fields)))
    else:
        # Assume a plain string naming a single field (or a pre-joined list).
        payload["fields"] = fields
    return payload
# rt_opt: Run-and-tumble global optimizer
Metaheuristic global optimization algorithm combining a bacterial run-and-tumble chemotactic search
(see, e.g., [here](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5418374/)) with a local,
gradient-based search around the best minimum candidate points.
The algorithm's goal is to find the global minimum of an objective function f over a (possibly
bounded) region Ω, that is, to find
<p align="center">
min f(x), x ∈ Ω, <br/>
f: Ω ⊂ ℝ<sup>n</sup> → ℝ.
</p>
For the local, gradient-based search, a bounded BFGS algorithm is used.
Since the chemotactic search becomes more and more ineffective with increasing problem
dimensionality,
[Sequential Random Embeddings](http://www.lamda.nju.edu.cn/huyq/papers/ijcai16-sre.pdf) are used
to solve the optimization problem once its dimensionality exceeds a given threshold. The idea of
Sequential Random Embeddings is basically to reduce high-dimensional problems to low-dimensional
ones by embedding the original, high-dimensional search space ℝ<sup>n</sup> into a low dimensional
one, ℝ<sup>m</sup>, by sequentially applying the random linear transformation
<p align="center">
x<sub>0</sub> = 0<br>
x<sub>k+1</sub> = α<sub>k+1</sub>x<sub>k</sub> + A • y<sub>k+1</sub>,<br>
x ∈ ℝ<sup>n</sup>, y ∈ ℝ<sup>m</sup>,
A ∈ N(0, 1)<sup>n×m</sup>, α ∈ ℝ,
</p>
and minimizing the objective function f(α<sub>k+1</sub>x<sub>k</sub> + A • y<sub>k+1</sub>) w.r.t.
(α<sub>k+1</sub>, y<sub>k+1</sub>) for each k in a given range.
## Installation
rt_opt can be most conveniently installed via pip:
```
pip install rt_opt
```
## Usage
For a quick start, try to find the global minimum of the
[Eggholder function](https://www.sfu.ca/~ssurjano/egg.html) within the default square
x<sub>i</sub> ∈ [-512, 512] ∀ i = 1, 2:
```
import time
import numpy as np
from rt_opt.optimizer import optimize
from rt_opt.testproblems import Eggholder
problem = Eggholder()
box_bounds = np.vstack((problem.bounds.lower, problem.bounds.upper)).T
start = time.time()
ret = optimize(problem.f, bounds=box_bounds)
end = time.time()
print(f'Function minimum found at x = {ret.x}, yielding f(x) = {ret.fun}.')
print(f'Optimization took {end - start:.3f} seconds.')
print(f'Optimization error is {np.abs(ret.fun - problem.min.f)}.')
>>> Function minimum found at x = [512. 404.23180623], yielding f(x) = -959.6406627208495.
>>> Optimization took 0.097 seconds.
>>> Optimization error is 1.3642420526593924e-12.
```
### Non-rectangular bounds
If your optimization problem involves an arbitrary, non-rectangular bounded domain, you may as well
provide a custom bounds callback. Every such callback must return a tuple (x_projected, bounds_hit),
where x_projected is the input variable x projected to the defined search region. That is, if x is
within this region, it is returned unchanged, whereas if it is outside this region, it is projected
to the region's boundaries. The second output, bounds_hit, indicates whether the boundary has been
hit for each component of x. If, for example, x is three-dimensional and has hit the search region's
boundaries in x<sub>1</sub> and x<sub>2</sub>, but not in x<sub>3</sub>,
bounds_hit = [True, True, False]. Here, we define a "boundary hit" in any component of x in the
following way:<br>
bounds_hit[i] = True iff either x + δê<sub>i</sub> or x - δê<sub>i</sub> is outside
the defined search domain ∀ δ ∈ ℝ⁺, where ê<sub>i</sub> is the i-th unit vector.
In the following example, we try to find the global minimum of the Eggholder function within a
circle with radius 512 around the origin:
```
import numpy as np
import matplotlib.pyplot as plt
from rt_opt.optimizer import optimize
from rt_opt.testproblems import Eggholder
RADIUS = 512
def bounds_circle(x):
"""
Callback function for defining a circular bounded domain with a given radius around the origin.
"""
bounds_hit = np.zeros(len(x), dtype=bool)
if np.sqrt(np.sum(np.square(x))) > RADIUS: # x outside of the circle
x = x / (np.sqrt(np.sum(np.square(x)))) * RADIUS
bounds_hit = np.ones(len(x), dtype=bool)
return x, bounds_hit
def gridmap2d(fun, x_specs, y_specs):
"""
Helper function for plotting the objective function.
"""
grid_x = np.linspace(*x_specs)
grid_y = np.linspace(*y_specs)
arr_z = np.empty(len(grid_x) * len(grid_y))
i = 0
for y in grid_y:
for x in grid_x:
arr_z[i] = fun(np.array([x, y]))
i += 1
arr_x, arr_y = np.meshgrid(grid_x, grid_y)
arr_z.shape = arr_x.shape
return arr_x, arr_y, arr_z
problem = Eggholder()
ret = optimize(problem.f, x0=[0, 0], bounds=bounds_circle, domain_scale=RADIUS)
print(f'Function minimum found at x = {ret.x}, yielding f(x) = {ret.fun}.')
# Plot objective function and optimizer traces
fig, ax = plt.subplots()
X, Y, Z = gridmap2d(problem.f, (-512, 512, 100), (-512, 512, 100))
cp = ax.contourf(X, Y, Z, levels=20)
ax.set_xlim([-512, 512])
ax.set_ylim([-512, 512])
fig.colorbar(cp)
for single_trace in ret.trace.transpose(1, 0, 2):
ax.plot(single_trace[:, 0], single_trace[:, 1], 'o', c='white', ms=0.4)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title(f'Problem: Eggholder', fontsize=10)
plt.show()
>>> Function minimum found at x = [418.56055019 171.04305027], yielding f(x) = -629.6336812770477.
```

## Performance
For a performance comparison of rt_opt with other global optimization algorithms, namely
*differential_evolution* and *dual_annealing* from scipy, as well as dlib's
[LIPO]( http://blog.dlib.net/2017/12/a-global-optimization-algorithm-worth.html )-based
*find_min_global*, see [here](demo/results). The comparison results were obtained by running
[demo.py](demo/demo.py), where each of these four algorithms was evaluated 100 times with default
parameters on a couple of 2D and 15D test functions (details on these functions can be found
[here](https://en.wikipedia.org/wiki/Test_functions_for_optimization)).
| /rt_opt-0.1.1.tar.gz/rt_opt-0.1.1/README.md | 0.761893 | 0.913213 | README.md | pypi |
import itertools
import warnings
import numpy as np
from scipy import spatial, special
from scipy.optimize import OptimizeResult
from rt_opt.global_search import run_and_tumble
from rt_opt.local_search import bfgs_b
def _prepare_bounds(bounds, n_dims):
"""
Check size and validity of a rectangular bounds object, and turn it into the required format for
the following calculations.
:param bounds: [array-like object] Rectangular bounds object
:param n_dims: [int] Dimensionality of the problem
:return: (bound_lower [np.array], bound_upper [np.array])
"""
if bounds is not None:
bounds = np.array(bounds)
if n_dims is not None:
assert bounds.shape == (n_dims, 2), ('bounds has wrong shape. Expected shape: ' +
'(n_dims, 2), where n_dims is the ' +
'dimensionality of the problem.')
bound_lower = bounds[:, 0]
bound_upper = bounds[:, 1]
assert (bound_upper > bound_lower).all(), ('Upper bound must always be larger than lower ' +
'bound.')
return bound_lower, bound_upper
else:
assert n_dims is not None, 'If bounds is None, n_dims must be provided.'
return np.repeat(-np.inf, n_dims), np.repeat(np.inf, n_dims)
def _prepare_x0(x0, n_bacteria_per_dim, max_dims, n_reduced_dims_eff):
"""
Check and prepare initial conditions object x0. If x0 is a vector, that is, if it has the shape
(n_dims,) it is duplicated times the total number of bacteria, which is given by
i) n_bacteria = n_bacteria_per_dim ** n_dims if n_dims <= max_dims or
ii) n_bacteria = n_bacteria_per_dim ** n_reduced_dims_eff if n_dims > max_dims.
:param x0: [array-like object] Initial conditions object. Must have the shape
(n_bacteria, n_dims) or (n_dims,)
:param n_bacteria_per_dim: [int] Number of bacteria for each dimension
:param max_dims: [int] Maximum dimension of problems to be solved without using Sequential
Random Embeddings
:param n_reduced_dims_eff: [int] Number of effective reduced dimensions used by the Sequential
Random Embeddings algorithm
:return: Initial conditions for all bacteria [np.array of shape (n_bacteria, n_dims)]
"""
x0 = np.array(x0)
if len(x0.shape) == 1:
n_dims = x0.shape[0]
n_bacteria = (n_bacteria_per_dim ** n_dims if n_dims <= max_dims else
n_bacteria_per_dim ** n_reduced_dims_eff)
x0_population = np.tile(x0, (n_bacteria, 1))
elif len(x0.shape) == 2:
n_dims = x0.shape[1]
n_bacteria = x0.shape[0]
n_bacteria_target = (n_bacteria_per_dim ** n_dims if n_dims <= max_dims else
n_bacteria_per_dim ** n_reduced_dims_eff)
if n_bacteria != n_bacteria_target:
warnings.warn('The number of bacteria given by x0 does not match the number of ' +
'bacteria given by the relation ' +
'n_bacteria = n_bacteria_per_dim ** n_dims if n_dims <= max_dims else ' +
'n_bacteria_per_dim ** (n_reduced_dims + 1). The latter implies that ' +
f'n_bacteria = {n_bacteria_target}, whereas the former implies ' +
f'that n_bacteria = {n_bacteria}. Using n_bacteria = {n_bacteria}.')
x0_population = x0.copy()
else:
raise ValueError('x0 must be an array of either the shape (n_bacteria, n_dims) or ' +
'(n_dims,).')
return x0_population
def _pad_trace(trace, targetLength):
"""
Pad single-bacteria trace to given length.
:param trace: [np.array] Single-bacteria trace
:param targetLength: [int] Desired length
:return: Padded trace [np.array]
"""
currentLength = trace.shape[0]
paddingLength = (targetLength - currentLength)
return np.pad(trace, [(0, paddingLength), (0, 0)], mode="edge")
def _sequential_random_embeddings(f, x0, bounds, n_reduced_dims_eff=3, n_embeddings=10,
                                  verbosity=1, **optimizer_kwargs):
    """
    Implementation of the Sequential Random Embeddings algorithm described in
    +++++
    H. Qian, Y.-Q. Hu, and Y. Yu, Derivative-Free Optimization of High-Dimensional Non-Convex
    Functions by Sequential Random Embeddings, Proceedings of the Twenty-Fifth International Joint
    Conference on Artificial Intelligence, AAAI Press (2016).
    +++++
    The idea is basically to reduce high-dimensional problems to low-dimensional ones by embedding
    the original, high-dimensional search space ℝ^h into a low dimensional one, ℝ^l, by
    sequentially applying the random linear transformation
    x(n+1) = α(n+1)x(n) + A•y(n+1), x ∈ ℝ^h, y ∈ ℝ^l, A ∈ N(0, 1)^(h×l), α ∈ ℝ
    and minimizing the objective function f(αx + A•y) w.r.t. (α, y).
    :param f: [callable] Objective function. Must accept its argument x as numpy array
    :param x0: [np.array] Initial values for the bacteria population in the original,
        high-dimensional space ℝ^h
    :param bounds: [callable] Bounds projection, see description of parameter
        ``projection_callback`` in :func:`local_search.bfgs_b`
    :param n_reduced_dims_eff: [int] Effective dimension of the embedded problem, ℝ^(l+1)
    :param n_embeddings: [int] Number of embedding iterations
    :param verbosity: [int] Output verbosity. Must be 0, 1, or 2
    :param optimizer_args: [dict] Arguments to pass to the actual optimization routine
    :return: Best minimum of f found [scipy.optimize.OptimizeResult]
    """
    assert verbosity in [0, 1, 2], 'verbosity must be 0, 1, or 2.'
    orig_dim = x0.shape[1]
    # Running iterate x lives in the original space ℝ^h and is refined by
    # each successive embedding; the embedded variable is (α, y) with
    # α = arg[0] and y = arg[1:].
    x = np.zeros(orig_dim)
    x_best = x.copy()
    f_best = np.inf
    # Accumulated function-evaluation / iteration counts over all embeddings.
    nfev = nit = 0
    success_best = False
    for i in range(n_embeddings):
        # Fresh random embedding matrix A ∈ N(0, 1)^(h×(l)) each iteration.
        A = np.random.normal(size=(orig_dim, n_reduced_dims_eff - 1))
        # Normalize rows of A
        # NOTE(review): rows are divided by their (signed) sum, with
        # zero-sum rows left unscaled — presumably intentional; confirm
        # against the SRE paper, since a sum can be negative or tiny.
        normalization_sum = A.sum(axis=1)
        normalization_sum = np.where(normalization_sum == 0, 1, normalization_sum)
        A = A / normalization_sum[:, np.newaxis]
        # Objective in embedded coordinates: evaluate f at the projected
        # original-space point α·x + A·y.
        def f_embedded(arg): return f(bounds(arg[0] * x + A.dot(arg[1:]))[0])
        # Set up bounds callback
        def bounds_embedded(arg):
            bounds_hit = np.zeros(len(arg), dtype=bool)
            x_proj, bounds_hit_orig = bounds(arg[0] * x + A.dot(arg[1:]))
            if bounds_hit_orig.any():  # Boundary hit in original, non-embedded variable
                # Pull the projected original-space point back into embedded
                # coordinates via least squares (A is generally non-square).
                arg[1:] = np.linalg.lstsq(A, x_proj - arg[0] * x, rcond=None)[0]
                # A y-component counts as "at the boundary" if it feeds any
                # original component that hit the boundary.
                bounds_hit[1:] = (A[bounds_hit_orig] != 0).any(axis=0)
            return arg, bounds_hit
        # Set up y0
        # Embedded start points: α = 1 and y chosen so that α·x + A·y best
        # approximates each original-space start point (least squares).
        y0 = np.zeros((x0.shape[0], n_reduced_dims_eff))
        y0[:, 0] = 1
        y0[:, 1:] = np.array([np.linalg.lstsq(A, x_orig - x, rcond=None)[0] for x_orig in x0])
        if verbosity > 0:
            infoMsg = f'\nEmbedding iteration {i}'
            print(infoMsg)
            # Underline; len() counts the leading '\n', so the rule is one
            # character longer than the visible title.
            print('-' * len(infoMsg))
        optimizer_kwargs['verbosity'] = verbosity
        # Initial conditions may legitimately fall outside the domain in
        # embedded coordinates, so silence that particular warning.
        with warnings.catch_warnings():
            warnings.filterwarnings(
                'ignore', message='Found initial conditions outside the defined search domain.'
            )
            res_embedded = optimize(f_embedded, x0=y0, bounds=bounds_embedded, **optimizer_kwargs)
        y = res_embedded.x
        f_val = res_embedded.fun
        nfev += res_embedded.nfev
        nit += res_embedded.nit
        # Map the embedded optimum back to (projected) original coordinates;
        # this becomes the base point for the next embedding.
        x = bounds(y[0] * x + A.dot(y[1:]))[0]
        if verbosity > 0:
            print(f'Random embedding gave x = {x}.')
        if f_val < f_best:
            f_best = f_val
            x_best = x.copy()
            success_best = res_embedded.success
    result = OptimizeResult()
    result.success = success_best
    result.x = x_best
    result.fun = f_best
    result.nfev = nfev
    result.nit = nit
    # No per-bacterium trace is assembled across embeddings.
    result.trace = None
    return result
def optimize(f, x0=None, bounds=None, domain_scale=None, init='uniform', stepsize_start=None,
stepsize_decay_fac=1e-3, base_tumble_rate=0.1, niter_rt=400, n_bacteria_per_dim=3,
stationarity_window=20, eps_stat=1e-3, attraction=False, attraction_window=10,
attraction_sigma=None, attraction_strength=0.5, bounds_reflection=False,
n_best_selection=3, c_gd=1e-6, a_gd=None, n_linesearch_gd=20, alpha_linesearch_gd=0.5,
beta_linesearch_gd=0.33, eps_abs_gd=1e-9, eps_rel_gd=1e-6, niter_gd=100,
n_embeddings=5, max_dims=3, n_reduced_dims=2, verbosity=0):
"""
Metaheuristic global optimization algorithm combining a bacterial run-and-tumble chemotactic
search with a local, gradient-based search around the best minimum candidate points.
The algorithm's goal is to find
min f(x), x ∈ Ω,
where f: Ω ⊂ ℝ^n → ℝ.
Since the chemotactic search becomes more and more ineffective with increasing problem
dimensionality, Sequential Random Embeddings are used to solve the optimization problem once its
dimensionality exceeds a given threshold.
:param f: [callable] Objective function. Must accept its argument x as numpy array
:param x0: [array-like object] Optional initial conditions object. Must have the shape
(n_bacteria, n_dims) or (n_dims,). If x0 == None, initial conditions are sampled randomly
or uniformly-spaced from Ω. Note that this only works if Ω is a rectangular box, i.e., if
no or non-rectangular bounds are imposed, x0 must not be None
:param bounds: [callable or array-like object] Defines the bounded domain Ω. If provided, must
be one of the following:
- Bounds projection callback, as defined in description of parameter
``projection_callback`` in :func:`local_search.bfgs_b`
- Rectangular box constraints. For each component x_i of x,
bounds[i, 0] <= x_i <= bounds[i, 1], that is, bounds must have shape (n_dims, 2)
:param domain_scale: [float] Scale of the optimization problem. If not provided, the algorithm
tries to guess the scale from any provided rectangular box constraints. Used for
auto-scaling algorithm stepsizes
:param init: [string] Determines how initial bacteria positions are sampled from Ω if
x0 == None, see description of parameter ``x0``. Currently supported: 'random' and
'uniform'
:param stepsize_start: [float] See description of parameter ``stepsize_start`` in
:func:`global_search.run_and_tumble`. If not provided, the algorithm tries to auto-scale
this length to the problem's scale
:param stepsize_decay_fac: [float] Factor by which the run-and-tumble stepsize has decayed in
the last run-and-tumble iteration compared to its initial value
:param base_tumble_rate: [float] See description of parameter ``base_tumble_rate`` in
:func:`global_search.run_and_tumble`
:param niter_rt: [int] Maximum number of run-and-tumble iterations
:param n_bacteria_per_dim: [int] How many bacteria to spawn in each dimension. Note that the
total number of bacteria is
i) n_bacteria = n_bacteria_per_dim ** n_dims if n_dims <= max_dims or
ii) n_bacteria = n_bacteria_per_dim ** (n_reduced_dims + 1) if n_dims > max_dims.
If x0 is provided with shape (n_bacteria, n_dims), n_bacteria should agree with this
relation.
:param stationarity_window: [int] See description of parameter ``stationarity_window`` in
:func:`global_search.run_and_tumble`
:param eps_stat: [float] See description of parameter ``stationarity_window`` in
:func:`global_search.run_and_tumble`
:param attraction: [bool] See description of parameter ``attraction`` in
:func:`global_search.run_and_tumble`
:param attraction_window: [int] See description of parameter ``attraction_window`` in
:func:`global_search.run_and_tumble`
:param attraction_sigma: [float] See description of parameter ``attraction_sigma`` in
:func:`global_search.run_and_tumble`. If not provided, the algorithm tries to auto-scale
this length to the problem's scale
:param attraction_strength: [float] See description of parameter ``attraction_strength`` in
:func:`global_search.run_and_tumble`
:param bounds_reflection: [bool] See description of parameter ``bounds_reflection`` in
:func:`global_search.run_and_tumble`
:param n_best_selection: [int] At the end of the run-and-tumble exploration stage, a local
gradient-based search is performed, starting from the best positions found thus far by
the n_best_selection best bacteria
:param c_gd: [float] See description of parameter ``c`` in :func:`local_search.bfgs_b`
:param a_gd: [float] See description of parameter ``a`` in :func:`local_search.bfgs_b`. If not
provided, the algorithm tries to auto-scale this length to the problem's scale
:param n_linesearch_gd: [int] See description of parameter ``n_linesearch`` in
:func:`local_search.bfgs_b`
:param alpha_linesearch_gd: [float] See description of parameter ``alpha_linesearch`` in
:func:`local_search.bfgs_b`
:param beta_linesearch_gd: [float] See description of parameter ``beta_linesearch`` in
:func:`local_search.bfgs_b`
:param eps_abs_gd: [float] See description of parameter ``eps_abs`` in
:func:`local_search.bfgs_b`
:param eps_rel_gd: [float] See description of parameter ``eps_rel`` in
:func:`local_search.bfgs_b`
:param niter_gd: [int] Maximum number of local, gradient-based search iterations
:param n_embeddings: [int] Number of embedding iterations when using Sequential Random
Embeddings. Only has an effect if n_dims > max_dims
:param max_dims: [int] Maximum dimension of problems to be solved without using Sequential
Random Embeddings
:param n_reduced_dims: [int] Dimension of the embedded problem. Only has an effect if
n_dims > max_dims
:param verbosity: [int] Output verbosity. Must be 0, 1, or 2
:return: Best minimum of f found [scipy.optimize.OptimizeResult]
"""
assert verbosity in [0, 1, 2], 'verbosity must be 0, 1, or 2.'
assert n_reduced_dims >= 2, 'n_reduced_dims must not be less than 2.'
n_reduced_dims_eff = n_reduced_dims + 1
if bounds is None or callable(bounds):
assert x0 is not None, ('If no box constraints are provided for bounds, x0 must not be ' +
'None.')
x0_population = _prepare_x0(x0, n_bacteria_per_dim, max_dims, n_reduced_dims_eff)
n_bacteria, n_dims = x0_population.shape
if bounds is None:
bound_lower, bound_upper = _prepare_bounds(bounds, n_dims)
def projection_callback(x):
x = np.clip(x, bound_lower, bound_upper)
bounds_hit = np.where(((x == bound_lower) | (x == bound_upper)), True, False)
return x, bounds_hit
def projection_callback_population(x):
return projection_callback(x)
else:
def projection_callback(x):
return bounds(x)
def projection_callback_population(x):
out = np.array([projection_callback(x_single) for x_single in x])
return out[:, 0], out[:, 1]
elif isinstance(bounds, (list, np.ndarray)):
if x0 is not None:
x0_population = _prepare_x0(x0, n_bacteria_per_dim, max_dims, n_reduced_dims_eff)
n_bacteria, n_dims = x0_population.shape
bound_lower, bound_upper = _prepare_bounds(bounds, n_dims)
else:
bound_lower, bound_upper = _prepare_bounds(bounds, None)
n_dims = len(bound_lower)
n_bacteria = (n_bacteria_per_dim ** n_dims if n_dims <= max_dims else
n_bacteria_per_dim ** n_reduced_dims_eff)
if init == 'uniform' and n_dims > max_dims:
init = 'random'
if verbosity > 0:
warnings.warn('The option init="uniform" is only available for problems with ' +
'dimensionality less than or equal to max_dims, which was ' +
f'set to {max_dims}. Since the current problem has ' +
f'dimensionality {n_dims}, init was automatically set to ' +
f'"random".')
if init == 'random':
x0_population = np.random.uniform(bound_lower, bound_upper,
size=(n_bacteria, n_dims))
elif init == 'uniform':
init_points = []
for i in range(n_dims):
init_points.append(np.linspace(bound_lower[i], bound_upper[i],
n_bacteria_per_dim))
x0_population = np.array(np.meshgrid(*init_points)).reshape(n_dims, -1).T
else:
raise ValueError('init must either be "random" or "uniform".')
def projection_callback(x):
x = np.clip(x, bound_lower, bound_upper)
bounds_hit = np.where(((x == bound_lower) | (x == bound_upper)), True, False)
return x, bounds_hit
def projection_callback_population(x):
return projection_callback(x)
else:
raise ValueError('bounds must either be None, an array or corresponding nested list of ' +
'shape (n_dims, 2), or a custom callback function. See the docstring ' +
'for details.')
assert niter_rt > stationarity_window, 'niter_rt must be larger than stationarity_window.'
assert n_best_selection <= n_bacteria, 'n_best_selection must not be larger than n_bacteria.'
if stepsize_start is not None:
auto_scale_stepsize = False
else:
auto_scale_stepsize = True
stepsize_start = 1e-1
stepsize_end = stepsize_decay_fac * stepsize_start
if attraction_sigma is not None:
auto_scale_attraction_sigma = False
else:
auto_scale_attraction_sigma = True
attraction_sigma = 1
if a_gd is not None:
auto_scale_a_gd = False
else:
auto_scale_a_gd = True
a_gd = 1e-2
x0_population_orig = x0_population.copy()
x0_population, _ = projection_callback_population(x0_population)
if not np.array_equal(x0_population, x0_population_orig):
warnings.warn('Found initial conditions outside the defined search domain.')
max_scale = None
if domain_scale is not None:
max_scale = domain_scale
elif isinstance(bounds, (list, np.ndarray)):
# noinspection PyUnboundLocalVariable
domain_range = bound_upper - bound_lower
max_scale = np.max(np.where(np.isinf(domain_range), 0, domain_range))
if max_scale is not None and max_scale > 0:
if auto_scale_stepsize:
stepsize_start = stepsize_start * max_scale
stepsize_end = stepsize_end * max_scale
if auto_scale_attraction_sigma:
attraction_sigma = attraction_sigma * max_scale
if auto_scale_a_gd:
a_gd = a_gd * max_scale
if n_dims > max_dims:
if verbosity > 0:
print(f'Using sequential random embeddings in {n_reduced_dims} + 1 dimensions.')
return _sequential_random_embeddings(f,
x0_population,
projection_callback,
n_reduced_dims_eff=n_reduced_dims_eff,
n_embeddings=n_embeddings,
verbosity=verbosity,
domain_scale=max_scale,
init=init,
stepsize_start=stepsize_start,
stepsize_decay_fac=stepsize_decay_fac,
base_tumble_rate=base_tumble_rate,
niter_rt=niter_rt,
n_bacteria_per_dim=n_bacteria_per_dim,
stationarity_window=stationarity_window,
eps_stat=eps_stat,
attraction=attraction,
attraction_window=attraction_window,
attraction_sigma=attraction_sigma,
attraction_strength=attraction_strength,
bounds_reflection=bounds_reflection,
n_best_selection=n_best_selection,
c_gd=c_gd,
a_gd=a_gd,
n_linesearch_gd=n_linesearch_gd,
alpha_linesearch_gd=alpha_linesearch_gd,
beta_linesearch_gd=beta_linesearch_gd,
eps_abs_gd=eps_abs_gd,
eps_rel_gd=eps_rel_gd,
niter_gd=niter_gd,
max_dims=n_reduced_dims_eff)
else:
x_best, f_best, nfev, nit, trace = run_and_tumble(f,
x0_population,
projection_callback_population,
niter_rt,
stepsize_start,
stepsize_end,
base_tumble_rate=base_tumble_rate,
stationarity_window=stationarity_window,
eps_stat=eps_stat,
attraction=attraction,
attraction_window=attraction_window,
attraction_sigma=attraction_sigma,
attraction_strength=attraction_strength,
bounds_reflection=bounds_reflection,
verbosity=verbosity)
if verbosity == 2:
print('===============================================================================')
if verbosity > 0:
print(f'Best result after run-and-tumble stage is x = {x_best[np.argmin(f_best)]}, ' +
f'f(x) = {np.min(f_best)}. Starting local, gradient-based optimization for the ' +
f'{n_best_selection} best bacteria.')
sortIdx = f_best.argsort()
x_best_selection = x_best[sortIdx[:n_best_selection]]
x_best_gd = np.empty(x_best_selection.shape)
f_min_gd = np.empty(n_best_selection)
nfev_gd = 0
nit_gd = 0
success_gd = np.empty(n_best_selection)
trace_gd = np.empty((niter_gd, n_bacteria, n_dims))
trace_gd[:, sortIdx[n_best_selection:], :] = trace[-1, sortIdx[n_best_selection:], :]
nit_gd_arr = np.empty(n_best_selection)
visited_points = trace.reshape(-1, n_dims)
for n, x_start in enumerate(x_best_selection):
if verbosity == 2:
print(f'Performing gradient descent for bacterium {n}.')
# Calculate quadratic function approximation around x_start
num_sampling_points = 2 * int(special.binom(n_dims + 2, 2))
# noinspection PyArgumentList,PyUnresolvedReferences
sampling_points = visited_points[
spatial.cKDTree(visited_points).query(x_start, num_sampling_points)[1]
]
func_values = np.array([f(point) for point in sampling_points])
nfev += num_sampling_points
polynomial_powers = list(itertools.filterfalse(lambda prod: sum(list(prod)) > 2,
itertools.product((0, 1, 2),
repeat=n_dims)))
sampling_matrix = np.stack([np.prod(sampling_points ** d, axis=1)
for d in polynomial_powers], axis=-1)
coeffs = np.linalg.lstsq(sampling_matrix, func_values, 2)[0]
# Calculate Hessian matrix from the quadratic approximation
H = np.ones((n_dims, n_dims))
square_powers = list(itertools.filterfalse(
lambda zipped_item: sum(list(zipped_item[0])) != 2, zip(polynomial_powers, coeffs))
)
for square_power, coeff in square_powers:
idcs_to_consider = np.argwhere(np.array(square_power) != 0)
if len(idcs_to_consider) == 1: # Diagonal
H[idcs_to_consider[0], idcs_to_consider[0]] = 0.5 * coeff
elif len(idcs_to_consider) == 2: # Mixed derivatives
H[idcs_to_consider[0], idcs_to_consider[1]] = coeff
H[idcs_to_consider[1], idcs_to_consider[0]] = coeff
else:
raise RuntimeError("Polynomial function approximation seems to be of higher "
"order than two. This shouldn't happen.")
local_optimization_result = bfgs_b(f,
x_start,
projection_callback,
H_start=H,
a=a_gd,
c=c_gd,
niter=niter_gd,
n_linesearch=n_linesearch_gd,
alpha_linesearch=alpha_linesearch_gd,
beta_linesearch=beta_linesearch_gd,
eps_abs=eps_abs_gd,
eps_rel=eps_rel_gd,
verbosity=verbosity)
x_best_gd[n] = local_optimization_result.x
f_min_gd[n] = local_optimization_result.f
nfev_gd += local_optimization_result.nfev
nit_gd += local_optimization_result.nit
nit_gd_arr[n] = local_optimization_result.nit
success_gd[n] = local_optimization_result.success
trace_gd[:, sortIdx[n], :] = _pad_trace(local_optimization_result.trace, niter_gd)
result = OptimizeResult()
result.success = success_gd.any()
result.x = x_best_gd[np.argmin(f_min_gd)]
result.fun = np.min(f_min_gd)
result.nfev = nfev + nfev_gd
result.nit = nit + nit_gd
trace_gd = trace_gd[:np.max(nit_gd_arr).astype(int)]
result.trace = np.concatenate((trace, trace_gd))
return result | /rt_opt-0.1.1.tar.gz/rt_opt-0.1.1/rt_opt/optimizer.py | 0.83363 | 0.722851 | optimizer.py | pypi |
import numpy as np
from scipy.stats import stats
def run_and_tumble(f, x0_population, projection_callback, niter, stepsize_start, stepsize_end,
                   base_tumble_rate=0.1, stationarity_window=20, eps_stat=1e-3, attraction=False,
                   attraction_window=10, attraction_sigma=1, attraction_strength=0.5,
                   bounds_reflection=False, verbosity=1):
    """
    Implementation of a bacterial run-and-tumble optimizer algorithm, motivated by the chemotactic
    behavior of E.Coli. The motion of E.Coli consists of directed, ballistic "runs", interrupted by
    sudden random re-orientations, so-called "tumbles", that appear at some given rate. If a
    bacterium detects to swim toward higher concentrations of an attractant (i.e., if the attractant
    concentration increases during a run), its tumbling rate is lowered, thus inducing an effective
    movement toward the attractant's source.
    Here, we implement a simplified E.Coli chemotaxis model, where the attractant concentration is
    the negative of a given objective function.
    :param f: [callable] Objective function. Must accept its argument x as numpy array
    :param x0_population: [np.array] Initial condition for the bacteria population.
        x0_population.shape[0] defines the number of bacteria and x0_population.shape[1] the
        problem dimensionality
    :param projection_callback: [callable] Bounds projection, see description of parameter
        ``projection_callback`` in :func:`local_search.bfgs_b`
    :param niter: [int] Maximum number of run-and-tumble steps
    :param stepsize_start: [float] Defines the initial length of a "run" step
    :param stepsize_end: [float] Defines the final length of a "run" step at the last iteration.
        The actual stepsize decreases quadratically from stepsize_start to stepsize_end
    :param base_tumble_rate: [float] "Undisturbed" tumble rate when a bacterium does not feel any
        change in attractant concentration
    :param stationarity_window: [int] If the mean position of all bacteria has had a relative change
        less than eps_stat over a step window stationarity_window, the bacteria distribution is
        considered to be stationary and the algorithm stops
    :param eps_stat: [float] See description of parameter ``stationarity_window``
    :param attraction: [bool] Whether the bacteria attract each other or not. We model bacteria
        attraction the following way: Each bacterium is supposed to leave some kind of magic
        attractant at the places it has visited thus far, that attracts all other bacteria
    :param attraction_window: [int] Defines the number of recent positions in a bacterium's trace
        that contributes to the attraction mechanism. We have to define this cut-off length,
        since otherwise calculating the bacteria attractions becomes computationally very
        expensive. This parameter only has an effect if attraction == True
    :param attraction_sigma: [float] The bacterial attractant concentration is modeled to decay
        according to a Gaussian distribution,
        -----
        attraction_strength / 2 / attraction_sigma ** 2
            * exp(-(np.square(x - x_vis) / 2 / attraction_sigma ** 2),
        -----
        around each point x_vis visited thus far. This parameter only has an effect if
        attraction == True
    :param attraction_strength: [float] See description of parameter ``attraction_sigma``. Note that
        if attraction_strength < 0, the bacterial attraction turns into a repulsion. This
        parameter only has an effect if attraction == True
    :param bounds_reflection: [bool] Whether bacteria reverse their direction when hitting a
        boundary (True) or tumble randomly (False)
    :param verbosity: [int] Output verbosity. Must be 0, 1, or 2
    :return: (x_best, f_best, nfev, nit, trace), where
        - x_best [np.array] is the best x found so far,
        - f_best [float] is the corresponding objective function value,
        - nfev [int] is the number of objective function evaluations taken,
        - nit [int] is the number of run-and-tumble iterations, and
        - trace [np.array] is the bacteria population trace, i.e., contains all visited points
            of x for each bacterium
    """
    assert verbosity in [0, 1, 2], 'verbosity must be 0, 1, or 2.'
    n_bacteria = x0_population.shape[0]
    n_dims = x0_population.shape[1]
    # Per-bacterium state: current position, best-so-far position, previous position,
    # and the corresponding objective function values.
    x = x0_population.copy()
    x_best = x0_population.copy()
    x_old = x0_population.copy()
    f_old = np.array([f(val) for val in x_old])
    f_best = f_old.copy()
    # NOTE(review): f_max is tracked below but never used or returned.
    f_max = f_old.copy()
    nfev = n_bacteria
    # trace has niter + 1 rows: row 0 holds the initial population, row n + 1 step n.
    trace = np.empty((niter + 1, n_bacteria, n_dims))
    trace[0] = x0_population.copy()
    # Running sum of all positions visited by all bacteria (for the stationarity test).
    x_sum = x0_population.sum(axis=0)
    x_mean_history = []
    # Initial random bacteria orientations
    v = np.empty(x0_population.shape)
    for m in range(n_bacteria):
        # Draw a random non-zero direction and normalize it to unit length.
        v_m = np.random.uniform(-1, 1, n_dims)
        while not v_m.any():
            v_m = np.random.uniform(-1, 1, n_dims)
        v_m = v_m / np.sqrt(np.sum(v_m ** 2))
        v[m] = v_m
    for n in range(niter):
        # Quadratic stepsize schedule from stepsize_start down to stepsize_end.
        alpha = stepsize_start + (stepsize_end - stepsize_start) * (n ** 2) / (niter ** 2)
        if attraction:
            # Calculate attraction between the bacteria traces
            kernel = (x[:, None, None, :] -
                      trace[None, (n + 1 - min(n, attraction_window)):(n + 1), :, :])
            grad_attractant = (
                    attraction_strength / 2 / (attraction_sigma ** 2) * kernel *
                    np.exp(-(np.square(kernel) / 2 /
                             (attraction_sigma ** 2)).sum(axis=3))[:, :, :, None]
            ).sum(axis=(1, 2))
        else:
            grad_attractant = np.zeros(x.shape)
        # Run
        x = x_old + (v - grad_attractant) * alpha
        x, bounds_hit = projection_callback(x)
        f_new = np.array([f(val) for val in x])
        nfev += n_bacteria
        trace[n + 1] = x.copy()
        # Tumble
        # We add a small constant (1e-9) to the denominator below, in order to account for the fact
        # that a bacterium may be stuck at the boundary, in which case x[i, :] = x_old[i, :]
        delta_f = (f_new - f_old) / (np.sqrt(np.sum((x - x_old) ** 2, axis=1)) + 1e-9)
        # Avoid exp over/underflow
        delta_f = np.maximum(np.minimum(delta_f, 100), -100)
        # Higher tumble rate when moving toward larger f (worse attractant concentration).
        tumble_rate = base_tumble_rate * np.exp(delta_f)
        # Calculate new orientation
        for m, tr in enumerate(tumble_rate):
            if bounds_reflection and bounds_hit[m].any():
                # Reflection at boundaries
                v[m] = -v[m]
            elif bounds_hit[m].any() or np.random.uniform() > 1 - tr:
                # Realistically, tr must be clipped to [0, 1]. However, the inequality above is not
                # influenced by this clipping and we thus omit it in order to save computation time
                v_m = np.random.uniform(-1, 1, n_dims)
                while not v_m.any():
                    v_m = np.random.uniform(-1, 1, n_dims)
                v_m = v_m / np.sqrt(np.sum(v_m ** 2))
                v[m] = v_m
        # Remember best results
        x_best = np.where((f_new < f_best)[:, None], x, x_best)
        f_best = np.minimum(f_new, f_best)
        f_max = np.maximum(f_new, f_max)
        x_old = x.copy()
        f_old = f_new.copy()
        if verbosity == 2:
            for m in range(n_bacteria):
                print('Run-and-tumble step {}, bacterium {}:\tx = {}, f(x) = {}'
                      .format(n + 1, m, x[m], f_new[m]))
        # Calculate mean position of the bacteria
        x_sum = x_sum + x.sum(axis=0)
        x_mean = x_sum / n_bacteria / (n + 1)
        x_mean_history.append(x_mean)
        if (n + 1) % stationarity_window == 0:
            # If the mean position has had a relative change less than eps_stat over a step window
            # stationarity_window, we consider the bacteria distribution as stationary
            # (tested via a linear fit over the window: good fit + near-zero relative slope).
            window = np.array(x_mean_history[-stationarity_window:]).sum(axis=1)
            slope, intercept, rValue, _pValue, _stdErr = stats.linregress(
                np.linspace(0, 1, len(window)), window
            )
            if rValue ** 2 > 0.9 and abs(slope / intercept) < eps_stat:
                nit = n + 1
                if verbosity > 0:
                    print(f'Run-and-tumble stage: Stationary state detected after {nit} steps.')
                break
    else:
        # NOTE(review): the message reports niter + 1 iterations, but the loop above runs
        # at most niter times -- confirm the intended count.
        if verbosity > 0:
            print('Run-and-tumble stage: No stationary state could be detected after ' +
                  f'{niter + 1} iterations. Please try increasing niter or the stationarity ' +
                  f'detection threshold eps_stat.')
        nit = niter + 1
    trace = trace[:(nit + 1)]
    return x_best, f_best, nfev, nit, trace
from types import SimpleNamespace
import numpy as np
def bfgs_b(f, x0, projection_callback, H_start=None, a=1, c=1e-6, niter=100, n_linesearch=20,
           alpha_linesearch=0.5, beta_linesearch=0.5, eps_abs=1e-9, eps_rel=1e-6, verbosity=1):
    """
    Implementation of the BFGS algorithm for arbitrarily bounded search regions. An estimate of the
    optimal step size for each iteration is found using a two-way-backtracking line search
    algorithm.
    :param f: [callable] Objective function. Must accept its argument x as numpy array
    :param x0: [np.array] Initial condition
    :param projection_callback: [callable] Bounds projection. The function bounds(x) must return a
        tuple (x_projected, bounds_hit), where x_projected is the input variable x projected to
        the defined search region. That is, if x is within this region, it is returned unchanged,
        whereas if it is outside this region, it is projected to the region's boundaries. The
        second output, bounds_hit, indicates whether the boundary has been hit for each component
        of x. If, for example, x is three-dimensional and has hit the search region's boundaries
        in x_1 and x_2, but not in x_3, bounds_hit = [True, True, False]. Note that the search
        domain needs not necessarily be rectangular. Therefore, we define a "boundary hit" in any
        component of x in the following way:
        bounds_hit[i] = True iff either x + δê_i or x - δê_i is outside the defined search
        domain ∀ δ ∈ ℝ⁺, where ê_i is the i-th unit vector
    :param H_start: [np.array] Initial Hessian at x0
    :param a: [float] Initial line search step size
    :param c: [float] Numerical differentiation step size
    :param niter: [int] Maximum number of BFGS iterations
    :param n_linesearch: [int] Maximum number of linesearch steps in each iteration
    :param alpha_linesearch: [float] Line search control parameter alpha, see description in
        :func:`two_way_linesearch`. Must be in between 0 and 1
    :param beta_linesearch: [float] Line search control parameter beta, see description in
        :func:`two_way_linesearch`. Must be in between 0 and 1
    :param eps_abs: [float] Absolute tolerance
    :param eps_rel: [float] Relative tolerance
    :param verbosity: [int] Output verbosity. Must be 0, 1, or 2
    :return: (x_best, f_best, nfev, nit, success, trace), where
        - x_best [np.array] is the best x found so far,
        - f_best [float] is the corresponding objective function value,
        - nfev [int] is the number of objective function evaluations taken,
        - nit [int] is the number of BFGS iterations,
        - success [bool] indicates whether the BGFS algorithm finished successfully, i.e,
            whether absolute and relative tolerances were met, and
        - trace [np.array] is the optimizer trace, i.e., contains all visited points of x
    """
    assert verbosity in [0, 1, 2], 'verbosity must be 0, 1, or 2.'
    def calculate_gradient():
        # Central-differences gradient of f at the current x, using step size c.
        # Costs 2 * n_dims objective function evaluations per call.
        gradient = np.zeros(n_dims)
        for m in range(n_dims):
            unit_vec = np.zeros(n_dims)
            unit_vec[m] = 1
            f_minus = f(x - c * unit_vec)
            f_plus = f(x + c * unit_vec)
            gradient[m] = (f_plus - f_minus) / 2 / c
        return gradient
    n_dims = len(x0)
    trace = np.empty((niter, n_dims))
    # Fall back to the identity if no (or a numerically negligible) initial Hessian is given.
    if H_start is None or np.max(np.abs(H_start)) < 1e-3:
        B = np.identity(n_dims)
    else:
        assert (len(H_start.shape) == 2 and
                H_start.shape[0] == n_dims and
                H_start.shape[1] == n_dims), 'H_start has wrong format.'
        try:
            # B is the INVERSE Hessian approximation maintained by BFGS.
            B = np.linalg.inv(H_start)
        except np.linalg.LinAlgError:
            B = np.identity(n_dims)
    nfev = 0
    x = x0.copy()
    x, _ = projection_callback(x)
    x_best = x.copy()
    grad = calculate_gradient()
    nfev += 2 * n_dims
    f_curr = f_best = f(x)
    nfev += 1
    # Initial projected-gradient norm; used as the reference scale for the relative tolerance.
    acc0 = np.linalg.norm(x - projection_callback(x - grad)[0])
    for k in range(niter):
        _, bounds_hit = projection_callback(x)
        # Reset the inverse-Hessian rows of components stuck at the boundary.
        B[bounds_hit] = np.identity(n_dims)[bounds_hit]
        # Calculate search direction
        d = -B.dot(grad)
        # Calculate optimal step size and update x
        a_old = a
        ls_result = two_way_linesearch(f, x, grad, d, a, n_linesearch, f_curr, projection_callback,
                                       alpha_linesearch, beta_linesearch)
        f_curr = ls_result.f
        x = ls_result.x
        a = ls_result.a
        nfev += ls_result.nfev
        # NOTE(review): messages below say 'BGFS' -- presumably a typo for 'BFGS'.
        if not ls_result.success and verbosity == 2:
            print(f"BGFS step {k + 1}: Couldn't find sufficiently good step size during " +
                  f"{n_linesearch} line search steps.")
        if f_curr < f_best:
            f_best = f_curr
            x_best = x.copy()
        else:
            # No improvement: slightly shrink the initial stepsize for the next line search.
            a = a_old * 0.95
        # Update inverse Hessian approximation
        s = a * d
        # Ignore displacement/gradient-change components stuck at the boundary.
        s[bounds_hit] = 0
        grad_new = calculate_gradient()
        nfev += 2 * n_dims
        y = grad_new - grad
        y[bounds_hit] = 0
        # Curvature condition y·s > 0 must hold for the BFGS update to stay positive definite;
        # otherwise restart from the identity.
        if np.dot(y, s) <= 0:
            B = np.identity(n_dims)
        else:
            B = (
                    (np.identity(n_dims) - np.outer(s, y) / np.dot(y, s))
                    .dot(B)
                    .dot(np.identity(n_dims) - np.outer(y, s) / np.dot(y, s)) +
                    np.outer(s, s) / np.dot(y, s)
            )
        grad = grad_new
        trace[k] = x.copy()
        if verbosity == 2:
            print('BGFS step {}:\tx = {}, f(x) = {}'.format(k + 1, x, f_curr))
        # Projected-gradient norm as convergence measure.
        acc = np.linalg.norm(x - projection_callback(x - grad)[0])
        if acc <= eps_abs + eps_rel * acc0:
            nit = k + 1
            success = True
            if verbosity > 0:
                print(f'BGFS target accuracy reached after {nit} steps.')
            break
    else:
        # NOTE(review): nit is reported as niter + 1, but the loop runs at most niter times.
        if verbosity > 0:
            print(f'Could not reach desired BGFS accuracy after {niter + 1} iterations. Please ' +
                  'try increasing the number of iterations or the tolerance.')
        nit = niter + 1
        success = False
    trace = trace[:nit]
    res = {'x': x_best, 'f': f_best, 'nfev': nfev, 'nit': nit, 'success': success, 'trace': trace}
    return SimpleNamespace(**res)
def two_way_linesearch(f, x, grad, d, a, niter, f_old, projection_callback, alpha, beta):
    """
    Two-way-backtracking line search with projection onto a bounded search domain.

    Starting from stepsize ``a``, the search first probes P(x + a * d). If the Armijo
    condition

        f(P(x + a * d)) <= f(x) - alpha * grad . (x - P(x + a * d)),

    where P projects onto the bounded domain, is violated, the stepsize is repeatedly
    shrunk by the factor ``beta`` until the condition holds (classical backtracking).
    If the condition already holds for the initial stepsize, the stepsize is instead
    repeatedly grown by ``1 / beta`` for as long as the condition keeps holding, and the
    last accepted candidate is returned. The unconstrained variant of this scheme is
    described in

    T. T. Truong, T. H. Nguyen, Backtracking gradient descent method for general C1
    functions, with applications to Deep Learning, arXiv:1808.05160 (2018).

    :param f: [callable] Objective function. Must accept its argument x as numpy array
    :param x: [np.array] Current (starting) position
    :param grad: [np.array] Gradient of f at the starting position x
    :param d: [np.array] Search direction
    :param a: [float] Initial stepsize
    :param niter: [int] Maximum number of line search iterations
    :param f_old: [float] Objective function value at the starting position, f(x)
    :param projection_callback: [callable] Bounds projection, see description of parameter
        ``projection_callback`` in :func:`bfgs_b`
    :param alpha: [float] Armijo control parameter. Must be in between 0 and 1
    :param beta: [float] Stepsize shrink/grow factor. Must be in between 0 and 1
    :return: Namespace object with the following attributes:
        - success: [bool] Whether a stepsize fulfilling the Armijo condition was found
        - x: [np.array] New position after the step
        - f: [float] Objective function value at the new position
        - a: [float] (Sub)optimal stepsize found by the algorithm
        - nfev: [int] Number of objective function calls
    """
    start = x.copy()
    n_calls = 0

    def probe(stepsize):
        # Project a trial step onto the domain, evaluate f there, and compute the
        # Armijo acceptance threshold for that (projected) trial point.
        x_trial, _ = projection_callback(start + stepsize * d)
        f_trial = f(x_trial)
        threshold = f_old - alpha * grad.dot(start - x_trial)
        return x_trial, f_trial, threshold

    x_cur, f_cur, f_thresh = probe(a)
    n_calls += 1

    if f_cur >= f_thresh:
        # Initial step was too large: backtrack until the Armijo condition is met.
        for _ in range(niter):
            a = a * beta
            x_cur, f_cur, f_thresh = probe(a)
            n_calls += 1
            if f_cur < f_thresh:
                return SimpleNamespace(success=True, f=f_cur, x=x_cur, a=a, nfev=n_calls)
        return SimpleNamespace(success=False, f=f_cur, x=x_cur, a=a, nfev=n_calls)

    # Condition already satisfied: grow the stepsize as long as it keeps holding and
    # return the last candidate that still satisfied it.
    for _ in range(niter):
        accepted = (f_cur, x_cur.copy(), a)
        a = a / beta
        x_cur, f_cur, f_thresh = probe(a)
        n_calls += 1
        if f_cur > f_thresh:
            f_acc, x_acc, a_acc = accepted
            return SimpleNamespace(success=True, f=f_acc, x=x_acc, a=a_acc, nfev=n_calls)
    return SimpleNamespace(success=False, f=f_cur, x=x_cur, a=a, nfev=n_calls)
def adam_spsa(f, x0, projection_callback, c=1e-9, a=0.1, gamma=0.101, alpha=0.602, A_fac=0.05,
              beta_1=0.9, beta_2=0.9, eps=1e-15, niter=1000, verbosity=1):
    """
    Implementation of a Simultaneous Perturbation Stochastic Approximation (SPSA) gradient descent
    algorithm, see
    +++++
    J. C. Spall, An Overview of the Simultaneous Perturbation Method for Efficient Optimization,
    Johns Hopkins APL Technical Digest 19 (1998).
    +++++
    coupled with an Adaptive Moment Estimation (Adam), see
    +++++
    D. P. Kingma, J. Ba, Adam: A Method for Stochastic Optimization, arXiv:1412.6980 (2014).
    +++++
    In addition, here we allow to constrain the search region to a rectangular box. Please note that
    this SPSA implementation was not designed to deal with noisy objective functions, but rather to
    speed up high-dimensional local optimization with expensive cost functions (in n dimensions,
    a standard central differences gradient approximation takes 2n objective function calls, whereas
    the SPSA gradient approximation only takes 2, independent of the problem's dimensionality).
    :param f: [callable] Objective function. Must accept its argument x as numpy array
    :param x0: [np.array] Initial condition
    :param projection_callback: [callable] Bounds projection, see description of parameter
        ``projection_callback`` in :func:`bfgs_b`
    :param c: [float] Initial step size for estimating the gradient approximation
    :param a: [float] Initial "gradient descent" step size
    :param gamma: [float] SPSA gamma determining the decay of the step size for estimating the
        gradient approximation over time. Must be > 0. The larger gamma, the faster the decay
    :param alpha: [float] SPSA gamma determining the decay of the "gradient descent" step size over
        time. Must be > 0. The larger alpha, the faster the decay
    :param A_fac: [float] Offset factor for calculating the SPSA "gradient descent" step size decay.
        Must be > 0. The larger A_fac, the smaller the step size
    :param beta_1: [float] Adam "forgetting factor" for the previous gradient approximations. Must
        be in between 0 and 1
    :param beta_2: [float] Adam "forgetting factor" for the squares of the previous gradient
        approximations. Must be in between 0 and 1
    :param eps: [float] Absolute tolerance
    :param niter: [int] Maximum number of iterations
    :param verbosity: [int] Output verbosity. Must be 0, 1, or 2
    :return: (x_best, f_best, nfev, nit, success, trace), where
        - x_best [np.array] is the best x found so far,
        - f_best [float] is the corresponding objective function value,
        - nfev [int] is the number of objective function evaluations taken,
        - nit [int] is the number of iterations,
        - success [bool] indicates whether the algorithm finished successfully, i.e, whether
            absolute tolerances were met, and
        - trace [np.array] is the optimizer trace, i.e., contains all visited points of x
    """
    assert verbosity in [0, 1, 2], 'verbosity must be 0, 1, or 2.'
    assert np.array_equal(projection_callback(x0)[0], x0), ('x0 outside the bounded domain ' +
                                                            'defined by projection_callback.')
    n_dims = len(x0)
    # Stability offset A of the standard SPSA step size schedule a_k = a / (k + 1 + A)^alpha.
    A = A_fac * niter
    # Adam first/second moment accumulators; the scalar 0 broadcasts to arrays on first update.
    m = v = 0
    trace = np.empty((niter, n_dims))
    f0 = f(x0)
    nfev = 1
    f_best = f0
    x_best = x0.copy()
    x = x0.copy()
    for k in range(niter):
        # Decaying step sizes for the descent step (ak) and the gradient probe (ck).
        ak = a / (k + 1 + A) ** alpha
        ck = c / (k + 1) ** gamma
        # Choose stochastic perturbations for calculating the gradient approximation
        # (Rademacher-distributed, i.e., each component is +1 or -1 with equal probability).
        delta = 2 * np.round(np.random.uniform(0, 1, n_dims)) - 1
        # Boundary hit
        _, bounds_hit = projection_callback(x)
        if bounds_hit.any():
            f_minus = f(x - ck * delta)
            f_plus = f(x + ck * delta)
            nfev += 2
            ghat_test = (f_plus - f_minus) / (2 * ck * delta)
            # Check whether following the objective function's gradient would lead to leaving
            # the bounded domain
            bounds_hit_new = projection_callback(x - ak * ghat_test)[1]
            bounds_stuck = np.logical_and(bounds_hit, bounds_hit_new)
            # "Projected" stochastic perturbations vector, with perturbations only parallel to the
            # boundary
            delta = np.where(bounds_stuck, 0, delta)
        # Calculate SPSA gradient approximation
        f_minus = f(x - ck * delta)
        f_plus = f(x + ck * delta)
        nfev += 2
        # np.inf in the denominator yields a zero gradient component for "stuck" dimensions
        # (where delta was zeroed out above).
        ghat = (f_plus - f_minus) / (2 * ck * np.where(delta == 0, np.inf, delta))
        # Adam algorithm, with the true gradient replaced by the SPSA gradient approximation
        m = beta_1 * m + (1 - beta_1) * ghat
        v = beta_2 * v + (1 - beta_2) * np.power(ghat, 2)
        m_hat = m / (1 - np.power(beta_1, k + 1))
        v_hat = v / (1 - np.power(beta_2, k + 1))
        x = x - ak * m_hat / (np.sqrt(v_hat) + 1e-9)
        # Clip x to bounded region
        x, _ = projection_callback(x)
        f_new = f(x)
        nfev += 1
        if f_new <= f_best:
            f_best = f_new
            x_best = x.copy()
            # Improvement: grow the base step size for faster progress.
            a *= 1.5
        else:
            # No improvement: restart from the best point and shrink the base step size.
            x = x_best.copy()
            a /= 1.5
        trace[k] = x.copy()
        if verbosity == 2:
            print('SPSA step {}:\tx = {}, ghat = {}'.format(k + 1, x, ghat))
        # Converged when the last symmetric probe pair differs by less than eps.
        if abs(f_plus - f_minus) < eps:
            nit = k + 1
            success = True
            if verbosity > 0:
                print(f'SPSA Gradient descent target accuracy reached after {nit} steps.')
            break
    else:
        # NOTE(review): nit is reported as niter + 1, but the loop runs at most niter times.
        if verbosity > 0:
            print(f'Could not reach desired SPSA gradient descent accuracy after {niter + 1} ' +
                  'iterations. Please try increasing the number of iterations or the tolerance.')
        nit = niter + 1
        success = False
    trace = trace[:nit]
    res = {'x': x_best, 'f': f_best, 'nfev': nfev, 'nit': nit, 'success': success, 'trace': trace}
    return SimpleNamespace(**res)
import numpy as np
def __rpa_tolerance_function_cents(cent_true, cent_pred, cent_tolerance):
    """Return True iff the prediction deviates from the truth by at most cent_tolerance cents."""
    deviation = abs(cent_true - cent_pred)
    return deviation <= cent_tolerance
def __rpa_tolerance_function_relative(cent_true, cent_pred, tolerance):
    """Return True iff |true - pred| is within tolerance * true (truth-relative tolerance).

    NOTE(review): the allowed deviation scales with cent_true, so a non-positive reference
    value makes the check always fail -- presumably inputs are positive Hz values; verify.
    """
    allowed_deviation = cent_true * tolerance
    return abs(cent_true - cent_pred) <= allowed_deviation
def __raw_pitch_accuracy(cents_true, cents_pred, tolerance_function, tolerance):
    """
    Percentage of frames whose predicted pitch lies within the given tolerance of the truth.

    :param cents_true: sequence of reference pitch values
    :param cents_pred: sequence of predicted pitch values; must be at least as long as cents_true
    :param tolerance_function: callable(true, pred, tolerance) -> bool deciding whether a
        single frame counts as a hit
    :param tolerance: tolerance value forwarded to tolerance_function
    :return: hit percentage in [0, 100]; 0 if no frame is within tolerance or input is empty
    """
    n_frames = len(cents_true)
    hits = sum(
        1 for i in range(n_frames)
        if tolerance_function(cents_true[i], cents_pred[i], tolerance)
    )
    # Guard against division by zero for empty input; also preserves the original behavior of
    # returning (integer) 0 when there are no hits.
    if hits == 0:
        return 0
    return hits / n_frames * 100
def raw_pitch_accuracy_cent(cents_true, cents_pred, cent_tolerance=50):
    """Raw pitch accuracy (%) using an absolute tolerance in cents (default 50 = half a semitone)."""
    return __raw_pitch_accuracy(cents_true, cents_pred, __rpa_tolerance_function_cents, cent_tolerance)
def raw_pitch_accuracy_hz(hz_true, hz_pred, relative_tolerance=0.02):
    """Raw pitch accuracy (%) using a tolerance relative to the reference value (default 2%)."""
    return __raw_pitch_accuracy(hz_true, hz_pred, __rpa_tolerance_function_relative, relative_tolerance)
def mean_absolute_error(truth, prediction):
    """Mean absolute error between two equally-shaped numeric arrays."""
    return np.mean(np.abs(truth - prediction))
def get_hz_metrics(hz_true, hz_pred, rpa_relative_tolerance=0.02, print_output=False):
    """
    Compute a set of error statistics between reference and predicted pitch values in Hz.

    :param hz_true: np.ndarray of reference pitch values [Hz]
    :param hz_pred: np.ndarray of predicted pitch values [Hz], same shape as hz_true
    :param rpa_relative_tolerance: relative tolerance used for the raw pitch accuracy
    :param print_output: if True, pretty-print all metrics to stdout
    :return: dict with min/max absolute error, signed mean/median error, MAE, standard
        deviation, 5%/95% signed-error quantiles and raw pitch accuracy
    """
    # Signed error; min/max/MAE use its absolute value, the remaining statistics keep the sign.
    diff_hz = hz_true - hz_pred
    min_error_hz = np.min(np.abs(diff_hz))
    max_error_hz = np.max(np.abs(diff_hz))
    mean_hz = np.mean(diff_hz)
    median_hz = np.median(diff_hz)
    mae_hz = mean_absolute_error(hz_true, hz_pred)
    std_dev_hz = np.std(diff_hz)
    quantile_05 = np.quantile(diff_hz, 0.05)
    quantile_95 = np.quantile(diff_hz, 0.95)
    rpa_hz = raw_pitch_accuracy_hz(hz_true, hz_pred, rpa_relative_tolerance)
    if print_output:
        # (A handful of unused local variables previously defined here were removed.)
        print(__format_prop_for_print("Min abs err [Hz]", min_error_hz))
        print(__format_prop_for_print("Max abs err [Hz]", max_error_hz))
        print(__format_prop_for_print("Mean err [Hz]", mean_hz))
        print(__format_prop_for_print("Median [Hz]", median_hz))
        print(__format_prop_for_print("MAE [Hz]", mae_hz))
        print(__format_prop_for_print("StdDev [Hz]", std_dev_hz))
        print(__format_prop_for_print("5% quant err [Hz]", quantile_05))
        print(__format_prop_for_print("95% quant err [Hz]", quantile_95))
        print(__format_prop_for_print("RPA [Hz]", rpa_hz))
    return {
        "min_error_hz": min_error_hz,
        "max_error_hz": max_error_hz,
        "mean_error_hz": mean_hz,
        "median_error_hz": median_hz,
        "mae_hz": mae_hz,
        "std_dev_hz": std_dev_hz,
        "quantile_05": quantile_05,
        "quantile_95": quantile_95,
        "rpa_hz": rpa_hz,
    }
def __format_prop_for_print(name, value, filler="_", decimal_places=2, length_name=20, length_value=8):
    """Format a metric line like 'Name ________ value' with filler-padded, aligned columns."""
    left = (name + ' ').ljust(length_name, filler)
    right = (' ' + str(round(value, decimal_places))).rjust(length_value, filler)
    return f"{left}{right}"
import numpy as np
import mir_eval
from scipy.signal import argrelextrema
def convert_cent_to_hz(cent, f_ref=10.0):
    """Convert a pitch value in cents (relative to the reference frequency f_ref) to Hz."""
    octaves_above_ref = cent / 1200.0
    return f_ref * 2 ** octaves_above_ref
def convert_hz_to_cent(hertz, f_ref=10.0):
    """Convert pitch values in Hz to cents relative to f_ref (delegates to mir_eval.melody.hz2cents)."""
    return mir_eval.melody.hz2cents(hertz, f_ref)
def convert_semitone_to_hz(semi, f_ref=10.0):
    """Convert semitones (relative to f_ref) to Hz; one semitone is 100 cents."""
    cents = 100 * semi
    return f_ref * 2 ** (cents / 1200.0)
def convert_bin_to_weighted_average_cents(label, octaves=6, cents_per_bin=20, hz_lower_bound=32.7):
    """Convert classifier bin activations to a weighted-average pitch in cents.

    Args:
        label: 1-D (bins,) activation vector or 2-D (frames, bins) matrix.
        octaves: pitch range covered by the classifier, in octaves.
        cents_per_bin: resolution of one classifier bin.
        hz_lower_bound: frequency of the lowest bin [Hz].

    Returns:
        Activation-weighted average pitch in cents (scalar for 1-D input,
        per-frame array for 2-D input).

    Raises:
        ValueError: if label is neither 1-D nor 2-D. (Was a bare Exception;
        ValueError is a subclass, so existing handlers still match.)
    """
    classifier_lowest_cent = convert_hz_to_cent(np.array([hz_lower_bound]))[0]
    classifier_total_bins = int((1200 / cents_per_bin) * octaves)
    # Center of each bin, in cents, offset so bin 0 sits at the lower bound.
    classifier_cents = np.linspace(
        0, (classifier_total_bins - 1) * cents_per_bin, classifier_total_bins) + classifier_lowest_cent
    if label.ndim == 1:
        product_sum = np.sum(label * classifier_cents)
        weight_sum = np.sum(label)
        return product_sum / weight_sum
    if label.ndim == 2:
        product_sum = np.dot(label, classifier_cents)
        weight_sum = np.sum(label, axis=1)
        return product_sum / weight_sum
    raise ValueError("Label should be either 1d or 2d ndarray.")
def convert_bin_to_local_average_cents(salience, center=None):
    """Find the weighted average cents near the argmax (or a given center) bin.

    Args:
        salience: 1-D (360,) activation vector or 2-D (frames, 360) matrix.
        center: optional bin index around which to average. BUG FIX: this
            argument was previously ignored (always overwritten by the argmax);
            it now defaults to the argmax only when None is passed.

    Returns:
        Pitch in cents (scalar for 1-D input, per-frame array for 2-D input).
    """
    if not hasattr(convert_bin_to_local_average_cents, 'cents_mapping'):
        # the bin number-to-cents mapping (360 bins, 20 cents per bin)
        convert_bin_to_local_average_cents.cents_mapping = (
            np.linspace(0, 7180, 360) + 1997.3794084376191)
    if salience.ndim == 1:
        if center is None:
            center = int(np.argmax(salience))
        # Average over a 9-bin window centered on `center`, clipped to range.
        start = max(0, center - 4)
        end = min(len(salience), center + 5)
        salience = salience[start:end]
        product_sum = np.sum(
            salience * convert_bin_to_local_average_cents.cents_mapping[start:end])
        weight_sum = np.sum(salience)
        return product_sum / weight_sum
    if salience.ndim == 2:
        return np.array([convert_bin_to_local_average_cents(salience[i, :]) for i in
                         range(salience.shape[0])])
    raise Exception("Label should be either 1d or 2d ndarray.")
def convert_bin_to_local_average_cents_lowest_maxima(salience, center=None, maxima_order=10, maxima_minval=0.2, tolerance=0.1):
    """Decode pitch, preferring the lowest salience maximum that looks like f0.

    Local maxima of the activation vector are collected; if two maxima are
    roughly an octave apart (lower * 2 ~ higher within `tolerance`), the lower
    one is taken as the fundamental and the salience is replaced by a one-hot
    vector at that bin before the usual local-average decode.

    Args:
        salience: 1-D (360,) activation vector or 2-D (frames, 360) matrix.
        center: forwarded to convert_bin_to_local_average_cents.
        maxima_order: neighbourhood size for scipy.signal.argrelextrema.
        maxima_minval: minimum activation for a maximum to be considered.
        tolerance: relative tolerance for the octave test.

    Returns:
        Pitch in cents (scalar for 1-D input, per-frame array for 2-D input).
    """
    if salience.ndim == 1:
        maxima = argrelextrema(salience, np.greater, order=maxima_order)[0]
        # Pair each maximum bin with its frequency in Hz, dropping weak peaks.
        maxima = [(x, convert_cent_to_hz(convert_bin_to_local_average_cents(__create_maximum_bin(x))))
                  for x in maxima if salience[x] >= maxima_minval]
        if len(maxima) > 1:
            success, idx = __try_find_f0_in_maxima(maxima, tolerance=tolerance)
            if success:
                # Replace the salience with a one-hot vector at the f0 bin.
                salience = np.zeros(360)
                salience[maxima[idx][0]] = 1
        return convert_bin_to_local_average_cents(salience, center=center)
    if salience.ndim == 2:
        return np.array([convert_bin_to_local_average_cents_lowest_maxima(salience[i, :]) for i in
                         range(salience.shape[0])])
    raise Exception("Label should be either 1d or 2d ndarray.")
def __create_maximum_bin(index):
    """Return a 360-bin one-hot activation vector with a 1 at `index`."""
    one_hot = np.zeros(360)
    one_hot[index] = 1
    return one_hot
def __try_find_f0_in_maxima(maxima, tolerance=0.1):
    """Search for a maximum whose frequency is ~half of the next-higher one.

    Note: sorts `maxima` (a list of (bin, hz) pairs) in place, ascending by Hz.

    Returns:
        (True, index_of_f0_candidate) on success, otherwise (False, None).
    """
    maxima.sort(key=lambda peak: peak[1])
    for i, ((_, f_low), (_, f_high)) in enumerate(zip(maxima, maxima[1:])):
        if abs(f_low * 2 - f_high) / f_high <= tolerance:
            return True, i
    return False, None
import os
import logging
from tensorflow.keras.models import load_model
import pathlib
from rt_pie import config
class FittedModel:
    """Metadata describing one pretrained pitch-estimation model on disk."""

    def __init__(self, name, block_size, time_component, desc, path):
        self.name = name                      # CLI identifier, e.g. "CREPE_1024"
        self.block_size = block_size          # audio block size in samples
        self.time_component = time_component  # True for recurrent (LSTM) models
        self.desc = desc                      # human-readable description
        self.path = path                      # path to the .hdf5 weights file

    def help_formatting(self):
        """Return one aligned line for the CLI help listing."""
        padded_name = self.name.ljust(14, ' ')
        return f"  {padded_name}{self.desc}\n"
models = [
FittedModel("LSTM_1024", 1024, True, "LSTM, block size 1024, hop size 512",
os.path.join(config.MODELS_BASE_PATH, "lstm_1024_512_e100_model.hdf5")),
FittedModel("LSTM_512", 512, True, "LSTM, block size 512, hop size 256",
os.path.join(config.MODELS_BASE_PATH, "lstm_512_256_e065_model.hdf5")),
FittedModel("LSTM_256", 256, True, "LSTM, block size 256, hop size 128",
os.path.join(config.MODELS_BASE_PATH, "lstm_256_128_e092_model.hdf5")),
FittedModel("CREPE_2048", 2048, False, "CREPE, block size 2048, hop size 1024",
os.path.join(config.MODELS_BASE_PATH, "crepe_2048_1024_e100_model.hdf5")),
FittedModel("CREPE_1024", 1024, False, "CREPE, block size 1024, hop size 512",
os.path.join(config.MODELS_BASE_PATH, "crepe_1024_512_e100_model.hdf5")),
FittedModel("CREPE_512", 512, False, "CREPE, block size 512, hop size 256",
os.path.join(config.MODELS_BASE_PATH, "crepe_512_256_e100_model.hdf5")),
FittedModel("CREPE_256", 256, False, "CREPE, block size 256, hop size 128",
os.path.join(config.MODELS_BASE_PATH, "crepe_256_128_e084_model.hdf5")),
FittedModel("DEEPF0_2048", 2048, False, "DEEPF0, block size 2048, hop size 1024",
os.path.join(config.MODELS_BASE_PATH, "deepf0_2048_1024_e100_model.hdf5")),
FittedModel("DEEPF0_1024", 1024, False, "DEEPF0, block size 1024, hop size 512",
os.path.join(config.MODELS_BASE_PATH, "deepf0_1024_512_e087_model.hdf5")),
FittedModel("DEEPF0_512", 512, False, "DEEPF0, block size 512, hop size 256",
os.path.join(config.MODELS_BASE_PATH, "deepf0_512_256_e048_model.hdf5")),
FittedModel("DEEPF0_256", 256, False, "DEEPF0, block size 256, hop size 128",
os.path.join(config.MODELS_BASE_PATH, "deepf0_256_128_e091_model.hdf5")),
FittedModel("DEEPF0_1024_reduced", 1024, False, "DEEPF0, block size 1024, hop size 512, reduced filters",
os.path.join(config.MODELS_BASE_PATH, "deepf0_1024_512_reduced_filter_e100_model.hdf5"))
]
def get_model(model):
    """Resolve a model name to its FittedModel entry and load the Keras model.

    Args:
        model: name of the model to load; unknown names fall back to models[0].

    Returns:
        (FittedModel, keras.Model) tuple.

    Exits the process with status -1 when the model file cannot be loaded.
    """
    try:
        # Fall back to the first registered model when the name is unknown.
        # (Renamed the local so it no longer shadows the `model` parameter.)
        fitted = next((m for m in models if m.name == model), models[0])
        model_path_abs = os.path.join(pathlib.Path(__file__).parent.parent.absolute(), fitted.path)
        return fitted, load_model(model_path_abs)
    except Exception as e:
        logging.error("Could not find specified model.")
        logging.error(e)
        # raise SystemExit directly instead of relying on the site-provided
        # exit() builtin (unavailable under `python -S`); behavior is identical.
        raise SystemExit(-1)
import numpy as np
def __rpa_tolerance_function_cents(cent_true, cent_pred, cent_tolerance):
    """True if the prediction lies within an absolute tolerance in cents."""
    deviation = abs(cent_true - cent_pred)
    return deviation <= cent_tolerance
def __rpa_tolerance_function_relative(cent_true, cent_pred, tolerance):
    """True if the prediction deviates by at most `tolerance * cent_true`.

    NOTE(review): the window scales with cent_true, so a zero or negative true
    value makes the check unsatisfiable — confirm inputs are positive.
    """
    allowed = cent_true * tolerance
    return abs(cent_true - cent_pred) <= allowed
def __raw_pitch_accuracy(cents_true, cents_pred, tolerance_function, tolerance):
    """Percentage of frames where the prediction matches within tolerance.

    Args:
        cents_true: sequence of ground-truth pitch values.
        cents_pred: sequence of predicted pitch values (assumed same length).
        tolerance_function: predicate (true, pred, tolerance) -> bool.
        tolerance: tolerance value forwarded to the predicate.

    Returns:
        Accuracy in percent; 0 for empty input or when no frame matches
        (matching the original's int-zero return in those cases).
    """
    # Replaces the manual two-counter loop; counter_false was redundant since
    # true + false == len(cents_true).
    hits = sum(
        1 for t, p in zip(cents_true, cents_pred)
        if tolerance_function(t, p, tolerance)
    )
    if hits == 0:
        return 0
    return hits / len(cents_true) * 100
def raw_pitch_accuracy_cent(cents_true, cents_pred, cent_tolerance=50):
    """Raw pitch accuracy [%] with an absolute tolerance in cents."""
    return __raw_pitch_accuracy(cents_true, cents_pred,
                                __rpa_tolerance_function_cents, cent_tolerance)
def raw_pitch_accuracy_hz(hz_true, hz_pred, relative_tolerance=0.02):
    """Raw pitch accuracy [%] with a tolerance relative to the true Hz value."""
    return __raw_pitch_accuracy(hz_true, hz_pred,
                                __rpa_tolerance_function_relative, relative_tolerance)
def mean_absolute_error(truth, prediction):
    """Return the mean absolute difference between two (broadcastable) arrays."""
    return np.mean(np.abs(truth - prediction))
def get_hz_metrics(hz_true, hz_pred, rpa_relative_tolerance=0.02, print_output=False):
    """Compute summary error statistics between two pitch tracks in Hz.

    Args:
        hz_true: ground-truth pitch values [Hz] as a numpy array.
        hz_pred: predicted pitch values [Hz], same shape as hz_true.
        rpa_relative_tolerance: relative tolerance forwarded to
            raw_pitch_accuracy_hz (default: 2%).
        print_output: when True, also pretty-print every metric to stdout.

    Returns:
        dict mapping metric names to values. Signed errors are computed as
        hz_true - hz_pred, so a positive mean means under-prediction.
    """
    diff_hz = hz_true - hz_pred
    abs_diff_hz = np.abs(diff_hz)
    min_error_hz = np.min(abs_diff_hz)
    max_error_hz = np.max(abs_diff_hz)
    mean_hz = np.mean(diff_hz)
    median_hz = np.median(diff_hz)
    mae_hz = mean_absolute_error(hz_true, hz_pred)
    std_dev_hz = np.std(diff_hz)
    quantile_05 = np.quantile(diff_hz, 0.05)
    quantile_95 = np.quantile(diff_hz, 0.95)
    rpa_hz = raw_pitch_accuracy_hz(hz_true, hz_pred, rpa_relative_tolerance)
    if print_output:
        # (dead locals l, r, f, p removed: they were assigned but never used)
        print(__format_prop_for_print("Min abs err [Hz]", min_error_hz))
        print(__format_prop_for_print("Max abs err [Hz]", max_error_hz))
        print(__format_prop_for_print("Mean err [Hz]", mean_hz))
        print(__format_prop_for_print("Median [Hz]", median_hz))
        print(__format_prop_for_print("MAE [Hz]", mae_hz))
        print(__format_prop_for_print("StdDev [Hz]", std_dev_hz))
        print(__format_prop_for_print("5% quant err [Hz]", quantile_05))
        print(__format_prop_for_print("95% quant err [Hz]", quantile_95))
        print(__format_prop_for_print("RPA [Hz]", rpa_hz))
    return {
        "min_error_hz": min_error_hz,
        "max_error_hz": max_error_hz,
        "mean_error_hz": mean_hz,
        "median_error_hz": median_hz,
        "mae_hz": mae_hz,
        "std_dev_hz": std_dev_hz,
        "quantile_05": quantile_05,
        "quantile_95": quantile_95,
        "rpa_hz": rpa_hz,
    }
def __format_prop_for_print(name, value, filler="_", decimal_places=2, length_name=20, length_value=8):
    """Render one "name ____ value" line with filler-padded alignment."""
    left = (name + ' ').ljust(length_name, filler)
    right = (' ' + str(round(value, decimal_places))).rjust(length_value, filler)
    return f"{left}{right}"
import numpy as np
import mir_eval
from scipy.signal import argrelextrema
def convert_cent_to_hz(cent, f_ref=10.0):
    """Convert a pitch in cents (relative to f_ref) back to Hz."""
    octaves = cent / 1200.0
    return f_ref * 2 ** octaves
def convert_hz_to_cent(hertz, f_ref=10.0):
    """Convert Hz values to cents relative to f_ref (thin mir_eval wrapper)."""
    cents = mir_eval.melody.hz2cents(hertz, f_ref)
    return cents
def convert_semitone_to_hz(semi, f_ref=10.0):
    """Convert semitones (relative to f_ref) to Hz; one semitone is 100 cents."""
    cents = 100 * semi
    return f_ref * 2 ** (cents / 1200.0)
def convert_bin_to_weighted_average_cents(label, octaves=6, cents_per_bin=20, hz_lower_bound=32.7):
    """Convert classifier bin activations to a weighted-average pitch in cents.

    Args:
        label: 1-D (bins,) activation vector or 2-D (frames, bins) matrix.
        octaves: pitch range covered by the classifier, in octaves.
        cents_per_bin: resolution of one classifier bin.
        hz_lower_bound: frequency of the lowest bin [Hz].

    Returns:
        Activation-weighted average pitch in cents (scalar for 1-D input,
        per-frame array for 2-D input).

    Raises:
        ValueError: if label is neither 1-D nor 2-D. (Was a bare Exception;
        ValueError is a subclass, so existing handlers still match.)
    """
    classifier_lowest_cent = convert_hz_to_cent(np.array([hz_lower_bound]))[0]
    classifier_total_bins = int((1200 / cents_per_bin) * octaves)
    # Center of each bin, in cents, offset so bin 0 sits at the lower bound.
    classifier_cents = np.linspace(
        0, (classifier_total_bins - 1) * cents_per_bin, classifier_total_bins) + classifier_lowest_cent
    if label.ndim == 1:
        product_sum = np.sum(label * classifier_cents)
        weight_sum = np.sum(label)
        return product_sum / weight_sum
    if label.ndim == 2:
        product_sum = np.dot(label, classifier_cents)
        weight_sum = np.sum(label, axis=1)
        return product_sum / weight_sum
    raise ValueError("Label should be either 1d or 2d ndarray.")
def convert_bin_to_local_average_cents(salience, center=None):
    """Find the weighted average cents near the argmax (or a given center) bin.

    Args:
        salience: 1-D (360,) activation vector or 2-D (frames, 360) matrix.
        center: optional bin index around which to average. BUG FIX: this
            argument was previously ignored (always overwritten by the argmax);
            it now defaults to the argmax only when None is passed.

    Returns:
        Pitch in cents (scalar for 1-D input, per-frame array for 2-D input).
    """
    if not hasattr(convert_bin_to_local_average_cents, 'cents_mapping'):
        # the bin number-to-cents mapping (360 bins, 20 cents per bin)
        convert_bin_to_local_average_cents.cents_mapping = (
            np.linspace(0, 7180, 360) + 1997.3794084376191)
    if salience.ndim == 1:
        if center is None:
            center = int(np.argmax(salience))
        # Average over a 9-bin window centered on `center`, clipped to range.
        start = max(0, center - 4)
        end = min(len(salience), center + 5)
        salience = salience[start:end]
        product_sum = np.sum(
            salience * convert_bin_to_local_average_cents.cents_mapping[start:end])
        weight_sum = np.sum(salience)
        return product_sum / weight_sum
    if salience.ndim == 2:
        return np.array([convert_bin_to_local_average_cents(salience[i, :]) for i in
                         range(salience.shape[0])])
    raise Exception("Label should be either 1d or 2d ndarray.")
def convert_bin_to_local_average_cents_lowest_maxima(salience, center=None, maxima_order=10, maxima_minval=0.2, tolerance=0.1):
    """Decode pitch, preferring the lowest salience maximum that looks like f0.

    Local maxima of the activation vector are collected; if two maxima are
    roughly an octave apart (lower * 2 ~ higher within `tolerance`), the lower
    one is taken as the fundamental and the salience is replaced by a one-hot
    vector at that bin before the usual local-average decode.

    Args:
        salience: 1-D (360,) activation vector. Unlike the sibling decoder,
            this variant does not accept 2-D input.
        center: forwarded to convert_bin_to_local_average_cents.
        maxima_order: neighbourhood size for scipy.signal.argrelextrema.
        maxima_minval: minimum activation for a maximum to be considered.
        tolerance: relative tolerance for the octave test.

    Returns:
        Pitch in cents.

    Raises:
        Exception: if salience is not a 1-D ndarray.
    """
    if salience.ndim == 1:
        maxima = argrelextrema(salience, np.greater, order=maxima_order)[0]
        # Pair each maximum bin with its frequency in Hz, dropping weak peaks.
        maxima = [(x, convert_cent_to_hz(convert_bin_to_local_average_cents(__create_maximum_bin(x))))
                  for x in maxima if salience[x] >= maxima_minval]
        if len(maxima) > 1:
            success, idx = __try_find_f0_in_maxima(maxima, tolerance=tolerance)
            if success:
                # Replace the salience with a one-hot vector at the f0 bin.
                salience = np.zeros(360)
                salience[maxima[idx][0]] = 1
        return convert_bin_to_local_average_cents(salience, center=center)
    raise Exception("Label should be 1d ndarray.")
def __create_maximum_bin(index):
    """Return a 360-bin one-hot activation vector with a 1 at `index`."""
    one_hot = np.zeros(360)
    one_hot[index] = 1
    return one_hot
def __try_find_f0_in_maxima(maxima, tolerance=0.1):
    """Search for a maximum whose frequency is ~half of the next-higher one.

    Note: sorts `maxima` (a list of (bin, hz) pairs) in place, ascending by Hz.

    Returns:
        (True, index_of_f0_candidate) on success, otherwise (False, None).
    """
    maxima.sort(key=lambda peak: peak[1])
    for i, ((_, f_low), (_, f_high)) in enumerate(zip(maxima, maxima[1:])):
        if abs(f_low * 2 - f_high) / f_high <= tolerance:
            return True, i
    return False, None
import numpy as np
import scipy.io as sio
import pathlib
class PPISCS:
    """Fully-connected surrogate model loaded from a MATLAB `.mat` file.

    The network uses swish activations with dense (concatenative) connectivity
    between hidden layers and a softplus output head. Weights, biases and the
    input shift/scale are read from `coef_scs_fcc.mat`.
    """

    def __init__(self, model_path=None):
        """Load the model coefficients.

        Args:
            model_path: optional path to a .mat coefficient file; defaults to
                the bundled `coef_scs_fcc.mat` next to this module.
        """
        if model_path is None:
            model_path = pathlib.Path(__file__).resolve().parent / 'coef_scs_fcc.mat'
        else:
            model_path = pathlib.Path(model_path).resolve()
        # 'model' is a MATLAB struct; [0, 0] unwraps the scalar struct array.
        self._model = sio.loadmat(model_path)['model'][0, 0]
        self._bias = [np.array(x) for x in self._model['bias'][0]]
        self._weights = [np.array(x) for x in self._model['weights'][0]]

    @staticmethod
    def swish(x):
        """Swish activation: x * sigmoid(x)."""
        return x / (1 + np.exp(-x))

    @staticmethod
    def softplus(x):
        """Numerically stable softplus: log(1 + exp(x))."""
        return np.log(1 + np.exp(-np.abs(x))) + np.maximum(x, 0)

    @staticmethod
    def eval_on_batch(y, layer_bias, layer_weights):
        """Run one batch through the network.

        Each hidden layer's swish output is concatenated onto the running
        feature vector (dense connectivity); the last layer is affine followed
        by softplus.
        """
        n_layers = len(layer_bias)
        y = PPISCS.swish(np.matmul(y, layer_weights[0]) + layer_bias[0])
        for i in range(1, n_layers - 1):
            y = np.hstack((y, PPISCS.swish(np.matmul(y, layer_weights[i]) + layer_bias[i])))
        y = np.matmul(y, layer_weights[-1]) + layer_bias[-1]
        return PPISCS.softplus(y)

    @staticmethod
    def eval(y_i, bias, weights, batch_size):
        """Evaluate `y_i` in batches of at most `batch_size` rows."""
        n_y = y_i.shape[0]
        if n_y <= batch_size:
            y_out = PPISCS.eval_on_batch(y_i, bias, weights)
            return y_out
        y_out = np.empty((n_y, bias[-1].size), dtype=y_i.dtype)
        for ik in range(0, n_y, batch_size):
            y_out[ik:ik + batch_size] = PPISCS.eval_on_batch(y_i[ik:ik + batch_size], bias, weights)
        return y_out

    def predict(self, input_data, batch_size=4096*4, crop=True):
        """Predict outputs for a (n, 9) input array.

        Args:
            input_data: 2-D numpy array with exactly 9 columns.
            batch_size: maximum rows evaluated per batch.
            crop: when True and the crop condition holds, keep only the first
                61 output columns.

        Raises:
            ValueError: if input_data is not a 2-D array with 9 columns.
        """
        if not isinstance(input_data, np.ndarray) or input_data.ndim != 2 or input_data.shape[1] != 9:
            raise ValueError('Input data must be a 2D numpy array with 9 columns')
        # NOTE(review): only the first row's column 1 is inspected — this
        # assumes the whole batch shares that flag value; confirm with callers.
        if crop and input_data[0, 1] == 1:
            crop_end = 61
        else:
            crop_end = None
        # Standardize inputs with the shift/scale stored in the .mat file.
        y = (input_data.astype(np.float64) - self._model['x_sft']) / self._model['x_sc']
        y = PPISCS.eval(y, self._bias, self._weights, batch_size)
        if crop_end is not None:
            y = y[:, :crop_end]
        return y
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        if sample:
            # Bessel's correction for a sample standard deviation.
            # NOTE(review): a single data point makes n == 0 here and raises
            # ZeroDivisionError — confirm callers always supply >= 2 points.
            n = len(self.data) - 1
        else:
            n = len(self.data)
        mean = self.calculate_mean()
        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)
        self.stdev = sigma
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2 * math.pi))) * math.exp(-0.5 * ((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        x = []
        y = []

        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval * i
            x.append(tmp)
            y.append(self.pdf(tmp))

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')

        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # BUG FIX: this previously re-labelled axes[0]; the pdf subplot is axes[1].
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # Variances add for independent Gaussians, so stdevs add in quadrature.
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
def decode_enum(enum_dict: dict):
    """Build a decoder mapping raw values to labels.

    The `KeyError` class itself is used as the sentinel key whose entry is
    returned for values not present in `enum_dict`.
    """
    def decode(value):
        if value in enum_dict:
            return enum_dict[value]
        return enum_dict[KeyError]
    return decode
ncom_nav_status = {
KeyError: 'Reserved',
0: 'Invalid',
1: 'Raw_IMU_measurements',
2: 'Initializing',
3: 'Locking',
4: 'Locked',
5: 'Reserved',
6: 'Expired_firmware',
7: 'Blocked_firmware',
10: 'Status_only',
11: 'Internal_use',
20: 'Trigger_packet_initializing',
21: 'Trigger_packet_locking',
22: 'Trigger_packet_locked',
}
ncom_channel_status = {
KeyError: 'Reserved_for_future_use',
0: 'Time_satellites_mode',
1: 'Kalman_filter_innovations_1',
2: 'Internal_info_primary_antenna',
3: 'Position_accuracy',
4: 'Velocity_accuracy',
5: 'Orientation_accuracy',
6: 'Gyro_bias',
7: 'Accelerometer_bias',
8: 'Gyro_scale_factor',
9: 'Gyro_bias_accuracy',
10: 'Accelerometer_accuracy',
11: 'Gyro_scalr_factor_accuracy',
12: 'Position_estimate_primary_antenna',
13: 'Orientation_estimate_dual_antenna',
14: 'Position_accuracy_primary_antenna',
15: 'Orientation_accuracy_dual_antenna',
16: 'INS_rotation',
17: 'Internal_info_secondary_antenna',
18: 'Internal_info_IMU',
19: 'INS_SW_version',
20: 'Differential_correction_info',
21: 'Disk_space_log_size',
22: 'Internal_info_processing_timing',
23: 'Up_time_GNSS_rejections_PTP_status',
24: 'Asynchronous_packet_event_input_falling_edge',
25: 'Reserved',
26: 'Displacement_lever_arm',
27: 'Internal_info_dual_antenna_ambiguity',
28: 'Internal_info_dual_antenna_ambiguity',
29: 'Initial_settings_NAVconfig',
30: 'OS_script_version_info',
31: 'HW_config_info',
32: 'Kalman_filter_innovation_2',
33: 'Zero_velocity_lever_arm',
34: 'Zero_velocity_lever_arm_accuracy',
35: 'Lateral_advanced_slip_lever_arm',
36: 'Lateral_advanced_slip_lever_arm_accuracy',
37: 'Heading_misalignment_angle',
38: 'Zero_velocity_option_settings_third_serial_output_mode',
39: 'Lateral_advanced_slip_option_settings',
40: 'NCOM_version_Id',
41: 'Output_baud_rates_WiFI_IP_address',
42: 'Heading_lock_options',
43: 'Asynchronous_packet_triggered_by_rising_edge_of_event_input',
44: 'Wheel_speed_configuration',
45: 'Wheel_speed_counts',
46: 'Wheel_speed_lever_arm',
47: 'Wheel_speed_lever_arm_accuracy',
48: 'Undulation_dilution_of_precision_of_GPS_datum_information',
49: 'OmniSTAR_tracking_information',
50: 'Information_sent_to_the_command_decoder',
51: 'Additional_slip_point_1_lever_arm',
52: 'Additional_slip_point_2_lever_arm',
53: 'Additional_slip_point_3_lever_arm',
54: 'Additional_slip_point_4_lever_arm',
55: 'Information_about_the_primary_GNSS_receiver',
56: 'Information_about_the_secondary_GNSS_receiver',
57: 'Position_estimate_of_the_primary_GNSS_antenna',
58: 'Vehicle_to_output_frame_rotation',
59: 'IMU_decoding_status',
60: 'Definition_of_the_surface_angles',
61: 'Internal_information_about_external_GNSS_receiver',
62: 'Information_about_the_external_GNSS_receiver',
63: 'Angular_acceleration_filter_settings',
64: 'Hardware_information_and_external_GNSS_receiver_configuration',
65: 'Asynchronous_packet_triggered_by_camera_distance_output',
66: 'Extended_local_coordinate_definition_latitude_and_longitude',
67: 'Extended_local_coordinate_definition_altitude_and_heading',
68: 'Additional_slip_point_5_lever_arm',
69: 'Additional_slip_point_6_lever_arm',
70: 'Additional_slip_point_7_lever_arm',
71: 'Additional_slip_point_8_lever_arm',
72: 'Status_information',
73: 'Status_information',
74: 'Linear_acceleration_filter_settings',
75: 'Miscellaneous',
76: 'Internal_information_about_differential_corrections',
77: 'Differential_correction_configuration',
78: 'CAN_bus_status_information',
79: 'Asynchronous_packet_triggered_by_falling_edge_of_event_input_2',
80: 'Asynchronous_packet_triggered_by_rising_edge_of_event_input_2',
81: 'Asynchronous_packet_triggered_by_camera_distance_output_2',
82: 'Hardware_configuration_information',
83: 'Status_information',
84: 'Status_information',
85: 'Software_version_information',
86: 'Reserved_for_future_use',
87: 'Linear_jerk_filter_settings',
88: 'Kalman_filter_innovations_3',
89: 'Vertical_advanced_slip_lever_arm',
90: 'Vertical_advanced_slip_lever_arm_accuracy',
91: 'Pitch_misalignment_angle',
92: 'Vertical_advanced_slip_option_settings',
93: 'Galileo_and_Beidou_Satellite_Used_counts',
94: 'Galileo_and_Beidou_Satellite_L1_L2_counts',
95: 'Generic_aiding_packet_information',
}
ncom_position_velocity_orientation_mode = {
KeyError: 'Reserved',
0: 'None',
1: 'Search',
2: 'Doppler',
3: 'SPS',
4: 'Differential',
5: 'RTK_Float',
6: 'RTK_Integer',
7: 'WAAS',
8: 'OmniSTAR',
9: 'OmniSTAR_HP',
10: 'No_data',
11: 'Blanked',
12: 'Doppler_PP',
13: 'SPS_PP',
14: 'Differential_PP',
15: 'RTK_Float_PP',
16: 'RTK_Integer_PP',
17: 'OmniSTAR_XP',
18: 'CDGPS',
19: 'Not_recognized',
20: 'gxDoppler',
21: 'gxSPS',
22: 'gxDifferential',
23: 'gxFloat',
24: 'gxInteger',
25: 'ixDoppler',
26: 'ixSPS',
27: 'ixDifferential',
28: 'ixFloat',
29: 'ixInteger',
30: 'PPP_converging',
31: 'PPP',
32: 'Unknown',
}
ncom_blended_processing_methods = {
KeyError: 'Reserved',
0: 'Invalid',
1: 'Real_time',
2: 'Simulated',
3: 'Post_process_forward',
4: 'Post_process_backward',
5: 'Post_process_combined',
6: 'Unknown',
}
ncom_ptp_status = {
KeyError: 'Value_not_in_documentation',
0: 'PTP_status_invalid',
1: 'PTP_status_initializing',
2: 'PTP_status_faulty',
3: 'PTP_status_disabled',
4: 'PTP_status_listening',
5: 'PTP_status_pre_master',
6: 'PTP_status_master',
7: 'PTP_status_passive',
8: 'PTP_status_uncalibrated',
9: 'PTP_status_slave',
10: 'PTP_status_locked',
11: 'PTP_status_config_error',
12: 'PTP_status_critical_error',
13: 'PTP_status_unknown',
}
rcom_lane_status_channel = {
KeyError: 'Value_not_in_documentation',
0: 'GPS_coarse_time',
1: 'RT-Range_SW_dev_ID',
2: 'Map_number',
6: 'OS_and_script_version',
7: 'UTC_offset_CPU_load',
8: 'Point_A_lever-arm',
9: 'Point_B_lever-arm',
10: 'Point_C_lever-arm',
15: 'Command_communication_status',
}
rcom_ex_range_status_channel = {
KeyError: 'Value_not_in_documentation',
1: 'Latency_measurement',
2: 'RT-Range_SW_dev_ID',
3: 'Target_wireless_LAN_communication_status',
4: 'Hunter_ethernet_communication_status',
5: 'Range_output_latency_offset_measurement',
6: 'OS_and_script_version',
7: 'UTC_offset_CPU_load',
8: 'Fixed_point_position',
9: 'Hunter_target_IP_addresses',
10: 'Fixed_point_altitude',
11: 'RT-Range_local_coordinate_origin_latitude_longitude',
12: 'RT-Range_local_coordinate_origin_altitude_heading',
13: 'Hunter_lever_arm',
14: 'Target_lever_arm',
15: 'Command_communication_status',
16: 'Range_accuracy',
17: 'Target_vehicle_geometry',
18: 'Acceleration_filter_settings',
19: 'Extrapolation_filter_settings',
20: 'Feature_point_position',
21: 'Feature_point_altitude_heading',
22: 'Hunter_vehicle_geometry',
} | /rt_range-0.3.1.tar.gz/rt_range-0.3.1/src/rt_range/ethernet/status_definitions.py | 0.713831 | 0.34715 | status_definitions.py | pypi |
from dataclasses import dataclass, InitVar
from typing import Callable, Any
from typing import TypeAlias
import numpy as np
ValueConvertFunc: TypeAlias = Callable[[Any], Any]
@dataclass
class RTType:
"""
RT data type definition
:param dtype: NumPy dtype for structure decoding
:param get_value: Function for converting dtype into raw value
"""
dtype: type | np.dtype
get_value: ValueConvertFunc | None = None
def __post_init__(self):
if self.get_value is None:
self.get_value = self._default_get_value
@staticmethod
def _default_get_value(value):
return value
@dataclass
class Field:
    """
    RT data packet field
    :param name: Field name
    :param rt_type: RT data type of the field
    :param decode_value: Function for raw value conversion to specified format
    :param unit: Unit of the field value
    """
    name: str
    rt_type: InitVar[RTType]
    decode_value: InitVar[Callable[[Any], Any]] = None
    unit: str | None = None

    def __post_init__(self, rt_type: RTType, decode_value: InitVar[ValueConvertFunc]):
        # Two-stage decode chain: dtype value -> raw value -> user-facing value.
        if decode_value is None:
            decode_value = self._default_decode_value
        self._decode_value = decode_value
        self._decode_dtype = rt_type.get_value
        self.dtype = rt_type.dtype

    @staticmethod
    def _default_decode_value(value):
        # Identity conversion used when no decode_value is supplied.
        return value

    def get_value(self, value):
        """Convert a raw structured-array value into its final decoded form."""
        return self._decode_value(self._decode_dtype(value))
@dataclass
class VariableBlock:
    """A packet sub-block whose field layout depends on a selector value.

    :param selector: byte offset (within the packet) of the selector value
    :param structure: mapping of selector value -> list of Fields for that layout
    :param size: total size of the block in bytes (used for the fallback field)
    :param name: optional prefix applied to contained field names
    """
    selector: int
    structure: dict[int, list[Field]]
    size: InitVar[int]
    name: str | None = None

    def __post_init__(self, size: int):
        # Fallback Field that consumes `size` raw bytes when the selector value
        # is not found in `structure`; it decodes to a placeholder string.
        field_name = self.name if self.name is not None else f'field_{hex(id(self))}'
        self.default = Field(field_name, RTType(
            np.dtype([(f'b{i}', np.uint8) for i in range(size)]),
            get_value=lambda _: 'Parser_not_implemented',
        ))
Selector: TypeAlias = tuple[int, ...] | None
Structure: TypeAlias = list[Field | VariableBlock]
Byte = RTType(np.int8)
UByte = RTType(np.uint8)
Short = RTType(np.int16)
UShort = RTType(np.uint16)
Word = RTType(
np.dtype([('b0', np.uint8), ('b1', np.uint8), ('b2', np.uint8)]),
get_value=lambda v: x if (x := np.frombuffer(bytes(v) + b'\0', dtype=np.int32)[0]) < 0x800000 else -(~x & 0x00FFFFFF) - 1,
)
UWord = RTType(
np.dtype([('b0', np.uint8), ('b1', np.uint8), ('b2', np.uint8)]),
get_value=lambda v: np.frombuffer(bytes(v) + b'\0', dtype=np.int32)[0],
)
Long = RTType(np.int32)
ULong = RTType(np.uint32)
Int64 = RTType(np.int64)
UInt64 = RTType(np.uint64)
Float = RTType(np.float32)
Double = RTType(np.float64) | /rt_range-0.3.1.tar.gz/rt_range-0.3.1/src/rt_range/ethernet/rt_types.py | 0.930268 | 0.379723 | rt_types.py | pypi |
import itertools
from copy import deepcopy
import numpy as np
from rt_range.ethernet.rt_types import Field, VariableBlock, Selector, Structure, ValueConvertFunc
class Packet:
    """Decoder for one RT packet layout that may contain VariableBlocks.

    For every combination of variable-block selector values a concrete numpy
    structured dtype and a per-field decoder table are precomputed; the special
    selector ``None`` holds a fallback layout built from each block's default
    field.
    """

    def __init__(self, fields: Structure):
        self._raw_structure = fields
        self._get_blocks_and_selectors()
        self._parse_fields()
        # One numpy structured dtype per selector combination (None = fallback).
        self._structure: dict[Selector, np.dtype] = {
            selector: np.dtype([(field.name, field.dtype) for field in structure])
            for selector, structure in self._fields.items()}
        # Per-selector mapping of field name -> value-conversion function.
        self._decoder: dict[Selector, dict[str, ValueConvertFunc]] = {
            selector: {field.name: field.get_value for field in fields}
            for selector, fields in self._fields.items()}

    def _get_blocks_and_selectors(self):
        # Collect the variable-size sub-blocks and their selector byte offsets.
        self._blocks = tuple(filter(
            lambda e: isinstance(e, VariableBlock),
            self._raw_structure),
        )
        self._selectors = tuple(block.selector for block in self._blocks)

    def _parse_fields(self):
        def wrap_name(field: Field, block: VariableBlock):
            # Copy the field and prefix its name with the block name so the
            # same field name can appear in several blocks without colliding.
            field = deepcopy(field)
            if block.name is not None:
                field.name = f'{block.name}_{field.name}'
            return field

        def make_struct(selector: Selector):
            # Expand the raw structure for one concrete selector combination.
            for struct_field in self._raw_structure:
                if isinstance(struct_field, Field):
                    yield struct_field
                else:
                    yield from (wrap_name(f, struct_field)
                                for f in struct_field.structure[selector[self._blocks.index(struct_field)]])

        # NOTE(review): iterating each ``block.structure`` dict yields its keys,
        # so the product below covers every combination of per-block selector
        # values — confirm this is the intended cross-product semantics.
        self._fields: dict[Selector, tuple[Field, ...]] = {
            selector: tuple(make_struct(selector))
            for selector in itertools.product(*(block.structure for block in self._blocks))}
        self._append_default()

    def _append_default(self):
        # Fallback layout: each variable block contributes its default field.
        self._fields[None] = tuple(field if isinstance(field, Field) else field.default
                                   for field in self._raw_structure)

    def _get_selector(self, buffer: bytes) -> Selector:
        # Read the selector byte(s) straight out of the raw packet buffer.
        return tuple(buffer[s] for s in self._selectors)

    def decode(self, buffer: bytes) -> tuple[np.array, Selector]:
        """Decode a raw buffer into a structured numpy record and its selector."""
        selector = self._get_selector(buffer)
        if selector not in self._structure:
            selector = None  # unknown layout -> fall back to the default fields
        return np.frombuffer(buffer, dtype=self._structure[selector]), selector

    def get(self, obj: np.array, name: str, selector: Selector):
        """Decode a single named field from a structured record."""
        return self._decoder[selector][name](obj[name][0])

    def translate(self, obj: np.array, selector: Selector) -> dict[str, ...]:
        """Decode every field of a structured record into a plain dict."""
        return {field.name: self.get(obj, field.name, selector) for field in self._fields[selector]}

    def parse(self, buffer: bytes) -> dict[str, ...]:
        """Decode and translate a raw packet buffer in one step."""
        return self.translate(*self.decode(buffer))
import itertools
from rt_range.common import convert_10_pow_m2, convert_10_pow_m3, convert_10_pow_m7
from rt_range.ethernet.rt_packet import Packet
from rt_range.ethernet.rt_types import Field, VariableBlock, Byte, UByte, Short, UShort, Word, UWord, Long, ULong, Float
from rt_range.ethernet.status_definitions import decode_enum, rcom_ex_range_status_channel, ncom_position_velocity_orientation_mode
def multiple_sensor_points():
    """Yield the three per-sensor-point fields for sensor points 0..11.

    For each point: resultant range to target (scaled 10^-3, metres) and the
    two field-of-view percentage bytes.
    """
    def fields_for(idx):
        yield Field(f'resultant_range_from_sensor_point_{idx}_to_target', ULong, decode_value=convert_10_pow_m3, unit='m')
        yield Field(f'percentage_target_visible_in_FoV_sensor_point_{idx}', UByte)
        yield Field(f'percentage_FoV_occupied_by_target_sensor_point_{idx}', UByte)
    return (field for idx in range(12) for field in fields_for(idx))
# Layout of the 8-byte variable "status" block of the RCOM extended-range
# packet, keyed by the value of the status_channel field.  Each entry lists
# the fields packed into the block for that channel number.
rcom_ex_range_status = {
    0: [
        Field('GPS_time', Long, unit='min'),
        Field('hunter_position_mode', UByte, decode_value=decode_enum(ncom_position_velocity_orientation_mode)),
        Field('target_position_mode', UByte, decode_value=decode_enum(ncom_position_velocity_orientation_mode)),
        Field('target_latency', UShort, decode_value=convert_10_pow_m3, unit='s'),
    ],
    1: [
        Field('ID_byte_0', Byte),
        Field('ID_byte_1', Byte),
        Field('ID_byte_2', Byte),
        Field('ID_byte_3', Byte),
        Field('ID_byte_4', Byte),
        Field('ID_byte_5', Byte),
        Field('ID_byte_6', Byte),
        Field('ID_byte_7', Byte),
    ],
    2: [
        Field('target_radio_characters_received', UShort),
        Field('target_radio_packets_received', UShort),
        Field('target_radio_characters_skipped', UShort),
        Field('reserved', UShort),
    ],
    3: [
        Field('target_WLAN_characters_received', UShort),
        Field('target_WLAN_packets_received', UShort),
        Field('target_WLAN_characters_skipped', UShort),
        Field('reserved', UShort),
    ],
    4: [
        Field('hunter_ethernet_characters_received', UShort),
        Field('hunter_ethernet_packets_received', UShort),
        Field('hunter_ethernet_characters_skipped', UShort),
        Field('reserved', UShort),
    ],
    5: [
        Field('hunter_output_latency', UShort, decode_value=convert_10_pow_m3, unit='s'),
        Field('range_longitudinal_offset', Short, decode_value=convert_10_pow_m3, unit='m'),
        Field('range_lateral_offset', Short, decode_value=convert_10_pow_m3, unit='m'),
        Field('reserved', UShort),
    ],
    6: [
        Field('major_OS_version', UByte),
        Field('minor_OS_version', UByte),
        Field('OS_revision_version', UByte),
        Field('script_version', UWord),
        Field('reserved', UShort),
    ],
    7: [
        Field('UTC_offset', Short, unit='s'),
        Field('range_reference_plane_configuration', UByte),
        Field('target_feature_set_number', UByte),
        Field('number_feature_points_feature_set', UShort),
        Field('maximum_feature_points_per_feature_cell', UByte),
        # CPU load is transmitted in 0.4 % steps.
        Field('CPU_load', UByte, decode_value=lambda v: v * 0.4, unit='%'),
    ],
    8: [
        Field('fixed_point_latitude', Long, decode_value=convert_10_pow_m7, unit='deg'),
        Field('fixed_point_longitude', Long, decode_value=convert_10_pow_m7, unit='deg'),
    ],
    9: [
        Field('hunter_ip_byte_1', UByte),
        Field('hunter_ip_byte_2', UByte),
        Field('hunter_ip_byte_3', UByte),
        Field('hunter_ip_byte_4', UByte),
        Field('target_ip_byte_1', UByte),
        Field('target_ip_byte_2', UByte),
        Field('target_ip_byte_3', UByte),
        Field('target_ip_byte_4', UByte),
    ],
    10: [
        Field('fixed_point_altitude', Long, decode_value=convert_10_pow_m3, unit='m'),
        Field('fixed_point_heading', ULong, decode_value=convert_10_pow_m7, unit='deg'),
    ],
    11: [
        Field('local_coordinate_origin_latitude', Long, decode_value=convert_10_pow_m7, unit='deg'),
        Field('local_coordinate_origin_longitude', Long, decode_value=convert_10_pow_m7, unit='deg'),
    ],
    12: [
        Field('local_coordinate_origin_altitude', Long, decode_value=convert_10_pow_m3, unit='m'),
        Field('local_coordinate_origin_heading', ULong, decode_value=convert_10_pow_m7, unit='deg'),
    ],
    13: [
        Field('hunter_lever_arm_x', Word, decode_value=convert_10_pow_m3, unit='m'),
        Field('hunter_lever_arm_y', Word, decode_value=convert_10_pow_m3, unit='m'),
        Field('hunter_lever_arm_z', Short, decode_value=convert_10_pow_m3, unit='m'),
    ],
    14: [
        Field('target_lever_arm_x', Word, decode_value=convert_10_pow_m3, unit='m'),
        Field('target_lever_arm_y', Word, decode_value=convert_10_pow_m3, unit='m'),
        Field('target_lever_arm_z', Short, decode_value=convert_10_pow_m3, unit='m'),
    ],
    15: [
        Field('UDP_command_characters_received', UShort),
        Field('UDP_command_packets_received', UShort),
        Field('UDP_command_characters_skipped', UShort),
        Field('UDP_command_errors', UShort),
    ],
    16: [
        Field('range_longitudinal_accuracy', UShort, decode_value=convert_10_pow_m3, unit='m'),
        Field('range_lateral_accuracy', UShort, decode_value=convert_10_pow_m3, unit='m'),
        Field('range_vertical_accuracy', UShort, decode_value=convert_10_pow_m3, unit='m'),
        Field('range_magnitude_accuracy', UShort, decode_value=convert_10_pow_m3, unit='m'),
    ],
    17: [
        Field('target_vehicle_length', UShort, decode_value=convert_10_pow_m3, unit='m'),
        Field('target_vehicle_width', UShort, decode_value=convert_10_pow_m3, unit='m'),
        Field('target_polygon_number', UShort),
        Field('target_vehicle_height', UShort, decode_value=convert_10_pow_m3, unit='m'),
    ],
    18: [
        Field('acceleration_filter_cut_off_frequency', Float, unit='Hz'),
        Field('acceleration_filter_damping_ratio', Float),
    ],
    19: [
        Field('extrapolation_filter_cut_off_frequency', Float, unit='Hz'),
        Field('extrapolation_filter_damping_ratio', Float),
    ],
    20: [
        Field('feature_point_latitude', Long, decode_value=convert_10_pow_m7, unit='deg'),
        Field('feature_point_longitude', Long, decode_value=convert_10_pow_m7, unit='deg'),
    ],
    21: [
        Field('feature_point_altitude', Long, decode_value=convert_10_pow_m3, unit='m'),
        Field('feature_point_heading', ULong, decode_value=convert_10_pow_m7, unit='deg'),
    ],
    22: [
        Field('hunter_vehicle_length', UShort, decode_value=convert_10_pow_m3, unit='m'),
        Field('hunter_vehicle_width', UShort, decode_value=convert_10_pow_m3, unit='m'),
        Field('hunter_polygon_number', UShort),
        Field('hunter_vehicle_height', UShort, decode_value=convert_10_pow_m3, unit='m'),
    ],
}
# Field layout of the RCOM extended-range packet (sync byte 0x57, type 0x02).
# The 8-byte block at offset 41 varies with the preceding status_channel byte
# and is described by rcom_ex_range_status.
RCOM_extended_range = Packet([
    Field('sync', UByte),  # 0x57
    Field('packet_type', UByte),  # 0x02
    Field('length_of_data_section', UShort),
    Field('GPS_time_into_minute', UShort, decode_value=convert_10_pow_m3, unit='s'),
    Field('target_number', UByte),
    Field('total_number_of_targets', UByte),
    Field('lateral_range', Long, decode_value=convert_10_pow_m3, unit='m'),
    Field('longitudinal_range', Long, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_range_rate', Short, decode_value=convert_10_pow_m2, unit='m/s'),
    Field('longitudinal_range_rate', Short, decode_value=convert_10_pow_m2, unit='m/s'),
    Field('hunter_measurement_point_position_x', Long, decode_value=convert_10_pow_m3, unit='m'),
    Field('hunter_measurement_point_position_y', Long, decode_value=convert_10_pow_m3, unit='m'),
    Field('target_measurement_point_position_x', Long, decode_value=convert_10_pow_m3, unit='m'),
    Field('target_measurement_point_position_y', Long, decode_value=convert_10_pow_m3, unit='m'),
    Field('hunter_heading_angle', UShort, decode_value=convert_10_pow_m2, unit='deg'),
    Field('target_heading_angle', UShort, decode_value=convert_10_pow_m2, unit='deg'),
    Field('range_status', UByte),
    Field('status_channel', UByte, decode_value=decode_enum(rcom_ex_range_status_channel)),
    VariableBlock(selector=41, name='status', size=8, structure=rcom_ex_range_status),
    Field('hunter_forward_velocity', Short, decode_value=convert_10_pow_m2, unit='m/s'),
    Field('hunter_lateral_velocity', Short, decode_value=convert_10_pow_m2, unit='m/s'),
    Field('lateral_range_acceleration', Short, decode_value=convert_10_pow_m2, unit='m/s^2'),
    Field('longitudinal_range_acceleration', Short, decode_value=convert_10_pow_m2, unit='m/s^2'),
    Field('nearest_target_polygon_vertex_to_hunter_point_left', UByte),
    Field('nearest_target_polygon_vertex_to_hunter_point_right', UByte),
    Field('target_visibility', UByte, unit='%'),
    Field('target_feature_point_type', UByte),
    Field('target_feature_point_index', UShort),
    Field('nearest_hunter_polygon_vertex_to_target_point_left', UByte),
    Field('nearest_hunter_polygon_vertex_to_target_point_right', UByte),
    Field('nearest_target_polygon_vertex_to_hunter_polygon_left', UByte),
    Field('nearest_target_polygon_vertex_to_hunter_polygon_right', UByte),
    Field('nearest_hunter_polygon_vertex_to_target_polygon_left', UByte),
    Field('nearest_hunter_polygon_vertex_to_target_polygon_right', UByte),
    # Interpolation scales are transmitted in steps of 0.004.
    Field('nearest_target_polygon_vertex_to_hunter_point_scale', UByte, decode_value=lambda v: v * 0.004),
    Field('nearest_hunter_polygon_vertex_to_target_point_scale', UByte, decode_value=lambda v: v * 0.004),
    Field('nearest_target_polygon_vertex_to_hunter_polygon_scale', UByte, decode_value=lambda v: v * 0.004),
    Field('nearest_hunter_polygon_vertex_to_target_polygon_scale', UByte, decode_value=lambda v: v * 0.004),
    Field('hunter_polygon_origin_position_x', Long),
    Field('hunter_polygon_origin_position_y', Long),
    Field('target_polygon_origin_position_x', Long),
    Field('target_polygon_origin_position_y', Long),
    Field('hunter_unit_position_x', Long),
    Field('hunter_unit_position_y', Long),
    Field('target_unit_position_x', Long),
    Field('target_unit_position_y', Long),
    Field('hunter_pitch_angle', Short, decode_value=convert_10_pow_m2, unit='deg'),
    Field('hunter_roll_angle', Short, decode_value=convert_10_pow_m2, unit='deg'),
    Field('target_pitch_angle', Short, decode_value=convert_10_pow_m2, unit='deg'),
    Field('target_roll_angle', Short, decode_value=convert_10_pow_m2, unit='deg'),
    *multiple_sensor_points(),
    Field('checksum', UByte),
# NOTE(review): the trailing "| /rt_range-..." text below looks like
# dataset-extraction contamination appended to the source line — confirm.
]) | /rt_range-0.3.1.tar.gz/rt_range-0.3.1/src/rt_range/ethernet/rcom/extended_range_packet.py | 0.745954 | 0.216632 | extended_range_packet.py | pypi |
from rt_range.common import convert_10_pow_m2, convert_10_pow_m3, convert_10_pow_m4
from rt_range.ethernet.rt_packet import Packet
from rt_range.ethernet.rt_types import Field, VariableBlock, Byte, UByte, Short, UShort, Word, UWord, Long, ULong
from rt_range.ethernet.status_definitions import decode_enum, rcom_lane_status_channel
def lever_arm_struct(point_name: str):
    """Return the x/y/z lever-arm fields for measurement point *point_name*.

    x and y are unsigned words, z is a signed short; all are scaled by 10^-3
    to metres.
    """
    axes = (('x', Word), ('y', Word), ('z', Short))
    return [
        Field(f'point_{point_name}_lever-arm_{axis}', rt_type, decode_value=convert_10_pow_m3, unit='m')
        for axis, rt_type in axes
    ]
# Layout of the 8-byte variable "status" block of the RCOM lane packet,
# keyed by the value of the status_channel field.
rcom_lane_status = {
    0: [
        Field('GPS_time', Long, unit='min'),
        Field('reserved', ULong),
    ],
    1: [
        Field('ID_byte_0', Byte),
        Field('ID_byte_1', Byte),
        Field('ID_byte_2', Byte),
        Field('ID_byte_3', Byte),
        Field('ID_byte_4', Byte),
        Field('ID_byte_5', Byte),
        Field('ID_byte_6', Byte),
        Field('ID_byte_7', Byte),
    ],
    2: [
        Field('map_number', UByte),
        Field('reserved_1', UWord),
        Field('reserved_2', ULong),
    ],
    6: [
        Field('major_OS_version', UByte),
        Field('minor_OS_version', UByte),
        Field('OS_revision_version', UByte),
        Field('script_version', UWord),
        Field('reserved', UShort),
    ],
    7: [
        Field('UTC_offset', Short, unit='s'),
        Field('reserved_1', UShort),
        Field('reserved_2', UWord),
        # CPU load is transmitted in 0.4 % steps.
        Field('CPU_load', UByte, decode_value=lambda v: v * 0.4, unit='%'),
    ],
    # Channels 8-10 carry the lever arms of measurement points A, B and C.
    8: lever_arm_struct('A'),
    9: lever_arm_struct('B'),
    10: lever_arm_struct('C'),
    15: [
        Field('UDP_command_characters_received', UShort),
        Field('UDP_command_packets_received', UShort),
        Field('UDP_command_characters_skipped', UShort),
        Field('UDP_command_errors', UShort),
    ],
}
# Field layout of the RCOM lane packet (sync byte 0x57, type 0x01).  The
# 8-byte block at offset 49 varies with the preceding status_channel byte and
# is described by rcom_lane_status.
RCOM_lane = Packet([
    Field('sync', UByte),  # 0x57
    Field('packet_type', UByte),  # 0x01
    Field('length_of_data_section', UShort),
    Field('GPS_time_into_minute', UShort, decode_value=convert_10_pow_m3, unit='s'),
    Field('line_number_to_left_of_A', UByte),
    Field('line_number_to_right_of_A', UByte),
    Field('distance_along_line_1', Long, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_to_left_of_A', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_velocity_to_left_of_A', Short, decode_value=convert_10_pow_m2, unit='m/s'),
    Field('lateral_acceleration_to_left_of_A', Short, decode_value=convert_10_pow_m2, unit='m/s^2'),
    Field('lateral_distance_to_right_of_A', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_velocity_to_right_of_A', Short, decode_value=convert_10_pow_m2, unit='m/s'),
    Field('lateral_acceleration_to_right_of_A', Short, decode_value=convert_10_pow_m2, unit='m/s^2'),
    Field('lateral_distance_from_point_A_to_line_1', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_A_to_line_2', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_A_to_line_3', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_A_to_line_4', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_A_to_line_5', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_A_to_line_6', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_A_to_line_7', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_A_to_line_8', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_B_to_line_on_left_of_A', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_C_to_line_on_right_of_A', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('line_on_left_of_point_B', UByte),
    Field('line_on_right_of_point_B', UByte),
    Field('line_on_left_of_point_C', UByte),
    Field('line_on_right_of_point_C', UByte),
    Field('reserved', UByte),
    Field('status_channel', UByte, decode_value=decode_enum(rcom_lane_status_channel)),
    VariableBlock(selector=49, name='status', size=8, structure=rcom_lane_status),
    Field('lateral_velocity_from_point_A_to_line_1', Short, decode_value=convert_10_pow_m2, unit='m/s'),
    Field('lateral_velocity_from_point_A_to_line_2', Short, decode_value=convert_10_pow_m2, unit='m/s'),
    Field('lateral_velocity_from_point_A_to_line_3', Short, decode_value=convert_10_pow_m2, unit='m/s'),
    Field('lateral_velocity_from_point_A_to_line_4', Short, decode_value=convert_10_pow_m2, unit='m/s'),
    Field('lateral_velocity_from_point_A_to_line_5', Short, decode_value=convert_10_pow_m2, unit='m/s'),
    Field('lateral_velocity_from_point_A_to_line_6', Short, decode_value=convert_10_pow_m2, unit='m/s'),
    Field('lateral_velocity_from_point_A_to_line_7', Short, decode_value=convert_10_pow_m2, unit='m/s'),
    Field('lateral_velocity_from_point_A_to_line_8', Short, decode_value=convert_10_pow_m2, unit='m/s'),
    Field('lateral_distance_from_point_B_to_line_1', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_B_to_line_2', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_B_to_line_3', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_B_to_line_4', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_B_to_line_5', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_B_to_line_6', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_B_to_line_7', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_B_to_line_8', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_C_to_line_1', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_C_to_line_2', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_C_to_line_3', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_C_to_line_4', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_C_to_line_5', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_C_to_line_6', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_C_to_line_7', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('lateral_distance_from_point_C_to_line_8', Short, decode_value=convert_10_pow_m3, unit='m'),
    Field('curvature_of_line_1', Short, decode_value=convert_10_pow_m4, unit='1/m'),
    Field('curvature_of_line_2', Short, decode_value=convert_10_pow_m4, unit='1/m'),
    Field('curvature_of_line_3', Short, decode_value=convert_10_pow_m4, unit='1/m'),
    Field('curvature_of_line_4', Short, decode_value=convert_10_pow_m4, unit='1/m'),
    Field('curvature_of_line_5', Short, decode_value=convert_10_pow_m4, unit='1/m'),
    Field('curvature_of_line_6', Short, decode_value=convert_10_pow_m4, unit='1/m'),
    Field('curvature_of_line_7', Short, decode_value=convert_10_pow_m4, unit='1/m'),
    Field('curvature_of_line_8', Short, decode_value=convert_10_pow_m4, unit='1/m'),
    Field('curvature_of_point_A', Short, decode_value=convert_10_pow_m4, unit='1/m'),
    Field('curvature_of_point_B', Short, decode_value=convert_10_pow_m4, unit='1/m'),
    Field('curvature_of_point_C', Short, decode_value=convert_10_pow_m4, unit='1/m'),
    Field('heading_with_respect_to_line_on_left_of_A', Short, decode_value=convert_10_pow_m2, unit='deg'),
    Field('heading_with_respect_to_line_on_right_of_A', Short, decode_value=convert_10_pow_m2, unit='deg'),
    Field('checksum', UByte),
# NOTE(review): the trailing "| /rt_range-..." text below looks like
# dataset-extraction contamination appended to the source line — confirm.
]) | /rt_range-0.3.1.tar.gz/rt_range-0.3.1/src/rt_range/ethernet/rcom/lane_packet.py | 0.690455 | 0.263365 | lane_packet.py | pypi |
from rt_range.common import convert_10_pow_m1, convert_10_pow_m2, convert_10_pow_m4, convert_10_pow_m6, validity_bit0
from rt_range.ethernet.rt_types import Field, Byte, UByte, Short, UShort, Long, ULong
from rt_range.ethernet.status_definitions import decode_enum, ncom_position_velocity_orientation_mode, ncom_blended_processing_methods, ncom_ptp_status
def validity_bit0_pow_m1(value: int):
    """Apply the bit-0 validity decoding, then scale the result by 10^-1."""
    checked = validity_bit0(value)
    return convert_10_pow_m1(checked)
# Trigger timestamp fields shared by the asynchronous status channel(s).
async_packet = [
    Field('trigger_minute', Long, unit='min'),
    Field('trigger_millisecond', UShort, unit='ms'),
    # NOTE(review): 4 * 10e-6 evaluates to 4e-5, not 4e-6; with the declared
    # unit 'us' the intended scale looks questionable — confirm against the
    # NCOM manual's trigger-time resolution.
    Field('trigger_microsecond', Byte, decode_value=lambda v: v * 4 * 10e-6, unit='us'),
    Field('trigger_count', UByte),
]
# Layout of the NCOM "Batch S" status block, keyed by the status channel
# number.  Each entry lists the fields packed into the block for that channel.
BatchS = { # TODO: Block definitions for other status_channel values
    0: [
        Field('time_since_gps', Long, unit='min'),
        Field('num_satellites', UByte),
        Field('position_mode', UByte, decode_value=decode_enum(ncom_position_velocity_orientation_mode)),
        Field('velocity_mode', UByte, decode_value=decode_enum(ncom_position_velocity_orientation_mode)),
        Field('orientation_mode', UByte, decode_value=decode_enum(ncom_position_velocity_orientation_mode)),
    ],
    1: [
        # Kalman filter innovations; bit 0 of each byte is a validity flag,
        # the remainder is scaled by 10^-1 (see validity_bit0_pow_m1).
        Field('position_x_innovation', UByte, decode_value=validity_bit0_pow_m1),
        Field('position_y_innovation', UByte, decode_value=validity_bit0_pow_m1),
        Field('position_z_innovation', UByte, decode_value=validity_bit0_pow_m1),
        Field('velocity_x_innovation', UByte, decode_value=validity_bit0_pow_m1),
        Field('velocity_y_innovation', UByte, decode_value=validity_bit0_pow_m1),
        Field('velocity_z_innovation', UByte, decode_value=validity_bit0_pow_m1),
        Field('orientation_pitch_innovation', UByte, decode_value=validity_bit0_pow_m1),
        Field('orientation_heading_innovation', UByte, decode_value=validity_bit0_pow_m1),
    ],
    2: [
        Field('primary_GNSS_characters_received', UShort),
        Field('primary_GNSS_packets_received', UShort),
        Field('primary_GNSS_characters_skipped', UShort),
        Field('primary_GNSS_packets_skipped', UShort),
    ],
    3: [
        Field('north_position_accuracy', UShort, unit='mm'),
        Field('east_position_accuracy', UShort, unit='mm'),
        Field('down_position_accuracy', UShort, unit='mm'),
        Field('age', UByte),
        Field('ABD_robot_UMAC_interface_status', UByte),
    ],
    4: [
        Field('north_velocity_accuracy', UShort, unit='mm/s'),
        Field('east_velocity_accuracy', UShort, unit='mm/s'),
        Field('down_velocity_accuracy', UShort, unit='mm/s'),
        Field('age', UByte),
        Field('processing_method', UByte, decode_value=decode_enum(ncom_blended_processing_methods)),
    ],
    5: [
        Field('heading_accuracy', UShort, unit='mm'),
        Field('pitch_accuracy', UShort, unit='mm'),
        Field('roll_accuracy', UShort, unit='mm'),
        Field('age', UByte),
        Field('reserved', UByte),
    ],
    6: [
        Field('gyro_bias_x', Short, decode_value=convert_10_pow_m4, unit='rad/s'),
        Field('gyro_bias_y', Short, decode_value=convert_10_pow_m4, unit='rad/s'),
        Field('gyro_bias_z', Short, decode_value=convert_10_pow_m4, unit='rad/s'),
        Field('age', UByte),
        Field('L1_L2_GPS_measurements_decoded_primary_receiver', UByte),
    ],
    7: [
        Field('accelerometer_bias_x', Short, decode_value=convert_10_pow_m1, unit='mm/s^2'),
        Field('accelerometer_bias_y', Short, decode_value=convert_10_pow_m1, unit='mm/s^2'),
        Field('accelerometer_bias_z', Short, decode_value=convert_10_pow_m1, unit='mm/s^2'),
        Field('age', UByte),
        Field('L1_L2_GPS_measurements_decoded_secondary_receiver', UByte),
    ],
    8: [
        Field('gyro_scale_factor_x', Short, decode_value=convert_10_pow_m4, unit='%'),
        Field('gyro_scale_factor_y', Short, decode_value=convert_10_pow_m4, unit='%'),
        Field('gyro_scale_factor_z', Short, decode_value=convert_10_pow_m4, unit='%'),
        Field('age', UByte),
        Field('L1_L2_GPS_measurements_decoded_external_receiver', UByte),
    ],
    9: [
        Field('accuracy_gyro_bias_x', Short, decode_value=convert_10_pow_m6, unit='rad/s'),
        Field('accuracy_gyro_bias_y', Short, decode_value=convert_10_pow_m6, unit='rad/s'),
        Field('accuracy_gyro_bias_z', Short, decode_value=convert_10_pow_m6, unit='rad/s'),
        Field('age', UByte),
        Field('L1_L2_GLONASS_measurements_decoded_primary_receiver', UByte),
    ],
    10: [
        Field('accuracy_accelerometer_bias_x', Short, decode_value=convert_10_pow_m2, unit='mm/s^2'),
        Field('accuracy_accelerometer_bias_y', Short, decode_value=convert_10_pow_m2, unit='mm/s^2'),
        Field('accuracy_accelerometer_bias_z', Short, decode_value=convert_10_pow_m2, unit='mm/s^2'),
        Field('age', UByte),
        Field('L1_L2_GLONASS_measurements_decoded_secondary_receiver', UByte),
    ],
    11: [
        Field('accuracy_gyro_scale_factor_x', Short, decode_value=convert_10_pow_m4, unit='%'),
        Field('accuracy_gyro_scale_factor_y', Short, decode_value=convert_10_pow_m4, unit='%'),
        Field('accuracy_gyro_scale_factor_z', Short, decode_value=convert_10_pow_m4, unit='%'),
        Field('age', UByte),
        Field('L1_L2_GLONASS_measurements_decoded_external_receiver', UByte),
    ],
    12: [
        Field('distance_to_primary_GNSS_x', Short, unit='mm'),
        Field('distance_to_primary_GNSS_y', Short, unit='mm'),
        Field('distance_to_primary_GNSS_z', Short, unit='mm'),
        Field('age', UByte),
        Field('reserved', UByte),
    ],
    13: [
        Field('heading_GNSS_antennas', Short, decode_value=convert_10_pow_m4, unit='rad'),
        Field('pitch_GNSS_antennas', Short, decode_value=convert_10_pow_m4, unit='rad'),
        Field('distance_GNSS_antennas', Short, unit='mm'),
        Field('age', UByte),
        Field('number_GPS_satellites_in_heading_module', UByte),
    ],
    14: [
        Field('accuracy_distance_to_primary_GNSS_x', Short, decode_value=convert_10_pow_m1, unit='mm'),
        Field('accuracy_distance_to_primary_GNSS_y', Short, decode_value=convert_10_pow_m1, unit='mm'),
        Field('accuracy_distance_to_primary_GNSS_z', Short, decode_value=convert_10_pow_m1, unit='mm'),
        Field('age', UByte),
        Field('number_satellites_in_position_solution', UByte),
    ],
    15: [
        Field('accuracy_heading_GNSS_antennas', Short, decode_value=convert_10_pow_m4, unit='rad'),
        Field('accuracy_pitch_GNSS_antennas', Short, decode_value=convert_10_pow_m4, unit='rad'),
        Field('accuracy_distance_GNSS_antennas', Short, unit='mm'),
        Field('age', UByte),
        Field('number_GLONASS_satellites_in_heading_module', UByte),
    ],
    16: [
        Field('vehicle_heading_INS', Short, decode_value=convert_10_pow_m4, unit='rad'),
        Field('vehicle_pitch_INS', Short, decode_value=convert_10_pow_m4, unit='rad'),
        Field('vehicle_roll_INS', Short, decode_value=convert_10_pow_m4, unit='rad'),
        Field('validity', UByte),
        Field('UTC_time_offset', UByte, decode_value=validity_bit0, unit='s'),
    ],
    17: [
        Field('secondary_GNSS_characters_received', UShort),
        Field('secondary_GNSS_packets_received', UShort),
        Field('secondary_GNSS_characters_skipped', UShort),
        Field('secondary_GNSS_packets_skipped', UShort),
    ],
    18: [
        Field('IMU_characters_received', ULong),
        Field('IMU_packets_received', UShort),
        Field('IMU_characters_skipped', UShort),
    ],
    19: [
        Field('ID_byte_0', Byte),
        Field('ID_byte_1', Byte),
        Field('ID_byte_2', Byte),
        Field('ID_byte_3', Byte),
        Field('ID_byte_4', Byte),
        Field('ID_byte_5', Byte),
        Field('ID_byte_6', Byte),
        Field('ID_byte_7', Byte),
    ],
    20: [
        Field('differential_corrections_age', Short),
        Field('station_ID_byte_0', Byte),
        Field('station_ID_byte_1', Byte),
        Field('station_ID_byte_2', Byte),
        Field('station_ID_byte_3', Byte),
        Field('reserved', UShort),
    ],
    21: [
        Field('disk_space_remaining', Long, unit='kB'),
        Field('current_data_file_size', Long, unit='kB'),
    ],
    22: [
        Field('time_mismatch_counter', UShort),
        Field('IMU_time_difference', UByte, unit='ms'),
        Field('IMU_time_margin', UByte, unit='ms'),
        Field('IMU_loop_time', UShort, unit='ms'),
        Field('output_loop_time', UShort, unit='ms'),
    ],
    23: [
        Field('blended_navigation_lag_time', UShort, unit='ms'),
        Field('INS_running_time', UShort), # TODO: Unit depends on value
        Field('number_consecutive_GPS_position_updates_rejected', UByte),
        Field('number_consecutive_GPS_velocity_updates_rejected', UByte),
        Field('number_consecutive_GPS_orientation_updates_rejected', UByte),
        Field('PTP_status', UByte, decode_value=decode_enum(ncom_ptp_status))
    ],
    24: async_packet,
    # 25: Reserved,
    26: [
        Field('output_displacement_lever_arm_x', Short, unit='mm'),
        Field('output_displacement_lever_arm_y', Short, unit='mm'),
        Field('output_displacement_lever_arm_z', Short, unit='mm'),
        Field('validity', UByte),
        Field('Reserved', UByte),
    ],
# NOTE(review): the trailing "| /rt_range-..." text below looks like
# dataset-extraction contamination appended to the source line — confirm.
} | /rt_range-0.3.1.tar.gz/rt_range-0.3.1/src/rt_range/ethernet/ncom/batch_s.py | 0.459561 | 0.205615 | batch_s.py | pypi |
<p align="center">
<img src="https://raw.githubusercontent.com/qurit/rt-utils/main/src/rt-utils-logo.png" height="300"/>
</p>
<p align="center">
<em>A minimal Python library for RT Struct manipulation</em>
</p>
<p align="center">
<img src="https://github.com/qurit/rt-utils/workflows/Python%20application/badge.svg" height="18">
<img src="https://img.shields.io/pypi/pyversions/rt-utils" alt="Python version" height="18">
<a href="https://pypi.org/project/rt-utils"><img src="https://badge.fury.io/py/rt-utils.svg" alt="PyPI version" height="18"></a>
<img alt="PyPI - License" src="https://img.shields.io/github/license/qurit/rt-utils?color=g" height="18" />
</p>
---
RT-Utils is designed to allow physicians and other users to view the results of segmentation performed on a series of DICOM images. RT-Utils allows you to create or load RT Structs, extract 3D masks from RT Struct ROIs, easily add one or more regions of interest, and save the resulting RT Struct in just a few lines!
## How it works
RT-Utils provides a builder class to facilitate the creation and loading of an RT Struct. From there, you can add ROIs through binary masks and optionally input the colour of the region along with the region name.
The format for the ROI mask is a 3D numpy array of type bool. It is an array of 2D binary masks, one plane for each slice location within the DICOM series. The slices should be sorted in ascending order within the mask. Through these masks, we extract the contours of the regions of interest and place them within the RT Struct file. Note that there is currently only support for the use of one frame of reference UID and structured set ROI sequence. Also note that holes within the ROI may be handled poorly.
## Installation
```
pip install rt_utils
```
## Creating new RT Structs
```Python
from rt_utils import RTStructBuilder
# Create new RT Struct. Requires the DICOM series path for the RT Struct.
rtstruct = RTStructBuilder.create_new(dicom_series_path="./testlocation")
# ...
# Create mask through means such as ML
# ...
# Add the 3D mask as an ROI.
# The colour, description, and name will be auto generated
rtstruct.add_roi(mask=MASK_FROM_ML_MODEL)
# Add another ROI, this time setting the color, description, and name
rtstruct.add_roi(
mask=MASK_FROM_ML_MODEL,
color=[255, 0, 255],
name="RT-Utils ROI!"
)
rtstruct.save('new-rt-struct')
```
## Adding to existing RT Structs
```Python
from rt_utils import RTStructBuilder
import matplotlib.pyplot as plt
# Load existing RT Struct. Requires the series path and existing RT Struct path
rtstruct = RTStructBuilder.create_from(
dicom_series_path="./testlocation",
rt_struct_path="./testlocation/rt-struct.dcm"
)
# Add ROI. This is the same as the above example.
rtstruct.add_roi(
mask=MASK_FROM_ML_MODEL,
color=[255, 0, 255],
name="RT-Utils ROI!"
)
rtstruct.save('new-rt-struct')
```
## Creation Results
<p align="center">
<img src="https://raw.githubusercontent.com/qurit/rt-utils/main/src/contour.png" width="1000"/>
</p>
<p align="center">
The results of a generated ROI with a dummy mask, as viewed in Slicer.
</p>
<p align="center">
<img src="https://raw.githubusercontent.com/qurit/RT-Utils/main/src/liver-contour.png" width="1000"/>
</p>
<p align="center">
The results of a generated ROI with a liver segmentation model, as viewed in Slicer. (Note the underlying patient data has been hidden)
</p>
## Loading an existing RT Struct contour as a mask
```Python
from rt_utils import RTStructBuilder
import matplotlib.pyplot as plt
# Load existing RT Struct. Requires the series path and existing RT Struct path
rtstruct = RTStructBuilder.create_from(
dicom_series_path="./testlocation",
rt_struct_path="./testlocation/rt-struct.dcm"
)
# View all of the ROI names from within the image
print(rtstruct.get_roi_names())
# Loading the 3D Mask from within the RT Struct
mask_3d = rtstruct.get_roi_mask_by_name("ROI NAME")
# Display one slice of the region
first_mask_slice = mask_3d[:, :, 0]
plt.imshow(first_mask_slice)
plt.show()
```
## Loading Results
<p align="center">
<img src="https://raw.githubusercontent.com/qurit/rt-utils/main/src/loaded-mask.png" height="300"/>
</p>
<p align="center">
The results of loading an existing ROI as a mask, as viewed in Python.
</p>
## Additional Parameters
The add_roi method of our RTStruct class has a multitude of optional parameters available. Below is a comprehensive list of all these parameters and what they do.
- <b>color</b>: This parameter can either be a colour string such as '#ffffff' or a RGB value as a list such as '[255, 255, 255]'. This parameter will dictate the colour of your ROI when viewed in a viewing program. If no colour is provided, RT Utils will pick from our internal colour palette based on the ROI Number of the ROI.
- <b>name</b>: A str value that defaults to none. Used to set the name of the ROI within the RT Struct. If the name is none, RT Utils will set a name of ROI-{ROI Number}.
- <b>description</b>: A str value that sets the description of the ROI within the RT Struct. If no value is provided, the description is just left blank.
- <b>use_pin_hole</b>: A boolean value that defaults to false. If set to true, lines will be erased through your mask such that each separate region within your image can be encapsulated via a single contour instead of contours nested within one another. Use this if your RT Struct viewer of choice does not support nested contours / contours with holes.
- <b>approximate_contours</b>: A boolean value that defaults to True which defines whether or not approximations are made when extracting contours from the input mask. Setting this to false will lead to much larger contour data within your RT Struct so only use this if as much precision as possible is required.
- <b>roi_generation_algorithm</b>: An enum value that defaults to 0 which defines what ROI generation algorithm will be used. 0=\'AUTOMATIC\', 1=\'SEMIAUTOMATIC\', or 2=\'MANUAL\'.
| /rt-utils-1.2.7.tar.gz/rt-utils-1.2.7/README.md | 0.822546 | 0.966156 | README.md | pypi |
from typing import List, Union
import numpy as np
from pydicom.dataset import FileDataset
from rt_utils.utils import ROIData
from . import ds_helper, image_helper
class RTStruct:
"""
Wrapper class to facilitate appending and extracting ROI's within an RTStruct
"""
    def __init__(self, series_data, ds: FileDataset, ROIGenerationAlgorithm=0):
        # series_data: the ordered DICOM image slices this struct refers to.
        self.series_data = series_data
        self.ds = ds
        # Take the FrameOfReferenceUID from the last referenced frame of
        # reference as the struct-wide reference for all added ROIs.
        self.frame_of_reference_uid = ds.ReferencedFrameOfReferenceSequence[
            -1
        ].FrameOfReferenceUID  # Use last structured set ROI
    def set_series_description(self, description: str):
        """
        Set the series description (DICOM SeriesDescription tag) for the
        RTStruct dataset.
        """
        self.ds.SeriesDescription = description
def add_roi(
self,
mask: np.ndarray,
color: Union[str, List[int]] = None,
name: str = None,
description: str = "",
use_pin_hole: bool = False,
approximate_contours: bool = True,
roi_generation_algorithm: Union[str, int] = 0,
):
"""
Add a ROI to the rtstruct given a 3D binary mask for the ROI's at each slice
Optionally input a color or name for the ROI
If use_pin_hole is set to true, will cut a pinhole through ROI's with holes in them so that they are represented with one contour
If approximate_contours is set to False, no approximation will be done when generating contour data, leading to much larger amount of contour data
"""
# TODO test if name already exists
self.validate_mask(mask)
roi_number = len(self.ds.StructureSetROISequence) + 1
roi_data = ROIData(
mask,
color,
roi_number,
name,
self.frame_of_reference_uid,
description,
use_pin_hole,
approximate_contours,
roi_generation_algorithm,
)
self.ds.ROIContourSequence.append(
ds_helper.create_roi_contour(roi_data, self.series_data)
)
self.ds.StructureSetROISequence.append(
ds_helper.create_structure_set_roi(roi_data)
)
self.ds.RTROIObservationsSequence.append(
ds_helper.create_rtroi_observation(roi_data)
)
def validate_mask(self, mask: np.ndarray) -> bool:
if mask.dtype != bool:
raise RTStruct.ROIException(
f"Mask data type must be boolean. Got {mask.dtype}"
)
if mask.ndim != 3:
raise RTStruct.ROIException(f"Mask must be 3 dimensional. Got {mask.ndim}")
if len(self.series_data) != np.shape(mask)[2]:
raise RTStruct.ROIException(
"Mask must have the save number of layers (In the 3rd dimension) as input series. "
+ f"Expected {len(self.series_data)}, got {np.shape(mask)[2]}"
)
if np.sum(mask) == 0:
print("[INFO]: ROI mask is empty")
return True
def get_roi_names(self) -> List[str]:
"""
Returns a list of the names of all ROI within the RTStruct
"""
if not self.ds.StructureSetROISequence:
return []
return [
structure_roi.ROIName for structure_roi in self.ds.StructureSetROISequence
]
def get_roi_mask_by_name(self, name) -> np.ndarray:
"""
Returns the 3D binary mask of the ROI with the given input name
"""
for structure_roi in self.ds.StructureSetROISequence:
if structure_roi.ROIName == name:
contour_sequence = ds_helper.get_contour_sequence_by_roi_number(
self.ds, structure_roi.ROINumber
)
return image_helper.create_series_mask_from_contour_sequence(
self.series_data, contour_sequence
)
raise RTStruct.ROIException(f"ROI of name `{name}` does not exist in RTStruct")
def save(self, file_path: str):
"""
Saves the RTStruct with the specified name / location
Automatically adds '.dcm' as a suffix
"""
# Add .dcm if needed
file_path = file_path if file_path.endswith(".dcm") else file_path + ".dcm"
try:
file = open(file_path, "w")
# Opening worked, we should have a valid file_path
print("Writing file to", file_path)
self.ds.save_as(file_path)
file.close()
except OSError:
raise Exception(f"Cannot write to file path '{file_path}'")
class ROIException(Exception):
"""
Exception class for invalid ROI masks
"""
pass | /rt-utils-1.2.7.tar.gz/rt-utils-1.2.7/rt_utils/rtstruct.py | 0.804406 | 0.52683 | rtstruct.py | pypi |
from typing import List
from pydicom.dataset import Dataset
from pydicom.filereader import dcmread
from rt_utils.utils import SOPClassUID
from . import ds_helper, image_helper
from .rtstruct import RTStruct
class RTStructBuilder:
    """
    Class to help facilitate the two ways in one can instantiate the RTStruct wrapper
    """

    @staticmethod
    def create_new(dicom_series_path: str) -> RTStruct:
        """
        Method to generate a new rt struct from a DICOM series
        """
        series_data = image_helper.load_sorted_image_series(dicom_series_path)
        ds = ds_helper.create_rtstruct_dataset(series_data)
        return RTStruct(series_data, ds)

    @staticmethod
    def create_from(dicom_series_path: str, rt_struct_path: str, warn_only: bool = False) -> RTStruct:
        """
        Method to load an existing rt struct, given related DICOM series and existing rt struct
        """
        series_data = image_helper.load_sorted_image_series(dicom_series_path)
        ds = dcmread(rt_struct_path)
        RTStructBuilder.validate_rtstruct(ds)
        RTStructBuilder.validate_rtstruct_series_references(ds, series_data, warn_only)

        # TODO create new frame of reference? Right now we assume the last frame of reference created is suitable
        return RTStruct(series_data, ds)

    @staticmethod
    def validate_rtstruct(ds: Dataset):
        """
        Method to validate a dataset is a valid RTStruct containing the required fields

        Raises:
            Exception: when the dataset is not an RTStruct or misses a required sequence.
        """
        if (
            ds.SOPClassUID != SOPClassUID.RTSTRUCT
            or not hasattr(ds, "ROIContourSequence")
            or not hasattr(ds, "StructureSetROISequence")
            or not hasattr(ds, "RTROIObservationsSequence")
        ):
            raise Exception("Please check that the existing RTStruct is valid")

    @staticmethod
    def validate_rtstruct_series_references(ds: Dataset, series_data: List[Dataset], warn_only: bool = False):
        """
        Method to validate RTStruct only references dicom images found within the input series_data
        """
        for refd_frame_of_ref in ds.ReferencedFrameOfReferenceSequence:
            # Study sequence references are optional so return early if it does not exist
            if "RTReferencedStudySequence" not in refd_frame_of_ref:
                return

            for rt_refd_study in refd_frame_of_ref.RTReferencedStudySequence:
                for rt_refd_series in rt_refd_study.RTReferencedSeriesSequence:
                    for contour_image in rt_refd_series.ContourImageSequence:
                        RTStructBuilder.validate_contour_image_in_series_data(
                            contour_image, series_data, warn_only
                        )

    @staticmethod
    def validate_contour_image_in_series_data(
        contour_image: Dataset, series_data: List[Dataset], warning_only: bool = False
    ):
        """
        Method to validate that the ReferencedSOPInstanceUID of a given contour image exists within the series data

        With warning_only=True a mismatch only emits a warning instead of raising.
        """
        # Local import fixes a NameError: 'warnings' was used below but never
        # imported at module level.
        import warnings

        for series in series_data:
            if contour_image.ReferencedSOPInstanceUID == series.SOPInstanceUID:
                return

        # ReferencedSOPInstanceUID is NOT available
        msg = f"Loaded RTStruct references image(s) that are not contained in input series data. " \
              f"Problematic image has SOP Instance Id: {contour_image.ReferencedSOPInstanceUID}"
        if warning_only:
            warnings.warn(msg)
        else:
            raise Exception(msg)
def makeFolderMigrator(context, src_type, dst_type):
    """Generate an in-place migrator class for an AT-based folderish portal type."""
    from Products.contentmigration.archetypes import InplaceATFolderMigrator

    # Build the migrator class dynamically: same name and class attributes as
    # an explicit class statement, expressed through type().
    return type('ATFolderMigrator', (InplaceATFolderMigrator,),
                {'src_portal_type': src_type,
                 'dst_portal_type': dst_type})
def makeContentMigrator(context, src_type, dst_type):
    """Generate an in-place migrator class for an AT-based (non-folderish) portal type."""
    from Products.contentmigration.archetypes import InplaceATItemMigrator

    # Same dynamic construction as makeFolderMigrator, with the item base class.
    return type('ATContentMigrator', (InplaceATItemMigrator,),
                {'src_portal_type': src_type,
                 'dst_portal_type': dst_type})
def migrateContents(context, src_type, dst_type):
    """Migrate every content object of portal_type ``src_type`` to ``dst_type``.

    Picks a folderish or an item migrator depending on whether ``src_type``
    is a structural folder, then walks the catalog with contentmigration's
    CustomQueryWalker and returns a dict with the walker's errors, output
    lines and migrated-object counter.
    """
    from Products.contentmigration.walker import CustomQueryWalker
    #BBB: i can't find a better way to know if a given portal_type is folderish or not
    is_folderish = False
    # Create a throwaway instance through portal_factory just to inspect it.
    temp_obj = context.restrictedTraverse('portal_factory/%s/tmp_id' % src_type)
    if temp_obj:
        plone_view = temp_obj.restrictedTraverse('@@plone')
        if plone_view.isStructuralFolder():
            is_folderish = True
    portal_types = context.portal_types
    src_infos = portal_types.getTypeInfo(src_type)
    dst_infos = portal_types.getTypeInfo(dst_type)
    if is_folderish:
        migrator = makeFolderMigrator(context,
                                      src_type,
                                      dst_type,)
    else:
        migrator = makeContentMigrator(context,
                                       src_type,
                                       dst_type,)
    # NOTE(review): both factories always return a class, so this guard can
    # never be false; kept for safety.
    if migrator:
        # Meta types come from the FTI so the walker matches catalog entries.
        migrator.src_meta_type = src_infos.content_meta_type
        migrator.dst_meta_type = dst_infos.content_meta_type
        # NOTE(review): src/dst_portal_type appear to be forwarded as walker
        # query/configuration kwargs — confirm against CustomQueryWalker's API.
        walker = CustomQueryWalker(context, migrator,
                                   src_portal_type=src_type,
                                   dst_portal_type=dst_type,
                                   use_savepoint=True)
        walker.go()
        walk_infos = {'error': walker.errors,
                      'msg': walker.getOutput().splitlines(),
                      'counter': walker.counter}
        return walk_infos
from zope.interface import implements
from zope.site.hooks import getSite
from plone.uuid.interfaces import IUUID
from plone.uuid.interfaces import IUUIDAware
from rt.bulkmodify import messageFactory as _
from rt.bulkmodify.interfaces import IBulkModifyReplacementHandler
class InternalLinkToUIDUtility(object):
    """Replacement handler that rewrites absolute internal links into
    resolveuid-based links, so links survive content moves/renames.
    """
    implements(IBulkModifyReplacementHandler)

    # Optional acquisition context passed to isURLInPortal; stays None unless
    # set by callers.
    context = None

    name = _('utility_internal_link_to_uid_name',
             default=u"Convert internal links to resolveuid usage")
    description = _('utility_internal_link_to_uid_description',
                    default=u"If the match contains a group called <url> and this group is an internal link to "
                            u"a site content, let's transform it to a link using resolveuid.")

    @classmethod
    def repl(cls, match):
        """Regex replacement callback.

        If the match has a ``url`` group pointing inside the portal, replace
        that URL with ``resolveuid/<UUID>[/suffix]``; otherwise return the
        matched text unchanged.
        """
        groups = match.groupdict()
        if groups.get('url'):
            old_url = groups.get('url')
            # Skip links that already use resolveuid.
            if not old_url.startswith('resolveuid/'):
                site = getSite()
                portal_url = site.portal_url
                site_url = site.absolute_url()
                if portal_url.isURLInPortal(old_url, cls.context or None):
                    # Strip the site URL prefix to obtain a traversal path.
                    path = old_url.replace('%s/' % site_url, '', 1)
                    suffix = []
                    content = None
                    # Walk the path upwards until a UUID-aware content object
                    # is found; trailing segments (views, image scales, ...)
                    # are collected as a suffix to re-append after the UID.
                    while path:
                        content = site.unrestrictedTraverse(path, default=None)
                        if IUUIDAware.providedBy(content):
                            break
                        suffix.insert(0, path.split('/')[-1])
                        path = '/'.join(path.split('/')[:-1])
                    if content and IUUIDAware.providedBy(content):
                        uuid = IUUID(content)
                        # Leading '' yields the '/' separator when joining.
                        suffix.insert(0, '')
                        new_url = 'resolveuid/%s' % uuid + '/'.join(suffix)
                        return match.string[match.start():match.end()].replace(old_url,
                                                                               new_url)
        # No rewrite possible: return the matched text unchanged.
        return match.string[match.start():match.end()]
from plone.dexterity.interfaces import IDexterityFTI
from plone.dexterity.schema import SchemaInvalidatedEvent
from plone.registry.interfaces import IRegistry
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.interfaces import INonInstallable
from zope.component import getUtility
from zope.component import queryUtility
from zope.event import notify
from zope.interface import implementer
from zope.schema.interfaces import IVocabularyFactory
try:
from rer.sitesearch.custom_fields import IndexesValueField
from rer.sitesearch.interfaces import IRERSiteSearchSettings
except Exception:
pass
@implementer(INonInstallable)
class HiddenProfiles(object):
    """Advertise GS profiles that must not be offered for installation."""

    def getNonInstallableProfiles(self):
        """Hide uninstall profile from site-creation and quickinstaller."""
        hidden = ["rt.categorysupport:uninstall"]
        return hidden
def setRegistyIndexes(context, indexes_list):
    """Build a tuple of IndexesValueField entries for the given indexes.

    Each element of ``indexes_list`` is an (index id, index title) pair;
    entries whose id is not a portal_catalog index are silently skipped.
    """
    catalog = getToolByName(context, "portal_catalog")
    known_indexes = catalog.indexes()
    entries = []
    for index in indexes_list:
        if index[0] not in known_indexes:
            continue
        entry = IndexesValueField()
        entry.index = index[0]
        entry.index_title = index[1]
        entries.append(entry)
    return tuple(entries)
def post_install(context):
    """Post install script: enable the ICategory behavior on every dexterity
    type and, when rer.sitesearch is installed, register the taxonomies index
    in its search settings.
    """
    # get all content type of site
    factory = getUtility(
        IVocabularyFactory, "plone.app.vocabularies.PortalTypes"
    ) # noqa
    vocabulary = factory(context)
    types = [x.value for x in vocabulary]
    # add behaviors to all dexterity content type
    # NOTE(review): the loop variable shadows the builtin 'type'.
    for type in types:
        fti = queryUtility(IDexterityFTI, name=type)
        if not fti:
            # Not a dexterity type (e.g. AT types) — skip.
            continue
        behaviors = [x for x in fti.behaviors]
        behaviors.append(u"rt.categorysupport.behaviors.category.ICategory")
        fti.behaviors = tuple(behaviors)
        # invalidate schema cache
        notify(SchemaInvalidatedEvent(type))
    # check if rer.sitesearch was installed
    qi = getToolByName(context, "portal_quickinstaller")
    prods = qi.listInstallableProducts(skipInstalled=False)
    prods = [x["id"] for x in prods if x["status"] == "installed"]
    if "rer.sitesearch" in prods:
        # add taxonomies index to rer.sitesearch ordering criteria
        registry = queryUtility(IRegistry)
        settings = registry.forInterface(IRERSiteSearchSettings, check=False)
        TAXONOMIES_INDEX = [("taxonomies", "Temi"), ("Subject", "Subject")]
        indexes = setRegistyIndexes(context, TAXONOMIES_INDEX)
        settings.available_indexes = indexes
        # add the taxonomies field to the ones visible in the search view
        if "taxonomies" not in settings.indexes_order:
            settings.indexes_order += ("taxonomies",)
def uninstall(context):
    """Uninstall script: strip the ICategory behavior from every dexterity type."""
    behavior_id = "rt.categorysupport.behaviors.category.ICategory"
    # get all content types of the site
    types_factory = getUtility(
        IVocabularyFactory, "plone.app.vocabularies.PortalTypes"
    )  # noqa
    portal_type_ids = [term.value for term in types_factory(context)]
    for portal_type in portal_type_ids:
        fti = queryUtility(IDexterityFTI, name=portal_type)
        if not fti:
            # Not a dexterity type — nothing to remove.
            continue
        current = list(fti.behaviors)
        if behavior_id in current:
            current.remove(behavior_id)
        fti.behaviors = tuple(current)
        # invalidate schema cache
        notify(SchemaInvalidatedEvent(portal_type))
from AccessControl import getSecurityManager
from DateTime import DateTime
from Products.CMFCore.utils import getToolByName
from plone.app.layout.viewlets.common import ViewletBase
from plone.app.layout.viewlets.content import DocumentBylineViewlet as BaseDocumentBylineViewlet
from plone.memoize.view import memoize
from rt.lastmodifier.browser.changenote import ShowChangeNoteViewlet
from rt.lastmodifier.permissions import DocumentByLineViewAuthor
from rt.lastmodifier.permissions import DocumentByLineViewChangeNote
from rt.lastmodifier.permissions import DocumentByLineViewLastModifier
from rt.lastmodifier.permissions import DocumentByLineViewModifiedDate
from rt.lastmodifier.permissions import DocumentByLineViewPublishedDate
from rt.lastmodifier.permissions import DocumentByLineViewLongTimeFormat
from zope.component import getMultiAdapter
from zope.interface import Interface
class DocumentBylineViewlet(BaseDocumentBylineViewlet, ShowChangeNoteViewlet):
    """Document byline viewlet whose individual pieces of information
    (author, last modifier, dates, change note) are each guarded by a
    dedicated permission.
    """

    def update(self):
        super(DocumentBylineViewlet, self).update()
        sm = getSecurityManager()
        # One visibility flag per byline permission, checked at the portal root.
        self.can_see_author = sm.checkPermission(DocumentByLineViewAuthor, self.portal_state.portal())
        self.can_see_last_modifier = sm.checkPermission(DocumentByLineViewLastModifier, self.portal_state.portal())
        self.can_see_published = sm.checkPermission(DocumentByLineViewPublishedDate, self.portal_state.portal())
        self.can_see_modified = sm.checkPermission(DocumentByLineViewModifiedDate, self.portal_state.portal())
        self.can_see_change_note = sm.checkPermission(DocumentByLineViewChangeNote, self.portal_state.portal())
        self.show_long_time = sm.checkPermission(DocumentByLineViewLongTimeFormat, self.portal_state.portal())

    @memoize
    def show(self):
        # Render the viewlet only when at least one byline item is visible.
        if self.can_see_author or self.can_see_last_modifier or \
           self.can_see_modified or self.can_see_published:
            return True
        return False

    def pub_date(self):
        """Taken from recent Plone versions, to let viewlet template working also on old Plone
        """
        # check if we are allowed to display publication date
        if not self.can_see_published:
            return None
        # check if we have Effective Date set
        date = self.context.EffectiveDate()
        # EffectiveDate() may return the literal string 'None' when unset.
        if not date or date == 'None':
            return None
        return DateTime(date)

    @memoize
    def last_modifier(self):
        # check if we are allowed to display the last modifier
        if not self.can_see_last_modifier:
            return None
        # Delegate to the @@lastmodifier view, which knows where the last
        # modifier is stored on the content.
        view_last_modifier = getMultiAdapter((self.context, self.request),
                                             interface=Interface, name=u"lastmodifier")
        if view_last_modifier:
            return view_last_modifier.last_modifier()

    def modifier(self):
        """Return member info for the last modifier (falsy when unknown)."""
        membership = getToolByName(self.context, 'portal_membership')
        return membership.getMemberInfo(self.last_modifier() or '')

    def modifiername(self):
        """Full name of the last modifier, falling back to the raw user id."""
        modifier = self.modifier()
        return modifier and modifier['fullname'] or self.last_modifier()

    def modification_date(self):
        return self.context.ModificationDate()
class DocumentBylineFolderViewlet(DocumentBylineViewlet):
    """When on folders, last modifier and last modification date must be taken from
    last modified content inside the folder
    """

    def modification_date(self):
        # Ask the catalog for the single most recently modified item below
        # this folder; fall back to the folder's own date when none is found.
        catalog = getToolByName(self.context, 'portal_catalog')
        folder_path = '/'.join(self.context.getPhysicalPath())
        brains = catalog(path=folder_path,
                         sort_on='modified', sort_order='reverse', sort_limit=1)
        if not brains:
            return super(DocumentBylineFolderViewlet, self).modification_date()
        return brains[0].modified
class DocumentBylineNullViewlet(ViewletBase):
    """A byline viewlet that renders nothing at all."""

    def index(self):
        # Suppress the byline entirely by returning an empty rendering.
        return ''
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone.app.portlets.portlets import base
from plone.app.portlets.browser import formhelper
from plone.portlets.interfaces import IPortletDataProvider
from rt.maracas import _
from zope import schema
from zope.formlib import form
from zope.interface import implements
class IRtMaracasPortlet(IPortletDataProvider):
    """Schema of the maracas portlet: a title, which maracas sound to play,
    and the HTML5 audio flags (autoplay / controls / loop).
    """

    # Portlet header shown above the audio player.
    title = schema.TextLine(
        title=_(u"Title"),
        description=_(u"Title of the maracas portlet"),
        required=True
    )

    # Selects which audio asset is rendered; values map to sound files.
    maraca_type = schema.Choice(
        title=_(u"Type of maracas"),
        description=_(
            (
                u"You can choose between a vanilla maracas "
                u"or maracas with wistle"
            )
        ),
        values=(
            u'maracas_simple',
            u'maracas_whistle'
        ),
        default=u'maracas_simple',
        required=True,
    )

    # Each boolean below toggles the matching HTML5 <audio> attribute.
    autoplay = schema.Bool(
        title=_(u"Autoplay"),
        description=_(
            u"Tick this if you want to display the media with autoplay"
        ),
        required=True,
        default=False,
    )

    controls = schema.Bool(
        title=_(u"Controls"),
        description=_(
            u"Tick this if you want to display the media with controls"
        ),
        required=True,
        default=False
    )

    loop = schema.Bool(
        title=_(u"Loop"),
        description=_(
            u"Tick this if you want the media to loop"
        ),
        required=True,
        default=False
    )
class Assignment(base.Assignment):
    """
    Portlet assignment.

    This is what is actually managed through the portlets UI and associated
    with columns.
    """
    implements(IRtMaracasPortlet)

    # Class-level defaults, used by assignments created before a field existed.
    title = u""
    maraca_type = u"maracas_simple"
    autoplay = False
    controls = False
    loop = False

    def __init__(self, title=u"", maraca_type=u"maracas_simple",
                 autoplay=False, controls=False, loop=False):
        # Persist every configured value on the assignment instance.
        for attr, value in (('title', title),
                            ('maraca_type', maraca_type),
                            ('autoplay', autoplay),
                            ('controls', controls),
                            ('loop', loop)):
            setattr(self, attr, value)
class Renderer(base.Renderer):
    """Renders the maracas portlet through the base.pt template."""

    render = ViewPageTemplateFile('base.pt')

    def __init__(self, *args):
        base.Renderer.__init__(self, *args)

    def maracas(self):
        ''' Return a dict with some options
        '''
        # Map each enabled boolean option to its own name, so the template can
        # emit the matching HTML5 audio attributes.
        option_names = ('controls', 'autoplay', 'loop')
        return {name: name for name in option_names
                if getattr(self.data, name, None)}
class AddForm(formhelper.AddForm):
    """Add form for the maracas portlet; builds an Assignment from form data."""
    form_fields = form.Fields(IRtMaracasPortlet)
    label = _(u"Add Maracas Portlet")
    description = _(u"This portlet renders an audio tag to listen maracas")

    def create(self, data):
        # 'data' holds the validated values of the IRtMaracasPortlet fields.
        return Assignment(**data)
class EditForm(formhelper.EditForm):
    """Edit form for an existing maracas portlet assignment."""
    form_fields = form.Fields(IRtMaracasPortlet)
    label = _(u"Edit Maracas Portlet")
    description = _(u"This portlet renders an audio tag to listen maracas")
from zope.component import adapts, getMultiAdapter
from zope.interface import implements
from zope.publisher.interfaces.browser import IBrowserView
from zope.publisher.interfaces.browser import IDefaultBrowserLayer
from Products.Archetypes.interfaces.base import IBaseFolder, IBaseObject
from Products.ATContentTypes.interfaces import IATImage
from Products.ATContentTypes.interfaces.topic import IATTopic
from rt.simpleslider.interfaces import ISliderSource, ISliderBrain
from rt.simpleslider import SIZE
class GenericSliderSource(object):
    """Default ISliderSource adapter: one slide built from the context itself.

    Subclasses override items() / getImage() / getCaption() / getURL() for
    specific content types; getSliderImages() re-adapts each item so the
    most specific adapter handles its rendering.
    """
    implements(ISliderSource)
    adapts(IBrowserView, IBaseObject, IDefaultBrowserLayer)

    def __init__(self, view, context, request):
        self.context = context
        self.request = request
        self.view = view

    @property
    def caption_template(self):
        # Caption with a link back to the item's URL.
        return """<p class="bjqs-caption"><a href="%(url)s">%(caption)s</a></p>"""

    def items(self):
        # By default the context itself is the only slide candidate.
        yield self.context

    def getCaption(self):
        return self.context.title_or_id()

    def getImage(self):
        # No image by default; subclasses return a real <img> tag.
        return ''

    def getURL(self):
        return '#'

    def getSliderImages(self):
        """Yield {'image': ..., 'caption': ...} dicts, one per slide."""
        for item in self.items():
            # Re-adapt each item so the type-specific slider source is used.
            slide = getMultiAdapter((self.view, item, self.request),
                                    ISliderSource)
            img = slide.getImage()
            caption = {'caption': slide.getCaption(),
                       'url': slide.getURL()}
            yield {'image': img,
                   'caption': slide.caption_template % caption}
class FolderishSliderSource(GenericSliderSource):
    """Slider source for folderish AT content: one slide candidate per child."""
    implements(ISliderSource)
    adapts(IBrowserView, IBaseFolder, IDefaultBrowserLayer)

    def items(self):
        # Every directly contained object becomes a slide candidate.
        return self.context.objectValues()
class TopicSliderSource(GenericSliderSource):
    """Slider source for an ATTopic: slides come from the topic's catalog query."""
    implements(ISliderSource)
    adapts(IBrowserView, IATTopic, IDefaultBrowserLayer)

    def items(self):
        # Wrap each catalog brain and keep only the ones with a usable image.
        for result in self.context.queryCatalog():
            wrapped = BrainWrapper(result, self.context)
            if wrapped.getImage():
                yield wrapped
class ImageSliderSource(GenericSliderSource):
    """Slider source for a single ATImage: the slide is the scaled image tag."""
    implements(ISliderSource)
    adapts(IBrowserView, IATImage, IDefaultBrowserLayer)

    def getImage(self):
        # Render the image tag at the slider scale, titled with the caption.
        return self.context.tag(title=self.getCaption(), scale=SIZE)

    @property
    def caption_template(self):
        # Plain caption: linking an image slide to itself is not useful.
        return """<p class="bjqs-caption">%(caption)s</p>"""
class BrainWrapper(object):
    """Expose the ISliderBrain API on top of a catalog brain.

    The original context (e.g. the topic) is kept next to the brain so the
    slider machinery can adapt the wrapper later.
    """
    implements(ISliderBrain)

    def __init__(self, brain, context):
        self.brain = brain
        self.context = context

    @property
    def caption_template(self):
        # Images get a plain caption; any other type links back to the item.
        if self.brain.portal_type == 'Image':
            return """<p class="bjqs-caption">%(caption)s</p>"""
        else:
            return """<p class="bjqs-caption"><a href="%(url)s">%(caption)s</a></p>"""

    def getCaption(self):
        return self.brain.Title

    def getImage(self):
        """Return an <img> tag for the brain, or None when no image exists."""
        # Content with a lead image takes precedence.
        cl = getattr(self.brain, 'hasContentLeadImage', False)
        if cl:
            return '<img src="%s/leadImage_%s" title="%s"/>' % \
                (self.brain.getURL(), SIZE, self.getCaption())
        # 'Image' and 'Link' previously had two byte-identical branches;
        # merged into a single membership test.
        elif self.brain.portal_type in ('Image', 'Link'):
            return '<img src="%s/image_%s" title="%s"/>' % \
                (self.brain.getURL(), SIZE, self.getCaption())
        # Anything else has no image; callers treat a falsy value as "skip".
        return None
class BrainSliderSource(GenericSliderSource):
    """Slider source for a BrainWrapper: delegates rendering to the wrapper."""
    implements(ISliderSource)
    adapts(IBrowserView, ISliderBrain, IDefaultBrowserLayer)

    def __init__(self, view, context, request):
        # 'context' is a BrainWrapper here: unwrap it so self.context is the
        # original content context and self.brain the catalog brain.
        self.context = context.context
        self.wrapper = context
        self.brain = context.brain
        self.request = request
        self.view = view

    def getCaption(self):
        return self.wrapper.getCaption()

    def getImage(self):
        return self.wrapper.getImage()

    def getURL(self):
        # The brain knows the canonical URL of the catalogued object.
        return self.brain.getURL()

    @property
    def caption_template(self):
        return self.wrapper.caption_template
[](https://discord.gg/qUtxnK2NMf)
# Robotic Transformer 2 (RT-2): The Vision-Language-Action Model

<div align="center">
[](https://github.com/kyegomez/RT-2/issues)
[](https://github.com/kyegomez/RT-2/network)
[](https://github.com/kyegomez/RT-2/stargazers)
[](https://github.com/kyegomez/RT-2/blob/master/LICENSE)
[](https://twitter.com/intent/tweet?text=Excited%20to%20introduce%20RT-2,%20the%20all-new%20robotics%20model%20with%20the%20potential%20to%20revolutionize%20automation.%20Join%20us%20on%20this%20journey%20towards%20a%20smarter%20future.%20%23RT1%20%23Robotics&url=https%3A%2F%2Fgithub.com%2Fkyegomez%2FRT-2)
[](https://www.facebook.com/sharer/sharer.php?u=https%3A%2F%2Fgithub.com%2Fkyegomez%2FRT-2)[](https://www.linkedin.com/shareArticle?mini=true&url=https%3A%2F%2Fgithub.com%2Fkyegomez%2FRT-2&title=Introducing%20RT-2%2C%20the%20All-New%20Robotics%20Model&summary=RT-2%20is%20the%20next-generation%20robotics%20model%20that%20promises%20to%20transform%20industries%20with%20its%20intelligence%20and%20efficiency.%20Join%20us%20to%20be%20a%20part%20of%20this%20revolutionary%20journey%20%23RT1%20%23Robotics&source=)

[](https://www.reddit.com/submit?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2FRT-2&title=Exciting%20Times%20Ahead%20with%20RT-2%2C%20the%20All-New%20Robotics%20Model%20%23RT1%20%23Robotics)
[](https://news.ycombinator.com/submitlink?u=https%3A%2F%2Fgithub.com%2Fkyegomez%2FRT-2&t=Exciting%20Times%20Ahead%20with%20RT-2%2C%20the%20All-New%20Robotics%20Model%20%23RT1%20%23Robotics)
[](https://pinterest.com/pin/create/button/?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2FRT-2&media=https%3A%2F%2Fexample.com%2Fimage.jpg&description=RT-2%2C%20the%20Revolutionary%20Robotics%20Model%20that%20will%20Change%20the%20Way%20We%20Work%20%23RT1%20%23Robotics)
[](https://api.whatsapp.com/send?text=I%20just%20discovered%20RT-2,%20the%20all-new%20robotics%20model%20that%20promises%20to%20revolutionize%20automation.%20Join%20me%20on%20this%20exciting%20journey%20towards%20a%20smarter%20future.%20%23RT1%20%23Robotics%0A%0Ahttps%3A%2F%2Fgithub.com%2Fkyegomez%2FRT-2)
</div>
---
Robotic Transformer 2 (RT-2) leverages both web and robotics data to generate actionable instructions for robotic control.
[CLICK HERE FOR THE PAPER](https://robotics-transformer2.github.io/assets/rt2.pdf)
## Installation
RT-2 can be easily installed using pip:
```bash
pip install rt2
```
Additionally, you can manually install the dependencies:
```bash
pip install -r requirements.txt
```
# Usage
The `RT2` class is a PyTorch module that integrates the PALM-E model into the RT-2 class. Here are some examples of how to use it:
#### Initialization
First, you need to initialize the `RT2` class. You can do this by providing the necessary parameters to the constructor:
```python
import torch
from rt2.model import RT2
rt2 = RT2()
video = torch.randn(2, 3, 6, 224, 224)
instructions = [
"bring me an apple on that tree"
]
train_logits = rt2(video, instructions)
rt2.eval()
eval_logits = rt2(video, instructions, cond_scale=2)
```
#### Forward Pass
After initializing the RT2 class, you can perform a forward pass by calling the forward method and providing a video and optional texts:
```python
video = torch.rand((1, 3, 224, 224))
texts = ["this is a text"]
output = rt2(video, texts)
```
* The forward method returns the logits for the given video and texts.
#### Changing Parameters
You can also change the parameters of the RT2 class after initialization. For example, you can change the number of actions and action bins:
```python
rt2.num_actions = 5
rt2.action_bins = 128
```
* After changing the parameters, you can perform a forward pass as before:
```python
output = rt2(video, texts)
```
#### Saving and Loading The Model
```python
torch.save(rt2.state_dict(), 'rt3_model.pth')
```
* You can then load the model with `torch.load`:
```python
from rt2.model import RT2
model = RT2()
model.load_state_dict(torch.load('rt3_model.pth'))
```
#### Eval the Model
* Evaluate RT2 by setting it to eval mode and then performing a forward pass
```python
model.eval()
with torch.no_grad():
video = torch.randn((1, 3, 10, 224))
    texts = ["this is a test"]
output = model(video, texts)
```
## Benefits
RT-2 stands at the intersection of vision, language, and action, delivering unmatched capabilities and significant benefits for the world of robotics.
- Leveraging web-scale datasets and firsthand robotic data, RT-2 provides exceptional performance in understanding and translating visual and semantic cues into robotic control actions.
- RT-2's architecture is based on well-established models, offering a high chance of success in diverse applications.
- With clear installation instructions and well-documented examples, you can integrate RT-2 into your systems quickly.
- RT-2 simplifies the complexities of multi-domain understanding, reducing the burden on your data processing and action prediction pipeline.
## Model Architecture
RT-2 integrates a high-capacity Vision-Language model (VLM), initially pre-trained on web-scale data, with robotics data from RT-2. The VLM uses images as input to generate a sequence of tokens representing natural language text. To adapt this for robotic control, RT-2 outputs actions represented as tokens in the model’s output.
RT-2 is fine-tuned using both web and robotics data. The resultant model interprets robot camera images and predicts direct actions for the robot to execute. In essence, it converts visual and language patterns into action-oriented instructions, a remarkable feat in the field of robotic control.
# Datasets
[Head over to see some datasets they used in the paper](docs/DATASETS.md)
# Appreciation
* Anthony Brohan, Noah Brown, Justice Carbajal, Yevgen Chebotar, Xi Chen, Krzysztof Choromanski,
* Tianli Ding, Danny Driess, Avinava Dubey, Chelsea Finn, Pete Florence, Chuyuan Fu,
* Montse Gonzalez Arenas, Keerthana Gopalakrishnan, Kehang Han, Karol Hausman, Alexander Herzog,
* Jasmine Hsu, Brian Ichter, Alex Irpan, Nikhil Joshi, Ryan Julian, Dmitry Kalashnikov, Yuheng Kuang,
* Isabel Leal, Lisa Lee, Tsang-Wei Edward Lee, Sergey Levine, Yao Lu, Henryk Michalewski, Igor Mordatch,
* Karl Pertsch, Kanishka Rao, Krista Reymann, Michael Ryoo, Grecia Salazar, Pannag Sanketi,
* Pierre Sermanet, Jaspiar Singh, Anikait Singh, Radu Soricut, Huong Tran, Vincent Vanhoucke, Quan Vuong,
* Ayzaan Wahid, Stefan Welker, Paul Wohlhart, Jialin Wu, Fei Xia, Ted Xiao, Peng Xu, Sichun Xu, Tianhe Yu,
* and Brianna Zitkovich
for writing this amazing paper and advancing Humanity
* LucidRains for providing the base repositories for [PALM](https://github.com/lucidrains/PaLM-rlhf-pytorch) and [RT-1](https://github.com/kyegomez/RT-2)
* Any you yes the Human looking at this right now, I appreciate you and love you.
## Commercial Use Cases
The unique capabilities of RT-2 open up numerous commercial applications:
- **Automated Factories**: RT-2 can significantly enhance automation in factories by understanding and responding to complex visual and language cues.
- **Healthcare**: In robotic surgeries or patient care, RT-2 can assist in understanding and performing tasks based on both visual and verbal instructions.
- **Smart Homes**: Integration of RT-2 in smart home systems can lead to improved automation, understanding homeowner instructions in a much more nuanced manner.
## Examples and Documentation
Detailed examples and comprehensive documentation for using RT-2 can be found in the [examples](https://github.com/kyegomez/RT-2/tree/master/examples) directory and the [documentation](https://github.com/kyegomez/RT-2/tree/master/docs) directory, respectively.
## Contributing
Contributions to RT-2 are always welcome! Feel free to open an issue or pull request on the GitHub repository.
## License
RT-2 is provided under the MIT License. See the LICENSE file for details.
## Contact
For any queries or issues, kindly open a GitHub issue or get in touch with [kyegomez](https://github.com/kyegomez).
## Citation
```
@inproceedings{RT-2,2023,
title={},
author={Anthony Brohan, Noah Brown, Justice Carbajal, Yevgen Chebotar, Xi Chen, Krzysztof Choromanski,
Tianli Ding, Danny Driess, Avinava Dubey, Chelsea Finn, Pete Florence, Chuyuan Fu,
Montse Gonzalez Arenas, Keerthana Gopalakrishnan, Kehang Han, Karol Hausman, Alexander Herzog,
Jasmine Hsu, Brian Ichter, Alex Irpan, Nikhil Joshi, Ryan Julian, Dmitry Kalashnikov, Yuheng Kuang,
Isabel Leal, Lisa Lee, Tsang-Wei Edward Lee, Sergey Levine, Yao Lu, Henryk Michalewski, Igor Mordatch,
Karl Pertsch, Kanishka Rao, Krista Reymann, Michael Ryoo, Grecia Salazar, Pannag Sanketi,
Pierre Sermanet, Jaspiar Singh, Anikait Singh, Radu Soricut, Huong Tran, Vincent Vanhoucke, Quan Vuong,
Ayzaan Wahid, Stefan Welker, Paul Wohlhart, Jialin Wu, Fei Xia, Ted Xiao, Peng Xu, Sichun Xu, Tianhe Yu,
and Brianna Zitkovich},
year={2024}
}
``` | /rt2-0.0.6.tar.gz/rt2-0.0.6/README.md | 0.499023 | 0.866189 | README.md | pypi |
import numpy as np
import sympy as sp
from pymoo.core.problem import Problem
from pymoo.optimize import minimize
from pymoo.termination.default import MaximumGenerationTermination
from rtb_toolbox.forward_kinematics import ForwardKinematic
from rtb_toolbox.frame import translation_matrix, zyz
from rtb_toolbox.utils import matrix_log6, inverse_transformation, se3_to_vec
from scipy.optimize import minimize as minimize_scp
class InverseKinematicProblem(Problem):
    """pymoo Problem: inverse kinematics as minimization of the SE(3) error
    between the end-effector pose and a desired 4x4 homogeneous pose.
    """

    def __init__(
        self,
        desired_pose=None,
        fk: ForwardKinematic = None,
    ):
        # Joint limits become the box constraints of the search space.
        lb = [fk.links[i].limits[0] for i in range(fk.len_links)]
        ub = [fk.links[i].limits[1] for i in range(fk.len_links)]
        super().__init__(n_var=fk.len_links, n_obj=1, n_constr=0, xl=lb, xu=ub)
        # desired_pose: 4x4 homogeneous transformation matrix of the target.
        self.desired_pose = desired_pose
        self.fk = fk

    def _evaluate(self, X, out, *args, **kwargs):
        # X is a (pop_size, n_var) matrix: one candidate joint vector per row.
        iters = X.shape[0]
        F = np.zeros((iters, 1))
        fk = self.fk
        desired_pose = self.desired_pose
        for i in range(iters):
            Q = X[i, :]
            # End-effector pose reached by configuration Q.
            htm = fk.compute_ee_transformation_matrix(Q)
            i_htm = inverse_transformation(htm)
            # Relative transform from the reached pose to the desired pose.
            T_bd = i_htm @ desired_pose
            # Matrix log maps the pose error into se(3); its vector norm is a
            # combined position/orientation error magnitude.
            log_tbd = matrix_log6(T_bd)
            s = se3_to_vec(log_tbd)
            n_s = np.linalg.norm(s)
            F[i] = n_s
        out["F"] = F
def evolutive_ik(
    desired_transformation=None,
    fk: ForwardKinematic = None,
    initial_guess=None,
    max_iterations=2048,
    verbose=False,
    algorithm=None,
):
    """Solve the full-pose IK problem with an evolutionary algorithm (CMA-ES by default).

    :param desired_transformation: target pose [x, y, z, rx, ry, rz]; the
        rotation part is interpreted as ZYZ Euler angles.
    :param fk: ForwardKinematic model of the manipulator.
    :param initial_guess: currently unused and kept only for interface
        compatibility -- the population-based algorithm samples its own start
        points.  (Fixed: a random guess used to be drawn and then discarded,
        needlessly consuming global RNG state.)
    :param max_iterations: generation budget for the termination criterion.
    :param verbose: forwarded to pymoo's ``minimize``.
    :param algorithm: optional pymoo algorithm instance; defaults to CMA-ES.
    :return: tuple ``(optimal_joint_values, residual_pose_error_norm)``.
    """
    desired_rotation = zyz(desired_transformation[3], desired_transformation[4],
                           desired_transformation[5])
    desired_pose = sp.matrix2numpy(translation_matrix(desired_transformation[0], desired_transformation[1],
                                                      desired_transformation[2]) @ desired_rotation, dtype=np.float64)
    termination = MaximumGenerationTermination(
        n_max_gen=max_iterations
    )
    problem = InverseKinematicProblem(
        desired_pose=desired_pose,
        fk=fk,
    )
    if algorithm is None:
        # Imported lazily so the CMA-ES dependency is only needed when used.
        from pymoo.algorithms.soo.nonconvex.cmaes import CMAES
        algorithm = CMAES(
            sigma=.5,
            tolfun=1e-8,
            tolx=1e-8,
        )
    res = minimize(
        problem,
        algorithm,
        termination,
        verbose=verbose,
        save_history=False,
    )
    f = res.F.min()
    optimal_theta = res.X
    return optimal_theta, f
def position_ik(
    desired_position=None,
    fk: ForwardKinematic = None,
    initial_guess=None,
    f_tolerance=1e-7,
    max_iterations=1500,
    verbose=False,
):
    """Position-only inverse kinematics via BFGS on the squared position error.

    :param desired_position: target [x, y, z] of the end effector.
    :param fk: ForwardKinematic model of the manipulator.
    :param initial_guess: optional start joint vector; a random one sized to
        the robot is drawn when omitted.  (Fixed: the random guess was
        hard-coded to 6 joints, breaking non-6-DOF arms.)
    :param f_tolerance: BFGS gradient-norm tolerance (``gtol``).
    :param max_iterations: BFGS iteration budget.
    :param verbose: forwarded to scipy's ``disp`` option.
    :return: tuple ``(optimal_joint_values, residual_cost)``.
    """
    desired_position = np.array([
        [desired_position[0]],
        [desired_position[1]],
        [desired_position[2]]
    ])
    if initial_guess is None:
        initial_guess = np.random.rand(fk.len_links)
    # Fresh float copy; also accepts plain lists as the initial guess.
    theta_i = np.array(initial_guess, dtype=float)

    def cost(thetas):
        # 0.5 * ||p(theta) - p_desired||^2, returned as a true scalar
        # (the previous (1, 1)-array return relies on a deprecated
        # array-to-scalar conversion inside scipy).
        P_i = fk.compute_ee_position(thetas)
        G = P_i - desired_position
        return .5 * (G.T @ G).item()

    res = minimize_scp(
        cost,
        theta_i,
        options={
            'maxiter': max_iterations,
            'disp': verbose,
            'gtol': f_tolerance,
        },
        method='BFGS',
    )
    optimal_theta = res.x
    f = res.fun
    return optimal_theta, f
def full_ik(
    desired_transformation=None,
    fk: ForwardKinematic = None,
    initial_guess=None,
    epsilon=1e-5,
    max_iterations=1000,
    verbose=False,):
    """Full-pose inverse kinematics via BFGS on the body-twist error norm.

    :param desired_transformation: [x, y, z, rx, ry, rz] -- position of the
        end effector plus its orientation as ZYZ Euler angles.
    :param fk: ForwardKinematic model of the manipulator.
    :param initial_guess: optional start joint vector; a random one sized to
        the robot is drawn when omitted.  (Fixed: the original had a
        duplicated assignment ``initial_guess = initial_guess = ...``
        hard-coded to 6 joints.)
    :param epsilon: BFGS gradient-norm tolerance (``gtol``).
    :param max_iterations: BFGS iteration budget.
    :param verbose: forwarded to scipy's ``disp`` option.
    :return: tuple ``(optimal_joint_values, residual_error_norm)``.
    """
    if initial_guess is None:
        initial_guess = np.random.rand(fk.len_links)
    desired_rotation = zyz(desired_transformation[3], desired_transformation[4],
                           desired_transformation[5])
    desired_pose = sp.matrix2numpy(translation_matrix(desired_transformation[0], desired_transformation[1],
                                                      desired_transformation[2]) @ desired_rotation, dtype=np.float64)
    # Fresh float copy; also accepts plain lists as the initial guess.
    theta_i = np.array(initial_guess, dtype=float)

    def cost(thetas):
        # Norm of the error twist log(T_current^-1 @ T_desired).
        htm = fk.compute_ee_transformation_matrix(thetas)
        i_htm = inverse_transformation(htm)
        T_bd = i_htm @ desired_pose
        log_tbd = matrix_log6(T_bd)
        s = se3_to_vec(log_tbd)
        return np.linalg.norm(s)

    res = minimize_scp(
        cost,
        theta_i,
        options={
            'maxiter': max_iterations,
            'disp': verbose,
            'gtol': epsilon,
        },
        method='BFGS',
    )
    optimal_theta = res.x
    f = res.fun
    return optimal_theta, f
import sympy as sp
def x_rotation_matrix(yaw):
    """
    Rotation matrix around the x-axis, as a 4x4 homogeneous matrix.

    NOTE(review): this module names the x-angle "yaw", although rotation
    about x is conventionally called roll -- confirm with callers.
    """
    return sp.Matrix([[1, 0, 0, 0],
                      [0, sp.cos(yaw), -sp.sin(yaw), 0],
                      [0, sp.sin(yaw), sp.cos(yaw), 0],
                      [0, 0, 0, 1]])
def y_rotation_matrix(pitch):
    """
    Rotation matrix around the y-axis, as a 4x4 homogeneous matrix.
    """
    return sp.Matrix([[sp.cos(pitch), 0, sp.sin(pitch), 0],
                      [0, 1, 0, 0],
                      [-sp.sin(pitch), 0, sp.cos(pitch), 0],
                      [0, 0, 0, 1]])
def z_rotation_matrix(roll):
    """
    Rotation matrix around the z-axis, as a 4x4 homogeneous matrix.

    NOTE(review): this module names the z-angle "roll", although rotation
    about z is conventionally called yaw -- confirm with callers.
    """
    return sp.Matrix([[sp.cos(roll), -sp.sin(roll), 0, 0],
                      [sp.sin(roll), sp.cos(roll), 0, 0],
                      [0, 0, 1, 0],
                      [0, 0, 0, 1]])
def xyz_rotation_matrix(yaw, pitch, roll):
    """
    Rotation matrix around the x, y and z axis.

    Composition order is Rx(yaw) @ Ry(pitch) @ Rz(roll): the three arguments
    are applied to the x, y and z axes respectively, in that order.
    """
    return x_rotation_matrix(yaw) @ y_rotation_matrix(pitch) @ z_rotation_matrix(roll)
def arbitrary_vector_rotation_matrix(theta, v):
    """
    Rotation matrix around an arbitrary vector.

    Rodrigues' formula R = I + sin(theta)*K + (1 - cos(theta))*K^2.
    NOTE(review): since the identity here is 4x4, *v* appears to be expected
    as the 4x4 skew-symmetric matrix of the axis rather than the axis vector
    itself -- confirm with callers.
    """
    return sp.eye(4) + sp.sin(theta) * v + (1 - sp.cos(theta)) * v ** 2
def zyz(phi, theta, psi):
    """ZYZ Euler-angle rotation Rz(phi) @ Ry(theta) @ Rz(psi), as a 4x4 homogeneous matrix."""
    return z_rotation_matrix(phi) @ y_rotation_matrix(theta) @ z_rotation_matrix(psi)
def transformation_matrix(dx, dy, dz, roll, pitch, yaw):
    """Homogeneous transform: translation by (dx, dy, dz) composed with a rotation.

    (roll, pitch, yaw) are passed positionally into xyz_rotation_matrix, so
    roll is applied about x, pitch about y and yaw about z -- despite that
    function's own parameter names.
    """
    return translation_matrix(dx, dy, dz) @ xyz_rotation_matrix(roll, pitch, yaw)
def translation_matrix(dx, dy, dz):
    """4x4 homogeneous translation by (dx, dy, dz)."""
    T = sp.eye(4)
    T[0, 3] = dx
    T[1, 3] = dy
    T[2, 3] = dz
    return T
class Frame:
    """A coordinate frame storing a 4x4 translation matrix (``position``) and
    a 4x4 rotation matrix (``orientation``) separately."""

    def __init__(self, x, y, z, yaw=0, pitch=0, roll=0):
        self.position = translation_matrix(x, y, z)
        self.orientation = xyz_rotation_matrix(yaw, pitch, roll)

    def translate(self, dx, dy, dz):
        """Pre-multiply a translation; returns the updated position matrix."""
        self.position = translation_matrix(dx, dy, dz) @ self.position
        return self.position

    def rotate(self, yaw, pitch, roll):
        """Pre-multiply an x/y/z rotation onto the current orientation."""
        self.orientation = xyz_rotation_matrix(yaw, pitch, roll) @ self.orientation

    def rotate_around_arbitrary_vector(self, theta, v):
        """Pre-multiply a rotation of *theta* about axis *v* (see arbitrary_vector_rotation_matrix)."""
        self.orientation = arbitrary_vector_rotation_matrix(theta, v) @ self.orientation

    def get_x_component(self):
        # x translation component of the homogeneous position matrix.
        return self.position[0, 3]

    def get_y_component(self):
        # y translation component of the homogeneous position matrix.
        return self.position[1, 3]

    def get_z_component(self):
        # z translation component of the homogeneous position matrix.
        return self.position[2, 3]

    def rotation_matrix(self):
        """The frame's 4x4 orientation matrix."""
        return self.orientation

    def rotation_to(self, other):
        """Per-axis Euler-angle differences (yaw, pitch, roll) from this frame to *other*."""
        yaw = sp.atan2(other.orientation[2, 1], other.orientation[2, 2]) - sp.atan2(self.orientation[2, 1],
                                                                                    self.orientation[2, 2])
        pitch = sp.atan2(other.orientation[2, 0], other.orientation[2, 2]) - sp.atan2(self.orientation[2, 0],
                                                                                      self.orientation[2, 2])
        roll = sp.atan2(other.orientation[1, 0], other.orientation[0, 0]) - sp.atan2(self.orientation[1, 0],
                                                                                     self.orientation[0, 0])
return sp.Matrix([yaw, pitch, roll]) | /rtb_toolbox-0.1.62-py3-none-any.whl/rtb_toolbox/frame/__init__.py | 0.853425 | 0.845113 | __init__.py | pypi |
import numpy as np
import sympy as sp
from rtb_toolbox.frame import z_rotation_matrix, translation_matrix, x_rotation_matrix
def near_zero(s, epsilon=1e-6):
    """
    Return True when the value is small enough to be considered zero
    (elementwise for array input).
    :param s: The value (or array of values) to check.
    :param epsilon: The strict magnitude threshold.
    """
    magnitude = np.absolute(s)
    return magnitude < epsilon
def vec_to_so3(v):
    """
    Convert a 3-vector to its so(3) (skew-symmetric) matrix representation.
    :param v: A 3-vector (indexable by 0..2).
    :return: The 3x3 skew-symmetric matrix [v] such that [v] @ w == v x w.
    """
    x, y, z = v[0], v[1], v[2]
    return np.array([
        [0, -z, y],
        [z, 0, -x],
        [-y, x, 0],
    ])
def so3_to_vec(so3):
    """
    Convert a so(3) (skew-symmetric) matrix back to its 3-vector.
    :param so3: A 3x3 skew-symmetric numpy matrix.
    :return: The 3-vector represented by so3.
    """
    wx = so3[2, 1]
    wy = so3[0, 2]
    wz = so3[1, 0]
    return np.array([wx, wy, wz])
def se3_to_vec(se3):
    """
    Convert a 4x4 se(3) matrix into a 6-vector spatial velocity
    [wx, wy, wz, vx, vy, vz].
    :param se3: A 4x4 matrix in se(3).
    :return: The 6-vector twist it represents.
    """
    angular = [se3[2][1], se3[0][2], se3[1][0]]
    linear = [se3[0][3], se3[1][3], se3[2][3]]
    return np.array(angular + linear)
def transform_to_rotation_and_translation(T):
    """
    Split a homogeneous transformation matrix into its rotation block and
    translation vector (returned as views into T, not copies).
    :param T: A 4x4 homogeneous transformation matrix (numpy array).
    :return: (3x3 rotation matrix, 3-vector translation).
    """
    rotation = T[:3, :3]
    translation = T[:3, 3]
    return rotation, translation
def rotation_and_translation_to_transform(R, p):
    """
    Assemble a rotation matrix and a position vector into a homogeneous
    transformation matrix.
    :param R: A 3x3 rotation matrix.
    :param p: A 3-vector.
    :return: The 4x4 homogeneous transformation matrix [[R, p], [0, 0, 0, 1]].
    """
    upper = np.c_[R, p]
    bottom_row = [[0, 0, 0, 1]]
    return np.r_[upper, bottom_row]
def inverse_rotation(R):
    """
    Inverse of a rotation matrix; for orthonormal R this is simply the
    transpose.
    :param R: The rotation matrix.
    :return: The inverse of R.
    """
    return np.transpose(R)
def inverse_transformation(T):
    """
    Inverse of a homogeneous transformation matrix, computed directly from
    the rotation/translation blocks: T^-1 = [[R^T, -R^T p], [0, 0, 0, 1]].
    :param T: A 4x4 homogeneous transformation matrix.
    :return: The inverse of T.
    """
    R = T[:3, :3]
    p = T[:3, 3]
    R_inv = R.T
    p_inv = -(R_inv @ p)
    return np.r_[np.c_[R_inv, p_inv], [[0, 0, 0, 1]]]
def matrix_log3(R):
    """
    Computes the matrix log of a rotation matrix.
    :param R: A 3x3 rotation matrix
    :return: (theta, so3) -- the rotation angle and the 3x3 skew-symmetric
        log matrix (theta times the unit-axis skew matrix).
    """
    # cos(theta), via the trace identity tr(R) = 1 + 2*cos(theta).
    tr_r = (np.trace(R) - 1) / 2.0
    if tr_r >= 1:
        # theta == 0 (identity rotation): the log is the zero matrix.
        return 0, np.zeros((3, 3))
    elif tr_r <= -1:
        # theta == pi: recover the axis from whichever diagonal entry keeps
        # the denominator well away from zero.
        if not near_zero(1 + R[2][2]):
            s = (1.0 / np.sqrt(2 * (1 + R[2][2]))) * np.array([R[0][2], R[1][2], 1 + R[2][2]])
        elif not near_zero(1 + R[1][1]):
            s = (1.0 / np.sqrt(2 * (1 + R[1][1]))) * np.array([R[0][1], 1 + R[1][1], R[2][1]])
        else:
            s = (1.0 / np.sqrt(2 * (1 + R[0][0]))) * np.array([1 + R[0][0], R[1][0], R[2][0]])
        return np.pi, vec_to_so3(np.pi * s)
    else:
        # Generic case: log(R) = theta / (2*sin(theta)) * (R - R^T).
        theta = np.arccos(tr_r)
        return theta, theta / 2.0 / np.sin(theta) * (R - np.array(R).T)
def matrix_log6(T):
    """
    Computes the matrix log of a homogeneous transformation matrix.
    :param T: A matrix in SE3
    :return: The matrix log of T -- a 4x4 se(3) matrix whose bottom row is zero.
    """
    R, p = transform_to_rotation_and_translation(T)
    _, l3 = matrix_log3(R)
    if np.array_equal(l3, np.zeros((3, 3))):
        # No rotation: the log is a pure translation block.
        return np.r_[np.c_[np.zeros((3, 3)), p], [[0, 0, 0, 0]]]
    else:
        theta = np.arccos((np.trace(R) - 1) / 2.0)
        # Translation column is G^-1(theta) @ p with
        # G^-1 = I - [w]/2 + (1/theta - cot(theta/2)/2) * [w]^2 / theta.
        return np.r_[np.c_[l3, np.dot(
            np.eye(3) - l3 / 2.0 + (1.0 / theta - 1.0 / np.tan(theta / 2.0) / 2) * np.dot(l3, l3) / theta,
            p)], [[0, 0, 0, 0]]]
def compute_link_transformation(dhp, offset=0, link_type='R'):
    """Standard DH link transform Rz(theta) @ Tz(d) @ Tx(a) @ Rx(alpha).

    :param dhp: DH parameters [theta, d, a, alpha].
    :param offset: joint value, added to theta for revolute ('R') links and
        to d for prismatic ('P') links.
    :param link_type: 'R' (revolute) or 'P' (prismatic).
    """
    theta, d, a, alpha = dhp[0], dhp[1], dhp[2], dhp[3]
    if link_type == 'R':
        theta = theta + offset
    elif link_type == 'P':
        d = d + offset
    return (
        z_rotation_matrix(theta)
        @ translation_matrix(0, 0, d)
        @ translation_matrix(a, 0, 0)
        @ x_rotation_matrix(alpha)
    )
def compute_homogeneous_transformation(links, start, end):
    """Chain the transformation matrices of links[start:end].

    Returns the identity when end == 0.
    NOTE(review): in that end == 0 case the *start* argument is ignored --
    confirm callers always pass start <= end.
    """
    if end == 0:
        return sp.eye(4)
    transformation_matrix = links[start].get_transformation_matrix()
    for i in range(start + 1, end):
        transformation_matrix_i = links[i].get_transformation_matrix()
        transformation_matrix = transformation_matrix @ transformation_matrix_i
return transformation_matrix | /rtb_toolbox-0.1.62-py3-none-any.whl/rtb_toolbox/utils/__init__.py | 0.909246 | 0.893402 | __init__.py | pypi |
import sympy as sp
from rtb_toolbox.symbols import g, t
class ForwardDynamics:
    """Symbolic Euler-Lagrange dynamics of a serial manipulator.

    Builds, from a ForwardKinematic model, the joint-space equation of motion
    D(q) q'' + C(q, q') q' + G(q) = tau, exposing D, C, G and the torque
    expressions as sympy matrices.
    """

    def __init__(self, forward_kinematics):
        self.jacobian = forward_kinematics.jacobian
        self.links = forward_kinematics.links_zero_i
        self.q = sp.Matrix([link.generalized_coordinate for link in self.links])
        self.dq_dt = self.q.diff(t)
        self.d2q_dt = self.dq_dt.diff(t)
        self.len_q = len(self.q)
        # Rows 0-2 of the geometric jacobian are the angular-velocity part
        # (see ForwardKinematic.get_jacobian, which stacks J_w above J_v).
        self.w = self.jacobian[:3, :]
        D, C, G, taus = self.get_system_equations_of_motion()
        self.D = D
        self.C = C
        self.G = G
        self.taus = taus

    def get_system_equations_of_motion(self):
        """Return (D, C, G, taus) for D q'' + C q' + G = tau.

        Fixed: a list of placeholder tau_i symbols used to be created and
        immediately overwritten (dead code); it has been removed.
        """
        D, P = self.get_inertia_matrix_and_potential_energy()
        C = sp.zeros(self.len_q, self.len_q)
        G = P.diff(self.q)
        for k in range(len(self.links)):
            qk = self.q[k]
            for i in range(len(self.links)):
                qi = self.q[i]
                dki = D[k, i]
                ckj = 0
                for j in range(len(self.links)):
                    qj = self.q[j]
                    dkj = D[k, j]
                    dij = D[i, j]
                    # Christoffel symbols of the first kind, built from the
                    # partial derivatives of the inertia matrix.
                    cijk = sp.Rational(1, 2) * (dkj.diff(qi) + dki.diff(qj) - dij.diff(qk))
                    ckj += cijk * self.dq_dt[j]
                C[k, i] = ckj
        taus = D @ self.d2q_dt + C @ self.dq_dt + G
        return D, C, G, taus

    def get_inertia_matrix_and_potential_energy(self):
        """Return (D, P): the joint-space inertia matrix and total potential energy."""
        potential_energy = sp.zeros(1, 1)
        D = sp.zeros(self.len_q, self.len_q)
        G = sp.Matrix([0, -g, 0])  # gravity acceleration vector, along -y
        for i in range(len(self.links)):
            m = self.links[i].mass
            I = self.links[i].inertia_tensor
            Jvi = sp.zeros(3, len(self.q))
            Jwi = sp.zeros(3, len(self.q))
            # Link-frame origin expressed in base coordinates.
            r = self.links[i].transformation_matrix[:3, 3]
            dr_dq = [r.diff(q) for q in self.q]
            for j in range(self.len_q):
                Jvi[:, j] = dr_dq[j]
            # Only joints up to and including i contribute angular velocity.
            Jwi[:, :i + 1] = self.w[:, :i + 1]
            D += (m * Jvi.T @ Jvi) + (Jwi.T @ I @ Jwi)
            potential_energy += m * G.T @ r
        return D, potential_energy[0]
import numpy as np
import sympy as sp
from rtb_toolbox.link import Link
from rtb_toolbox.utils import compute_homogeneous_transformation
class ForwardKinematic:
    """Symbolic forward kinematics for a serial manipulator described by links.

    At construction time this builds the base-to-frame-i transforms for every
    link (``links_zero_i``), the symbolic end-effector transform and geometric
    jacobian, and fast numeric versions of each via ``sympy.lambdify``.
    """

    def __init__(self,
                 links,
                 ):
        self.links = links
        self.len_links = len(self.links)
        self.generalized_coordinates = [self.links[i].generalized_coordinate for i in range(self.len_links)]
        # Links re-expressed with transforms taken from the base frame (0) to frame i.
        self.links_zero_i = np.empty(self.len_links, dtype=Link)
        for i in range(1, self.len_links + 1):
            m = sp.Symbol(f'm_{i}')  # symbolic link mass, resolved later by the dynamics
            transformation = self.get_transformation(0, i)
            link_im = self.links[i - 1]
            I_tensor = link_im.inertia_tensor
            if I_tensor == 0:
                # No tensor supplied: build a symbolic symmetric inertia tensor
                # and rotate it into the base frame (R I R^T).
                I = sp.Matrix([
                    [sp.Symbol(f'I_{i}(xx)'), sp.Symbol(f'I_{i}(xy)'), sp.Symbol(f'I_{i}(xz)')],
                    [sp.Symbol(f'I_{i}(xy)'), sp.Symbol(f'I_{i}(yy)'), sp.Symbol(f'I_{i}(yz)')],
                    [sp.Symbol(f'I_{i}(xz)'), sp.Symbol(f'I_{i}(yz)'), sp.Symbol(f'I_{i}(zz)')],
                ])
                R = transformation[:3, :3]
                I_tensor = R@I@R.T
            if link_im.link_type == 'P':
                # Prismatic links are modelled with zero rotational inertia here.
                I_tensor = np.zeros((3, 3))
            self.links_zero_i[i - 1] = Link(
                dhp = link_im.dhp,
                mass = m,
                transformation_matrix = transformation,
                inertia_tensor = I_tensor,
                link_type = link_im.link_type,
                offset = link_im.offset,
            )
        self.ee_transformation_matrix = self.get_transformation(0, self.len_links)
        self.jacobian = self.get_jacobian()
        # Numeric callables; each takes one flat sequence of joint values.
        self.lambdify_jacobian = sp.lambdify(
            [self.generalized_coordinates],
            self.jacobian,
            modules=['numpy'],
        )
        self.lambdify_ee_transformation_matrix = sp.lambdify(
            [self.generalized_coordinates],
            self.ee_transformation_matrix,
            modules=['numpy'],
        )
        self.lambdify_ee_position = sp.lambdify(
            [self.generalized_coordinates],
            self.ee_transformation_matrix[:3, 3],
            modules=['numpy'],
        )
        self.lambdify_ee_orientation = sp.lambdify(
            [self.generalized_coordinates],
            self.ee_transformation_matrix[:3, :3],
            modules=['numpy'],
        )

    def get_transformation(self, start, end):
        """Symbolic homogeneous transform chaining links[start:end]."""
        tf = compute_homogeneous_transformation(self.links, start, end)
        return tf

    def get_ee_transformation_matrix(self):
        """Symbolic base-to-end-effector homogeneous transform."""
        return self.ee_transformation_matrix

    def compute_jacobian(self, q):
        """Numeric 6xN geometric jacobian at joint values *q*."""
        return self.lambdify_jacobian(q)

    def compute_ee_transformation_matrix(self, q):
        """Numeric 4x4 end-effector transform at joint values *q*."""
        return self.lambdify_ee_transformation_matrix(q)

    def compute_ee_position(self, q):
        """Numeric end-effector position (3x1) at joint values *q*."""
        return self.lambdify_ee_position(q)

    def compute_ee_orientation(self, q):
        """Numeric end-effector rotation matrix (3x3) at joint values *q*."""
        return self.lambdify_ee_orientation(q)

    def get_spacial_jacobian(self):
        # NOTE(review): rows 0-2 of the jacobian are the *angular* part (see
        # get_jacobian, which stacks J_w above J_v), so the names of this and
        # get_rotational_jacobian appear to be swapped -- confirm before
        # relying on them.
        return self.jacobian[:3, :]

    def get_rotational_jacobian(self):
        # NOTE(review): rows 3-5 are the linear-velocity part; see the note in
        # get_spacial_jacobian about the apparently swapped names.
        return self.jacobian[3:, :]

    def get_jacobian(self):
        """Geometric jacobian: rows 0-2 angular (J_w), rows 3-5 linear (J_v)."""
        htm = self.ee_transformation_matrix
        j = sp.zeros(6, self.len_links)
        P = htm[:3, 3]
        # Origin and z-axis of the previous joint frame; start at the base frame.
        p_i = sp.zeros(3, 1)
        z_i = sp.Matrix([0, 0, 1])
        if self.links[0].link_type == 'P':
            # NOTE(review): zeroing the base axis makes the first prismatic
            # joint contribute an all-zero column -- confirm this is intended.
            z_i = sp.zeros(3, 1)
        for i in range(self.len_links):
            p_diff = (P - p_i)
            J_vi = z_i
            J_wi = sp.zeros(3, 1)
            if self.links[i].link_type == 'R':
                # Revolute joint: linear part z x (p_ee - p_i), angular part z.
                J_vi = z_i.cross(p_diff)
                J_wi = z_i
            J = sp.Matrix([J_wi, J_vi])
            j[:, i] = J
            transformation = self.links_zero_i[i].transformation_matrix
            p_i = transformation[:3, 3]
            z_i = transformation[:3, 2]
return j | /rtb_toolbox-0.1.62-py3-none-any.whl/rtb_toolbox/forward_kinematics/__init__.py | 0.652241 | 0.267988 | __init__.py | pypi |
# pylint: disable=too-many-arguments
import dataclasses
import warnings
from datetime import date, timedelta
from json import JSONDecodeError
from types import TracebackType
from typing import (
Any,
AsyncIterable,
Dict,
Generator,
Iterable,
List,
Optional,
Type,
Union,
)
import httpx
from . import __version__ as sdk_version
from . import schema
from .exceptions import (
ApiException,
ApiRateLimitException,
ApiRequestException,
ApiVersionMismatchException,
ErrorDetails,
)
API_BASE_URL = "https://api.panel.rtbhouse.com"  # scheme + host; version path appended by build_base_url()
API_VERSION = "v5"  # pinned API version; drift triggers a warning or 410 (see _validate_response)
DEFAULT_TIMEOUT = timedelta(seconds=60.0)  # per-request HTTP timeout
MAX_CURSOR_ROWS = 10000  # page size for cursor-paged endpoints (conversions)
@dataclasses.dataclass
class BasicAuth:
    """HTTP basic-auth credentials (panel username and password)."""

    username: str
    password: str
@dataclasses.dataclass
class BasicTokenAuth:
    """Token credentials, sent as an ``Authorization: Token <token>`` header."""

    token: str
class Client:
    """
    A standard synchronous API client.
    The simplest way is to use it like:
    ```
    cli = Client(...)
    info = cli.get_user_info()
    adv = cli.get_advertiser(hash)
    cli.close()
    ```
    It's also possible to use it as context manager:
    ```
    with Client(...) as cli:
        info = cli.get_user_info()
        adv = cli.get_advertiser(hash)
    ```
    """

    def __init__(
        self,
        auth: Union[BasicAuth, BasicTokenAuth],
        timeout: timedelta = DEFAULT_TIMEOUT,
    ):
        # One underlying HTTP connection pool per Client instance.
        self._httpx_client = httpx.Client(
            base_url=build_base_url(),
            auth=_choose_auth_backend(auth),
            headers=_build_headers(),
            timeout=timeout.total_seconds(),
        )

    def close(self) -> None:
        """Release the underlying HTTP connection pool."""
        self._httpx_client.close()

    def __enter__(self) -> "Client":
        self._httpx_client.__enter__()
        return self

    def __exit__(
        self,
        exc_type: Type[BaseException],
        exc_value: BaseException,
        traceback: TracebackType,
    ) -> None:
        self._httpx_client.__exit__(exc_type, exc_value, traceback)

    def _get(self, path: str, params: Optional[Dict[str, Any]] = None) -> Any:
        """GET *path*, validate the response and unwrap its "data" envelope.

        :raises ApiException: when the body is not JSON or lacks "data".
        """
        response = self._httpx_client.get(path, params=params)
        _validate_response(response)
        try:
            resp_json = response.json()
            return resp_json["data"]
        except (ValueError, KeyError) as exc:
            raise ApiException("Invalid response format") from exc

    def _get_dict(self, path: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Like _get, but asserts the payload is a JSON object."""
        data = self._get(path, params)
        if not isinstance(data, dict):
            raise ValueError("Result is not a dict")
        return data

    def _get_list_of_dicts(self, path: str, params: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
        """Like _get, but asserts the payload is a JSON array of objects."""
        data = self._get(path, params)
        if not isinstance(data, list) or not all(isinstance(item, dict) for item in data):
            raise ValueError("Result is not a list of dicts")
        return data

    def _get_list_of_dicts_from_cursor(self, path: str, params: Dict[str, Any]) -> Iterable[Dict[str, Any]]:
        """Lazily page through a cursor-based endpoint, yielding single rows."""
        request_params = {
            "limit": MAX_CURSOR_ROWS,
        }
        request_params.update(params or {})
        while True:
            resp_data = self._get_dict(path, params=request_params)
            yield from resp_data["rows"]
            next_cursor = resp_data["nextCursor"]
            if next_cursor is None:  # last page reached
                break
            request_params["nextCursor"] = next_cursor

    def get_user_info(self) -> schema.UserInfo:
        """Account details of the authenticated user."""
        data = self._get_dict("/user/info")
        return schema.UserInfo(**data)

    def get_advertisers(self) -> List[schema.Advertiser]:
        """All advertisers visible to the authenticated user."""
        data = self._get_list_of_dicts("/advertisers")
        return [schema.Advertiser(**adv) for adv in data]

    def get_advertiser(self, adv_hash: str) -> schema.Advertiser:
        """A single advertiser identified by its hash."""
        data = self._get_dict(f"/advertisers/{adv_hash}")
        return schema.Advertiser(**data)

    def get_invoicing_data(self, adv_hash: str) -> schema.InvoiceData:
        """Invoicing section of the advertiser's client record."""
        data = self._get_dict(f"/advertisers/{adv_hash}/client")
        return schema.InvoiceData(**data["invoicing"])

    def get_offer_categories(self, adv_hash: str) -> List[schema.Category]:
        """Offer categories defined for the advertiser."""
        data = self._get_list_of_dicts(f"/advertisers/{adv_hash}/offer-categories")
        return [schema.Category(**cat) for cat in data]

    def get_offers(self, adv_hash: str) -> List[schema.Offer]:
        """Offers (products) of the advertiser."""
        data = self._get_list_of_dicts(f"/advertisers/{adv_hash}/offers")
        return [schema.Offer(**offer) for offer in data]

    def get_advertiser_campaigns(self, adv_hash: str) -> List[schema.Campaign]:
        """Campaigns belonging to the advertiser."""
        data = self._get_list_of_dicts(f"/advertisers/{adv_hash}/campaigns")
        return [schema.Campaign(**camp) for camp in data]

    def get_billing(
        self,
        adv_hash: str,
        day_from: date,
        day_to: date,
    ) -> schema.Billing:
        """Billing data for the advertiser within [day_from, day_to]."""
        data = self._get_dict(f"/advertisers/{adv_hash}/billing", {"dayFrom": day_from, "dayTo": day_to})
        return schema.Billing(**data)

    def get_rtb_creatives(
        self,
        adv_hash: str,
        subcampaigns: Union[None, List[str], schema.SubcampaignsFilter] = None,
        active_only: Optional[bool] = None,
    ) -> List[schema.Creative]:
        """RTB creatives, optionally filtered by subcampaigns and activity."""
        params = _build_rtb_creatives_params(subcampaigns, active_only)
        data = self._get_list_of_dicts(f"/advertisers/{adv_hash}/rtb-creatives", params=params)
        return [schema.Creative(**cr) for cr in data]

    def get_rtb_conversions(
        self,
        adv_hash: str,
        day_from: date,
        day_to: date,
        convention_type: schema.CountConvention = schema.CountConvention.ATTRIBUTED_POST_CLICK,
    ) -> Iterable[schema.Conversion]:
        """Lazily stream conversions (cursor-paged on the server)."""
        rows = self._get_list_of_dicts_from_cursor(
            f"/advertisers/{adv_hash}/conversions",
            params={
                "dayFrom": day_from,
                "dayTo": day_to,
                "countConvention": convention_type.value,
            },
        )
        for conv in rows:
            yield schema.Conversion(**conv)

    def get_rtb_stats(
        self,
        adv_hash: str,
        day_from: date,
        day_to: date,
        group_by: List[schema.StatsGroupBy],
        metrics: List[schema.StatsMetric],
        count_convention: Optional[schema.CountConvention] = None,
        subcampaigns: Optional[List[str]] = None,
        user_segments: Optional[List[schema.UserSegment]] = None,
        device_types: Optional[List[schema.DeviceType]] = None,
    ) -> List[schema.Stats]:
        """RTB statistics, grouped and measured as requested."""
        params = _build_rtb_stats_params(
            day_from, day_to, group_by, metrics, count_convention, subcampaigns, user_segments, device_types
        )
        data = self._get_list_of_dicts(f"/advertisers/{adv_hash}/rtb-stats", params)
        return [schema.Stats(**st) for st in data]

    def get_summary_stats(
        self,
        adv_hash: str,
        day_from: date,
        day_to: date,
        group_by: List[schema.StatsGroupBy],
        metrics: List[schema.StatsMetric],
        count_convention: Optional[schema.CountConvention] = None,
        subcampaigns: Optional[List[str]] = None,
    ) -> List[schema.Stats]:
        """Statistics from the summary-stats endpoint."""
        params = _build_summary_stats_params(day_from, day_to, group_by, metrics, count_convention, subcampaigns)
        data = self._get_list_of_dicts(f"/advertisers/{adv_hash}/summary-stats", params)
        return [schema.Stats(**st) for st in data]
class AsyncClient:
    """
    An asynchronous API client.
    Usage is the same as with synchronous client with the only difference of `await` keyword.
    ```
    cli = AsyncClient(...)
    info = await cli.get_user_info()
    await cli.close()
    ```
    """

    def __init__(
        self,
        auth: Union[BasicAuth, BasicTokenAuth],
        timeout: timedelta = DEFAULT_TIMEOUT,
    ) -> None:
        # One underlying async HTTP connection pool per AsyncClient instance.
        self._httpx_client = httpx.AsyncClient(
            base_url=build_base_url(),
            auth=_choose_auth_backend(auth),
            headers=_build_headers(),
            timeout=timeout.total_seconds(),
        )

    async def close(self) -> None:
        """Release the underlying HTTP connection pool."""
        await self._httpx_client.aclose()

    async def __aenter__(self) -> "AsyncClient":
        await self._httpx_client.__aenter__()
        return self

    async def __aexit__(
        self,
        exc_type: Type[BaseException],
        exc_value: BaseException,
        traceback: TracebackType,
    ) -> None:
        await self._httpx_client.__aexit__(exc_type, exc_value, traceback)

    async def _get(self, path: str, params: Optional[Dict[str, Any]] = None) -> Any:
        """GET *path*, validate the response and unwrap its "data" envelope.

        :raises ApiException: when the body is not JSON or lacks "data".
        """
        response = await self._httpx_client.get(path, params=params)
        _validate_response(response)
        try:
            resp_json = response.json()
            return resp_json["data"]
        except (ValueError, KeyError) as exc:
            raise ApiException("Invalid response format") from exc

    async def _get_dict(self, path: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Like _get, but asserts the payload is a JSON object."""
        data = await self._get(path, params)
        if not isinstance(data, dict):
            raise ValueError("Result is not a dict")
        return data

    async def _get_list_of_dicts(self, path: str, params: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
        """Like _get, but asserts the payload is a JSON array of objects."""
        data = await self._get(path, params)
        if not isinstance(data, list) or not all(isinstance(item, dict) for item in data):
            # Fixed: message now matches the synchronous Client
            # (was the garbled "Result is not of a list of dicts").
            raise ValueError("Result is not a list of dicts")
        return data

    async def _get_list_of_dicts_from_cursor(self, path: str, params: Dict[str, Any]) -> AsyncIterable[Dict[str, Any]]:
        """Lazily page through a cursor-based endpoint, yielding single rows."""
        request_params = {
            "limit": MAX_CURSOR_ROWS,
        }
        request_params.update(params or {})
        while True:
            resp_data = await self._get_dict(path, params=request_params)
            # `yield from` is not allowed in async generators; iterate manually.
            for row in resp_data["rows"]:
                yield row
            next_cursor = resp_data["nextCursor"]
            if next_cursor is None:  # last page reached
                break
            request_params["nextCursor"] = next_cursor

    async def get_user_info(self) -> schema.UserInfo:
        """Account details of the authenticated user."""
        data = await self._get_dict("/user/info")
        return schema.UserInfo(**data)

    async def get_advertisers(self) -> List[schema.Advertiser]:
        """All advertisers visible to the authenticated user."""
        data = await self._get_list_of_dicts("/advertisers")
        return [schema.Advertiser(**adv) for adv in data]

    async def get_advertiser(self, adv_hash: str) -> schema.Advertiser:
        """A single advertiser identified by its hash."""
        data = await self._get_dict(f"/advertisers/{adv_hash}")
        return schema.Advertiser(**data)

    async def get_invoicing_data(self, adv_hash: str) -> schema.InvoiceData:
        """Invoicing section of the advertiser's client record."""
        data = await self._get_dict(f"/advertisers/{adv_hash}/client")
        return schema.InvoiceData(**data["invoicing"])

    async def get_offer_categories(self, adv_hash: str) -> List[schema.Category]:
        """Offer categories defined for the advertiser."""
        data = await self._get_list_of_dicts(f"/advertisers/{adv_hash}/offer-categories")
        return [schema.Category(**cat) for cat in data]

    async def get_offers(self, adv_hash: str) -> List[schema.Offer]:
        """Offers (products) of the advertiser."""
        data = await self._get_list_of_dicts(f"/advertisers/{adv_hash}/offers")
        return [schema.Offer(**offer) for offer in data]

    async def get_advertiser_campaigns(self, adv_hash: str) -> List[schema.Campaign]:
        """Campaigns belonging to the advertiser."""
        data = await self._get_list_of_dicts(f"/advertisers/{adv_hash}/campaigns")
        return [schema.Campaign(**camp) for camp in data]

    async def get_billing(
        self,
        adv_hash: str,
        day_from: date,
        day_to: date,
    ) -> schema.Billing:
        """Billing data for the advertiser within [day_from, day_to]."""
        data = await self._get_dict(f"/advertisers/{adv_hash}/billing", {"dayFrom": day_from, "dayTo": day_to})
        return schema.Billing(**data)

    async def get_rtb_creatives(
        self,
        adv_hash: str,
        subcampaigns: Union[None, List[str], schema.SubcampaignsFilter] = None,
        active_only: Optional[bool] = None,
    ) -> List[schema.Creative]:
        """RTB creatives, optionally filtered by subcampaigns and activity."""
        params = _build_rtb_creatives_params(subcampaigns, active_only)
        data = await self._get_list_of_dicts(f"/advertisers/{adv_hash}/rtb-creatives", params=params)
        return [schema.Creative(**cr) for cr in data]

    async def get_rtb_conversions(
        self,
        adv_hash: str,
        day_from: date,
        day_to: date,
        convention_type: schema.CountConvention = schema.CountConvention.ATTRIBUTED_POST_CLICK,
    ) -> AsyncIterable[schema.Conversion]:
        """Lazily stream conversions (cursor-paged on the server)."""
        rows = self._get_list_of_dicts_from_cursor(
            f"/advertisers/{adv_hash}/conversions",
            params={
                "dayFrom": day_from,
                "dayTo": day_to,
                "countConvention": convention_type.value,
            },
        )
        async for conv in rows:
            yield schema.Conversion(**conv)

    async def get_rtb_stats(
        self,
        adv_hash: str,
        day_from: date,
        day_to: date,
        group_by: List[schema.StatsGroupBy],
        metrics: List[schema.StatsMetric],
        count_convention: Optional[schema.CountConvention] = None,
        subcampaigns: Optional[List[str]] = None,
        user_segments: Optional[List[schema.UserSegment]] = None,
        device_types: Optional[List[schema.DeviceType]] = None,
    ) -> List[schema.Stats]:
        """RTB statistics, grouped and measured as requested."""
        params = _build_rtb_stats_params(
            day_from, day_to, group_by, metrics, count_convention, subcampaigns, user_segments, device_types
        )
        data = await self._get_list_of_dicts(f"/advertisers/{adv_hash}/rtb-stats", params)
        return [schema.Stats(**st) for st in data]

    async def get_summary_stats(
        self,
        adv_hash: str,
        day_from: date,
        day_to: date,
        group_by: List[schema.StatsGroupBy],
        metrics: List[schema.StatsMetric],
        count_convention: Optional[schema.CountConvention] = None,
        subcampaigns: Optional[List[str]] = None,
    ) -> List[schema.Stats]:
        """Statistics from the summary-stats endpoint."""
        params = _build_summary_stats_params(day_from, day_to, group_by, metrics, count_convention, subcampaigns)
        data = await self._get_list_of_dicts(f"/advertisers/{adv_hash}/summary-stats", params)
        return [schema.Stats(**st) for st in data]
class _HttpxBasicTokenAuth(httpx.Auth):
    """Basic token auth backend.

    Adds an ``Authorization: Token <token>`` header to every request.
    """

    def __init__(self, token: str):
        self._token = token

    def auth_flow(self, request: httpx.Request) -> Generator[httpx.Request, httpx.Response, None]:
        request.headers["Authorization"] = f"Token {self._token}"
        yield request
def build_base_url() -> str:
    """Join the API host with the pinned version path segment."""
    return "/".join((API_BASE_URL, API_VERSION))
def _build_headers() -> Dict[str, str]:
    """Default headers sent with every request (identifies the SDK version)."""
    user_agent = "rtbhouse-python-sdk/" + sdk_version
    return {"user-agent": user_agent}
def _choose_auth_backend(auth: Union[BasicAuth, BasicTokenAuth]) -> httpx.Auth:
    """Map an SDK auth dataclass onto the matching httpx auth implementation.

    :raises ValueError: when *auth* is neither supported dataclass.
    """
    if isinstance(auth, BasicAuth):
        return httpx.BasicAuth(auth.username, auth.password)
    if isinstance(auth, BasicTokenAuth):
        return _HttpxBasicTokenAuth(auth.token)
    raise ValueError("Unknown auth method")
def _validate_response(response: httpx.Response) -> None:
    """Raise a typed exception for error responses; warn on API version drift.

    :raises ApiVersionMismatchException: HTTP 410 -- this client's pinned
        version is no longer served.
    :raises ApiRateLimitException: HTTP 429 -- resource usage limits reached.
    :raises ApiRequestException: any other error status.
    """
    try:
        response_data = response.json()
    except JSONDecodeError:
        error_details = None  # non-JSON body: no structured details available
    else:
        error_details = ErrorDetails(
            app_code=response_data.get("appCode"),
            errors=response_data.get("errors"),
            message=response_data.get("message"),
        )
    if response.status_code == 410:
        # 410 Gone: the pinned API_VERSION has been retired server-side.
        newest_version = response.headers.get("X-Current-Api-Version")
        raise ApiVersionMismatchException(
            f"Unsupported api version ({API_VERSION}), use newest version ({newest_version}) "
            f"by updating rtbhouse_sdk package."
        )
    if response.status_code == 429:
        raise ApiRateLimitException(
            "Resource usage limits reached",
            details=error_details,
            usage_header=response.headers.get("X-Resource-Usage"),
        )
    if response.is_error:
        raise ApiRequestException(
            error_details.message if error_details else "Unexpected error",
            details=error_details,
        )
    # Successful response, but the server may still announce a newer version.
    current_version = response.headers.get("X-Current-Api-Version")
    if current_version is not None and current_version != API_VERSION:
        warnings.warn(
            f"Used api version ({API_VERSION}) is outdated, use newest version ({current_version}) "
            f"by updating rtbhouse_sdk package."
        )
def _build_rtb_creatives_params(
subcampaigns: Union[None, List[str], schema.SubcampaignsFilter] = None,
active_only: Optional[bool] = None,
) -> Dict[str, Any]:
params: Dict[str, Any] = {}
if subcampaigns:
if isinstance(subcampaigns, schema.SubcampaignsFilter):
params["subcampaigns"] = subcampaigns.value
elif isinstance(subcampaigns, (list, tuple, set)):
params["subcampaigns"] = "-".join(str(sub) for sub in subcampaigns)
if active_only is not None:
params["activeOnly"] = active_only
return params
def _build_rtb_stats_params(
day_from: date,
day_to: date,
group_by: List[schema.StatsGroupBy],
metrics: List[schema.StatsMetric],
count_convention: Optional[schema.CountConvention] = None,
subcampaigns: Optional[List[str]] = None,
user_segments: Optional[List[schema.UserSegment]] = None,
device_types: Optional[List[schema.DeviceType]] = None,
) -> Dict[str, Any]:
params = {
"dayFrom": day_from,
"dayTo": day_to,
"groupBy": "-".join(gb.value for gb in group_by),
"metrics": "-".join(m.value for m in metrics),
}
if count_convention is not None:
params["countConvention"] = count_convention.value
if subcampaigns is not None:
params["subcampaigns"] = "-".join(str(sub) for sub in subcampaigns)
if user_segments is not None:
params["userSegments"] = "-".join(us.value for us in user_segments)
if device_types is not None:
params["deviceTypes"] = "-".join(dt.value for dt in device_types)
return params
def _build_summary_stats_params(
day_from: date,
day_to: date,
group_by: List[schema.StatsGroupBy],
metrics: List[schema.StatsMetric],
count_convention: Optional[schema.CountConvention] = None,
subcampaigns: Optional[List[str]] = None,
) -> Dict[str, Any]:
params = {
"dayFrom": day_from,
"dayTo": day_to,
"groupBy": "-".join(gb.value for gb in group_by),
"metrics": "-".join(m.value for m in metrics),
}
if count_convention is not None:
params["countConvention"] = count_convention.value
if subcampaigns is not None:
params["subcampaigns"] = "-".join(str(sub) for sub in subcampaigns)
return params | /rtbhouse_sdk-10.2.0.tar.gz/rtbhouse_sdk-10.2.0/rtbhouse_sdk/client.py | 0.795658 | 0.324262 | client.py | pypi |
import glob
import os
import shutil
import numpy as np
def copy_files_in_folder(path_to_file, new_folder):
    """Copy every file matching ``<path_to_file>.*`` into *new_folder*.

    Expects 'path_to_file' to be a path with filename, but without extension;
    all files sharing that stem (any extension) are copied.
    ``glob.escape`` guards against glob metacharacters ([, ?, *) appearing in
    the path, which previously corrupted the match pattern.
    """
    pattern = glob.escape(path_to_file) + ".*"
    for output_file in glob.glob(pattern):
        output_file_name = os.path.basename(output_file)
        shutil.copyfile(output_file, os.path.join(new_folder, output_file_name))
class ExportResultsEachPriorityMixin:
    """ Include this mixin in your optimization problem class to
    write the results for the optimization run for each priority. """

    def priority_completed(self, priority):
        """Write the current results, then archive them in a per-priority folder."""
        super().priority_completed(priority)
        self.write()
        # Move all output files to a priority-specific folder
        num_len = 3  # zero-pad priorities to three digits so folders sort naturally
        subfolder_name = "priority_{:0{}}".format(priority, num_len)
        if self.csv_ensemble_mode:
            # One archive subfolder per ensemble member listed in the ensemble CSV.
            ensemble = np.genfromtxt(
                os.path.join(self._input_folder, self.csv_ensemble_basename + ".csv"),
                delimiter=",",
                deletechars="",
                dtype=None,
                names=True,
                encoding=None,
            )
            for ensemble_member in ensemble["name"]:
                new_output_folder = os.path.join(
                    self._output_folder, ensemble_member, subfolder_name
                )
                os.makedirs(new_output_folder, exist_ok=True)
                file_to_copy_stem = os.path.join(
                    self._output_folder, ensemble_member, self.timeseries_export_basename
                )
                copy_files_in_folder(file_to_copy_stem, new_output_folder)
        else:
            new_output_folder = os.path.join(self._output_folder, subfolder_name)
            os.makedirs(new_output_folder, exist_ok=True)
            file_to_copy_stem = os.path.join(self._output_folder, self.timeseries_export_basename)
copy_files_in_folder(file_to_copy_stem, new_output_folder) | /rtc_tools_diagnostics-0.2.0-py3-none-any.whl/rtctools_diagnostics/export_results.py | 0.402627 | 0.205217 | export_results.py | pypi |
from typing import Dict
from pymoca.backends.casadi.alias_relation import AliasRelation
from .base_component_type_mixin import BaseComponentTypeMixin
from .heat_network_common import NodeConnectionDirection
from .topology import Topology
class ModelicaComponentTypeMixin(BaseComponentTypeMixin):
    """Deduce the heat network topology from a Pymoca/Modelica model.

    Uses the model's alias relations to figure out which pipes connect to
    which nodes and buffers, and which pipes are connected in series. The
    result is exposed via :py:attr:`heat_network_topology`.
    """

    def pre(self):
        # Build the Topology object (node connections, pipe series and
        # buffer connections) before handing over to the rest of the
        # pre() chain.
        components = self.heat_network_components
        nodes = components.get("node", [])
        pipes = components["pipe"]
        buffers = components.get("buffer", [])
        # Figure out which pipes are connected to which nodes, which pipes
        # are connected in series, and which pipes are connected to which buffers.
        pipes_set = set(pipes)
        parameters = [self.parameters(e) for e in range(self.ensemble_size)]
        node_connections = {}
        # Figure out if we are dealing with a Heat model, or a QTH model
        try:
            _ = self.variable(f"{pipes[0]}.HeatIn.Heat")
            heat_network_model_type = "Heat"
        except KeyError:
            heat_network_model_type = "QTH"
        for n in nodes:
            n_connections = [ens_params[f"{n}.n"] for ens_params in parameters]
            if len(set(n_connections)) > 1:
                raise Exception(
                    "Nodes cannot have differing number of connections per ensemble member"
                )
            n_connections = n_connections[0]
            # Note that we do this based on temperature, because discharge may
            # be an alias of yet some other further away connected pipe.
            node_connections[n] = connected_pipes = {}
            for i in range(n_connections):
                cur_port = f"{n}.{heat_network_model_type}Conn[{i + 1}]"
                prop = "T" if heat_network_model_type == "QTH" else "Heat"
                aliases = [
                    x
                    for x in self.alias_relation.aliases(f"{cur_port}.{prop}")
                    if not x.startswith(n) and x.endswith(f".{prop}")
                ]
                if len(aliases) > 1:
                    raise Exception(f"More than one connection to {cur_port}")
                elif len(aliases) == 0:
                    raise Exception(f"Found no connection to {cur_port}")
                # The alias ends in either the In or Out suffix of the
                # connected pipe; that determines the flow orientation
                # relative to the node.
                in_suffix = ".QTHIn.T" if heat_network_model_type == "QTH" else ".HeatIn.Heat"
                out_suffix = ".QTHOut.T" if heat_network_model_type == "QTH" else ".HeatOut.Heat"
                if aliases[0].endswith(out_suffix):
                    pipe_w_orientation = (
                        aliases[0][: -len(out_suffix)],
                        NodeConnectionDirection.IN,
                    )
                else:
                    assert aliases[0].endswith(in_suffix)
                    pipe_w_orientation = (
                        aliases[0][: -len(in_suffix)],
                        NodeConnectionDirection.OUT,
                    )
                assert pipe_w_orientation[0] in pipes_set
                connected_pipes[i] = pipe_w_orientation
        # Note that a pipe series can include both hot and cold pipes for
        # QTH models. It is only about figuring out which pipes are
        # related direction-wise.
        # For Heat models, only hot pipes are allowed to be part of pipe
        # series, as the cold part is zero heat by construction.
        if heat_network_model_type == "QTH":
            alias_relation = self.alias_relation
        elif heat_network_model_type == "Heat":
            # There is no proper AliasRelation yet (because there is heat loss in pipes).
            # So we build one, as that is the easiest way to figure out which pipes are
            # connected to each other in series. We do this by making a temporary/shadow
            # discharge (".Q") variable per pipe, as that way we can share the processing
            # logic for determining pipe series with that of QTH models.
            alias_relation = AliasRelation()
            # Look for aliases only in the hot pipes. All cold pipes are zero by convention anyway.
            hot_pipes = self.hot_pipes.copy()
            pipes_map = {f"{pipe}.HeatIn.Heat": pipe for pipe in hot_pipes}
            pipes_map.update({f"{pipe}.HeatOut.Heat": pipe for pipe in hot_pipes})
            for p in hot_pipes:
                for port in ["In", "Out"]:
                    heat_port = f"{p}.Heat{port}.Heat"
                    connected = self.alias_relation.aliases(heat_port).intersection(
                        pipes_map.keys()
                    )
                    connected.remove(heat_port)
                    if connected:
                        other_pipe_port = next(iter(connected))
                        # Two same-kind ports (In-In or Out-Out) connected
                        # means the discharges have opposite sign.
                        if other_pipe_port.endswith(f".Heat{port}.Heat"):
                            sign_prefix = "-"
                        else:
                            sign_prefix = ""
                        other_pipe = pipes_map[other_pipe_port]
                        if f"{other_pipe}.Q" not in alias_relation.canonical_variables:
                            alias_relation.add(f"{p}.Q", f"{sign_prefix}{other_pipe}.Q")
        canonical_pipe_qs = {p: alias_relation.canonical_signed(f"{p}.Q") for p in pipes}
        # Move sign from canonical to alias
        canonical_pipe_qs = {(p, d): c for p, (c, d) in canonical_pipe_qs.items()}
        # Reverse the dictionary from `Dict[alias, canonical]` to `Dict[canonical, Set[alias]]`
        pipe_sets = {}
        for a, c in canonical_pipe_qs.items():
            pipe_sets.setdefault(c, []).append(a)
        pipe_series_with_orientation = list(pipe_sets.values())
        # Check that all pipes in the series have the same orientation
        pipe_series = []
        for ps in pipe_series_with_orientation:
            if not len({orientation for _, orientation in ps}) == 1:
                raise Exception(f"Pipes in series {ps} do not all have the same orientation")
            pipe_series.append([name for name, _ in ps])
        buffer_connections = {}
        for b in buffers:
            buffer_connections[b] = []
            for k in ["In", "Out"]:
                b_conn = f"{b}.{heat_network_model_type}{k}"
                prop = "T" if heat_network_model_type == "QTH" else "Heat"
                aliases = [
                    x
                    for x in self.alias_relation.aliases(f"{b_conn}.{prop}")
                    if not x.startswith(b) and x.endswith(f".{prop}")
                ]
                if len(aliases) > 1:
                    raise Exception(f"More than one connection to {b_conn}")
                elif len(aliases) == 0:
                    raise Exception(f"Found no connection to {b_conn}")
                in_suffix = ".QTHIn.T" if heat_network_model_type == "QTH" else ".HeatIn.Heat"
                out_suffix = ".QTHOut.T" if heat_network_model_type == "QTH" else ".HeatOut.Heat"
                if aliases[0].endswith(out_suffix):
                    pipe_w_orientation = (
                        aliases[0][: -len(out_suffix)],
                        NodeConnectionDirection.IN,
                    )
                else:
                    assert aliases[0].endswith(in_suffix)
                    pipe_w_orientation = (
                        aliases[0][: -len(in_suffix)],
                        NodeConnectionDirection.OUT,
                    )
                assert pipe_w_orientation[0] in pipes_set
                # A buffer's In port must connect to a hot pipe, and its
                # Out port to a cold pipe.
                if k == "In":
                    assert self.is_hot_pipe(pipe_w_orientation[0])
                else:
                    assert self.is_cold_pipe(pipe_w_orientation[0])
                buffer_connections[b].append(pipe_w_orientation)
            buffer_connections[b] = tuple(buffer_connections[b])
        self.__topology = Topology(node_connections, pipe_series, buffer_connections)
        super().pre()

    @property
    def heat_network_components(self) -> Dict[str, str]:
        """Map of component type (e.g. "pipe", "node") to its component names.

        NOTE(review): the values are actually sorted *lists* of names, so
        ``Dict[str, List[str]]`` would describe the return type better.
        """
        try:
            return self.__hn_component_types
        except AttributeError:
            string_parameters = self.string_parameters(0)
            # Find the components in model, detection by string
            # (name.component_type: type)
            component_types = sorted({v for k, v in string_parameters.items()})
            components = {}
            for c in component_types:
                # k[:-15] strips the ".component_type" suffix (15 characters),
                # leaving just the component's name.
                components[c] = sorted({k[:-15] for k, v in string_parameters.items() if v == c})
            self.__hn_component_types = components
            return components

    @property
    def heat_network_topology(self) -> Topology:
        """The :py:class:`Topology` deduced in :py:meth:`pre`."""
        return self.__topology
import math
import numpy as np
from .constants import GRAVITATIONAL_CONSTANT
def _kinematic_viscosity(temperature):
    """Kinematic viscosity of water [m^2/s] as a function of temperature [°C].

    The kinematic viscosity barely changes with pressure, so a polynomial
    fit in temperature alone suffices. The coefficients below were deduced
    with the `iapws` package (which implements the IAPWS standard) and are
    valid for temperatures between 20 and 130 °C.

    Snippet used to produce the coefficients (useful when refitting):

    .. code-block:: python

        from iapws import IAPWS95
        import numpy as np
        res = []
        for t in np.linspace(20, 130, 1000):
            res.append((t, IAPWS95(T=273.15 + t, P=0.5).nu))
        print(np.polyfit(*zip(*res), 4))
    """
    if temperature < 20 or temperature > 130:
        raise Exception(
            "Temperature should be in the range 20 - 130 °C.\n"
            "Note that we use Celcius as the unit, not Kelvin."
        )
    # Degree-4 polynomial coefficients, highest order first.
    polynomial_coefficients = [
        7.53943453e-15,
        -3.01485854e-12,
        4.75924986e-10,
        -3.79135487e-08,
        1.58737429e-06,
    ]
    return np.polyval(polynomial_coefficients, temperature)
def _colebrook_white(reynolds, relative_roughness, friction_factor=0.015):
    """Solve the Colebrook-White equation for the friction factor.

    Fixed-point iteration starting from ``friction_factor``, converging when
    the relative change between iterations drops below 1e-6. Raises if no
    convergence is reached within 1000 iterations.
    """
    for _iteration in range(1000):
        previous = friction_factor
        reynolds_star = (
            1 / math.sqrt(8.0) * reynolds * math.sqrt(friction_factor) * relative_roughness
        )
        friction_factor = (
            1.0
            / (
                -2.0
                * math.log10(
                    2.51 / reynolds / math.sqrt(friction_factor) * (1 + reynolds_star / 3.3)
                )
            )
            ** 2
        )
        relative_change = abs(friction_factor - previous) / max(friction_factor, previous)
        if relative_change < 1e-6:
            return friction_factor
    raise Exception("Colebrook-White did not converge")
def friction_factor(velocity, diameter, wall_roughness, temperature):
    """Darcy-Weisbach friction factor for laminar, turbulent and
    transitional flow.

    :param velocity: Flow velocity [m/s], must be non-negative.
    :param diameter: Inner pipe diameter [m].
    :param wall_roughness: Absolute wall roughness [m].
    :param temperature: Water temperature [°C].
    """
    kinematic_viscosity = _kinematic_viscosity(temperature)
    reynolds = velocity * diameter / kinematic_viscosity
    assert velocity >= 0

    if velocity == 0.0 or diameter == 0.0:
        return 0.0
    if reynolds <= 2000.0:
        # Laminar regime.
        return 64.0 / reynolds
    if reynolds >= 4000.0:
        # Fully turbulent regime.
        return _colebrook_white(reynolds, wall_roughness / diameter)

    # Transitional regime: interpolate linearly between the laminar value
    # at Re = 2000 and the turbulent value at Re = 4000.
    fac_turb = _colebrook_white(4000.0, wall_roughness / diameter)
    fac_laminar = 64.0 / 2000.0
    w = (reynolds - 2000.0) / 2000.0
    return w * fac_turb + (1 - w) * fac_laminar
def head_loss(velocity, diameter, length, wall_roughness, temperature):
    """Darcy-Weisbach head loss [m] over a circular pipe of given length."""
    factor = friction_factor(velocity, diameter, wall_roughness, temperature)
    # dH = f * (L / D) * v^2 / (2 g)
    return length * factor / (2 * GRAVITATIONAL_CONSTANT) * velocity**2 / diameter
def get_linear_pipe_dh_vs_q_fit(
    diameter, length, wall_roughness, temperature, n_lines=10, v_max=2.0
):
    """Piecewise-linear fit of head loss versus discharge for a pipe.

    Samples the Darcy-Weisbach head loss at ``n_lines + 1`` equally spaced
    velocities between 0 and ``v_max``, and returns arrays ``(a, b)`` such
    that on each segment ``dH ~ a * Q + b``.
    """
    cross_section = math.pi * diameter**2 / 4
    velocities = np.linspace(0.0, v_max, n_lines + 1)
    discharges = velocities * cross_section
    losses = np.array(
        [head_loss(v, diameter, length, wall_roughness, temperature) for v in velocities]
    )
    # Slope and offset of the chord between consecutive sample points.
    slopes = np.diff(losses) / np.diff(discharges)
    offsets = losses[1:] - slopes * discharges[1:]
    return slopes, offsets
import logging
from abc import abstractmethod
from enum import IntEnum
from typing import List, Optional, Tuple, Type, Union
import casadi as ca
import numpy as np
from rtctools._internal.alias_tools import AliasDict
from rtctools.optimization.goal_programming_mixin_base import Goal, _GoalProgrammingMixinBase
from rtctools.optimization.optimization_problem import BT, OptimizationProblem
import rtctools_heat_network._darcy_weisbach as darcy_weisbach
from rtctools_heat_network.base_component_type_mixin import BaseComponentTypeMixin
from .constants import GRAVITATIONAL_CONSTANT
from .pipe_class import PipeClass
logger = logging.getLogger("rtctools_heat_network")
class HeadLossOption(IntEnum):
    r"""
    Enumeration for the possible options to take head loss in pipes into account.
    Also see :py:meth:`._HeadLossMixin.heat_network_options` for related options.

    .. note::
        Not all options are supported by :py:class:`.HeatMixin`, due to the focus
        on MILP formulations.

    NO_HEADLOSS
        The NO_HEADLOSS option assumes that there is no headloss in the pipelines.
        There are no constraints added relating the discharge to the head.

    CQ2_INEQUALITY
        As the name implies, this adds a quadratic inequality constraint between
        the head and the discharge in a pipe:

        .. math::

            dH \ge C \cdot Q^2

        This expression of the headloss requires a system-specific estimation of
        the constant C.

        As dH is always positive, a boolean is needed when flow directions are not
        fixed in a mixed-integer formulation to determine if

        .. math::

            dH = H_{up} - H_{down}

        or (when the :math:`Q < 0`)

        .. math::

            dH = H_{down} - H_{up}

    LINEARIZED_DW
        Just like ``CQ2_INEQUALITY``, this option adds inequality constraints:

        .. math::

            \Delta H \ge \vec{a} \cdot Q + \vec{b}

        with :math:`\vec{a}` and :math:`\vec{b}` the linearization coefficients.

        This approach can more easily be explained with a plot, showing the
        Darcy-Weisbach head loss, and the linear lines approximating it. Note
        that the number of supporting lines is an option that can be set by the
        user by overriding :py:meth:`._HeadLossMixin.heat_network_options`.
        Also note that, just like ``CQ2_INEQUALITY``, a boolean is needed when
        flow directions are not fixed.

        .. image:: /images/DWlinearization.PNG

    LINEAR
        This option uses a linear head loss formulation.
        A single constraint of the type

        .. math::

            H_{up} - H_{down} = dH = C \cdot Q

        is added.
        Note that no boolean are required to support the case where flow directions
        are not fixed yet, at the cost of reduced fidelity in the head-loss relationship.

        The exact velocity to use to linearize can be set by overriding
        :py:meth:`._HeadLossMixin.heat_network_options`.

    CQ2_EQUALITY
        This option adds **equality** constraints of the type:

        .. math::

            dH = C \cdot Q^2

        This equation is non-convex, and can therefore lead to convergence issues.
    """

    NO_HEADLOSS = 1
    CQ2_INEQUALITY = 2
    LINEARIZED_DW = 3
    LINEAR = 4
    CQ2_EQUALITY = 5
class _MinimizeHeadLosses(Goal):
    """Last-priority goal minimizing the head produced by pumps and sources,
    plus the head losses over pipes without a control valve."""

    order = 1
    priority = 2**31 - 1

    def __init__(self, optimization_problem: "_HeadLossMixin", *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.optimization_problem = optimization_problem
        self.function_nominal = len(optimization_problem.times())

    def function(self, optimization_problem: "_HeadLossMixin", ensemble_member):
        parameters = optimization_problem.parameters(ensemble_member)
        options = optimization_problem.heat_network_options()

        components = optimization_problem.heat_network_components
        total = 0.0

        for pump in components.get("pump", []):
            total += optimization_problem.state(f"{pump}.dH")

        # If sources have an accompanying pump, we prefer the produced head to
        # be shifted to that pump. We therefore penalize the head of the
        # sources twice as much.
        for source in components.get("source", []):
            total += 2 * optimization_problem.state(f"{source}.dH")

        assert options["head_loss_option"] != HeadLossOption.NO_HEADLOSS

        for pipe in components["pipe"]:
            has_valve = parameters[f"{pipe}.has_control_valve"]
            is_zero_length = parameters[f"{pipe}.length"] == 0.0
            if not has_valve and not is_zero_length:
                head_loss_symbol = optimization_problem._hn_pipe_to_head_loss_map[pipe]
                total += optimization_problem.state(head_loss_symbol)

        return total
class _HeadLossMixin(BaseComponentTypeMixin, _GoalProgrammingMixinBase, OptimizationProblem):
"""
Adds handling of discharge - head (loss) relationship to the model.
"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Bounds for the pipe head variables (H_in / H_out), filled when
        # global pressure limits are finite.
        self.__pipe_head_bounds = {}
        # Extra symbol (>= |dH|), bounds and nominals per pipe head loss.
        self.__pipe_head_loss_var = {}
        self.__pipe_head_loss_bounds = {}
        self.__pipe_head_loss_nominals = {}
        # dH bounds fixed to (0.0, 0.0) for pipes without head loss.
        self.__pipe_head_loss_zero_bounds = {}
        # Map of pipe name to the name of its head loss proxy symbol.
        self._hn_pipe_to_head_loss_map = {}
        # Goal programming priority currently being solved.
        self.__priority = None
def pre(self):
super().pre()
self.__initialize_nominals_and_bounds()
options = self.heat_network_options()
parameters = self.parameters(0)
# It is not allowed to mix NO_HEADLOSS with other head loss options as
# that just leads to weird and undefined behavior.
head_loss_values = {
options["head_loss_option"],
}
for p in self.heat_network_components["pipe"]:
head_loss_values.add(self._hn_get_pipe_head_loss_option(p, options, parameters))
if HeadLossOption.NO_HEADLOSS in head_loss_values and len(head_loss_values) > 1:
raise Exception(
"Mixing .NO_HEADLOSS with other head loss options is not allowed. "
"Either all pipes should have .NO_HEADLOSS set, or none. "
"The global value returned by heat_network_options() also need to match."
)
def heat_network_options(self):
r"""
Returns a dictionary of heat network specific options.
+--------------------------------+-----------+-----------------------------------+
| Option | Type | Default value |
+================================+===========+===================================+
| ``minimum_pressure_far_point`` | ``float`` | ``1.0`` bar |
+--------------------------------+-----------+-----------------------------------+
| ``wall_roughness`` | ``float`` | ``0.002`` m |
+--------------------------------+-----------+-----------------------------------+
| ``head_loss_option`` | ``enum`` | ``HeadLossOption.CQ2_INEQUALITY`` |
+--------------------------------+-----------+-----------------------------------+
| ``estimated_velocity`` | ``float`` | ``1.0`` m/s (CQ2_* & LINEAR) |
+--------------------------------+-----------+-----------------------------------+
| ``maximum_velocity`` | ``float`` | ``2.5`` m/s (LINEARIZED_DW) |
+--------------------------------+-----------+-----------------------------------+
| ``n_linearization_lines`` | ``int`` | ``5`` (LINEARIZED_DW) |
+--------------------------------+-----------+-----------------------------------+
| ``minimize_head_losses`` | ``bool`` | ``True`` |
+--------------------------------+-----------+-----------------------------------+
| ``pipe_minimum_pressure`` | ``float`` | ``-np.inf`` |
+--------------------------------+-----------+-----------------------------------+
| ``pipe_maximum_pressure`` | ``float`` | ``np.inf`` |
+--------------------------------+-----------+-----------------------------------+
The ``minimum_pressure_far_point`` gives the minimum pressure
requirement at any demand node, which means that the pressure at the
furthest point is also satisfied without inspecting the topology.
The ``wall_roughness`` of the pipes plays a role in determining the
resistance of the pipes.
To model the head loss in pipes, the ``head_loss_option`` refers to
one of the ways this can be done. See :class:`HeadLossOption` for more
explanation on what each option entails. Note that some options model
the head loss as an inequality, i.e. :math:`\Delta H \ge f(Q)`, whereas
others model it as an equality.
When ``HeadLossOption.CQ2_INEQUALITY`` is used, the wall roughness at
``estimated_velocity`` determines the `C` in :math:`\Delta H \ge C
\cdot Q^2`.
When ``HeadLossOption.LINEARIZED_DW`` is used, the
``maximum_velocity`` needs to be set. The Darcy-Weisbach head loss
relationship from :math:`v = 0` until :math:`v = \text{maximum_velocity}`
will then be linearized using ``n_linearization`` lines.
When ``HeadLossOption.LINEAR`` is used, the wall roughness at
``estimated_velocity`` determines the `C` in :math:`\Delta H = C \cdot
Q`. For pipes that contain a control valve, the formulation of
``HeadLossOption.CQ2_INEQUALITY`` is used.
When ``HeadLossOption.CQ2_EQUALITY`` is used, the wall roughness at
``estimated_velocity`` determines the `C` in :math:`\Delta H = C \cdot
Q^2`. Note that this formulation is non-convex. At `theta < 1` we
therefore use the formulation ``HeadLossOption.LINEAR``. For pipes
that contain a control valve, the formulation of
``HeadLossOption.CQ2_INEQUALITY`` is used.
When ``minimize_head_losses`` is set to True (default), a last
priority is inserted where the head losses in the system are
minimized if the ``head_loss_option`` is not `NO_HEADLOSS`.
This is related to the assumption that control valves are
present in the system to steer water in the right direction the case
of multiple routes. If such control valves are not present, enabling
this option will give warnings in case the found solution is not
feasible. In case the option is False, both the minimization and
checks are skipped.
The ``pipe_minimum_pressure`` is the global minimum pressured allowed
in the network. Similarly, ``pipe_maximum_pressure`` is the maximum
one.
"""
options = {}
options["minimum_pressure_far_point"] = 1.0
options["wall_roughness"] = 2e-3
options["head_loss_option"] = HeadLossOption.CQ2_INEQUALITY
options["estimated_velocity"] = 1.0
options["maximum_velocity"] = 2.5
options["n_linearization_lines"] = 5
options["minimize_head_losses"] = True
options["pipe_minimum_pressure"] = -np.inf
options["pipe_maximum_pressure"] = np.inf
return options
    @abstractmethod
    def _hn_get_pipe_head_loss_option(
        self, pipe, heat_network_options, parameters, **kwargs
    ) -> HeadLossOption:
        """
        The global user head loss option is not necessarily the same as the
        head loss option for a specific pipe. For example, when a control
        valve is present, a .LINEAR global head loss option could mean a
        .CQ2_INEQUALITY formulation should be used instead.

        See also the explanation of `head_loss_option` (and its values) in
        :py:meth:`.heat_network_options`.

        :returns: The :py:class:`HeadLossOption` to use for ``pipe``.
        """
        raise NotImplementedError
    @abstractmethod
    def _hn_pipe_head_loss_constraints(self, ensemble_member) -> List[Tuple[ca.MX, float, float]]:
        """
        This method should be implemented to relate the three variables:

        - discharge: e.g. `pipe.Q`
        - head difference: e.g. `pipe.dH`
        - head loss of the pipe (note: proxy symbol that is >= abs(actual head loss))

        The internal variable name/symbol for the head loss can be retrieved via
        `self._hn_pipe_to_head_loss_map[pipe]`. The method that should/can be called to
        relate these three variables is :py:meth:`._hn_pipe_head_loss`.
        """
        raise NotImplementedError
    @property
    def _hn_minimization_goal_class(self) -> Type[Goal]:
        # Goal class inserted as the last priority when the
        # ``minimize_head_losses`` option is enabled.
        return _MinimizeHeadLosses
    def __initialize_nominals_and_bounds(self):
        """Compute nominals and bounds for pipe heads and head losses.

        For every pipe with head loss, a ``{pipe}.__head_loss`` proxy symbol
        is created that is, by definition, larger than or equal to the
        absolute value of ``{pipe}.dH``.
        """
        self.__pipe_head_loss_nominals = AliasDict(self.alias_relation)
        options = self.heat_network_options()
        parameters = self.parameters(0)
        min_pressure = options["pipe_minimum_pressure"]
        max_pressure = options["pipe_maximum_pressure"]
        assert (
            max_pressure > min_pressure
        ), "The global maximum pressure must be larger than the minimum one."
        if np.isfinite(min_pressure) or np.isfinite(max_pressure):
            # Pressure [bar] is converted to head [m]; 1 bar is roughly
            # 10.2 m water column.
            for p in self.heat_network_components["pipe"]:
                # No elevation data available yet. Assume 0 mDAT for now.
                pipe_elevation = 0.0
                min_head = min_pressure * 10.2 + pipe_elevation
                max_head = max_pressure * 10.2 + pipe_elevation
                self.__pipe_head_bounds[f"{p}.H_in"] = (min_head, max_head)
                self.__pipe_head_bounds[f"{p}.H_out"] = (min_head, max_head)
        head_loss_option = options["head_loss_option"]
        if head_loss_option not in HeadLossOption.__members__.values():
            raise Exception(f"Head loss option '{head_loss_option}' does not exist")
        for p in self.heat_network_components["pipe"]:
            length = parameters[f"{p}.length"]
            if length < 0.0:
                raise ValueError("Pipe length has to be larger than or equal to zero")
            head_loss_option = self._hn_get_pipe_head_loss_option(p, options, parameters)
            if head_loss_option == HeadLossOption.NO_HEADLOSS or (
                length == 0.0 and not parameters[f"{p}.has_control_valve"]
            ):
                # No head loss: dH is fixed to zero via its bounds.
                self.__pipe_head_loss_zero_bounds[f"{p}.dH"] = (0.0, 0.0)
            else:
                q_nominal = self._hn_pipe_nominal_discharge(options, parameters, p)
                head_loss_nominal = self._hn_pipe_head_loss(p, options, parameters, q_nominal)
                self.__pipe_head_loss_nominals[f"{p}.dH"] = head_loss_nominal
                # The .dH is by definition "Out - In". The .__head_loss is by
                # definition larger than or equal to the absolute value of dH.
                head_loss_var = f"{p}.__head_loss"
                self._hn_pipe_to_head_loss_map[p] = head_loss_var
                self.__pipe_head_loss_var[head_loss_var] = ca.MX.sym(head_loss_var)
                self.__pipe_head_loss_nominals[head_loss_var] = head_loss_nominal
                self.__pipe_head_loss_bounds[head_loss_var] = (0.0, np.inf)
def _hn_pipe_nominal_discharge(self, heat_network_options, parameters, pipe: str) -> float:
return parameters[f"{pipe}.area"] * heat_network_options["estimated_velocity"]
    def _hn_pipe_head_loss(
        self,
        pipe: str,
        heat_network_options,
        parameters,
        discharge: Union[ca.MX, float, np.ndarray],
        head_loss: Optional[ca.MX] = None,
        dh: Optional[ca.MX] = None,
        is_disconnected: Union[ca.MX, int] = 0,
        big_m: Optional[float] = None,
        pipe_class: Optional[PipeClass] = None,
    ) -> Union[List[Tuple[ca.MX, BT, BT]], float, np.ndarray]:
        """
        This function has two purposes:

        - return the head loss constraint expression(s) or
        - compute the head loss numerically (always positive).

        Note that there are different head loss formulations (see
        :class:`HeadLossOption`). Some formulations require the passing of
        `head_loss` (a symbol that is always positive by definition), and
        others require the passing of `dh` (which is negative when the flow is
        positive).

        When `head_loss` or `dh` is its corresponding MX symbol/expression,
        the appropriate constraint expression is returned. When `head_loss`
        and `dh` are both None, the `discharge` is assumed numerical, and the
        numerical computation of the appropriate head loss formulation is
        returned. Note that this returned numerical value is always positive,
        regardless of the sign of the discharge.

        `is_disconnected` can be used to specify whether a pipe is
        disconnected or not. This is most useful if a (boolean) ca.MX symbol
        is passed, which can then be used with a big-M formulation. The big-M
        itself then also needs to be passed via the `big_m` keyword argument.
        """
        # Symbolic mode: build constraint expressions. Numeric mode: evaluate
        # the head loss for the given (numeric) discharge.
        if head_loss is None and dh is None:
            symbolic = False
        else:
            symbolic = True
        head_loss_option = self._hn_get_pipe_head_loss_option(
            pipe, heat_network_options, parameters
        )
        assert (
            head_loss_option != HeadLossOption.NO_HEADLOSS
        ), "This method should be skipped when NO_HEADLOSS is set."
        length = parameters[f"{pipe}.length"]
        if length == 0.0:
            if not symbolic:
                return np.zeros_like(discharge)
            else:
                # dH is set to zero in bounds
                return []
        if isinstance(is_disconnected, ca.MX) and not isinstance(big_m, float):
            raise ValueError("When `is_disconnected` is symbolic, `big_m` must be passed as well")
        if not symbolic and isinstance(is_disconnected, ca.MX):
            raise ValueError(
                "`is_disconnected` cannot be symbolic if the other dH/Q symbols are numeric"
            )
        if isinstance(is_disconnected, (float, int)) and is_disconnected == 1.0:
            if symbolic:
                # Pipe is always disconnected, so no head loss relationship needed
                return []
            else:
                # By definition we choose the head loss over disconnected
                # pipes to be zero.
                return 0.0
        if big_m is None:
            assert is_disconnected == 0.0
        else:
            assert big_m != 0.0
        estimated_velocity = heat_network_options["estimated_velocity"]
        wall_roughness = heat_network_options["wall_roughness"]
        # When sizing with pipe classes, the geometry comes from the candidate
        # class instead of the model parameters.
        if pipe_class is not None:
            diameter = pipe_class.inner_diameter
            area = pipe_class.area
            maximum_velocity = pipe_class.maximum_velocity
        else:
            diameter = parameters[f"{pipe}.diameter"]
            area = parameters[f"{pipe}.area"]
            maximum_velocity = heat_network_options["maximum_velocity"]
        temperature = parameters[f"{pipe}.temperature"]
        has_control_valve = parameters[f"{pipe}.has_control_valve"]
        if head_loss_option == HeadLossOption.LINEAR:
            assert not has_control_valve
            ff = darcy_weisbach.friction_factor(
                estimated_velocity, diameter, wall_roughness, temperature
            )
            # Compute c_v constant (where |dH| ~ c_v * v^2)
            c_v = length * ff / (2 * GRAVITATIONAL_CONSTANT) / diameter
            linearization_velocity = estimated_velocity
            linearization_head_loss = c_v * linearization_velocity**2
            linearization_discharge = linearization_velocity * area
            expr = linearization_head_loss * discharge / linearization_discharge
            if symbolic:
                q_nominal = self.variable_nominal(f"{pipe}.Q")
                head_loss_nominal = self.variable_nominal(f"{pipe}.dH")
                constraint_nominal = (
                    head_loss_nominal
                    * linearization_head_loss
                    * q_nominal
                    / linearization_discharge
                ) ** 0.5
                # Interior point solvers, like IPOPT, do not like linearly dependent
                # tight inequality constraints. For this reason, we split the
                # constraints depending whether the Big-M formulation is used or not.
                if big_m is None:
                    return [((-1 * dh - expr) / constraint_nominal, 0.0, 0.0)]
                else:
                    constraint_nominal = (constraint_nominal * big_m) ** 0.5
                    return [
                        (
                            (-1 * dh - expr + is_disconnected * big_m) / constraint_nominal,
                            0.0,
                            np.inf,
                        ),
                        (
                            (-1 * dh - expr - is_disconnected * big_m) / constraint_nominal,
                            -np.inf,
                            0.0,
                        ),
                    ]
            else:
                return expr * np.sign(discharge)
        elif head_loss_option in {
            HeadLossOption.CQ2_INEQUALITY,
            HeadLossOption.CQ2_EQUALITY,
        }:
            ff = darcy_weisbach.friction_factor(
                estimated_velocity, diameter, wall_roughness, temperature
            )
            # Compute c_v constant (where |dH| ~ c_v * v^2)
            c_v = length * ff / (2 * GRAVITATIONAL_CONSTANT) / diameter
            v = discharge / area
            expr = c_v * v**2
            if symbolic:
                q_nominal = self.variable_nominal(f"{pipe}.Q")
                head_loss_nominal = self.variable_nominal(f"{pipe}.dH")
                constraint_nominal = (head_loss_nominal * c_v * (q_nominal / area) ** 2) ** 0.5
                # INEQUALITY only needs a lower bound; EQUALITY pins both sides.
                if head_loss_option == HeadLossOption.CQ2_INEQUALITY:
                    ub = np.inf
                else:
                    ub = 0.0
                # Interior point solvers, like IPOPT, do not like linearly dependent
                # tight inequality constraints. For this reason, we split the
                # constraints depending whether the Big-M formulation is used or not.
                if big_m is None:
                    equations = [((head_loss - expr) / constraint_nominal, 0.0, ub)]
                else:
                    constraint_nominal = (constraint_nominal * big_m) ** 0.5
                    equations = [
                        (
                            (head_loss - expr + is_disconnected * big_m) / constraint_nominal,
                            0.0,
                            np.inf,
                        )
                    ]
                    if head_loss_option == HeadLossOption.CQ2_EQUALITY:
                        equations.append(
                            (
                                (head_loss - expr - is_disconnected * big_m) / constraint_nominal,
                                -np.inf,
                                0.0,
                            ),
                        )
                return equations
            else:
                return expr
        elif head_loss_option == HeadLossOption.LINEARIZED_DW:
            n_lines = heat_network_options["n_linearization_lines"]
            a, b = darcy_weisbach.get_linear_pipe_dh_vs_q_fit(
                diameter,
                length,
                wall_roughness,
                temperature=temperature,
                n_lines=n_lines,
                v_max=maximum_velocity,
            )
            # The function above only gives result in the positive quadrant
            # (positive head loss, positive discharge). We also need a
            # positive head loss for _negative_ discharges.
            a = np.hstack([-a, a])
            b = np.hstack([b, b])
            # Vectorize constraint for speed
            if symbolic:
                q_nominal = self.variable_nominal(f"{pipe}.Q")
                head_loss_nominal = self.variable_nominal(f"{pipe}.dH")
                head_loss_vec = ca.repmat(head_loss, len(a))
                discharge_vec = ca.repmat(discharge, len(a))
                if isinstance(is_disconnected, ca.MX):
                    is_disconnected_vec = ca.repmat(is_disconnected, len(a))
                else:
                    is_disconnected_vec = is_disconnected
                a_vec = np.repeat(a, discharge.size1())
                b_vec = np.repeat(b, discharge.size1())
                constraint_nominal = np.abs(head_loss_nominal * a_vec * q_nominal) ** 0.5
                if big_m is None:
                    # We write the equation such that big_m is always used, even if
                    # it is None (i.e. not used). We do have to be sure to set it to 0,
                    # because we cannot multiple with "None".
                    big_m_lin = 0.0
                else:
                    big_m_lin = big_m
                    constraint_nominal = (constraint_nominal * big_m_lin) ** 0.5
                return [
                    (
                        (
                            head_loss_vec
                            - (a_vec * discharge_vec + b_vec)
                            + is_disconnected_vec * big_m_lin
                        )
                        / constraint_nominal,
                        0.0,
                        np.inf,
                    )
                ]
            else:
                # Numeric: the piecewise-linear approximation is the maximum
                # over all supporting lines.
                ret = np.amax(a * np.tile(discharge, (len(a), 1)).transpose() + b, axis=1)
                if isinstance(discharge, float):
                    ret = ret[0]
                return ret
def __pipe_head_loss_path_constraints(self, _ensemble_member):
constraints = []
# We set this constraint relating .dH to the upstream and downstream
# heads here in the Mixin for scaling purposes (dH nominal is
# calculated in pre()).
for pipe in self.heat_network_components["pipe"]:
dh = self.state(f"{pipe}.dH")
h_down = self.state(f"{pipe}.H_out")
h_up = self.state(f"{pipe}.H_in")
constraint_nominal = (
self.variable_nominal(f"{pipe}.dH") * self.variable_nominal(f"{pipe}.H_in")
) ** 0.5
constraints.append(((dh - (h_down - h_up)) / constraint_nominal, 0.0, 0.0))
return constraints
def __demand_head_loss_path_constraints(self, _ensemble_member):
constraints = []
options = self.heat_network_options()
components = self.heat_network_components
# Convert minimum pressure at far point from bar to meter (water) head
min_head_loss = options["minimum_pressure_far_point"] * 10.2
for d in components["demand"]:
constraints.append(
(
self.state(f"{d}.H_in") - self.state(f"{d}.H_out"),
min_head_loss,
np.inf,
)
)
return constraints
def constraints(self, ensemble_member):
constraints = super().constraints(ensemble_member)
if self.heat_network_options()["head_loss_option"] != HeadLossOption.NO_HEADLOSS:
constraints.extend(self._hn_pipe_head_loss_constraints(ensemble_member))
return constraints
def path_constraints(self, ensemble_member):
constraints = super().path_constraints(ensemble_member).copy()
options = self.heat_network_options()
# Add source/demand head loss constrains only if head loss is non-zero
if options["head_loss_option"] != HeadLossOption.NO_HEADLOSS:
constraints.extend(self.__pipe_head_loss_path_constraints(ensemble_member))
constraints.extend(self.__demand_head_loss_path_constraints(ensemble_member))
return constraints
    def priority_started(self, priority):
        """Remember the goal programming priority that just started."""
        super().priority_started(priority)
        self.__priority = priority
def priority_completed(self, priority):
    """Called when goal programming finishes the given priority.

    After the head-loss minimization goal has run, verify that:
    - each pipe's (linearized) head loss matches the head-loss relation,
      i.e. there is no "artificial" head loss that should have been a
      control valve instead;
    - the minimum head loss over all demands is at the configured
      far-point target.

    Discrepancies are only logged as warnings; nothing is enforced here.
    """
    super().priority_completed(priority)
    options = self.heat_network_options()
    if (
        options["minimize_head_losses"]
        and options["head_loss_option"] != HeadLossOption.NO_HEADLOSS
        and priority == self._hn_minimization_goal_class.priority
    ):
        components = self.heat_network_components
        # Tolerances for comparing achieved vs. target head losses.
        rtol = 1e-5
        atol = 1e-4
        for ensemble_member in range(self.ensemble_size):
            parameters = self.parameters(ensemble_member)
            results = self.extract_results(ensemble_member)
            for pipe in components["pipe"]:
                # A pipe with a control valve may legitimately have extra
                # head loss; nothing to check.
                if parameters[f"{pipe}.has_control_valve"]:
                    continue
                # Just like with a control valve, if pipe is disconnected
                # there is nothing to check.
                q_full = results[f"{pipe}.Q"]
                if parameters[f"{pipe}.disconnectable"]:
                    # Only check time steps where the pipe is flowing.
                    inds = q_full != 0.0
                else:
                    inds = np.arange(len(q_full), dtype=int)
                if parameters[f"{pipe}.diameter"] == 0.0:
                    # Pipe is disconnected. Head loss is free, so nothing to check.
                    continue
                q = results[f"{pipe}.Q"][inds]
                head_loss_target = self._hn_pipe_head_loss(pipe, options, parameters, q, None)
                if options["head_loss_option"] == HeadLossOption.LINEAR:
                    head_loss = np.abs(results[f"{pipe}.dH"][inds])
                else:
                    head_loss = results[self._hn_pipe_to_head_loss_map[pipe]][inds]
                if not np.allclose(head_loss, head_loss_target, rtol=rtol, atol=atol):
                    logger.warning(
                        f"Pipe {pipe} has artificial head loss; "
                        f"at least one more control valve should be added to the network."
                    )
            # After minimization, the far point (minimum head loss over all
            # demands) should sit exactly at the configured target.
            min_head_loss_target = options["minimum_pressure_far_point"] * 10.2
            min_head_loss = None
            for demand in components["demand"]:
                head_loss = results[f"{demand}.H_in"] - results[f"{demand}.H_out"]
                if min_head_loss is None:
                    min_head_loss = head_loss
                else:
                    min_head_loss = np.minimum(min_head_loss, head_loss)
            if not np.allclose(min_head_loss, min_head_loss_target, rtol=rtol, atol=atol):
                logger.warning("Minimum head at demands is higher than target minimum.")
def path_goals(self):
    """Append the head-loss minimization goal when it is enabled."""
    goals = super().path_goals().copy()

    options = self.heat_network_options()
    head_loss_modeled = options["head_loss_option"] != HeadLossOption.NO_HEADLOSS

    if options["minimize_head_losses"] and head_loss_modeled:
        goals.append(self._hn_minimization_goal_class(self))

    return goals
@property
def path_variables(self):
    """The superclass path variables, extended with the pipe head-loss
    variables introduced by this mixin."""
    return [*super().path_variables, *self.__pipe_head_loss_var.values()]
def variable_nominal(self, variable):
    """Return the nominal of `variable`.

    Head-loss variables get their nominal from this mixin's bookkeeping;
    all other variables defer to the superclass.
    """
    try:
        return self.__pipe_head_loss_nominals[variable]
    except KeyError:
        return super().variable_nominal(variable)
def bounds(self):
    """Merge this mixin's head-loss variable bounds into the superclass
    bounds, tightening the pipe head bounds where both are present."""
    bounds = super().bounds().copy()

    bounds.update(self.__pipe_head_loss_bounds)
    bounds.update(self.__pipe_head_loss_zero_bounds)

    # Pipe head bounds are merged (not overwritten), so that the tightest
    # combination of both sets of bounds applies.
    for variable, head_bounds in self.__pipe_head_bounds.items():
        bounds[variable] = self.merge_bounds(bounds[variable], head_bounds)

    return bounds
import math
from typing import List, Tuple, Union
import numpy as np
def heat_loss_u_values_pipe(
    inner_diameter: float,
    insulation_thicknesses: Union[float, List[float], np.ndarray] = None,
    conductivities_insulation: Union[float, List[float], np.ndarray] = 0.033,
    conductivity_subsoil: float = 2.3,
    depth: float = 1.0,
    h_surface: float = 15.4,
    pipe_distance: float = None,
) -> Tuple[float, float]:
    """
    Calculate the U_1 and U_2 heat loss values for a pipe for either
    single- or multi-layer insulation.

    If `insulation_thicknesses` is a sequence, its length should equal the
    number of conductivities. A single (scalar) conductivity is broadcast
    over all insulation layers.

    :param inner_diameter: Inner diameter of the pipes [m]
    :param insulation_thicknesses: Thicknesses of the insulation [m]
        Default of None means a thickness of 0.5 * inner diameter.
    :param conductivities_insulation: Thermal conductivities of the
        insulation layers [W/m/K]. A scalar applies to all layers.
    :param conductivity_subsoil: Subsoil thermal conductivity [W/m/K]
    :param depth: Depth of outer top of the pipeline [m]
    :param h_surface: Heat transfer coefficient at surface [W/m^2/K]
    :param pipe_distance: Distance between pipeline feed and return pipeline
        centers [m]. Default of None means 2 * outer diameter.

    :return: U-values (U_1 / U_2) for heat losses of pipes [W/(m*K)]

    :raises ValueError: If multiple thicknesses and multiple conductivities
        are given but their lengths differ.
    """
    if insulation_thicknesses is None:
        insulation_thicknesses = 0.5 * inner_diameter

    # Normalize both inputs to 1-D float arrays; this accepts floats, lists,
    # tuples and ndarrays alike (the original rejected mixed types).
    insulation_thicknesses = np.atleast_1d(np.asarray(insulation_thicknesses, dtype=float))
    conductivities_insulation = np.atleast_1d(np.asarray(conductivities_insulation, dtype=float))

    if len(insulation_thicknesses) != len(conductivities_insulation):
        if len(conductivities_insulation) == 1:
            # Generalization: a single conductivity applies to all layers.
            conductivities_insulation = np.full_like(
                insulation_thicknesses, conductivities_insulation[0]
            )
        else:
            raise ValueError(
                "Number of insulation thicknesses should match number of conductivities"
            )

    diam_inner = inner_diameter
    diam_outer = diam_inner + 2 * np.sum(insulation_thicknesses)
    if pipe_distance is None:
        pipe_distance = 2 * diam_outer
    depth_center = depth + 0.5 * diam_outer
    # Correct the depth for the surface heat transfer resistance.
    depth_corrected = depth_center + conductivity_subsoil / h_surface

    # NOTE: We neglect the heat resistance due to convection inside the pipe,
    # i.e. we assume perfect mixing, or that this resistance is much lower
    # than the resistance of the outer insulation layers.

    # Heat resistance of the subsoil
    r_subsoil = (
        1 / (2 * math.pi * conductivity_subsoil) * math.log(4.0 * depth_corrected / diam_outer)
    )

    # Heat resistance due to insulation: sum of per-layer cylindrical-shell
    # resistances, from the innermost to the outermost layer.
    outer_diameters = diam_inner + 2.0 * np.cumsum(insulation_thicknesses)
    inner_diameters = np.array([inner_diameter, *outer_diameters[:-1]])
    r_ins = np.sum(
        np.log(outer_diameters / inner_diameters) / (2.0 * math.pi * conductivities_insulation)
    )

    # Heat resistance due to neighboring pipeline
    r_m = (
        1
        / (4 * math.pi * conductivity_subsoil)
        * math.log(1 + (2 * depth_corrected / pipe_distance) ** 2)
    )

    u_1 = (r_subsoil + r_ins) / ((r_subsoil + r_ins) ** 2 - r_m**2)
    u_2 = r_m / ((r_subsoil + r_ins) ** 2 - r_m**2)

    return u_1, u_2
import json
import xml.etree.ElementTree as ET # noqa: N817
import requests
from rtctools_heat_network._heat_loss_u_values_pipe import heat_loss_u_values_pipe
def main():
    """Regenerate ``_edr_pipes.json`` from the online EDR asset database.

    Downloads every pipe asset from the EDR logstore, computes its U-values
    via :func:`heat_loss_u_values_pipe`, and writes the result to a
    human-readable JSON file in the current directory. Requires network
    access.
    """
    logstore = requests.get("https://edr.hesi.energy/api/edr_list?category=Assets/Logstore").json()
    asset_id_map = {x["title"].rsplit(".", maxsplit=1)[0]: x["key"] for x in logstore}

    # Build a dictionary of EDR pipe name to its properties.
    pipe_properties_map = {}

    for a, key in asset_id_map.items():
        xml_string = requests.get(f"https://edr.hesi.energy/api/edr_item?id={key}").json()[
            "esdl_string"
        ]
        tree = ET.fromstring(xml_string)

        inner_diameter = float(tree.get("innerDiameter"))

        # Each <component> is one insulation layer with a single material.
        components = tree.findall(".//component")
        insulation_thicknesses = []
        conductivities_insulation = []
        for c in components:
            insulation_thicknesses.append(float(c.get("layerWidth")))
            matters = c.findall("matter")
            assert len(matters) == 1
            conductivities_insulation.append(float(matters[0].get("thermalConductivity")))

        u_1, u_2 = heat_loss_u_values_pipe(
            inner_diameter=inner_diameter,
            insulation_thicknesses=insulation_thicknesses,
            conductivities_insulation=conductivities_insulation,
        )

        pipe_properties_map[a] = {
            "inner_diameter": inner_diameter,
            "u_1": u_1,
            "u_2": u_2,
            "insulation_thicknesses": insulation_thicknesses,
            # NOTE: key is (mis)spelled like this on purpose; downstream
            # readers (asset_to_component_base.py) look up this exact key.
            "conductivies_insulation": conductivities_insulation,
            "xml_string": xml_string,
        }

    # Sort the list based on prefix and diameter
    def _sort_series_dn(name):
        a, b = name.rsplit("-", maxsplit=1)
        return a, int(b)

    pipe_properties_map = dict(
        sorted(pipe_properties_map.items(), key=lambda x: _sort_series_dn(x[0]))
    )

    # Export it to an indented (=human readable) JSON file. Use a context
    # manager so the file handle is flushed and closed (the original leaked
    # the handle returned by open()).
    with open("_edr_pipes.json", "w") as f:
        json.dump(pipe_properties_map, f, indent=4)


if __name__ == "__main__":
    main()
import json
import logging
import math
import os
from pathlib import Path
from typing import Dict, Tuple, Type, Union
import esdl
from rtctools_heat_network.pycml import Model as _Model
from .common import Asset
from .esdl_model_base import _RetryLaterException, _SkipAssetException
# Module-level logger shared by the converters in this module.
logger = logging.getLogger("rtctools_heat_network")

# Type of the modifier dictionaries returned by the convert_* methods and
# passed on to the PyCML components.
MODIFIERS = Dict[str, Union[str, int, float]]
class _AssetToComponentBase:
    """Base class for converting parsed ESDL assets to PyCML component types.

    ``convert()`` dispatches each asset to a ``convert_<kind>`` method based
    on ``component_map``; subclasses implement those methods and return the
    PyCML type plus a dictionary of modifiers.
    """

    # A map of pipe class name to edr asset in _edr_pipes.json
    STEEL_S1_PIPE_EDR_ASSETS = {
        "DN20": "Steel-S1-DN-20",
        "DN25": "Steel-S1-DN-25",
        "DN32": "Steel-S1-DN-32",
        "DN40": "Steel-S1-DN-40",
        "DN50": "Steel-S1-DN-50",
        "DN65": "Steel-S1-DN-65",
        "DN80": "Steel-S1-DN-80",
        "DN100": "Steel-S1-DN-100",
        "DN125": "Steel-S1-DN-125",
        "DN150": "Steel-S1-DN-150",
        "DN200": "Steel-S1-DN-200",
        "DN250": "Steel-S1-DN-250",
        "DN300": "Steel-S1-DN-300",
        "DN350": "Steel-S1-DN-350",
        "DN400": "Steel-S1-DN-400",
        "DN450": "Steel-S1-DN-450",
        "DN500": "Steel-S1-DN-500",
        "DN600": "Steel-S1-DN-600",
        "DN700": "Steel-S1-DN-700",
        "DN800": "Steel-S1-DN-800",
        "DN900": "Steel-S1-DN-900",
        "DN1000": "Steel-S1-DN-1000",
        "DN1100": "Steel-S1-DN-1100",
        "DN1200": "Steel-S1-DN-1200",
    }

    # Map from ESDL asset type to the kind of heat-network component it
    # becomes; "skip" means the asset is ignored entirely.
    component_map = {
        "GenericConsumer": "demand",
        "HeatingDemand": "demand",
        "GasHeater": "source",
        "GenericProducer": "source",
        "GeothermalSource": "source",
        "ResidualHeatSource": "source",
        "Joint": "node",
        "Pipe": "pipe",
        "Pump": "pump",
        "HeatStorage": "buffer",
        "Sensor": "skip",
        "Valve": "control_valve",
        "CheckValve": "check_valve",
    }

    def __init__(self):
        # Nominal discharge per ESDL port; filled in as pipes are converted
        # and used to derive nominals of connected components.
        self._port_to_q_nominal = {}
        # ESDL component type per port; used to classify neighboring assets.
        self._port_to_esdl_component_type = {}
        # Use a context manager so the JSON file handle is closed promptly
        # (the original left the file object returned by open() dangling).
        edr_path = os.path.join(Path(__file__).parent, "_edr_pipes.json")
        with open(edr_path, "r") as f:
            self._edr_pipes = json.load(f)

    def convert(self, asset: Asset) -> Tuple[Type[_Model], MODIFIERS]:
        """
        Converts an asset to a PyCML Heat component type and its modifiers.

        With more descriptive variable names the return type would be:
        Tuple[pycml_heat_component_type, Dict[component_attribute, new_attribute_value]]
        """
        for port in [asset.in_port, asset.out_port]:
            self._port_to_esdl_component_type[port] = asset.asset_type

        dispatch_method_name = f"convert_{self.component_map[asset.asset_type]}"
        return getattr(self, dispatch_method_name)(asset)

    def _pipe_get_diameter_and_insulation(self, asset: Asset):
        """Determine a pipe's inner diameter and insulation properties.

        There are multiple ways to specify pipe properties like diameter and
        material / insulation. We assume that DN `diameter` takes precedence
        over `innerDiameter` and `material` (while logging warnings if both
        are specified).

        :return: Tuple of (inner diameter [m], insulation layer thicknesses,
            insulation layer conductivities); the latter two are NaN when the
            component defaults should be used.
        """
        full_name = f"{asset.asset_type} '{asset.name}'"
        if asset.attributes["innerDiameter"] and asset.attributes["diameter"].value > 0:
            logger.warning(
                f"{full_name}' has both 'innerDiameter' and 'diameter' specified. "
                f"Diameter of {asset.attributes['diameter'].name} will be used."
            )
        if asset.attributes["material"] and asset.attributes["diameter"].value > 0:
            logger.warning(
                f"{full_name}' has both 'material' and 'diameter' specified. "
                f"Insulation properties of {asset.attributes['diameter'].name} will be used."
            )
        # Fixed: the original had an additional, earlier `if` covering the
        # material-but-no-diameter case that duplicated the warning below,
        # causing the same message to be logged twice.
        if asset.attributes["diameter"].value == 0 and not asset.attributes["innerDiameter"]:
            if asset.attributes["material"]:
                logger.warning(
                    f"{full_name}' has only 'material' specified, but no information on diameter. "
                    f"Diameter and insulation properties of DN200 will be used."
                )
            else:
                logger.warning(
                    f"{full_name}' has no DN size or innerDiameter specified. "
                    f"Diameter and insulation properties of DN200 will be used. "
                )

        edr_dn_size = None
        if asset.attributes["diameter"].value > 0:
            edr_dn_size = str(asset.attributes["diameter"].name)
        elif not asset.attributes["innerDiameter"]:
            # Fall back to DN200 (warnings were logged above).
            edr_dn_size = "DN200"

        # NaN means the default values will be used
        insulation_thicknesses = math.nan
        conductivies_insulation = math.nan

        if edr_dn_size:
            # Get insulation and diameter properties from EDR asset with this size.
            edr_asset = self._edr_pipes[self.STEEL_S1_PIPE_EDR_ASSETS[edr_dn_size]]
            diameter = edr_asset["inner_diameter"]
            insulation_thicknesses = edr_asset["insulation_thicknesses"]
            conductivies_insulation = edr_asset["conductivies_insulation"]
        else:
            assert asset.attributes["innerDiameter"]
            diameter = asset.attributes["innerDiameter"]

            # Insulation properties
            material = asset.attributes["material"]
            if material is not None:
                if isinstance(material, esdl.esdl.MatterReference):
                    material = material.reference

                assert isinstance(material, esdl.esdl.CompoundMatter)
                components = material.component.items
                if components:
                    insulation_thicknesses = [x.layerWidth for x in components]
                    conductivies_insulation = [x.matter.thermalConductivity for x in components]

        return diameter, insulation_thicknesses, conductivies_insulation

    def _is_disconnectable_pipe(self, asset):
        """Whether a pipe may be fully shut off by the optimization.

        Source and buffer pipes are disconnectable by default. Raises
        _RetryLaterException when the connected components have not been
        converted yet.
        """
        connected_type_in = self._port_to_esdl_component_type.get(
            asset.in_port.connectedTo[0], None
        )
        connected_type_out = self._port_to_esdl_component_type.get(
            asset.out_port.connectedTo[0], None
        )
        types = {k for k, v in self.component_map.items() if v in {"source", "buffer"}}

        if types.intersection({connected_type_in, connected_type_out}):
            return True
        elif connected_type_in is None or connected_type_out is None:
            raise _RetryLaterException(
                f"Could not determine if {asset.asset_type} '{asset.name}' "
                f"is a source or buffer pipe"
            )
        else:
            return False

    def _set_q_nominal(self, asset, q_nominal):
        """Record the nominal discharge for both ports of `asset`."""
        self._port_to_q_nominal[asset.in_port] = q_nominal
        self._port_to_q_nominal[asset.out_port] = q_nominal

    def _get_connected_q_nominal(self, asset):
        """Look up the nominal discharge via either of the asset's ports.

        Raises _RetryLaterException when no connected component has set a
        nominal discharge yet (pipes have to be converted first).
        """
        try:
            connected_port = asset.in_port.connectedTo[0]
            q_nominal = self._port_to_q_nominal[connected_port]
        except KeyError:
            connected_port = asset.out_port.connectedTo[0]
            q_nominal = self._port_to_q_nominal.get(connected_port, None)

        if q_nominal is not None:
            self._set_q_nominal(asset, q_nominal)
            return q_nominal
        else:
            raise _RetryLaterException(
                f"Could not determine nominal discharge for {asset.asset_type} '{asset.name}'"
            )

    @staticmethod
    def _get_supply_return_temperatures(asset: Asset) -> Tuple[float, float]:
        """Return the (supply, return) temperatures of the asset's carrier."""
        carrier = asset.global_properties["carriers"][asset.in_port.carrier.id]
        supply_temperature = carrier["supplyTemperature"]
        return_temperature = carrier["returnTemperature"]

        assert supply_temperature > return_temperature

        # This is a bit dangerous, but the default (not-set) value is 0.0. We
        # however require it to be explicitly set.
        assert supply_temperature != 0.0
        assert return_temperature != 0.0

        return supply_temperature, return_temperature

    def _supply_return_temperature_modifiers(self, asset: Asset) -> MODIFIERS:
        """Carrier temperatures as component modifiers."""
        supply_temperature, return_temperature = self._get_supply_return_temperatures(asset)
        return {"T_supply": supply_temperature, "T_return": return_temperature}

    def convert_skip(self, asset: Asset):
        """Signal that this asset should be left out of the model."""
        raise _SkipAssetException(asset)
import logging
from rtctools_heat_network.pycml import Model as _Model
# Module-level logger shared by the model-building code below.
logger = logging.getLogger("rtctools_heat_network")

# Maximum number of conversion passes over the assets before giving up on
# assets that keep raising _RetryLaterException.
RETRY_LOOP_LIMIT = 100
class _RetryLaterException(Exception):
    """Raised by converters when an asset cannot be converted yet (e.g. a
    connected component's nominal discharge is not known); the asset is
    re-queued for a later conversion pass."""

    pass
class _SkipAssetException(Exception):
    """Raised by converters when an asset should be left out of the model
    entirely (e.g. sensors)."""

    pass
class _ESDLModelBase(_Model):
    """Base PyCML model that is populated from a dictionary of ESDL assets."""

    def _esdl_convert(self, converter, assets, prefix):
        """Convert all `assets` to PyCML components and connect their ports.

        :param converter: An _AssetToComponentBase instance mapping each
            asset to a PyCML type plus modifiers.
        :param assets: Mapping of asset id to Asset.
        :param prefix: Port-name prefix, e.g. "Heat" gives ports named
            HeatIn / HeatOut / HeatConn on the components.
        """
        # Sometimes we need information of one component in order to convert
        # another. For example, the nominal discharge of a pipe is used to set
        # the nominal discharge of its connected components.
        retry_assets = list(assets.values())
        skip_assets = list()

        for _ in range(RETRY_LOOP_LIMIT):
            current_assets = retry_assets
            retry_assets = []

            for asset in current_assets:
                try:
                    pycml_type, modifiers = converter.convert(asset)
                except _SkipAssetException:
                    skip_assets.append(asset)
                    continue
                except _RetryLaterException:
                    # Try again in the next pass, once more components are known.
                    retry_assets.append(asset)
                    continue

                self.add_variable(pycml_type, asset.name, **modifiers)

            if not retry_assets:
                break
        else:
            # Loop ran to completion: some asset kept requesting a retry.
            raise Exception("Parsing of assets exceeded maximum iteration limit.")

        in_suf = f"{prefix}In"
        out_suf = f"{prefix}Out"
        node_suf = f"{prefix}Conn"

        skip_asset_ids = {a.id for a in skip_assets}
        node_assets = [
            a for a in assets.values() if a.asset_type == "Joint" and a.id not in skip_asset_ids
        ]
        non_node_assets = [
            a for a in assets.values() if a.asset_type != "Joint" and a.id not in skip_asset_ids
        ]

        # First we map all port ids to their respective PyCML ports. We only
        # do this for non-nodes, as for nodes we don't quite know what port
        # index a connection has to use yet.
        port_map = {}

        for asset in non_node_assets:
            component = getattr(self, asset.name)
            port_map[asset.in_port.id] = getattr(component, in_suf)
            port_map[asset.out_port.id] = getattr(component, out_suf)

        # Nodes are special in that their in/out ports can have multiple
        # connections. This means we have some bookkeeping to do per node. We
        # therefore do the nodes first, and do all remaining connections
        # after.
        connections = set()
        for asset in node_assets:
            component = getattr(self, asset.name)
            # Node connection ports are 1-indexed (see pycml Array indexing).
            i = 1
            for port in (asset.in_port, asset.out_port):
                for connected_to in port.connectedTo.items:
                    conn = (port.id, connected_to.id)
                    if conn in connections or tuple(reversed(conn)) in connections:
                        # Already connected from the other side.
                        continue
                    self.connect(getattr(component, node_suf)[i], port_map[connected_to.id])
                    connections.add(conn)
                    i += 1

        skip_port_ids = set()
        for a in skip_assets:
            if a.in_port is not None:
                skip_port_ids.add(a.in_port.id)
            if a.out_port is not None:
                skip_port_ids.add(a.out_port.id)

        # All non-Joints/nodes
        for asset in non_node_assets:
            for port in (asset.in_port, asset.out_port):
                connected_ports = [p for p in port.connectedTo.items if p.id not in skip_port_ids]
                if len(connected_ports) != 1:
                    logger.warning(
                        f"{asset.asset_type} '{asset.name}' has multiple connections"
                        f" to a single port. "
                    )
                assert len(connected_ports) == 1

                for connected_to in connected_ports:
                    conn = (port.id, connected_to.id)
                    if conn in connections or tuple(reversed(conn)) in connections:
                        continue
                    self.connect(port_map[port.id], port_map[connected_to.id])
                    connections.add(conn)
import logging
import math
from typing import Dict, Tuple, Type
import esdl
from rtctools_heat_network.pycml.component_library.heat import (
Buffer,
CheckValve,
ControlValve,
Demand,
GeothermalSource,
Node,
Pipe,
Pump,
Source,
)
from .asset_to_component_base import MODIFIERS, _AssetToComponentBase
from .common import Asset
from .esdl_model_base import _ESDLModelBase
# Module-level logger shared by the converters below.
logger = logging.getLogger("rtctools_heat_network")
class AssetToHeatComponent(_AssetToComponentBase):
    """Converts ESDL assets to PyCML heat-network component types.

    Physical defaults (velocities, density, heat capacity, minimum tank
    fill fraction) can be overridden via the constructor.
    """

    def __init__(
        self,
        *args,
        v_nominal=1.0,
        v_max=5.0,
        rho=988.0,
        cp=4200.0,
        min_fraction_tank_volume=0.05,
        **kwargs,
    ):
        """
        :param v_nominal: Nominal water velocity [m/s] used for Q nominals.
        :param v_max: Maximum water velocity [m/s] used for Q bounds.
        :param rho: Water density [kg/m3].
        :param cp: Water specific heat capacity [J/(kg*K)].
        :param min_fraction_tank_volume: Fraction of a buffer tank that is
            assumed to always remain filled.
        """
        super().__init__(*args, **kwargs)

        self.v_nominal = v_nominal
        self.v_max = v_max
        self.rho = rho
        self.cp = cp
        self.min_fraction_tank_volume = min_fraction_tank_volume

    @property
    def _rho_cp_modifiers(self):
        # Density/heat-capacity modifiers shared by all converted components.
        return dict(rho=self.rho, cp=self.cp)

    def convert_buffer(self, asset: Asset) -> Tuple[Type[Buffer], MODIFIERS]:
        """Convert a HeatStorage asset to a Buffer component."""
        assert asset.asset_type == "HeatStorage"

        supply_temperature, return_temperature = self._get_supply_return_temperatures(asset)

        # Assume that:
        # - the capacity is the relative heat that can be stored in the buffer;
        # - the tanks are always at least `min_fraction_tank_volume` full;
        # - same height as radius to compute dimensions.
        if asset.attributes["capacity"] and asset.attributes["volume"]:
            logger.warning(
                f"{asset.asset_type} '{asset.name}' has both capacity and volume specified. "
                f"Volume with value of {asset.attributes['volume']} m3 will be used."
            )

        capacity = 0.0
        if asset.attributes["volume"]:
            capacity = (
                asset.attributes["volume"]
                * self.rho
                * self.cp
                * (supply_temperature - return_temperature)
            )
        elif asset.attributes["capacity"]:
            capacity = asset.attributes["capacity"]
        else:
            # Fixed wording; the original message read "has both not
            # capacity and volume specified".
            logger.error(
                f"{asset.asset_type} '{asset.name}' has neither capacity nor volume specified. "
                f"Please specify one of the two"
            )

        assert capacity > 0.0

        min_fraction_tank_volume = self.min_fraction_tank_volume

        # Radius (== height) of a cylindrical tank that holds the maximum
        # stored heat, i.e. capacity * (1 + min_fraction_tank_volume).
        r = (
            capacity
            * (1 + min_fraction_tank_volume)
            / (self.rho * self.cp * (supply_temperature - return_temperature) * math.pi)
        ) ** (1.0 / 3.0)

        min_heat = capacity * min_fraction_tank_volume
        max_heat = capacity * (1 + min_fraction_tank_volume)
        assert max_heat > 0.0

        # Missing or zero (dis)charge rates mean "unconstrained".
        hfr_charge_max = asset.attributes.get("maxChargeRate", math.inf) or math.inf
        hfr_discharge_max = asset.attributes.get("maxDischargeRate", math.inf) or math.inf

        modifiers = dict(
            Q_nominal=self._get_connected_q_nominal(asset),
            height=r,
            radius=r,
            heat_transfer_coeff=1.0,
            min_fraction_tank_volume=min_fraction_tank_volume,
            Stored_heat=dict(min=min_heat, max=max_heat),
            Heat_buffer=dict(min=-hfr_discharge_max, max=hfr_charge_max),
            init_Heat=min_heat,
            **self._supply_return_temperature_modifiers(asset),
            **self._rho_cp_modifiers,
        )

        return Buffer, modifiers

    def convert_demand(self, asset: Asset) -> Tuple[Type[Demand], MODIFIERS]:
        """Convert a consumer asset to a Demand component."""
        assert asset.asset_type in {"GenericConsumer", "HeatingDemand"}

        # No power specified means an unbounded demand.
        max_demand = asset.attributes["power"] if asset.attributes["power"] else math.inf

        modifiers = dict(
            Q_nominal=self._get_connected_q_nominal(asset),
            Heat_demand=dict(max=max_demand),
            **self._supply_return_temperature_modifiers(asset),
            **self._rho_cp_modifiers,
        )

        return Demand, modifiers

    def convert_node(self, asset: Asset) -> Tuple[Type[Node], MODIFIERS]:
        """Convert a Joint asset to a Node; `n` is the total number of
        connections over all of its in/out ports."""
        assert asset.asset_type == "Joint"

        sum_in = 0
        sum_out = 0

        for x in asset.attributes["port"].items:
            # isinstance instead of the original `type(x) == ...` checks.
            if isinstance(x, esdl.esdl.InPort):
                sum_in += len(x.connectedTo)
            if isinstance(x, esdl.esdl.OutPort):
                sum_out += len(x.connectedTo)

        modifiers = dict(
            n=sum_in + sum_out,
        )

        return Node, modifiers

    def convert_pipe(self, asset: Asset) -> Tuple[Type[Pipe], MODIFIERS]:
        """Convert a Pipe asset to a Pipe component, deriving Q/Heat bounds
        from an assumed maximum velocity."""
        assert asset.asset_type == "Pipe"

        supply_temperature, return_temperature = self._get_supply_return_temperatures(asset)

        # Return pipes are identified by a "_ret" infix in the name.
        if "_ret" in asset.attributes["name"]:
            temperature = return_temperature
        else:
            temperature = supply_temperature

        (
            diameter,
            insulation_thicknesses,
            conductivies_insulation,
        ) = self._pipe_get_diameter_and_insulation(asset)

        # Compute the maximum heat flow based on an assumed maximum velocity
        area = math.pi * diameter**2 / 4.0
        q_max = area * self.v_max
        q_nominal = area * self.v_nominal

        self._set_q_nominal(asset, q_nominal)

        # TODO: This might be an underestimation. We need to add the total
        # heat losses in the system to get a proper upper bound. Maybe move
        # calculation of Heat bounds to the HeatMixin?
        delta_temperature = supply_temperature - return_temperature
        hfr_max = self.rho * self.cp * q_max * delta_temperature * 2

        assert hfr_max > 0.0

        modifiers = dict(
            Q_nominal=q_nominal,
            length=asset.attributes["length"],
            diameter=diameter,
            temperature=temperature,
            disconnectable=self._is_disconnectable_pipe(asset),
            HeatIn=dict(
                Heat=dict(min=-hfr_max, max=hfr_max),
                Q=dict(min=-q_max, max=q_max),
            ),
            HeatOut=dict(
                Heat=dict(min=-hfr_max, max=hfr_max),
                Q=dict(min=-q_max, max=q_max),
            ),
            insulation_thickness=insulation_thicknesses,
            conductivity_insulation=conductivies_insulation,
            **self._supply_return_temperature_modifiers(asset),
            **self._rho_cp_modifiers,
        )

        return Pipe, modifiers

    def convert_pump(self, asset: Asset) -> Tuple[Type[Pump], MODIFIERS]:
        """Convert a Pump asset to a Pump component."""
        assert asset.asset_type == "Pump"

        modifiers = dict(
            Q_nominal=self._get_connected_q_nominal(asset),
            **self._supply_return_temperature_modifiers(asset),
            **self._rho_cp_modifiers,
        )

        return Pump, modifiers

    def convert_source(self, asset: Asset) -> Tuple[Type[Source], MODIFIERS]:
        """Convert a producer asset to a Source (or GeothermalSource)."""
        assert asset.asset_type in {
            "GasHeater",
            "GenericProducer",
            "GeothermalSource",
            "ResidualHeatSource",
        }

        max_supply = asset.attributes["power"]

        if not max_supply:
            logger.error(f"{asset.asset_type} '{asset.name}' has no max power specified. ")
        assert max_supply > 0.0

        # get price per unit of energy,
        # assume cost of 1. if nothing is given (effectively heat loss minimization)
        price = 1.0
        if "costInformation" in asset.attributes:
            if hasattr(asset.attributes["costInformation"], "variableOperationalCosts"):
                if hasattr(asset.attributes["costInformation"].variableOperationalCosts, "value"):
                    price = asset.attributes["costInformation"].variableOperationalCosts.value

        modifiers = dict(
            Q_nominal=self._get_connected_q_nominal(asset),
            price=price,
            Heat_source=dict(min=0.0, max=max_supply, nominal=max_supply / 2.0),
            **self._supply_return_temperature_modifiers(asset),
            **self._rho_cp_modifiers,
        )

        if asset.asset_type == "GeothermalSource":
            # Note that the ESDL target flow rate is in kg/s, but we want m3/s
            try:
                modifiers["target_flow_rate"] = asset.attributes["flowRate"] / self.rho
            except KeyError:
                logger.warning(
                    f"{asset.asset_type} '{asset.name}' has no desired flow rate specified. "
                    f"'{asset.name}' will not be actuated in a constant manner"
                )
            return GeothermalSource, modifiers
        else:
            return Source, modifiers

    def convert_control_valve(self, asset: Asset) -> Tuple[Type[ControlValve], MODIFIERS]:
        """Convert a Valve asset to a ControlValve component."""
        assert asset.asset_type == "Valve"

        modifiers = dict(
            Q_nominal=self._get_connected_q_nominal(asset),
            **self._supply_return_temperature_modifiers(asset),
            **self._rho_cp_modifiers,
        )

        return ControlValve, modifiers

    def convert_check_valve(self, asset: Asset) -> Tuple[Type[CheckValve], MODIFIERS]:
        """Convert a CheckValve asset to a CheckValve component."""
        assert asset.asset_type == "CheckValve"

        modifiers = dict(
            Q_nominal=self._get_connected_q_nominal(asset),
            **self._supply_return_temperature_modifiers(asset),
            **self._rho_cp_modifiers,
        )

        return CheckValve, modifiers
class ESDLHeatModel(_ESDLModelBase):
    """PyCML heat model built from a dictionary of parsed ESDL assets."""

    def __init__(self, assets: Dict[str, Asset], converter_class=AssetToHeatComponent, **kwargs):
        super().__init__(None)

        # Convert all assets with a freshly constructed converter; extra
        # keyword arguments (rho, cp, velocities, ...) are forwarded to it.
        self._esdl_convert(converter_class(**kwargs), assets, "Heat")
from typing import Dict, List, Tuple, Union
import casadi as ca
import numpy as np
from pymoca.backends.casadi.model import Variable as _Variable
# Names of the dunder methods that BaseVariable.__new__ forwards to the
# underlying CasADi MX symbol, so that variables can be used directly in
# arithmetic, bitwise, conversion and comparison expressions.
MATHEMATICAL_OPERATORS = [
    "__add__",
    "__radd__",
    "__sub__",
    "__rsub__",
    "__mul__",
    "__rmul__",
    "__floordiv__",
    "__truediv__",
    "__mod__",
    "__pow__",
    "__lshift__",
    "__rshift__",
    "__and__",
    "__xor__",
    "__or__",
    "__iadd__",
    "__isub__",
    "__imul__",
    "__idiv__",
    "__ifloordiv__",
    "__imod__",
    "__ipow__",
    "__ilshift__",
    "__irshift__",
    "__iand__",
    "__ixor__",
    "__ior__",
    "__neg__",
    "__pos__",
    "__abs__",
    "__invert__",
    "__complex__",
    "__int__",
    "__long__",
    "__float__",
    "__oct__",
    "__hex__",
    "__lt__",
    "__le__",
    "__eq__",
    "__ne__",
    "__ge__",
    "__gt__",
]
class BaseVariable(_Variable):
    """Wrapper around a pymoca Variable holding a CasADi MX symbol.

    Forwards all mathematical operators to the wrapped symbol so instances
    can be used directly in expressions.
    """

    # Attributes that may be set through constructor keyword arguments.
    _attr_set = {"value", "start", "min", "max", "nominal", "fixed"}

    def __new__(cls, *args, **kwargs):
        # Install the operator forwarders once per class, lazily on first
        # instantiation.
        if not hasattr(cls, MATHEMATICAL_OPERATORS[0]):
            for attr in MATHEMATICAL_OPERATORS:

                def _f(self, *args, attr=attr, **kwargs):
                    return getattr(self.symbol, attr)(*args, **kwargs)

                setattr(cls, attr, _f)
        return super().__new__(cls)

    def __init__(self, name, dimensions=None, **kwargs):
        """
        :param name: Symbol name.
        :param dimensions: Optional MX.sym dimensions (scalar if omitted).
        :param kwargs: Attribute values from ``_attr_set`` plus any keyword
            arguments for the pymoca Variable constructor.
        """
        update_attrs = {}
        for k, v in kwargs.items():
            # Fixed: was `Variable._attr_set` — a forward reference to a
            # subclass; the set is identical on this (base) class.
            if k in self._attr_set:
                # Fixed: original compared the literal "k" to "start", so the
                # not-implemented guard could never fire.
                if k == "start":
                    raise NotImplementedError("Setting 'start' attribute is not supported yet")
                update_attrs[k] = v
        for k in update_attrs:
            kwargs.pop(k)

        if dimensions is None:
            dimensions = []

        super().__init__(ca.MX.sym(name, *dimensions), **kwargs)

        # Apply the attribute values after construction.
        for k, v in update_attrs.items():
            setattr(self, k, v)

    def __MX__(self):  # noqa: N802
        return self.symbol

    def __getitem__(self, key):
        return self.symbol.__getitem__(key)
class Variable(BaseVariable):
    """A differentiable model variable."""

    def der(self):
        """Return this variable's time derivative, creating and caching it
        on first access."""
        if not hasattr(self, "_derivative"):
            self._derivative = Variable(f"der({self.symbol.name()})")
        return self._derivative

    @property
    def has_derivative(self):
        """Whether der() has been called on this variable."""
        return hasattr(self, "_derivative")
class ControlInput(BaseVariable):
    """An input variable that is free for the optimizer to choose."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Control inputs are never fixed.
        self.fixed = False
class ConstantInput(BaseVariable):
    """An input variable with externally prescribed (fixed) values."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Constant inputs are always fixed.
        self.fixed = True
class SymbolicParameter(BaseVariable):
    """A (symbolic) model parameter; its value may be given positionally."""

    def __init__(self, name, *args, **kwargs):
        if args and "value" not in kwargs:
            kwargs["value"] = args[0]
            # Fixed: the original still forwarded args[0] positionally, where
            # BaseVariable would interpret it as the `dimensions` argument and
            # `ca.MX.sym(name, *dimensions)` would fail for scalar values.
            args = args[1:]
        super().__init__(name, *args, **kwargs)

    @property
    def name(self):
        """The underlying symbol's name."""
        return self.symbol.name()
class Array:
    """A 1-based n-dimensional container of model variables or submodels.

    Each element is constructed by calling `type_` with the variable name
    suffixed by its (1-based) index, e.g. ``name[2,1]``.
    """

    def __init__(self, type_, var_name, dimensions, **modifiers):
        self._array = np.empty(dimensions, dtype=object)
        self._names = np.empty(dimensions, dtype=object)

        for idx in np.ndindex(dimensions):
            # 1-based index suffix, mirroring Modelica conventions.
            suffix = "[" + ",".join(str(i + 1) for i in idx) + "]"
            self._array[idx] = type_(f"{var_name}{suffix}", **modifiers)
            self._names[idx] = suffix

    def __getitem__(self, key):
        # Translate from 1-based (external) to 0-based (numpy) indexing.
        if isinstance(key, int):
            return self._array[key - 1]
        return self._array[tuple(i - 1 for i in key)]
class Model:
_modifiers = {}
_variables: Dict[str, Union[Array, BaseVariable, "Model"]] = {}
_numeric_parameters: Dict[str, Union[float, int, bool, str]] = {}
_derivatives: Dict[str, Variable] = {}
_equations: List[ca.MX] = []
_initial_equations: List[ca.MX] = []
_inequalities: List[Tuple[ca.MX, float, float]] = []
_initial_inequalities: List[Tuple[ca.MX, float, float]] = []
_skip_variables = None
def __init__(self, name, **modifiers):
# Note that this method should be such that it's allowed to be called
# multiple times, e.g. when a (modifiable) parameter is passed as a
# modifier to its super class.
self._variables = {}
self._numeric_parameters = {}
self._derivatives = {}
self._equations = []
self._initial_equations = []
self.name = name
# Value assignment can be done directly, but we move it to the value attribute to
# make sure that all modifiers are a dictionary
for k, v in modifiers.items():
if not isinstance(v, dict):
modifiers[k] = dict(value=v)
self._modifiers = modifiers
self.__prefix = "" if name is None else f"{self.name}."
if Model._skip_variables is None:
Model._skip_variables = dir(self)
def add_variable(self, type_, var_name, *dimensions, **kwargs):
if var_name in self._variables:
raise Exception(f"Variable with name '{var_name}' already exists")
if var_name in self._modifiers:
kwargs = self.merge_modifiers(kwargs, self._modifiers.pop(var_name))
# Explicit conversion to MX for our wrapper classes
for k, v in kwargs.items():
if isinstance(v, BaseVariable):
kwargs[k] = ca.MX(v)
if dimensions:
var = self._variables[var_name] = Array(
type_, f"{self.__prefix}{var_name}", dimensions, **kwargs
)
else:
var = self._variables[var_name] = type_(f"{self.__prefix}{var_name}", **kwargs)
if isinstance(var, (Variable, ControlInput, ConstantInput)) and (
isinstance(var.value, (ca.MX, BaseVariable)) or not np.isnan(var.value)
):
# For states and algebraic states, we move the "value" part to an equation
self.add_equation(var - var.value)
var.value = np.nan
def add_equation(self, equation, lb=None, ub=None):
if lb is None and ub is None:
self._equations.append(equation)
elif lb is not None and ub is not None and lb == ub:
self._equations.append(equation - lb)
else:
self._inequalities.append((equation, lb, ub))
def add_initial_equation(self, equation, lb=None, ub=None):
if lb is None and ub is None:
self._initial_equations.append(equation)
elif lb is not None and ub is not None and lb == ub:
self._initial_equations.append(equation - lb)
else:
self._initial_inequalities.append((equation, lb, ub))
def connect(self, a: "Connector", b: "Connector"):
if not a.variables.keys() == b.variables.keys():
raise Exception(
f"Cannot connect port {a} of type {type(a)} to port {b} "
f"of type {type(b)} as they have different variables."
)
self._equations.extend([a.variables[k] - b.variables[k] for k in a.variables.keys()])
def der(self, var: Variable):
return var.der()
    @property
    def variables(self):
        """Shallow copy of the variable mapping (name -> variable object)."""
        return self._variables.copy()
    @property
    def numeric_parameters(self):
        """Shallow copy of the numeric parameter mapping (name -> value)."""
        return self._numeric_parameters.copy()
    @property
    def equations(self):
        """Shallow copy of the list of equality constraint expressions."""
        return self._equations.copy()
    @property
    def initial_equations(self):
        """Shallow copy of the list of initial-condition equality expressions."""
        return self._initial_equations.copy()
    @property
    def inequalities(self):
        """Shallow copy of the list of (expression, lb, ub) inequality tuples."""
        return self._inequalities.copy()
    @property
    def initial_inequalities(self):
        """Shallow copy of the list of initial-condition (expression, lb, ub) tuples."""
        return self._initial_inequalities.copy()
@staticmethod
def merge_modifiers(a: dict, b: dict):
"""
Recursive (not in place) merge of dictionaries.
:param a: Base dictionary to merge.
:param b: Dictionary to merge on top of base dictionary.
:return: Merged dictionary
"""
b = b.copy()
for k, v in a.items():
if isinstance(v, dict):
b_node = b.setdefault(k, {})
b[k] = Model.merge_modifiers(v, b_node)
else:
if k not in b:
b[k] = v
return b
    def __MX__(self):  # noqa: N802
        """CasADi conversion hook; returns this model's MX symbol.

        NOTE(review): ``self.symbol`` is not set in the visible code --
        presumably assigned by subclasses or mixins; confirm before relying on it.
        """
        return self.symbol
def __getattr__(self, attr):
try:
return self._variables[attr]
except KeyError:
pass
try:
return self._numeric_parameters[attr]
except KeyError:
raise AttributeError(f"Attribute '{attr}' not found")
    def __setattr__(self, key, value):
        """Route plain attribute assignments into ``_numeric_parameters``.

        Attributes captured in ``Model._skip_variables`` (i.e. regular members
        that existed when the first model was constructed) are set normally.
        Any other assignment is treated as declaring a numeric parameter, with
        a queued modifier value taking precedence over the assigned value.
        """
        if self._skip_variables is None or key in self._skip_variables:
            super().__setattr__(key, value)
        else:
            try:
                # A modifier for this parameter overrides the assigned value.
                value = self._modifiers.pop(key)["value"]
            except KeyError:
                pass
            self._numeric_parameters[key] = value
    def __repr__(self):
        """Use the model's name for debugging output."""
        return self.name
    def __str__(self):
        """Use the model's name for display output."""
        return self.name
    def flatten(self):
        """Flatten this (possibly hierarchical) model into a single ``FlattenedModel``.

        Arrays are expanded to individual elements, submodels are flattened
        recursively, and all variables/parameters are collected under their
        prefixed names together with all (initial) equations and inequalities.

        :raises Exception: If unconsumed modifiers remain on this model.
        :return: A new ``FlattenedModel`` holding the merged contents.
        """
        if self._modifiers:
            raise Exception("Cannot flatten a model with remaining modifiers")
        m = FlattenedModel()
        all_variables = {}
        all_parameters = {}
        all_equations = []
        all_initial_equations = []
        all_inequalities = []
        all_initial_inequalities = []
        # First we expand arrays
        variables = {}
        for k, var in self._variables.items():
            if isinstance(var, Array):
                # Each array element becomes its own entry, keyed by the
                # element's name suffix (e.g. "x[1]").
                for el, suff in zip(var._array.ravel(), var._names.ravel()):
                    variables[f"{k}{suff}"] = el
            else:
                variables[k] = var
        # Move variables to flattened model
        for k, var in variables.items():
            if isinstance(var, Model):
                # Submodel: flatten recursively; its contents already carry
                # their own prefixes.
                flatten_var = var.flatten()
                all_variables.update(flatten_var._variables)
                all_parameters.update(flatten_var._numeric_parameters)
                all_equations.extend(flatten_var._equations)
                all_initial_equations.extend(flatten_var._initial_equations)
                all_inequalities.extend(flatten_var._inequalities)
                all_initial_inequalities.extend(flatten_var._initial_inequalities)
            else:
                all_variables[f"{self.__prefix}{k}"] = var
        all_parameters.update(
            {f"{self.__prefix}{p}": v for p, v in self._numeric_parameters.items()}
        )
        all_equations.extend(self.equations)
        all_initial_equations.extend(self.initial_equations)
        all_inequalities.extend(self.inequalities)
        all_initial_inequalities.extend(self.initial_inequalities)
        m._variables = all_variables
        m._numeric_parameters = all_parameters
        m._equations = all_equations
        m._initial_equations = all_initial_equations
        m._inequalities = all_inequalities
        m._initial_inequalities = all_initial_inequalities
        return m
class FlattenedModel(Model):
    """Result of ``Model.flatten()``: a nameless model with no prefix of its own."""
    def __init__(self):
        super().__init__(None)
class Component(Model):
    """Marker subclass for physical components; behaves exactly like ``Model``."""
    pass
class Connector(Component):
    """Marker subclass for connection ports; used by ``Model.connect``."""
    # NOTE: the original final line was corrupted by fused extraction metadata;
    # restored to a plain `pass` body.
    pass
import math
from numpy import nan
from rtctools_heat_network.pycml import Variable
from .heat_two_port import HeatTwoPort
class Buffer(HeatTwoPort):
    """Heat buffer (storage tank) component for the heat network model.

    Declares the buffer's geometry and thermodynamic parameters, the storage
    and heat-flow variables, and the energy-balance equations.
    """
    def __init__(self, name, **modifiers):
        super().__init__(name, **modifiers)
        self.component_type = "buffer"
        self.Q_nominal = 1.0
        # Supply/return temperatures default to NaN; NOTE(review): with the
        # Model.__setattr__ magic, modifiers may already have replaced these
        # values by the time dT is computed below -- confirm.
        self.T_supply = nan
        self.T_return = nan
        self.dT = self.T_supply - self.T_return
        # Water properties: specific heat [J/(kg K)] and density [kg/m3].
        self.cp = 4200.0
        self.rho = 988.0
        self.Heat_nominal = self.cp * self.rho * self.dT * self.Q_nominal
        self.heat_transfer_coeff = 1.0
        self.height = 5.0
        self.radius = 10.0
        # Cylindrical tank volume.
        self.volume = math.pi * self.radius**2 * self.height
        self.heat_loss_coeff = 2 * self.heat_transfer_coeff / (self.radius * self.rho * self.cp)
        # The hot/cold tank can have a lower bound on its volume.
        # Meaning that they might always be, for e.g., 5% full.
        self.min_fraction_tank_volume = 0.05
        # Initial values
        self.init_V_hot_tank = nan
        self.init_Heat = nan
        # Minimum/maximum values
        self.min_stored_heat = (
            self.volume * self.min_fraction_tank_volume * self.dT * self.cp * self.rho
        )
        self.max_stored_heat = (
            self.volume * (1 - self.min_fraction_tank_volume) * self.dT * self.cp * self.rho
        )
        # Stored_heat is the heat that is contained in the buffer.
        # Heat_buffer is the amount of heat added to or extracted from the buffer
        # per timestep.
        # HeatHot (resp. HeatCold) is the amount of heat added or extracted from
        # the hot (resp. cold) line.
        # As by construction the cold line should have zero heat, we fix HeatCold to zero.
        # Thus Heat_buffer = HeatHot = der(Stored_heat).
        self.add_variable(Variable, "Heat_buffer", nominal=self.Heat_nominal)
        # Assume the storage fills in about an hour at typical rate
        self._typical_fill_time = 3600.0
        self._nominal_stored_heat = self.Heat_nominal * self._typical_fill_time
        self.add_variable(
            Variable,
            "Stored_heat",
            min=self.min_stored_heat,
            max=self.max_stored_heat,
            nominal=self._nominal_stored_heat,
        )
        # For nicer constraint coefficient scaling, we shift a bit more error into
        # the state vector entry of `Heat_loss`. In other words, with a factor of
        # 10.0, we aim for a state vector entry of ~0.1 (instead of 1.0)
        self._heat_loss_error_to_state_factor = 10.0
        self._nominal_heat_loss = (
            self._nominal_stored_heat * self.heat_loss_coeff * self._heat_loss_error_to_state_factor
        )
        self.add_variable(Variable, "Heat_loss", min=0.0, nominal=self._nominal_heat_loss)
        self.add_variable(Variable, "HeatHot", nominal=self.Heat_nominal)
        self.add_variable(Variable, "HeatCold", min=0.0, max=0.0, nominal=self.Heat_nominal)
        # Geometric mean of the two nominals involved in the storage equation.
        self._heat_loss_eq_nominal_buf = (self.Heat_nominal * self._nominal_heat_loss) ** 0.5
        # Flow continuity: equal volumetric flow in and out.
        self.add_equation(self.HeatIn.Q - self.HeatOut.Q)
        # Heat stored in the buffer
        self.add_equation(
            (self.der(self.Stored_heat) - self.Heat_buffer + self.Heat_loss)
            / self._heat_loss_eq_nominal_buf
        )
        # Heat loss is proportional to the stored heat.
        self.add_equation(
            (self.Heat_loss - self.Stored_heat * self.heat_loss_coeff) / self._nominal_heat_loss
        )
        self.add_equation((self.Heat_buffer - (self.HeatHot - self.HeatCold)) / self.Heat_nominal)
        # Set in Mixin. We want HeatHot to be positive when the buffer is
        # charging, which means we need to know the orientation of the connected
        # pipe.
        # (HeatCold + cold_pipe_orientation * HeatOut.Heat) / Heat_nominal = 0.0;
        # (HeatHot - hot_pipe_orientation * HeatIn.Heat) / Heat_nominal = 0.0;
import re
import numpy as np
class _ObjectParameterWrapper(object):
    """
    Python wrapper class for Modelica models/classes.

    Non-nested parameters in the model can be accessed as attributes of the
    Python object. This is not the case for non-parameters, e.g. control
    variables or input time series.

    NOTE(review): subclasses/users are expected to provide ``self.symbol``
    (the Modelica symbol prefix); it is not set here -- confirm.
    """

    def __init__(self, optimization_problem):
        self.optimization_problem = optimization_problem

    def _parse_array(self, optimization_problem, ks):
        """Reassemble an array parameter from its individually stored elements.

        :param ks: Parameter keys of the form ``"<symbol>.<name>[i,j,...]"``
            with 1-based indices.
        :return: A numpy array with each element at its (0-based) position.
        """
        # Figure out dimension of array from the first key's index list.
        inds = re.search(r"\[(.*?)\]$", ks[0])
        inds = re.findall(r"(\d+)", inds.group(1))
        n_dim = len(inds)

        pattern = r".*?\[" + ",".join([r"(\d+)"] * n_dim) + r"\]$"
        prog = re.compile(pattern)
        indices = [prog.match(x).groups() for x in ks]

        # The array shape is the largest (1-based) index seen per dimension.
        shape = np.zeros(n_dim, dtype=int)
        for i in range(n_dim):
            shape[i] = max(int(x[i]) for x in indices)
        arr = np.zeros(shape)

        parameters = optimization_problem.parameters(0)

        # TODO: Why are parameters stored as individual elements? Would be much easier to just to
        # parameters[k].getMatrixValue.toArray() or .toMatrix, to avoid looping/regex string parsing.
        for k in ks:
            inds = tuple(int(x) - 1 for x in prog.match(k).groups())
            arr[inds] = float(parameters[k])
        return arr

    def __getattr__(self, attr):
        """
        Not found in regular class member variables or functions. Lookup in
        Modelica model's parameters.
        """
        try:
            # Array parameters are not stored as arrays, but as individual
            # elements. So if the parameter is an array, we will have to put
            # it back together again.
            ks = [x for x in self.optimization_problem.parameters(0).keys()
                  if x.startswith(self.symbol + "." + attr + "[")]
            if ks:
                return self._parse_array(self.optimization_problem, ks)
            else:
                return self.optimization_problem.parameters(0)[self.symbol + "." + attr]
        except KeyError:
            # Report the missing attribute by name (the bare `raise
            # AttributeError` used to lose it) and drop the internal KeyError
            # context for a clean traceback.
            raise AttributeError(attr) from None
from math import atan2, pi, sqrt
TWO_PI = (2.0 * pi)  # Full circle in radians; used to normalize angles below.
class DeadEndError(Exception):
    """Raised when the segment traversal reaches a point with no onward segment."""
    pass
def enclosing_segments(point, lines, return_lines=False):
    """Find the closed ring of segments that encloses ``point``.

    Starting from the segment closest to ``point``, the traversal repeatedly
    picks the most counter-clockwise continuation until it returns to the
    start point, yielding the enclosing polygon.

    :param point: (x, y) tuple to enclose.
    :param lines: List of lines, each a list of (x, y) points.
    :param return_lines: When True, also return the enclosing segments grouped
        per source line.
    :raises DeadEndError: If the traversal gets stuck at a dead end.
    """
    lines = _split_lines(lines)
    # Get all segments in all lines
    line_segments = [(s, i) for i, points in enumerate(lines) for s in zip(points[:-1], points[1:])]
    # TODO: We are assuming unique segments here. It should be possible for a
    # segment to be part of more than one line.
    segment_to_line = {s: i for s, i in line_segments}
    # All segments twice, with start and end points reversed.
    segment_to_line.update({tuple(reversed(s)): i for s, i in line_segments})
    # Process on segments only. Mapping to original line number will be done
    # later.
    segments = [s for s, i in line_segments]
    # Some segments might be zero length with the start and ending point
    # equal. Get rid of them, as we cannot derive any direction from them (and
    # do not need them anyway).
    segments = [(s, e) for s, e in segments if s != e]
    # Map each start point to the segments (in both orientations) leaving it.
    segment_dict = {}
    for s in segments:
        # Original segment
        segment_dict.setdefault(s[0], []).append(s)
        # Flipped segment
        flip_s = tuple(reversed(s))
        segment_dict.setdefault(flip_s[0], []).append(flip_s)
    # Find closest segment/point
    sorted_segments = sorted(segments, key=lambda x: _distance_point_to_segment(point, x))
    # Determine in which way we should traverse the closest segment. If we
    # cannot figure out the direction, we pick the next closest segment
    # instead.
    for closest_segment in sorted_segments:
        (x1, y1), (x2, y2) = closest_segment
        (x0, y0) = point
        angle_start = atan2(y1 - y0, x1 - x0)
        angle_end = atan2(y2 - y0, x2 - x0)
        angle_diff = (angle_end - angle_start) % TWO_PI
        # Orient the segment so the point lies on its left-hand side.
        if angle_diff > pi:
            closest_segment = tuple(reversed(closest_segment))
        if angle_diff != 0:
            break
    enclosing_segments = []
    enclosing_segments.append(closest_segment)
    # Start adding segments until we get back to our start point again.
    start_point = enclosing_segments[0][0]
    prev_segment = closest_segment
    prev_end_point = closest_segment[1]
    while prev_end_point != start_point:
        results = segment_dict[prev_end_point]
        # Remove the reverse of the last added segment in the possibilities
        results = [x for x in results if x != tuple(reversed(prev_segment))]
        if not results:
            raise DeadEndError("Could not find another segment starting from {}".format(prev_end_point))
        # Pick the segment which goes most counter-clockwise
        cur_segment_angle = _segment_angle(prev_segment)
        results = sorted(results, key=lambda x: (_segment_angle(x) - cur_segment_angle + pi) % TWO_PI, reverse=True)  # noqa B023
        next_segment = results[0]
        enclosing_segments.append(next_segment)
        prev_end_point = next_segment[1]
        prev_segment = next_segment
    # Select lines from the enclosing segments, based on their source line
    enclosing_lines = []
    source_lines = []
    i_start, i_stop = 0, None  # [i_start, i_stop)
    prev_line_no = segment_to_line[enclosing_segments[0]]
    for i, s in enumerate(enclosing_segments):
        line_no = segment_to_line[s]
        if line_no != prev_line_no:
            i_stop = i
            enclosing_lines.append(enclosing_segments[i_start:i_stop])
            source_lines.append(prev_line_no)
            i_start = i_stop
            prev_line_no = line_no
    # Last slice
    enclosing_lines.append(enclosing_segments[i_start:])
    source_lines.append(line_no)
    # Check if last line and first line belong to the same source line. If so, merge them
    if source_lines[0] == source_lines[-1]:
        enclosing_lines[-1].extend(enclosing_lines.pop(0))
    # Convert segments to points
    for i, l in enumerate(enclosing_lines):
        points = [s[0] for s in l]
        points.append(l[-1][1])
        enclosing_lines[i] = points
    if return_lines:
        return enclosing_segments, enclosing_lines
    else:
        return enclosing_segments
def _segment_angle(segment):
    """
    Return the direction of ``segment`` as an angle in the range [0, 2*pi),
    measured with the segment's start point as the origin.
    """
    start, end = segment
    dx = end[0] - start[0]
    dy = end[1] - start[1]
    return atan2(dy, dx) % TWO_PI
def _split_lines(lines):
    """
    Insert intersection points into every line.

    Input is a list of lines, each consisting of a list of points:
    [[(x1, y1), (x2, y2), ...], ...]. The output has the same structure, but
    with the intersection points between all lines (including
    self-intersections) added as extra points.
    """
    result = []
    for line in lines:
        split_line = [line[0]]
        for segment in zip(line[:-1], line[1:]):
            # Check against the segments of every line, including the current
            # one: a line may intersect itself.
            others = (s for other in lines for s in zip(other[:-1], other[1:]))
            pts = _split_segment(segment, others)
            split_line.extend(pts[1:])  # The segment's start point is already present
        result.append(split_line)
    return result
def _split_segment(segment, other_segments):
    """
    Split ``segment`` at every intersection with ``other_segments``.

    Returns [(x_start, y_start), (x2, y2), ..., (x_end, y_end)]: the segment's
    end points plus every intersection point with any of the other segments,
    ordered by distance from the start point.
    """
    start_point, end_point = segment

    points = {start_point, end_point}
    for other in other_segments:
        crossing = _segment_intersection(segment, other)
        if crossing is not None:
            points.add(crossing)

    def _squared_distance_from_start(p):
        # Monotonic in the true distance, so the sqrt can be skipped.
        return (p[0] - start_point[0]) ** 2 + (p[1] - start_point[1]) ** 2

    return sorted(points, key=_squared_distance_from_start)
def _point_on_segment(point, segment):
    """
    Check whether ``point`` lies within the axis-aligned bounding box of
    ``segment``.

    Callers only pass points already known to lie on the segment's carrying
    line, so the bounding-box test is sufficient to decide segment membership.
    """
    x0, y0 = point
    (x1, y1), (x2, y2) = segment

    x_min, x_max = min(x1, x2), max(x1, x2)
    y_min, y_max = min(y1, y2), max(y1, y2)

    # Bug fix: the y-range used to be tested against y1 (an end point of the
    # segment itself), which made the y check always pass and broke e.g.
    # vertical segments. It must test the candidate point's own y coordinate.
    return (x_min <= x0 <= x_max) and (y_min <= y0 <= y_max)
def _general_equation_form(segment):
    """
    Return coefficients (a, b, c) such that the line through ``segment``
    satisfies a*x + b*y + c == 0.
    """
    start, end = segment
    x1, y1 = start
    x2, y2 = end
    return (y1 - y2, x2 - x1, x1 * y2 - x2 * y1)
def _distance_point_to_segment(point, segment):
    """
    Return the Euclidean distance from ``point`` to the line segment
    ``segment``.
    """
    x0, y0 = point
    (x1, y1), (x2, y2) = segment

    # Degenerate zero-length "segments" occur in e.g. contour plot data.
    # Handle them up front to avoid dividing by zero below.
    if (x1, y1) == (x2, y2):
        return sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)

    # Express the carrying line as ax + by + c = 0 and project the point onto
    # it. See https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line
    a, b, c = _general_equation_form(segment)
    denom = a ** 2 + b ** 2
    xt = (b * (b * x0 - a * y0) - a * c) / denom
    yt = (a * (-b * x0 + a * y0) - b * c) / denom

    if _point_on_segment((xt, yt), segment):
        # The perpendicular foot lies on the segment itself.
        return sqrt((xt - x0) ** 2 + (yt - y0) ** 2)

    # Otherwise the closest point is one of the segment's end points.
    d_start = sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)
    d_end = sqrt((x2 - x0) ** 2 + (y2 - y0) ** 2)
    return min(d_start, d_end)
def _segment_intersection(segment_1, segment_2):
    """
    Return the intersection point of two segments, or None if their carrying
    lines are parallel or the intersection lies outside either segment.
    """
    # Intersect the two carrying lines in general equation form.
    a1, b1, c1 = _general_equation_form(segment_1)
    a2, b2, c2 = _general_equation_form(segment_2)

    div = a1 * b2 - a2 * b1
    if div != 0:
        yt = (c1 * a2 - c2 * a1) / div
        xt = (b1 * c2 - c1 * b2) / div
    else:
        # Lines are parallel, and do not intersect.
        return None

    # The line intersection only counts if it falls within both segments.
    if _point_on_segment((xt, yt), segment_1) and \
            _point_on_segment((xt, yt), segment_2):
        return (xt, yt)
    else:
        # NOTE: the original final line was corrupted by fused extraction
        # metadata; restored to a plain `return None`.
        return None
import logging
import numpy as np
from rtctools.optimization.goal_programming_mixin import Goal
from rtctools.optimization.optimization_problem import OptimizationProblem
from rtctools.optimization.timeseries import Timeseries
logger = logging.getLogger("rtctools")
# Goal types that are applied per time step ("path" goals).
PATH_GOALS = ["minimization_path", "maximization_path", "range"]
# Goal types applied over the whole horizon at once (none supported yet).
NON_PATH_GOALS = []
GOAL_TYPES = PATH_GOALS + NON_PATH_GOALS
# Supported ways of specifying target_min/target_max.
TARGET_DATA_TYPES = [
    "value",
    "parameter",
    "timeseries",
]
class BaseGoal(Goal):
    """
    Basic optimization goal for a given state.

    :cvar goal_type:
        Type of goal ('range' or 'minimization_path' or 'maximization_path')
    :cvar target_data_type:
        Type of target data ('value', 'parameter', 'timeseries').
        If 'value', set the target bounds by value.
        If 'parameter', set the bounds by a parameter. The target_min
        and/or target_max are expected to be the name of the parameter.
        If 'timeseries', set the bounds by a timeseries. The target_min
        and/or target_max are expected to be the name of the timeseries.
    """

    def __init__(
        self,
        optimization_problem: OptimizationProblem,
        state,
        goal_type="minimization_path",
        function_min=np.nan,
        function_max=np.nan,
        function_nominal=np.nan,
        target_data_type="value",
        target_min=np.nan,
        target_max=np.nan,
        priority=1,
        weight=1.0,
        order=2,
    ):
        """Build a goal for ``state``; only 'range' goals use targets and a function range."""
        self.state = state
        self.goal_type = None
        self._set_goal_type(goal_type)
        if goal_type == "range":
            self._set_function_bounds(
                optimization_problem=optimization_problem,
                function_min=function_min,
                function_max=function_max,
            )
        self._set_function_nominal(function_nominal)
        if goal_type == "range":
            self._set_target_bounds(
                optimization_problem=optimization_problem,
                target_data_type=target_data_type,
                target_min=target_min,
                target_max=target_max,
            )
        # Non-finite priority/weight inputs fall back to their defaults.
        self.priority = priority if np.isfinite(priority) else 1
        self.weight = weight if np.isfinite(weight) else 1.0
        self._set_order(order)

    def function(self, optimization_problem, ensemble_member):
        """Return the optimized expression: the state itself (negated for maximization)."""
        del ensemble_member
        if self.goal_type == "maximization_path":
            return -optimization_problem.state(self.state)
        if self.goal_type in ["minimization_path", "range"]:
            return optimization_problem.state(self.state)
        raise ValueError("Unsupported goal type '{}', supported are {}".format(self.goal_type, GOAL_TYPES))

    def _set_order(self, order):
        """Set the order of the goal (default 1 for min/max path goals, 2 otherwise)."""
        if np.isfinite(order):
            self.order = order
        elif self.goal_type in ["maximization_path", "minimization_path"]:
            self.order = 1
        else:
            self.order = 2
        if self.goal_type == "maximization_path" and self.order % 2 == 0:
            # An even order makes the sign flip in function() ineffective.
            logger.warning(
                "Using even order '%i' for a maximization_path goal" + " results in a minimization_path goal.",
                self.order,
            )

    def _set_goal_type(
        self,
        goal_type,
    ):
        """Validate and set the goal type."""
        if goal_type in GOAL_TYPES:
            self.goal_type = goal_type
        else:
            raise ValueError(f"goal_type should be one of {GOAL_TYPES}.")

    def _set_function_bounds(
        self,
        optimization_problem: OptimizationProblem,
        function_min=np.nan,
        function_max=np.nan,
    ):
        """Set the function range, falling back to the problem's state bounds for missing values."""
        self.function_range = [function_min, function_max]
        # Look the state bounds up once instead of on every branch.
        state_bounds = optimization_problem.bounds()[self.state]
        if not np.isfinite(function_min):
            if isinstance(state_bounds[0], float):
                self.function_range[0] = state_bounds[0]
            elif isinstance(state_bounds[0], Timeseries):
                self.function_range[0] = state_bounds[0].values
        if not np.isfinite(function_max):
            if isinstance(state_bounds[1], float):
                self.function_range[1] = state_bounds[1]
            elif isinstance(state_bounds[1], Timeseries):
                self.function_range[1] = state_bounds[1].values

    def _set_function_nominal(self, function_nominal):
        """Set function nominal; default to the midpoint of the function range, else 1.0."""
        self.function_nominal = function_nominal
        if not np.isfinite(self.function_nominal):
            if isinstance(self.function_range, (list, tuple)):
                if np.all(np.isfinite(self.function_range)):
                    self.function_nominal = np.sum(self.function_range) / 2
                    return
            self.function_nominal = 1.0
            logger.warning("Function nominal not specified, nominal is set to 1.0")

    def _set_target_bounds(
        self,
        optimization_problem: OptimizationProblem,
        target_data_type="value",
        target_min=np.nan,
        target_max=np.nan,
    ):
        """Set target_min/target_max from a literal value, a parameter name or a timeseries name.

        :raises ValueError: If ``target_data_type`` is not supported.
        """
        if target_data_type not in TARGET_DATA_TYPES:
            raise ValueError(f"target_data_type should be one of {TARGET_DATA_TYPES}.")
        if target_data_type == "value":
            self.target_min = float(target_min)
            self.target_max = float(target_max)
        elif target_data_type == "parameter":
            self.target_max = self._resolve_parameter_target(optimization_problem, target_max)
            self.target_min = self._resolve_parameter_target(optimization_problem, target_min)
        elif target_data_type == "timeseries":
            self.target_max = self._resolve_timeseries_target(optimization_problem, target_max)
            self.target_min = self._resolve_timeseries_target(optimization_problem, target_min)

    @staticmethod
    def _resolve_parameter_target(optimization_problem, target):
        """Resolve one parameter-based target bound.

        A string is looked up as a parameter name (with the io layer as a
        fallback when the parameter resolves to None). None/NaN mean "no
        target". A finite numeric value is now used directly; previously it
        was silently ignored, and None crashed in ``np.isnan``.
        """
        if isinstance(target, str):
            value = optimization_problem.parameters(0)[target]
            if value is None:
                value = optimization_problem.io.get_parameter(target)
            return value
        if target is None or np.isnan(target):
            return np.nan
        return float(target)

    @staticmethod
    def _resolve_timeseries_target(optimization_problem, target):
        """Resolve one timeseries-based target bound (name lookup, NaN/None passthrough, or literal)."""
        if isinstance(target, str):
            return optimization_problem.get_timeseries(target)
        if target is None or np.isnan(target):
            return np.nan
        return float(target)
import logging
import math
import os
import copy
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
from rtctools_interface.optimization.read_plot_table import read_plot_table
logger = logging.getLogger("rtctools")
def get_subplot(i_plot, n_rows, axs):
    """Map a flat plot index to its (row, column) position and return that subplot.

    Plots are laid out column-major: each column is filled top to bottom
    before moving to the next one.
    """
    col = math.ceil((i_plot + 1) / n_rows) - 1
    row = i_plot - n_rows * col
    return axs[row, col]
def plot_with_previous(subplot, state_name, t_datetime, results, results_dict_prev):
    """Plot the timeseries for ``state_name``.

    When results from a previous priority optimization are available, a dotted
    gray line with that earlier timeseries is added as well.
    """
    subplot.plot(t_datetime, results[state_name], label=state_name)
    if not results_dict_prev:
        return
    previous = results_dict_prev["extract_result"]
    subplot.plot(
        t_datetime,
        previous[state_name],
        label=state_name + " at previous priority optimization",
        color="gray",
        linestyle="dotted",
    )
def plot_additional_variables(subplot, t_datetime, results, results_dict_prev, subplot_config):
    """Plot the additional variables defined in the plot_table."""
    # The two style groups only differ in the extra keyword arguments given
    # to plot().
    style_kwargs = {
        "variables_style_1": {},
        "variables_style_2": {"linestyle": "solid", "linewidth": "0.5"},
    }
    for table_key, extra_kwargs in style_kwargs.items():
        for name in subplot_config.get(table_key, []):
            subplot.plot(t_datetime, results[name], label=name, **extra_kwargs)
    for name in subplot_config.get("variables_with_previous_result", []):
        plot_with_previous(subplot, name, t_datetime, results, results_dict_prev)
def format_subplot(subplot, subplot_config):
    """Set the y-label, legend, title, x-axis date format and grid for one subplot."""
    subplot.set_ylabel(subplot_config["y_axis_title"])
    subplot.legend()

    # An explicit custom title wins; goal-generator rows get a generated one.
    if isinstance(subplot_config.get("custom_title"), str):
        subplot.set_title(subplot_config["custom_title"])
    elif subplot_config["specified_in"] == "goal_generator":
        subplot.set_title(
            "Goal for {} (active from priority {})".format(subplot_config["state"], subplot_config["priority"])
        )

    subplot.xaxis.set_major_formatter(mdates.DateFormatter("%d%b%H"))
    subplot.grid(which="both", axis="x")
class PlotGoalsMixin:
    """
    Mixin that plots, for each completed optimization priority, the goal
    results described in the plot table.
    """

    # Maximum number of subplots per column before a new column is started.
    plot_max_rows = 4

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        try:
            plot_table_file = self.plot_table_file
        except AttributeError:
            plot_table_file = os.path.join(self._input_folder, "plot_table.csv")
        self.plot_table = read_plot_table(plot_table_file, self.goal_table_file)
        # Store list of variable-names that may not be present in the results.
        variables_style_1 = [var for var_list in self.plot_table.get("variables_style_1", []) for var in var_list]
        variables_style_2 = [var for var_list in self.plot_table.get("variables_style_2", []) for var in var_list]
        variables_with_previous_result = [
            var for var_list in self.plot_table.get("variables_with_previous_result", []) for var in var_list
        ]
        self.custom_variables = variables_style_1 + variables_style_2 + variables_with_previous_result

    def pre(self):
        """Tasks before optimizing."""
        super().pre()
        self.intermediate_results = []

    def plot_goal_results_from_dict(self, result_dict, results_dict_prev=None):
        """Plot results, given a dict."""
        self.plot_goals_results(result_dict, results_dict_prev)

    def plot_goal_results_from_self(self, priority=None):
        """Plot results taken directly from this problem's current state."""
        result_dict = {
            "extract_result": self.extract_results(),
            "priority": priority,
        }
        self.plot_goals_results(result_dict)

    def plot_goals_results(self, result_dict, results_prev=None):
        """Create a figure with a subplot for each row in the plot_table and save it to disk."""
        results = result_dict["extract_result"]
        plot_config = self.plot_table.to_dict("records")
        if len(plot_config) == 0:
            logger.info(
                "PlotGoalsMixin did not find anything to plot."
                + " Are there any goals that are active and described in the plot_table?"
            )
            return
        # Initialize figure
        n_cols = math.ceil(len(plot_config) / self.plot_max_rows)
        n_rows = math.ceil(len(plot_config) / n_cols)
        fig, axs = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=(n_cols * 9, n_rows * 3), dpi=80, squeeze=False)
        fig.suptitle("Results after optimizing until priority {}".format(result_dict["priority"]), fontsize=14)
        # The time axis is identical for every subplot; compute it once.
        t_datetime = np.array(self.io.datetimes)
        # Add subplot for each row in the plot_table
        for i_plot, subplot_config in enumerate(plot_config):
            subplot = get_subplot(i_plot, n_rows, axs)
            if subplot_config["specified_in"] == "goal_generator":
                plot_with_previous(subplot, subplot_config["state"], t_datetime, results, results_prev)
            plot_additional_variables(subplot, t_datetime, results, results_prev, subplot_config)
            format_subplot(subplot, subplot_config)
            if subplot_config["goal_type"] in ["range"]:
                self.add_ranges(subplot, t_datetime, subplot_config)
        # Save figure
        for i in range(0, n_cols):
            axs[n_rows - 1, i].set_xlabel("Time")
        fig.tight_layout()
        # Bug fix: a stray os.makedirs("goal_figures") used to create an
        # unused directory in the current working directory; only the output
        # folder below is actually used.
        new_output_folder = os.path.join(self._output_folder, "goal_figures")
        os.makedirs(new_output_folder, exist_ok=True)
        fig.savefig(os.path.join(new_output_folder, "after_priority_{}.png".format(result_dict["priority"])))
        # Close the figure: one figure is created per priority, and leaving
        # them all open leaks memory (matplotlib warns after ~20 open figures).
        plt.close(fig)

    def priority_completed(self, priority: int) -> None:
        """Store a snapshot of the results required for plotting this priority."""
        extracted_results = copy.deepcopy(self.extract_results())
        # Custom plot variables may not be decision variables; fall back to
        # their timeseries when they are absent from the extracted results.
        results_custom_variables = {
            custom_variable: self.get_timeseries(custom_variable)
            for custom_variable in self.custom_variables
            if custom_variable not in extracted_results
        }
        extracted_results.update(results_custom_variables)
        to_store = {"extract_result": extracted_results, "priority": priority}
        self.intermediate_results.append(to_store)
        super().priority_completed(priority)

    def post(self):
        """Tasks after optimizing. Creates a plot for each priority."""
        super().post()
        # Pair each stored result with its predecessor (None for the first).
        for intermediate_result_prev, intermediate_result in zip(
            [None] + self.intermediate_results[:-1], self.intermediate_results
        ):
            self.plot_goal_results_from_dict(intermediate_result, intermediate_result_prev)

    def add_ranges(self, subplot, t_datetime, subplot_config):
        """Add dashed red lines for the lower and upper target of a range goal.

        :raises ValueError: If the configured target data type is unknown.
        """
        t = self.times()
        ones = np.full_like(t, 1)
        if subplot_config["target_data_type"] == "parameter":
            try:
                target_min = ones * self.parameters(0)[subplot_config["target_min"]]
                target_max = ones * self.parameters(0)[subplot_config["target_max"]]
            except TypeError:
                # NOTE(review): presumably hit when the parameter entry is not
                # numeric (e.g. symbolic); fall back to the io layer -- confirm.
                target_min = ones * self.io.get_parameter(subplot_config["target_min"])
                target_max = ones * self.io.get_parameter(subplot_config["target_max"])
        elif subplot_config["target_data_type"] == "value":
            target_min = ones * float(subplot_config["target_min"])
            target_max = ones * float(subplot_config["target_max"])
        elif subplot_config["target_data_type"] == "timeseries":
            if isinstance(subplot_config["target_min"], str):
                target_min = self.get_timeseries(subplot_config["target_min"]).values
            else:
                target_min = ones * subplot_config["target_min"]
            if isinstance(subplot_config["target_max"], str):
                target_max = self.get_timeseries(subplot_config["target_max"]).values
            else:
                target_max = ones * subplot_config["target_max"]
        else:
            message = "Target type {} not known.".format(subplot_config["target_data_type"])
            logger.error(message)
            raise ValueError(message)
        # A single line suffices when the two targets coincide everywhere.
        if np.array_equal(target_min, target_max, equal_nan=True):
            subplot.plot(t_datetime, target_min, "r--", label="Target")
        else:
            subplot.plot(t_datetime, target_min, "r--", label="Target min")
            subplot.plot(t_datetime, target_max, "r--", label="Target max")
import cProfile
import logging
import os
import pstats
import re
import sys
import casadi
from . import __version__
from ._internal.alias_tools import OrderedSet
from .data import pi
from .optimization.pi_mixin import PIMixin as OptimizationPIMixin
from .simulation.pi_mixin import PIMixin as SimulationPIMixin
def _resolve_folder(kwargs, base_folder, subfolder_kw, default):
    """Pop ``subfolder_kw`` from ``kwargs`` (defaulting to ``default``) and resolve it.

    Absolute subfolder paths are returned unchanged; relative ones are joined
    onto ``base_folder``.
    """
    subfolder = kwargs.pop(subfolder_kw, default)
    return subfolder if os.path.isabs(subfolder) else os.path.join(base_folder, subfolder)
def run_optimization_problem(optimization_problem_class,
                             base_folder='..', log_level=logging.INFO, profile=False,
                             **kwargs):
    """
    Sets up and solves an optimization problem.

    This function makes the following assumptions:

    1. That the ``base_folder`` contains subfolders ``input``, ``output``, and ``model``,
       containing input data, output data, and the model, respectively.
    2. When using :class:`.CSVLookupTableMixin`, that the base folder contains a subfolder ``lookup_tables``.
    3. When using :class:`.ModelicaMixin`, that the base folder contains a subfolder ``model``.
    4. When using :class:`.ModelicaMixin`, that the toplevel Modelica model name equals the class name.

    :param optimization_problem_class: Optimization problem class to solve.
    :param base_folder: Base folder, or ``None`` to read the base folder from the command line
                        (consistent with :func:`run_simulation_problem`).
    :param log_level: The log level to use.
    :param profile: Whether or not to enable profiling.

    :returns: :class:`.OptimizationProblem` instance.
    """
    if base_folder is None:
        # Check command line arguments (mirrors run_simulation_problem)
        if len(sys.argv) != 2:
            raise Exception("Usage: {} BASE_FOLDER".format(sys.argv[0]))
        base_folder = sys.argv[1]
    elif not os.path.isabs(base_folder):
        # Resolve base folder relative to script folder
        base_folder = os.path.join(sys.path[0], base_folder)

    model_folder = _resolve_folder(kwargs, base_folder, 'model_folder', 'model')
    input_folder = _resolve_folder(kwargs, base_folder, 'input_folder', 'input')
    output_folder = _resolve_folder(kwargs, base_folder, 'output_folder', 'output')

    # Set up logging
    logger = logging.getLogger("rtctools")

    # Add stream handler if the logger does not already have any handlers.
    if not logger.hasHandlers() and not any((isinstance(h, logging.StreamHandler) for h in logger.handlers)):
        handler = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    # Add pi.DiagHandler, if using PIMixin. Only add it if it does not already exist.
    if (issubclass(optimization_problem_class, OptimizationPIMixin) and
            not any((isinstance(h, pi.DiagHandler) for h in logger.handlers))):
        handler = pi.DiagHandler(output_folder)
        logger.addHandler(handler)

    # Set log level
    logger.setLevel(log_level)

    # Log version info
    logger.info(
        "Using RTC-Tools {}.".format(__version__))
    logger.debug(
        "Using CasADi {}.".format(casadi.__version__))

    # Check for some common mistakes in inheritance order
    suggested_order = OrderedSet([
        'HomotopyMixin',
        'MinAbsGoalProgrammingMixin', 'LinearizedOrderGoalProgrammingMixin',
        'SinglePassGoalProgrammingMixin', 'GoalProgrammingMixin',
        'PIMixin', 'CSVMixin', 'ModelicaMixin',
        'ControlTreeMixin', 'CollocatedIntegratedOptimizationProblem', 'OptimizationProblem'])
    base_names = OrderedSet([b.__name__ for b in optimization_problem_class.__bases__])
    # OrderedSet intersection preserves the order of the left operand, so
    # comparing both intersections detects an out-of-order inheritance list.
    if suggested_order & base_names != base_names & suggested_order:
        msg = 'Please inherit from base classes in the following order: {}'.format(list(base_names & suggested_order))
        logger.error(msg)
        raise Exception(msg)

    # Run
    try:
        prob = optimization_problem_class(
            model_folder=model_folder, input_folder=input_folder, output_folder=output_folder,
            **kwargs)
        if profile:
            filename = os.path.join(base_folder, "profile.prof")
            cProfile.runctx("prob.optimize()", globals(), locals(), filename)
            s = pstats.Stats(filename)
            s.strip_dirs().sort_stats("time").print_stats()
        else:
            prob.optimize()
        return prob
    except Exception as e:
        logger.error(str(e))
        if isinstance(e, TypeError):
            # A TypeError mentioning abstract methods usually means a missing
            # mixin; try to give the user a more helpful message.
            try:
                failed_class = re.search(
                    "Can't instantiate (.*) with abstract methods", str(e)).group(1)
                abstract_method = re.search(
                    ' with abstract methods (.*)', str(e)).group(1)
                logger.error(
                    'The {} is missing a mixin. Please add a mixin that instantiates '
                    'abstract method {}, so that the optimizer can run.'.format(
                        failed_class, abstract_method))
            except Exception:
                # Not the abstract-method TypeError; nothing extra to report.
                pass
        for handler in logger.handlers:
            handler.flush()
        raise
def run_simulation_problem(simulation_problem_class,
                           base_folder='..', log_level=logging.INFO,
                           **kwargs):
    """
    Sets up and runs a simulation problem.

    :param simulation_problem_class: Simulation problem class to solve.
    :param base_folder: Folder within which subfolders "input", "output", and "model" exist,
                        containing input and output data, and the model, respectively.
                        Pass ``None`` to read the base folder from the command line.
    :param log_level: The log level to use.

    :returns: :class:`SimulationProblem` instance.
    """
    if base_folder is None:
        # Check command line arguments
        if len(sys.argv) != 2:
            raise Exception("Usage: {} BASE_FOLDER".format(sys.argv[0]))
        base_folder = sys.argv[1]
    else:
        if not os.path.isabs(base_folder):
            # Resolve base folder relative to script folder
            base_folder = os.path.join(sys.path[0], base_folder)

    model_folder = _resolve_folder(kwargs, base_folder, 'model_folder', 'model')
    input_folder = _resolve_folder(kwargs, base_folder, 'input_folder', 'input')
    output_folder = _resolve_folder(kwargs, base_folder, 'output_folder', 'output')

    # Set up logging
    logger = logging.getLogger("rtctools")

    # Add a stream handler, but only if the logger has no handlers yet.
    if not logger.hasHandlers() and not any((isinstance(h, logging.StreamHandler) for h in logger.handlers)):
        handler = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    # Add pi.DiagHandler, if using PIMixin. Only add it if it does not already exist.
    if (issubclass(simulation_problem_class, SimulationPIMixin) and
            not any((isinstance(h, pi.DiagHandler) for h in logger.handlers))):
        handler = pi.DiagHandler(output_folder)
        logger.addHandler(handler)

    # Set log level
    logger.setLevel(log_level)

    # Log version info
    logger.info(
        'Using RTC-Tools {}'.format(__version__))
    logger.debug(
        'Using CasADi {}.'.format(casadi.__version__))

    # Run
    prob = simulation_problem_class(
        model_folder=model_folder, input_folder=input_folder, output_folder=output_folder,
        **kwargs)
    prob.simulate()
    return prob
import logging
from datetime import timedelta
import numpy as np
import rtctools.data.pi as pi
import rtctools.data.rtc as rtc
from rtctools.simulation.io_mixin import IOMixin
logger = logging.getLogger("rtctools")
class PIMixin(IOMixin):
    """
    Adds `Delft-FEWS Published Interface
    <https://publicwiki.deltares.nl/display/FEWSDOC/The+Delft-Fews+Published+Interface>`_
    I/O to your simulation problem.

    During preprocessing, files named ``rtcDataConfig.xml``, ``timeseries_import.xml``, and ``rtcParameterConfig.xml``
    are read from the ``input`` subfolder. ``rtcDataConfig.xml`` maps
    tuples of FEWS identifiers, including location and parameter ID, to RTC-Tools time series identifiers.

    During postprocessing, a file named ``timeseries_export.xml`` is written to the ``output`` subfolder.

    :cvar pi_binary_timeseries: Whether to use PI binary timeseries format. Default is ``False``.
    :cvar pi_parameter_config_basenames:
        List of parameter config file basenames to read. Default is [``rtcParameterConfig``].
    :cvar pi_check_for_duplicate_parameters: Check if duplicate parameters are read. Default is ``True``.
    :cvar pi_validate_timeseries: Check consistency of timeseries. Default is ``True``.
    """

    #: Whether to use PI binary timeseries format
    pi_binary_timeseries = False

    #: Location of rtcParameterConfig files
    pi_parameter_config_basenames = ['rtcParameterConfig']

    #: Check consistency of timeseries
    pi_validate_timeseries = True

    #: Check for duplicate parameters
    pi_check_for_duplicate_parameters = True

    #: Ensemble member to read from input
    pi_ensemble_member = 0

    def __init__(self, **kwargs):
        # Call parent class first for default behaviour.
        super().__init__(**kwargs)

        # Load rtcDataConfig.xml. We assume this file does not change over the
        # life time of this object.
        self.__data_config = rtc.DataConfig(self._input_folder)

    def read(self):
        """
        Read the parameter configuration(s) and the time series import file
        into the internal data store.

        :raises FileNotFoundError: when a parameter config or the time series
            import file is missing from the input folder.
        :raises ValueError: when time stamps are not strictly increasing or
            not equidistant (only if ``pi_validate_timeseries`` is set).
        """
        # Call parent class first for default behaviour.
        super().read()

        # rtcParameterConfig
        self.__parameter_config = []
        try:
            for pi_parameter_config_basename in self.pi_parameter_config_basenames:
                self.__parameter_config.append(pi.ParameterConfig(
                    self._input_folder, pi_parameter_config_basename))
        except FileNotFoundError:
            raise FileNotFoundError(
                "PIMixin: {}.xml not found in {}.".format(pi_parameter_config_basename, self._input_folder))

        # Make a parameters dict for later access
        for parameter_config in self.__parameter_config:
            for location_id, model_id, parameter_id, value in parameter_config:
                try:
                    parameter = self.__data_config.parameter(parameter_id, location_id, model_id)
                except KeyError:
                    # No mapping in rtcDataConfig; fall back to the raw parameter ID.
                    parameter = parameter_id
                self.io.set_parameter(parameter, value)

        try:
            self.__timeseries_import = pi.Timeseries(
                self.__data_config, self._input_folder, self.timeseries_import_basename,
                binary=self.pi_binary_timeseries, pi_validate_times=self.pi_validate_timeseries)
        except FileNotFoundError:
            raise FileNotFoundError('PIMixin: {}.xml not found in {}'.format(
                self.timeseries_import_basename, self._input_folder))

        # Prepare a fresh export file object; it is filled and written in write().
        self.__timeseries_export = pi.Timeseries(
            self.__data_config, self._output_folder, self.timeseries_export_basename,
            binary=self.pi_binary_timeseries, pi_validate_times=False, make_new_file=True)

        # Convert timeseries timestamps to seconds since t0 for internal use
        timeseries_import_times = self.__timeseries_import.times

        # Timestamp check
        if self.pi_validate_timeseries:
            for i in range(len(timeseries_import_times) - 1):
                if timeseries_import_times[i] >= timeseries_import_times[i + 1]:
                    raise ValueError(
                        'PIMixin: Time stamps must be strictly increasing.')

        # Check if the timeseries are equidistant
        dt = timeseries_import_times[1] - timeseries_import_times[0]
        if self.pi_validate_timeseries:
            for i in range(len(timeseries_import_times) - 1):
                if timeseries_import_times[i + 1] - timeseries_import_times[i] != dt:
                    raise ValueError(
                        'PIMixin: Expecting equidistant timeseries, the time step '
                        'towards {} is not the same as the time step(s) before. Set '
                        'unit to nonequidistant if this is intended.'.format(
                            timeseries_import_times[i + 1]))

        # Stick timeseries into an AliasDict
        self.io.reference_datetime = self.__timeseries_import.forecast_datetime

        debug = logger.getEffectiveLevel() == logging.DEBUG
        for variable, values in self.__timeseries_import.items(self.pi_ensemble_member):
            self.io.set_timeseries(variable, timeseries_import_times, values)
            if debug and variable in self.get_variables():
                logger.debug('PIMixin: Timeseries {} replaced another aliased timeseries.'.format(variable))

    def write(self):
        """
        Write the simulation results for all output variables to the time
        series export file.
        """
        # Call parent class first for default behaviour.
        super().write()

        times = self._simulation_times
        if len(set(np.diff(times))) == 1:
            # Equidistant output: export a fixed time step.
            dt = timedelta(seconds=times[1] - times[0])
        else:
            dt = None

        # Start of write output
        # Write the time range for the export file.
        self.__timeseries_export.times = [self.io.reference_datetime + timedelta(seconds=s) for s in times]

        # Write other time settings
        self.__timeseries_export.forecast_datetime = self.io.reference_datetime
        self.__timeseries_export.dt = dt
        self.__timeseries_export.timezone = self.__timeseries_import.timezone

        # Write the ensemble properties for the export file.
        self.__timeseries_export.ensemble_size = 1
        self.__timeseries_export.contains_ensemble = self.__timeseries_import.contains_ensemble

        # For all variables that are output variables the values are
        # extracted from the results.
        for variable in self._io_output_variables:
            # Use the first alias that has an ID mapping in rtcDataConfig.
            for alias in self.alias_relation.aliases(variable):
                values = np.array(self._io_output[alias])
                # Check if ID mapping is present
                try:
                    self.__data_config.pi_variable_ids(alias)
                    # Add series to output file
                    self.__timeseries_export.set(alias, values, unit=self.__timeseries_import.get_unit(alias))
                    break
                except KeyError:
                    logger.debug(
                        'PIMixin: variable {} has no mapping defined in rtcDataConfig '
                        'so cannot be added to the output file.'.format(alias))
                    continue

        # Write output file to disk
        self.__timeseries_export.write()

    @property
    def timeseries_import(self):
        """
        :class:`pi.Timeseries` object containing the input data.
        """
        return self.__timeseries_import

    @property
    def timeseries_import_times(self):
        """
        List of time stamps for which input data is specified.

        The time stamps are in seconds since t0, and may be negative.
        """
        return self.io.times_sec

    @property
    def timeseries_export(self):
        """
        :class:`pi.Timeseries` object for holding the output data.
        """
        return self.__timeseries_export

    def set_timeseries(self, variable, values, output=True, check_consistency=True, unit=None):
        """
        Store a time series in the internal data store, optionally also
        scheduling it for export.

        :param variable: Variable name.
        :param values: Time series values, one per import time stamp.
        :param output: Whether to also add the series to the export file.
        :param check_consistency: Whether to verify that the series length
            matches the forecast length.
        :param unit: Unit of the series; defaults to the unit from the import file.

        :raises ValueError: when ``check_consistency`` is set and the lengths differ.
        """
        if check_consistency:
            if len(self.times()) != len(values):
                raise ValueError(
                    'PIMixin: Trying to set/append values {} with a different '
                    'length than the forecast length. Please make sure the '
                    'values cover forecastDate through endDate with timestep {}.'.format(
                        variable, self.__timeseries_import.dt))

        if unit is None:
            unit = self.__timeseries_import.get_unit(variable)

        if output:
            try:
                self.__data_config.pi_variable_ids(variable)
            except KeyError:
                # No ID mapping; the series cannot appear in the export file.
                logger.debug(
                    'PIMixin: variable {} has no mapping defined in rtcDataConfig '
                    'so cannot be added to the output file.'.format(variable))
            else:
                self.__timeseries_export.set(variable, values, unit=unit)

        self.__timeseries_import.set(variable, values, unit=unit)
        self.io.set_timeseries(variable, self.io.datetimes, values)

    def get_timeseries(self, variable):
        """
        Return the values of the named time series from the internal data store.

        :param variable: Variable name.

        :returns: Array of time series values.
        """
        _, values = self.io.get_timeseries(variable)
        return values
import logging
import os
import numpy as np
import rtctools.data.csv as csv
from rtctools._internal.caching import cached
from rtctools.simulation.io_mixin import IOMixin
logger = logging.getLogger("rtctools")
class CSVMixin(IOMixin):
    """
    Adds reading and writing of CSV timeseries and parameters to your simulation problem.

    During preprocessing, files named ``timeseries_import.csv``, ``initial_state.csv``,
    and ``parameters.csv`` are read from the ``input`` subfolder.

    During postprocessing, a file named ``timeseries_export.csv`` is written to the ``output`` subfolder.

    :cvar csv_delimiter: Column delimiter used in CSV files. Default is ``,``.
    :cvar csv_validate_timeseries: Check consistency of timeseries. Default is ``True``.
    """

    #: Column delimiter used in CSV files
    csv_delimiter = ','

    #: Check consistency of timeseries
    csv_validate_timeseries = True

    def __init__(self, **kwargs):
        # Call parent class first for default behaviour.
        super().__init__(**kwargs)

    def read(self):
        """
        Read the time series import file, parameters, and initial state from
        CSV files into the internal data store.

        ``parameters.csv`` and ``initial_state.csv`` are optional; a missing
        file is silently skipped.

        :raises Exception: when the initial state file contains more than one
            data row, or when time stamps are invalid (only if
            ``csv_validate_timeseries`` is set).
        """
        # Call parent class first for default behaviour.
        super().read()

        # Helper function to check if initial state array actually defines
        # only the initial state
        def check_initial_state_array(initial_state):
            """
            Check length of initial state array, throw exception when larger than 1.
            """
            if initial_state.shape:
                raise Exception(
                    'CSVMixin: Initial state file {} contains more than one row of data. '
                    'Please remove the data row(s) that do not describe the initial '
                    'state.'.format(os.path.join(self._input_folder, 'initial_state.csv')))

        # Read CSV files
        _timeseries = csv.load(
            os.path.join(self._input_folder, self.timeseries_import_basename + '.csv'),
            delimiter=self.csv_delimiter, with_time=True)
        # The first column holds the datetimes; its first entry becomes t0.
        self.__timeseries_times = _timeseries[_timeseries.dtype.names[0]]
        self.io.reference_datetime = self.__timeseries_times[0]
        for key in _timeseries.dtype.names[1:]:
            self.io.set_timeseries(
                key,
                self.__timeseries_times,
                np.asarray(_timeseries[key], dtype=np.float64))
        logger.debug("CSVMixin: Read timeseries.")

        try:
            _parameters = csv.load(
                os.path.join(self._input_folder, 'parameters.csv'),
                delimiter=self.csv_delimiter)
            for key in _parameters.dtype.names:
                self.io.set_parameter(key, float(_parameters[key]))
            logger.debug("CSVMixin: Read parameters.")
        except IOError:
            # parameters.csv is optional.
            pass

        try:
            _initial_state = csv.load(
                os.path.join(self._input_folder, 'initial_state.csv'),
                delimiter=self.csv_delimiter)
            logger.debug("CSVMixin: Read initial state.")
            check_initial_state_array(_initial_state)
            self.__initial_state = {
                key: float(_initial_state[key]) for key in _initial_state.dtype.names}
        except IOError:
            # initial_state.csv is optional.
            self.__initial_state = {}

        # Check for collisions in __initial_state and timeseries import (CSV)
        for collision in set(self.__initial_state) & set(_timeseries.dtype.names[1:]):
            if self.__initial_state[collision] == _timeseries[collision][0]:
                continue
            else:
                logger.warning(
                    'CSVMixin: Entry {} in initial_state.csv conflicts with '
                    'timeseries_import.csv'.format(collision))

        # Timestamp check
        if self.csv_validate_timeseries:
            times = self.__timeseries_times
            for i in range(len(times) - 1):
                if times[i] >= times[i + 1]:
                    raise Exception(
                        'CSVMixin: Time stamps must be strictly increasing.')

        times = self.__timeseries_times
        dt = times[1] - times[0]

        # Check if the timeseries are truly equidistant
        if self.csv_validate_timeseries:
            for i in range(len(times) - 1):
                if times[i + 1] - times[i] != dt:
                    raise Exception(
                        'CSVMixin: Expecting equidistant timeseries, the time step '
                        'towards {} is not the same as the time step(s) before. '
                        'Set equidistant=False if this is intended.'.format(
                            times[i + 1]))

    def write(self):
        """
        Write the simulation results for all output variables to the time
        series export CSV file.
        """
        # Call parent class first for default behaviour.
        super().write()

        times = self._simulation_times

        # Write output: a 'time' column followed by the output variables in
        # sorted order.
        names = ['time'] + sorted(set(self._io_output_variables))
        formats = ['O'] + (len(names) - 1) * ['f8']
        dtype = {'names': names, 'formats': formats}
        data = np.zeros(len(times), dtype=dtype)
        data['time'] = self.io.sec_to_datetime(times, self.io.reference_datetime)
        for variable in self._io_output_variables:
            data[variable] = np.array(self._io_output[variable])

        fname = os.path.join(self._output_folder, self.timeseries_export_basename + '.csv')
        csv.save(fname, data, delimiter=self.csv_delimiter, with_time=True)

    @cached
    def initial_state(self):
        """
        The initial state. Includes entries from parent classes and initial_state.csv

        :returns: A dictionary of variable names and initial state (t0) values.
        """
        # Call parent class first for default values.
        initial_state = super().initial_state()

        # Set of model vars that are allowed to have an initial state
        valid_model_vars = set(self.get_state_variables()) | set(self.get_input_variables())

        # Load initial states from __initial_state
        for variable, value in self.__initial_state.items():
            # Get the canonical vars and signs
            canonical_var, sign = self.alias_relation.canonical_signed(variable)

            # Only store variables that are allowed to have an initial state
            if canonical_var in valid_model_vars:
                initial_state[canonical_var] = value * sign
                if logger.getEffectiveLevel() == logging.DEBUG:
                    logger.debug("CSVMixin: Read initial state {} = {}".format(variable, value))
            else:
                logger.warning("CSVMixin: In initial_state.csv, {} is not an input or state variable.".format(variable))
        return initial_state
import bisect
import logging
from abc import ABCMeta, abstractmethod
from math import isfinite
import numpy as np
from rtctools._internal.alias_tools import AliasDict
from rtctools._internal.caching import cached
from rtctools.simulation.simulation_problem import SimulationProblem
logger = logging.getLogger("rtctools")
class IOMixin(SimulationProblem, metaclass=ABCMeta):
    """
    Base class for all IO methods of optimization problems.
    """

    def __init__(self, **kwargs):
        # Call parent class first for default behaviour.
        super().__init__(**kwargs)

        # Times (seconds since the reference datetime) of all performed timesteps.
        self._simulation_times = []
        # Tracks whether update() has run; used to reuse the input-values cache.
        self.__first_update_call = True

    def pre(self) -> None:
        # Call read method to read all input
        self.read()
        self._simulation_times = []

    @abstractmethod
    def read(self) -> None:
        """
        Reads input data from files, storing it in the internal data store through the various set or add methods
        """
        pass

    def post(self) -> None:
        # Call write method to write all output
        self.write()

    @abstractmethod
    def write(self) -> None:
        """
        Writes output data to files, getting the data from the data store through the various get methods
        """
        pass

    def initialize(self, config_file=None):
        """
        Set up the experiment, transfer parameters and initial inputs to the
        model, and initialize the underlying simulation problem.

        :param config_file: Optional initialization file, passed on to super().
        """
        # Set up experiment
        timeseries_import_times = self.io.times_sec
        self.__dt = timeseries_import_times[1] - timeseries_import_times[0]
        self.setup_experiment(0, timeseries_import_times[-1], self.__dt)

        parameter_variables = set(self.get_parameter_variables())

        logger.debug("Model parameters are {}".format(parameter_variables))

        # Only transfer parameters that are actual model variables.
        for parameter, value in self.io.parameters().items():
            if parameter in parameter_variables:
                logger.debug("IOMixin: Setting parameter {} = {}".format(parameter, value))
                self.set_var(parameter, value)

        # Load input variable names
        self.__input_variables = set(self.get_input_variables().keys())

        # Set input values
        t_idx = bisect.bisect_left(timeseries_import_times, 0.0)
        self.__set_input_variables(t_idx)

        logger.debug("Model inputs are {}".format(self.__input_variables))

        # Set first timestep
        self._simulation_times.append(self.get_current_time())

        # Empty output
        self._io_output_variables = self.get_output_variables()
        self._io_output = AliasDict(self.alias_relation)

        # Call super, which will also initialize the model itself
        super().initialize(config_file)

        # Extract consistent t0 values
        for variable in self._io_output_variables:
            self._io_output[variable] = [self.get_var(variable)]

    def __set_input_variables(self, t_idx, use_cache=False):
        """
        Transfer input time series values at time index ``t_idx`` to the model.

        :param t_idx: Index into the import times.
        :param use_cache: Reuse the variable/values cache built on a previous call.
        """
        if not use_cache:
            # Build a cache of model variable -> full values array, so that
            # subsequent timesteps only need an index lookup.
            self.__cache_loop_timeseries = {}

            timeseries_names = set(self.io.get_timeseries_names(0))

            for v in self.get_variables():
                if v in timeseries_names:
                    _, values = self.io.get_timeseries_sec(v)
                    self.__cache_loop_timeseries[v] = values

        for variable, values in self.__cache_loop_timeseries.items():
            value = values[t_idx]
            # Non-finite values (e.g. NaN gaps) are skipped; the model keeps
            # its previous value for that input.
            if isfinite(value):
                self.set_var(variable, value)
            else:
                logger.debug("IOMixin: Found bad value {} at index [{}] in timeseries aliased to input {}"
                             .format(value, t_idx, variable))

    def update(self, dt):
        """
        Advance the simulation one time step and record the results.

        :param dt: Time step size in seconds; a negative value means the
            default (import) time step.
        """
        # Time step
        if dt < 0:
            dt = self.__dt

        # Current time stamp
        t = self.get_current_time()

        self._simulation_times.append(t + dt)

        # Get current time index
        t_idx = bisect.bisect_left(self.io.times_sec, t + dt)

        # Set input values (the cache only needs to be built once).
        self.__set_input_variables(t_idx, not self.__first_update_call)

        # Call super
        super().update(dt)

        # Extract results
        for variable, values in self._io_output.items():
            values.append(self.get_var(variable))

        self.__first_update_call = False

    def extract_results(self):
        """
        Extracts the results of output

        :returns: An AliasDict of output variables and results array format.
        """
        io_outputs_arrays = self._io_output.copy()
        for k in io_outputs_arrays.keys():
            io_outputs_arrays[k] = np.array(io_outputs_arrays[k])
        return io_outputs_arrays

    @cached
    def parameters(self):
        """
        Return a dictionary of parameters, including parameters in the input files.

        :returns: Dictionary of parameters
        """
        # Call parent class first for default values.
        parameters = super().parameters()

        # Load parameters from input files (stored in internal data store)
        for parameter_name, value in self.io.parameters().items():
            parameters[parameter_name] = value

        if logger.getEffectiveLevel() == logging.DEBUG:
            for parameter_name in self.io.parameters().keys():
                logger.debug("IOMixin: Read parameter {}".format(parameter_name))

        return parameters

    def times(self, variable=None):
        """
        Return a list of all the timesteps in seconds.

        :param variable: Variable name.

        :returns: List of all the timesteps in seconds.
        """
        # NOTE(review): `variable` is accepted for interface compatibility but
        # is currently ignored; the same times are returned for every variable.
        # Only return times from the reference datetime onwards.
        idx = bisect.bisect_left(self.io.datetimes, self.io.reference_datetime)
        return self.io.times_sec[idx:]

    def timeseries_at(self, variable, t):
        """
        Return the value of a time series at the given time.

        :param variable: Variable name.
        :param t: Time.

        :returns: The interpolated value of the time series.

        :raises: KeyError
        """
        timeseries_times_sec, values = self.io.get_timeseries_sec(variable)
        t_idx = bisect.bisect_left(timeseries_times_sec, t)
        if timeseries_times_sec[t_idx] == t:
            # Exact hit; no interpolation needed.
            return values[t_idx]
        else:
            return np.interp(t, timeseries_times_sec, values)
import logging
import sys
from datetime import datetime
import numpy as np
logger = logging.getLogger("rtctools")
def _boolean_to_nan(data, fname):
"""
Empty columns are detected as boolean full of "False". We instead want this to be np.nan.
We cannot distinguish between explicitly desired boolean columns, so instead we convert all
boolean columns to np.nan, and raise a warning.
"""
data = data.copy()
dtypes_in = []
for i in range(0, len(data.dtype)):
dtypes_in.append(data.dtype.descr[i])
convert_to_nan = []
dtypes_out = []
for i, name in enumerate(data.dtype.names):
if dtypes_in[i][1][1] == 'b':
convert_to_nan.append(name)
dtypes_out.append((dtypes_in[i][0], '<f8'))
else:
dtypes_out.append(dtypes_in[i])
if convert_to_nan:
logger.warning("Column(s) {} were detected as boolean in '{}'; converting to NaN".format(
", ".join(["'{}'".format(name) for name in convert_to_nan]), fname))
data = data.astype(dtypes_out)
for name in convert_to_nan:
data[name] = np.nan
return data
def load(fname, delimiter=',', with_time=False):
    """
    Check delimiter of csv and read contents to an array.

    :param fname: Filename.
    :param delimiter: CSV column delimiter (',' or ';').
    :param with_time: Whether the first column is expected to contain time
                      stamps formatted as ``%Y-%m-%d %H:%M:%S``.

    :returns: A named numpy array with the contents of the file.

    :raises ValueError: when a value or time stamp cannot be parsed.
    """
    c = {}
    if with_time:
        # Parse the first column as datetime objects.
        c.update({0: lambda s: datetime.strptime(
            s.decode("utf-8"), '%Y-%m-%d %H:%M:%S')})

    # Check delimiter of csv file. If semicolon, check if decimal separator is
    # a comma.
    if delimiter == ';':
        with open(fname, 'rb') as csvfile:
            # Read the first line, this should be a header. Count columns by
            # counting separator.
            sample_csvfile = csvfile.readline()
            n_semicolon = sample_csvfile.count(b';')
            # We actually only need one number to evaluate if commas are used as decimal separator, but
            # certain csv writers don't use a decimal when the value has no meaningful decimal
            # (e.g. 12.0 becomes 12) so we read the next 1024 bytes to make sure we catch a number.
            sample_csvfile = csvfile.read(1024)
            # Count the commas
            n_comma_decimal = sample_csvfile.count(b',')
            # If commas are used as decimal separator, we need additional
            # converters that replace the comma with a period.
            if n_comma_decimal:
                c.update({i + len(c): lambda s: float(s.decode("utf-8").replace(',', '.'))
                          for i in range(1 + n_semicolon - len(c))})

    # Read the csv file and convert to array.
    try:
        if c:
            # Converters exist, so use them.
            data = np.genfromtxt(fname, delimiter=delimiter, deletechars='', dtype=None, names=True, converters=c)
        else:
            data = np.genfromtxt(fname, delimiter=delimiter, deletechars='', dtype=None, names=True)
    except np.lib._iotools.ConverterError as e:
        # A value does not conform to the expected date-time (or number) format.
        # NOTE(review): np.lib._iotools is a private numpy API; verify it still
        # exists when upgrading numpy.
        logger.error(
            'CSVMixin: converter of csv reader failed on {}: {}'.format(fname, e))
        raise ValueError(
            'CSVMixin: wrong date time or value format in {}. '
            'Should be %Y-%m-%d %H:%M:%S and numerical values everywhere.'.format(fname)) from e
    except ValueError as e:
        # Can occur when the delimiter changes after the first 1024 bytes of
        # the file, or when the delimiter is not ',' or ';'.
        logger.error(
            'CSV: Value reader of csv reader failed on {}: {}'.format(fname, e))
        raise ValueError(
            "CSV: could not read all values from {}. Used delimiter '{}'. "
            "Please check delimiter (should be ',' or ';' throughout the file) "
            "and if all values are numbers.".format(fname, delimiter)) from e

    return _boolean_to_nan(data, fname)
def save(fname, data, delimiter=',', with_time=False):
    """
    Write the contents of an array to a csv file.

    :param fname: Filename.
    :param data: A named numpy array with the data to write.
    :param delimiter: CSV column delimiter.
    :param with_time: Whether to output the first column of the data as time stamps.
    """
    if with_time:
        # Work on a copy so that the caller's array is not modified when
        # converting the datetime column to formatted strings.
        data = data.copy()
        data['time'] = [t.strftime("%Y-%m-%d %H:%M:%S") for t in data['time']]
        fmt = ['%s'] + (len(data.dtype.names) - 1) * ['%f']
    else:
        fmt = len(data.dtype.names) * ['%f']
    np.savetxt(fname, data, delimiter=delimiter, header=delimiter.join(
        data.dtype.names), fmt=fmt, comments='')
import logging
from abc import ABCMeta, abstractmethod
from datetime import datetime, timedelta
from typing import Iterable, List, Tuple, Union
import numpy as np
from rtctools._internal.alias_tools import AliasDict, AliasRelation
logger = logging.getLogger("rtctools")
class DataStoreAccessor(metaclass=ABCMeta):
    """
    Base class for all problems.
    Adds an internal data store where timeseries and parameters can be stored.

    Access to the internal data store is always done through the io accessor.

    :cvar timeseries_import_basename:
        Import file basename. Default is ``timeseries_import``.
    :cvar timeseries_export_basename:
        Export file basename. Default is ``timeseries_export``.
    """

    #: Import file basename
    timeseries_import_basename = 'timeseries_import'
    #: Export file basename
    timeseries_export_basename = 'timeseries_export'

    def __init__(self, **kwargs):
        """
        :param input_folder: Folder to read input files from. Default is ``input``.
        :param output_folder: Folder to write output files to. Default is ``output``.
        """
        # Save arguments
        self._input_folder = kwargs['input_folder'] if 'input_folder' in kwargs else 'input'
        self._output_folder = kwargs['output_folder'] if 'output_folder' in kwargs else 'output'
        if logger.getEffectiveLevel() == logging.DEBUG:
            logger.debug("Expecting input files to be located in '" + self._input_folder + "'.")
            logger.debug("Writing output files to '" + self._output_folder + "'.")
        # All data access goes through this accessor object.
        self.io = DataStore(self)

    @property
    @abstractmethod
    def alias_relation(self) -> AliasRelation:
        # Subclasses must provide the alias relation used by the data store's
        # AliasDict instances.
        raise NotImplementedError
class DataStore:
"""
DataStore class used by the DataStoreAccessor.
Contains all methods needed to access the internal data store.
"""
    def __init__(self, accessor):
        """
        :param accessor: The owning :class:`DataStoreAccessor`, used to obtain
            the alias relation for the internal :class:`AliasDict` instances.
        """
        self.__accessor = accessor

        # Should all be set by subclass via setters
        self.__reference_datetime = None
        self.__timeseries_datetimes = None
        self.__timeseries_times_sec = None
        # One AliasDict per ensemble member.
        self.__timeseries_values = [AliasDict(self.__accessor.alias_relation)]
        self.__parameters = [AliasDict(self.__accessor.alias_relation)]
        # Once True, reference_datetime may no longer change (see times_sec).
        self.__reference_datetime_fixed = False
        self.__ensemble_size = 1
    @property
    def reference_datetime(self):
        # The datetime that corresponds to t = 0.0 in seconds-based time arrays.
        return self.__reference_datetime

    @reference_datetime.setter
    def reference_datetime(self, value):
        # Once times in seconds have been handed out (see times_sec), the
        # reference may no longer move, or those arrays would become invalid.
        if self.__reference_datetime_fixed and value != self.__reference_datetime:
            raise RuntimeError("Cannot change reference datetime after times in seconds has been requested.")
        self.__reference_datetime = value
    @property
    def ensemble_size(self):
        # Number of ensemble members for which data can be stored.
        return self.__ensemble_size
def __update_ensemble_size(self, ensemble_size):
while ensemble_size > len(self.__timeseries_values):
self.__timeseries_values.append(AliasDict(self.__accessor.alias_relation))
while ensemble_size > len(self.__parameters):
self.__parameters.append(AliasDict(self.__accessor.alias_relation))
assert len(self.__parameters) == len(self.__timeseries_values)
assert len(self.__parameters) == ensemble_size
self.__ensemble_size = ensemble_size
    @property
    def datetimes(self) -> List[datetime]:
        """
        Returns a copy of the timeseries datetimes.

        Note that an AttributeError is raised when no timeseries have been
        stored yet (i.e. :py:meth:`set_timeseries` has not been called), as
        the internal list is still None in that case.

        :returns: list of timeseries datetimes.
        """
        return self.__timeseries_datetimes.copy()
    @property
    def times_sec(self) -> np.ndarray:
        """
        Returns the timeseries times in seconds relative to the reference datetime.

        Note that once this method is called, it is no longer allowed to
        change :py:attr:`reference_datetime`.

        :returns: timeseries times in seconds (read-only array).
        """
        self._datetimes_to_seconds()
        return self.__timeseries_times_sec
def _datetimes_to_seconds(self):
if self.__reference_datetime_fixed:
pass
else:
# Currently we only allow a reference datetime that exists in the
# timeseries datetimes. That way we can guarantee that we have
# "0.0" as one of our times in seconds. This restriction may be
# loosened in the future.
if self.reference_datetime not in self.__timeseries_datetimes:
raise Exception("Reference datetime {} should be equal to one of the timeseries datetimes {}".format(
self.reference_datetime, self.__timeseries_datetimes))
self.__timeseries_times_sec = self.datetime_to_sec(self.__timeseries_datetimes, self.reference_datetime)
self.__timeseries_times_sec.flags.writeable = False
self.__reference_datetime_fixed = True
def set_timeseries(self,
                   variable: str,
                   datetimes: Iterable[datetime],
                   values: np.ndarray,
                   ensemble_member: int = 0,
                   check_duplicates: bool = False) -> None:
    """
    Stores input time series values in the internal data store.

    :param variable: Variable name.
    :param datetimes: Times as datetime objects.
    :param values: The values to be stored.
    :param ensemble_member: The ensemble member index.
    :param check_duplicates: If True, a warning will be given when overwriting values.
        If False, existing values can be silently overwritten with new values.

    :raises ValueError: If ``datetimes`` is empty, or if ``values`` and
        ``datetimes`` differ in length.
    :raises TypeError: If ``datetimes`` does not contain datetime objects.
    """
    datetimes = list(datetimes)
    if not datetimes:
        # Guard against an IndexError on the isinstance check below.
        raise ValueError("DataStore.set_timeseries() requires at least one datetime")
    if not isinstance(datetimes[0], datetime):
        raise TypeError("DateStore.set_timeseries() only support datetimes")
    if self.__timeseries_datetimes is not None and datetimes != self.__timeseries_datetimes:
        raise RuntimeError("Attempting to overwrite the input time series datetimes with different values. "
                           "Please ensure all input time series have the same datetimes.")
    # Validate the values length before mutating any state, so that a
    # failed call leaves the store unchanged.
    if len(datetimes) != len(values):
        raise ValueError("Length of values ({}) must be the same as length of datetimes ({})"
                         .format(len(values), len(datetimes)))
    self.__timeseries_datetimes = datetimes
    if ensemble_member >= self.__ensemble_size:
        self.__update_ensemble_size(ensemble_member + 1)
    if check_duplicates and variable in self.__timeseries_values[ensemble_member].keys():
        logger.warning("Time series values for ensemble member {} and variable {} set twice. "
                       "Overwriting old values.".format(ensemble_member, variable))
    self.__timeseries_values[ensemble_member][variable] = values
def get_timeseries(self, variable: str, ensemble_member: int = 0) -> Tuple[List[datetime], np.ndarray]:
    """
    Looks up the time series in the internal data store.

    :return: a tuple (datetimes, values)
    """
    if ensemble_member >= self.__ensemble_size:
        raise KeyError("ensemble_member {} does not exist".format(ensemble_member))
    member_values = self.__timeseries_values[ensemble_member]
    return self.__timeseries_datetimes, member_values[variable]
def get_timeseries_names(self, ensemble_member: int = 0) -> Iterable[str]:
    """
    Returns the names of all time series stored for the given ensemble member.

    :raises KeyError: If the ensemble member does not exist.
    """
    # Consistency fix: reject unknown ensemble members with a KeyError,
    # matching get_timeseries(), instead of a bare IndexError.
    if ensemble_member >= self.__ensemble_size:
        raise KeyError("ensemble_member {} does not exist".format(ensemble_member))
    return self.__timeseries_values[ensemble_member].keys()
def set_timeseries_sec(self,
                       variable: str,
                       times_in_sec: np.ndarray,
                       values: np.ndarray,
                       ensemble_member: int = 0,
                       check_duplicates: bool = False) -> None:
    """
    Stores input time series values in the internal data store.

    Note that once this method is called, it is no longer allowed to
    change :py:attr:`reference_datetime`.

    :param variable: Variable name.
    :param times_in_sec: The times in seconds.
    :param values: The values to be stored.
    :param ensemble_member: The ensemble member index.
    :param check_duplicates: If True, a warning will be given when overwriting values.
        If False, existing values can be silently overwritten with new values.
    """
    # Freezes the reference datetime and derives __timeseries_times_sec
    # from the stored datetimes.
    self._datetimes_to_seconds()
    if self.reference_datetime is None:
        raise RuntimeError("Cannot use times in seconds before reference datetime is set.")
    if self.__timeseries_times_sec is not None and not np.array_equal(times_in_sec, self.__timeseries_times_sec):
        raise RuntimeError("Attempting to overwrite the input time series times with different values. "
                           "Please ensure all input time series have the same times.")
    # NOTE(review): the length check below compares against the stored
    # datetimes rather than times_in_sec; presumably both have the same
    # length once _datetimes_to_seconds() has run -- confirm.
    if len(self.__timeseries_datetimes) != len(values):
        raise ValueError("Length of values ({}) must be the same as length of times ({})"
                         .format(len(values), len(self.__timeseries_datetimes)))
    if ensemble_member >= self.__ensemble_size:
        self.__update_ensemble_size(ensemble_member + 1)
    if check_duplicates and variable in self.__timeseries_values[ensemble_member].keys():
        logger.warning("Time series values for ensemble member {} and variable {} set twice. "
                       "Overwriting old values.".format(ensemble_member, variable))
    self.__timeseries_values[ensemble_member][variable] = values
def get_timeseries_sec(self, variable: str, ensemble_member: int = 0) -> Tuple[np.ndarray, np.ndarray]:
    """
    Looks up the time series in the internal data store.

    Note that once this method is called, it is no longer allowed to
    change :py:attr:`reference_datetime`.

    :return: a tuple (times, values)
    """
    # Ensure the seconds array exists; locks the reference datetime.
    self._datetimes_to_seconds()
    if not ensemble_member < self.__ensemble_size:
        raise KeyError("ensemble_member {} does not exist".format(ensemble_member))
    member_values = self.__timeseries_values[ensemble_member]
    return self.__timeseries_times_sec, member_values[variable]
def set_parameter(self,
                  parameter_name: str,
                  value: float,
                  ensemble_member: int = 0,
                  check_duplicates: bool = False) -> None:
    """
    Stores the parameter value in the internal data store.

    :param parameter_name: Parameter name.
    :param value: The values to be stored.
    :param ensemble_member: The ensemble member index.
    :param check_duplicates: If True, a warning will be given when overwriting values.
        If False, existing values can be silently overwritten with new values.
    """
    # Grow the ensemble containers on demand.
    if ensemble_member >= self.__ensemble_size:
        self.__update_ensemble_size(ensemble_member + 1)
    member_parameters = self.__parameters[ensemble_member]
    if check_duplicates and parameter_name in member_parameters.keys():
        logger.warning("Attempting to set parameter value for ensemble member {} and name {} twice. "
                       "Using new value of {}.".format(ensemble_member, parameter_name, value))
    member_parameters[parameter_name] = value
def get_parameter(self, parameter_name: str, ensemble_member: int = 0) -> float:
    """
    Looks up the parameter value in the internal data store.
    """
    if not ensemble_member < self.__ensemble_size:
        raise KeyError("ensemble_member {} does not exist".format(ensemble_member))
    member_parameters = self.__parameters[ensemble_member]
    return member_parameters[parameter_name]
def parameters(self, ensemble_member: int = 0) -> AliasDict:
    """
    Returns an AliasDict of parameters to its values for the specified ensemble member.
    """
    if not ensemble_member < self.__ensemble_size:
        raise KeyError("ensemble_member {} does not exist".format(ensemble_member))
    return self.__parameters[ensemble_member]
@staticmethod
def datetime_to_sec(d: Union[Iterable[datetime], datetime], t0: datetime) -> Union[np.ndarray, float]:
"""
Returns the date/timestamps in seconds since t0.
:param d: Iterable of datetimes or a single datetime object.
:param t0: Reference datetime.
"""
if hasattr(d, '__iter__'):
return np.array([(t - t0).total_seconds() for t in d])
else:
return (d - t0).total_seconds()
@staticmethod
def sec_to_datetime(s: Union[Iterable[float], float], t0: datetime) -> Union[List[datetime], datetime]:
"""
Return the date/timestamps in seconds since t0 as datetime objects.
:param s: Iterable of ints or a single int (number of seconds before or after t0).
:param t0: Reference datetime.
"""
if hasattr(s, '__iter__'):
return [t0 + timedelta(seconds=t) for t in s]
else:
return t0 + timedelta(seconds=s) | /rtc_tools-2.5.2a1-py3-none-any.whl/rtctools/data/storage.py | 0.867584 | 0.336713 | storage.py | pypi |
from casadi import Function, SX, if_else, inf, jacobian, logic_and, nlpsol, sum2, vertcat
import numpy as np
from .bspline import BSpline
class BSpline1D(BSpline):
    """
    Arbitrary order, one-dimensional, non-uniform B-Spline implementation using Cox-de Boor recursion.
    """

    def __init__(self, t, w, k=3):
        """
        Create a new 1D B-Spline object.

        :param t: Knot vector.
        :param w: Weight vector.
        :param k: Spline order.
        """
        # Store arguments
        self.__t = t
        self.__w = w
        self.__k = k

    def __call__(self, x):
        """
        Evaluate the B-Spline at point x.

        The support of this function is the half-open interval [t[0], t[-1]).

        :param x: The point at which to evaluate.

        :returns: The spline evaluated at the given point.
        """
        # Sum of weighted basis functions; each term is gated by a symbolic
        # if_else so that the expression is valid for symbolic x as well.
        # NOTE(review): the condition uses x <= t[i + k + 1], which reads as a
        # closed interval rather than the half-open interval documented above
        # -- confirm the intended support.
        y = 0.0
        for i in range(len(self.__t) - self.__k - 1):
            y += if_else(logic_and(x >= self.__t[i], x <= self.__t[i + self.__k + 1]), self.__w[
                i] * self.basis(self.__t, x, self.__k, i), 0.0)
        return y

    @classmethod
    def fit(cls, x, y, k=3, monotonicity=0, curvature=0,
            num_test_points=100, epsilon=1e-7, delta=1e-4, interior_pts=None):
        """
        fit() returns a tck tuple like scipy.interpolate.splrep, but adjusts
        the weights to meet the desired constraints to the curvature of the spline curve.

        :param monotonicity:
            - is an integer, magnitude is ignored
            - if positive, causes spline to be monotonically increasing
            - if negative, causes spline to be monotonically decreasing
            - if 0, leaves spline monotonicity unconstrained
        :param curvature:
            - is an integer, magnitude is ignored
            - if positive, causes spline curvature to be positive (convex)
            - if negative, causes spline curvature to be negative (concave)
            - if 0, leaves spline curvature unconstrained
        :param num_test_points:
            - sets the number of points that the constraints will be applied at across
              the range of the spline
        :param epsilon:
            - offset of monotonicity and curvature constraints from zero, ensuring strict
              monotonicity
            - if epsilon is set to less than the tolerance of the solver, errors will result
        :param delta:
            - amount the first and last knots are extended outside the range of the splined points
            - ensures that the spline evaluates correctly at the first and last nodes, as
              well as the distance delta beyond these nodes
        :param interior_pts:
            - optional list of interior knots to use

        :returns: A tuple of spline knots, weights, and order.
        """
        x = np.asarray(x)
        y = np.asarray(y)
        N = len(x)
        if interior_pts is None:
            # Generate knots: This algorithm is based on the Fitpack algorithm by p.dierckx
            # The original code lives here: http://www.netlib.org/dierckx/
            if k % 2 == 1:
                interior_pts = x[k // 2 + 1:-k // 2]
            else:
                interior_pts = (x[k // 2 + 1:-k // 2] + x[k // 2:-k // 2 - 1]) / 2
        # Clamped knot vector: repeat the (slightly extended) end points k+1 times.
        t = np.concatenate(
            (np.full(k + 1, x[0] - delta), interior_pts, np.full(k + 1, x[-1] + delta)))
        num_knots = len(t)

        # Casadi Variable Symbols
        c = SX.sym('c', num_knots)
        x_sym = SX.sym('x')

        # Casadi Representation of Spline Function & Derivatives
        expr = cls(t, c, k)(x_sym)
        free_vars = [c, x_sym]
        bspline = Function('bspline', free_vars, [expr])
        J = jacobian(expr, x_sym)
        # bspline_prime = Function('bspline_prime', free_vars, [J])
        H = jacobian(J, x_sym)
        bspline_prime_prime = Function('bspline_prime_prime', free_vars, [H])

        # Objective Function: sum of squared residuals over all data points,
        # evaluated with a mapped (vectorized) CasADi function.
        xpt = SX.sym('xpt')
        ypt = SX.sym('ypt')
        sq_diff = Function('sq_diff', [xpt, ypt], [
            (ypt - bspline(c, xpt))**2])
        sq_diff = sq_diff.map(N, 'serial')
        f = sum2(sq_diff(SX(x), SX(y)))

        # Setup Curvature Constraints
        delta_c_max = np.full(num_knots - 1, inf)
        delta_c_min = np.full(num_knots - 1, -inf)
        max_slope_slope = np.full(num_test_points, inf)
        min_slope_slope = np.full(num_test_points, -inf)
        if monotonicity != 0:
            if monotonicity < 0:
                delta_c_max = np.full(num_knots - 1, -epsilon)
            else:
                delta_c_min = np.full(num_knots - 1, epsilon)
        if curvature != 0:
            if curvature < 0:
                max_slope_slope = np.full(num_test_points, -epsilon)
            else:
                min_slope_slope = np.full(num_test_points, epsilon)
        # Differences of successive weights, bounded by delta_c_min/max to
        # enforce the requested monotonicity direction.
        monotonicity_constraints = vertcat(*[
            c[i + 1] - c[i] for i in range(num_knots - 1)])
        # Second derivative sampled at num_test_points points, bounded by
        # min/max_slope_slope to enforce the requested curvature sign.
        x_linspace = np.linspace(x[0], x[-1], num_test_points)
        curvature_constraints = vertcat(*[
            bspline_prime_prime(c, SX(x)) for x in x_linspace])
        g = vertcat(monotonicity_constraints, curvature_constraints)
        lbg = np.concatenate((delta_c_min, min_slope_slope))
        ubg = np.concatenate((delta_c_max, max_slope_slope))

        # Perform mini-optimization problem to calculate the values of c
        nlp = {'x': c, 'f': f, 'g': g}
        my_solver = "ipopt"
        solver = nlpsol("solver", my_solver, nlp, {'print_time': 0, 'expand': True, 'ipopt': {'print_level': 0}})
        sol = solver(lbg=lbg, ubg=ubg)
        stats = solver.stats()
        return_status = stats['return_status']
        if return_status not in ['Solve_Succeeded', 'Solved_To_Acceptable_Level', 'SUCCESS']:
            raise Exception("Spline fitting failed with status {}".format(return_status))

        # Return the new tck tuple
        return (t, np.array(sol['x']).ravel(), k)
import logging
import os
from datetime import timedelta
import numpy as np
import rtctools.data.csv as csv
from rtctools._internal.alias_tools import AliasDict
from rtctools._internal.caching import cached
from rtctools.optimization.io_mixin import IOMixin
from rtctools.optimization.timeseries import Timeseries
logger = logging.getLogger("rtctools")
class CSVMixin(IOMixin):
    """
    Adds reading and writing of CSV timeseries and parameters to your optimization problem.

    During preprocessing, files named ``timeseries_import.csv``, ``initial_state.csv``,
    and ``parameters.csv`` are read from the ``input`` subfolder.

    During postprocessing, a file named ``timeseries_export.csv`` is written to the ``output`` subfolder.

    In ensemble mode, a file named ``ensemble.csv`` is read from the ``input`` folder. This file
    contains two columns. The first column gives the name of the ensemble member, and the second
    column its probability. Furthermore, the other CSV files appear one level deeper inside the
    filesystem hierarchy, inside subfolders with the names of the ensemble members.

    :cvar csv_initial_state_basename:
        Initial state file basename. Default is ``initial_state``.
    :cvar csv_parameters_basename:
        Parameters file basename. Default is ``parameters``.
    :cvar csv_ensemble_basename:
        Ensemble file basename. Default is ``ensemble``.
    :cvar csv_delimiter:
        Column delimiter used in CSV files. Default is ``,``.
    :cvar csv_equidistant:
        Whether or not the timeseries data is equidistant. Default is ``True``.
    :cvar csv_ensemble_mode:
        Whether or not to use ensembles. Default is ``False``.
    :cvar csv_validate_timeseries:
        Check consistency of timeseries. Default is ``True``.
    """

    #: Initial state file basename
    csv_initial_state_basename = 'initial_state'

    #: Parameters file basename
    csv_parameters_basename = 'parameters'

    #: Ensemble file basename
    csv_ensemble_basename = 'ensemble'

    #: Column delimiter used in CSV files
    csv_delimiter = ','

    #: Whether or not the timeseries data is equidistant
    csv_equidistant = True

    #: Whether or not to use ensembles
    csv_ensemble_mode = False

    #: Check consistency of timeseries
    csv_validate_timeseries = True

    def __init__(self, **kwargs):
        # Call parent class first for default behaviour.
        super().__init__(**kwargs)

    def read(self):
        """Read timeseries, parameters, and initial state from the CSV input files."""
        # Call parent class first for default behaviour.
        super().read()

        # Helper function to check if initial state array actually defines
        # only the initial state
        def check_initial_state_array(initial_state):
            """
            Check length of initial state array, throw exception when larger than 1.
            """
            if initial_state.shape:
                raise Exception(
                    'CSVMixin: Initial state file {} contains more than one row of data. '
                    'Please remove the data row(s) that do not describe the initial state.'.format(
                        os.path.join(self._input_folder, self.csv_initial_state_basename + '.csv')))

        # Read CSV files
        self.__initial_state = []
        if self.csv_ensemble_mode:
            self.__ensemble = np.genfromtxt(
                os.path.join(self._input_folder, self.csv_ensemble_basename + '.csv'),
                delimiter=",", deletechars='', dtype=None, names=True, encoding=None)

            logger.debug("CSVMixin: Read ensemble description")

            # One timeseries_import.csv per ensemble member, in its own subfolder.
            for ensemble_member_index, ensemble_member_name in enumerate(self.__ensemble['name']):
                _timeseries = csv.load(
                    os.path.join(
                        self._input_folder,
                        ensemble_member_name,
                        self.timeseries_import_basename + ".csv",
                    ),
                    delimiter=self.csv_delimiter,
                    with_time=True,
                )
                self.__timeseries_times = _timeseries[_timeseries.dtype.names[0]]
                self.io.reference_datetime = self.__timeseries_times[0]
                for key in _timeseries.dtype.names[1:]:
                    self.io.set_timeseries(
                        key,
                        self.__timeseries_times,
                        np.asarray(_timeseries[key], dtype=np.float64),
                        ensemble_member_index
                    )
            logger.debug("CSVMixin: Read timeseries")

            # Per-member parameters file is optional.
            for ensemble_member_index, ensemble_member_name in enumerate(self.__ensemble['name']):
                try:
                    _parameters = csv.load(os.path.join(
                        self._input_folder, ensemble_member_name, self.csv_parameters_basename + '.csv'),
                        delimiter=self.csv_delimiter)
                    for key in _parameters.dtype.names:
                        self.io.set_parameter(key, float(_parameters[key]), ensemble_member_index)
                except IOError:
                    pass
            logger.debug("CSVMixin: Read parameters.")

            # Per-member initial state file is optional.
            for ensemble_member_name in self.__ensemble['name']:
                try:
                    _initial_state = csv.load(os.path.join(
                        self._input_folder, ensemble_member_name, self.csv_initial_state_basename + '.csv'),
                        delimiter=self.csv_delimiter)
                    check_initial_state_array(_initial_state)
                    _initial_state = {key: float(_initial_state[key]) for key in _initial_state.dtype.names}
                except IOError:
                    _initial_state = {}
                self.__initial_state.append(AliasDict(self.alias_relation, _initial_state))
            logger.debug("CSVMixin: Read initial state.")
        else:
            _timeseries = csv.load(
                os.path.join(
                    self._input_folder, self.timeseries_import_basename + ".csv"
                ),
                delimiter=self.csv_delimiter,
                with_time=True,
            )
            self.__timeseries_times = _timeseries[_timeseries.dtype.names[0]]
            self.io.reference_datetime = self.__timeseries_times[0]
            for key in _timeseries.dtype.names[1:]:
                self.io.set_timeseries(key, self.__timeseries_times, np.asarray(_timeseries[key], dtype=np.float64))
            logger.debug("CSVMixin: Read timeseries.")

            try:
                _parameters = csv.load(os.path.join(
                    self._input_folder, self.csv_parameters_basename + '.csv'), delimiter=self.csv_delimiter)
                logger.debug("CSVMixin: Read parameters.")
                for key in _parameters.dtype.names:
                    self.io.set_parameter(key, float(_parameters[key]))
            except IOError:
                pass

            try:
                _initial_state = csv.load(os.path.join(
                    self._input_folder, self.csv_initial_state_basename + '.csv'), delimiter=self.csv_delimiter)
                logger.debug("CSVMixin: Read initial state.")
                check_initial_state_array(_initial_state)
                _initial_state = {key: float(_initial_state[key]) for key in _initial_state.dtype.names}
            except IOError:
                _initial_state = {}
            self.__initial_state.append(AliasDict(self.alias_relation, _initial_state))

        # Timestamp check
        if self.csv_validate_timeseries:
            times = self.__timeseries_times
            for i in range(len(times) - 1):
                if times[i] >= times[i + 1]:
                    raise Exception(
                        'CSVMixin: Time stamps must be strictly increasing.')

        if self.csv_equidistant:
            # Check if the timeseries are truly equidistant
            if self.csv_validate_timeseries:
                times = self.__timeseries_times
                dt = times[1] - times[0]
                for i in range(len(times) - 1):
                    if times[i + 1] - times[i] != dt:
                        raise Exception(
                            'CSVMixin: Expecting equidistant timeseries, the time step towards '
                            '{} is not the same as the time step(s) before. Set csv_equidistant = False '
                            'if this is intended.'.format(times[i + 1]))

    def ensemble_member_probability(self, ensemble_member):
        """Probability of the given ensemble member (1.0 outside ensemble mode)."""
        if self.csv_ensemble_mode:
            return self.__ensemble['probability'][ensemble_member]
        else:
            return 1.0

    @cached
    def history(self, ensemble_member):
        """History timeseries, seeded from the initial state file where available."""
        # Call parent class first for default values.
        history = super().history(ensemble_member)

        initial_time = np.array([self.initial_time])

        # Load parameters from parameter config
        for variable in self.dae_variables['free_variables']:
            variable = variable.name()
            try:
                history[variable] = Timeseries(initial_time,
                                               self.__initial_state[ensemble_member][variable])
            except (KeyError, ValueError):
                pass
            else:
                if logger.getEffectiveLevel() == logging.DEBUG:
                    logger.debug("CSVMixin: Read initial state {}".format(variable))
        return history

    def write(self):
        """Write the output variables to timeseries_export.csv."""
        # Call parent class first for default behaviour.
        super().write()

        # Write output
        times = self.times()

        def write_output(ensemble_member, folder):
            results = self.extract_results(ensemble_member)
            names = ['time'] + sorted({sym.name() for sym in self.output_variables})
            formats = ['O'] + (len(names) - 1) * ['f8']
            dtype = {'names': names, 'formats': formats}
            data = np.zeros(len(times), dtype=dtype)
            data['time'] = [self.io.reference_datetime + timedelta(seconds=s) for s in times]
            for output_variable in self.output_variables:
                output_variable = output_variable.name()
                try:
                    values = results[output_variable]
                    if len(values) != len(times):
                        values = self.interpolate(
                            times, self.times(output_variable), values, self.interpolation_method(output_variable))
                except KeyError:
                    try:
                        ts = self.get_timeseries(
                            output_variable, ensemble_member)
                        if len(ts.times) != len(times):
                            # NOTE(review): unlike the branch above, this call
                            # does not pass the interpolation method for the
                            # variable -- confirm the default is intended here.
                            values = self.interpolate(
                                times, ts.times, ts.values)
                        else:
                            values = ts.values
                    except KeyError:
                        logger.error(
                            "Output requested for non-existent variable {}".format(output_variable))
                        continue
                data[output_variable] = values
            fname = os.path.join(folder, self.timeseries_export_basename + ".csv")
            csv.save(fname, data, delimiter=self.csv_delimiter, with_time=True)

        if self.csv_ensemble_mode:
            for ensemble_member, ensemble_member_name in enumerate(self.__ensemble['name']):
                write_output(ensemble_member, os.path.join(
                    self._output_folder, ensemble_member_name))
        else:
            write_output(0, self._output_folder)
import configparser
import glob
import logging
import os
import pickle
from typing import Iterable, List, Tuple, Union
import casadi as ca
import numpy as np
import rtctools.data.csv as csv
from rtctools._internal.caching import cached
from rtctools.data.interpolation.bspline1d import BSpline1D
from rtctools.data.interpolation.bspline2d import BSpline2D
from rtctools.optimization.timeseries import Timeseries
from scipy.interpolate import bisplev, bisplrep, splev
from scipy.optimize import brentq
from .optimization_problem import LookupTable as LookupTableBase
from .optimization_problem import OptimizationProblem
logger = logging.getLogger("rtctools")
class LookupTable(LookupTableBase):
    """
    Lookup table backed by a CasADi function, with optional spline (tck) metadata.
    """

    def __init__(self, inputs: List[ca.MX], function: ca.Function, tck: Tuple = None):
        """
        Create a new lookup table object.

        :param inputs: List of lookup table input variables.
        :param function: Lookup table CasADi :class:`Function`.
        :param tck: Optional spline metadata: ``(t, c, k)`` for a 1D spline,
            or ``(tx, ty, c, kx, ky)`` for a 2D spline.
        """
        self.__inputs = inputs
        self.__function = function

        self.__t, self.__c, self.__k = [None] * 3
        if tck is not None:
            if len(tck) == 3:
                # 1D spline: knots, coefficients, degree.
                self.__t, self.__c, self.__k = tck
            elif len(tck) == 5:
                # 2D spline: (tx, ty), c, (kx, ky).
                self.__t = tck[:2]
                self.__c = tck[2]
                self.__k = tck[3:]

    @property
    @cached
    def domain(self) -> Tuple:
        """Open interval (as a float pair) on which the 1D spline is defined."""
        t = self.__t
        if t is None:
            # Fix: the original message was built with a backslash line
            # continuation inside the string literal, embedding the source
            # indentation into the message. Use implicit concatenation.
            raise AttributeError(
                'This lookup table was not instantiated with tck metadata. '
                'Domain/Range information is unavailable.')
        # isinstance instead of type comparison (2D splines store a knot pair).
        if isinstance(t, tuple) and len(t) == 2:
            raise NotImplementedError('Domain/Range information is not yet implemented for 2D LookupTables')

        # Nudge the end knots inwards so that boundary evaluation stays
        # strictly inside the spline support.
        return np.nextafter(t[0], np.inf), np.nextafter(t[-1], -np.inf)

    @property
    @cached
    def range(self) -> Tuple:
        """Lookup table values at the two ends of :py:attr:`domain`."""
        return self(self.domain[0]), self(self.domain[1])

    @property
    def inputs(self) -> List[ca.MX]:
        """
        List of lookup table input variables.
        """
        return self.__inputs

    @property
    def function(self) -> ca.Function:
        """
        Lookup table CasADi :class:`Function`.
        """
        return self.__function

    @property
    @cached
    def __numeric_function_evaluator(self):
        # Vectorized scalar evaluator; NaN inputs propagate to NaN outputs
        # instead of being passed to the CasADi function.
        return np.vectorize(
            lambda *args: np.nan
            if np.any(np.isnan(args))
            else float(self.function(*args))
        )

    def __call__(
        self, *args: Union[float, Iterable, Timeseries]
    ) -> Union[float, np.ndarray, Timeseries]:
        """
        Evaluate the lookup table.

        :param args: Input values.
        :type args: Float, iterable of floats, or :class:`.Timeseries`
        :returns: Lookup table evaluated at input values.

        Example use::

            y = lookup_table(1.0)
            [y1, y2] = lookup_table([1.0, 2.0])
        """
        evaluator = self.__numeric_function_evaluator
        if len(args) == 1:
            arg = args[0]
            if isinstance(arg, Timeseries):
                return Timeseries(arg.times, self(arg.values))
            else:
                if hasattr(arg, "__iter__"):
                    arg = np.fromiter(arg, dtype=float)
                    return evaluator(arg)
                else:
                    arg = float(arg)
                    return evaluator(arg).item()
        else:
            # Higher-order (multi-input) evaluation supports scalars only.
            if any(isinstance(arg, Timeseries) for arg in args):
                raise TypeError(
                    "Higher-order LookupTable calls do not yet support Timeseries parameters"
                )
            elif any(hasattr(arg, "__iter__") for arg in args):
                raise TypeError(
                    "Higher-order LookupTable calls do not yet support vector parameters"
                )
            else:
                args = np.fromiter(args, dtype=float)
                return evaluator(*args)

    def reverse_call(
        self,
        y: Union[float, Iterable, Timeseries],
        domain: Tuple[float, float] = (None, None),
        detect_range_error: bool = True,
    ) -> Union[float, np.ndarray, Timeseries]:
        """Do an inverted call on this LookupTable

        Uses SciPy brentq optimizer to simulate a reversed call.
        Note: Method does not work with higher-order LookupTables

        :param y: Output value(s) to invert.
        :param domain: Optional (lower, upper) search interval; defaults to
            the spline domain.
        :param detect_range_error: If True, raise ValueError for values
            outside the lookup table range.
        """
        if isinstance(y, Timeseries):
            # Recurse and return
            return Timeseries(y.times, self.reverse_call(y.values))

        # Get domain information
        l_d, u_d = domain
        if l_d is None:
            l_d = self.domain[0]
        if u_d is None:
            u_d = self.domain[1]

        # Cast y to array of float
        if hasattr(y, "__iter__"):
            y_array = np.fromiter(y, dtype=float)
        else:
            y_array = np.array([y], dtype=float)

        # Find not np.nan
        is_not_nan = ~np.isnan(y_array)
        y_array_not_nan = y_array[is_not_nan]

        # Detect if there is a range violation
        if detect_range_error:
            l_r, u_r = self.range
            lb_viol = y_array_not_nan < l_r
            ub_viol = y_array_not_nan > u_r
            all_viol = y_array_not_nan[lb_viol | ub_viol]
            if all_viol.size > 0:
                raise ValueError(
                    "Values {} are not in lookup table range ({}, {})".format(
                        all_viol, l_r, u_r
                    )
                )

        # Construct function to do inverse evaluation
        evaluator = self.__numeric_function_evaluator

        def inv_evaluator(y_target):
            """inverse evaluator function"""
            return brentq(lambda x: evaluator(x) - y_target, l_d, u_d)

        inv_evaluator = np.vectorize(inv_evaluator)

        # Calculate x_array; NaN inputs stay NaN in the output.
        x_array = np.full_like(y_array, np.nan, dtype=float)
        if y_array_not_nan.size != 0:
            x_array[is_not_nan] = inv_evaluator(y_array_not_nan)

        # Return x
        if hasattr(y, "__iter__"):
            return x_array
        else:
            return x_array.item()
class CSVLookupTableMixin(OptimizationProblem):
    """
    Adds lookup tables to your optimization problem.

    During preprocessing, the CSV files located inside the ``lookup_tables`` subfolder are read.
    In every CSV file, the first column contains the output of the lookup table. Subsequent columns contain
    the input variables.

    Cubic B-Splines are used to turn the data points into continuous lookup tables.

    Optionally, a file ``curvefit_options.ini`` may be included inside the ``lookup_tables`` folder.
    This file contains, grouped per lookup table, the following options:

    * monotonicity:
        * is an integer, magnitude is ignored
        * if positive, causes spline to be monotonically increasing
        * if negative, causes spline to be monotonically decreasing
        * if 0, leaves spline monotonicity unconstrained

    * curvature:
        * is an integer, magnitude is ignored
        * if positive, causes spline curvature to be positive (convex)
        * if negative, causes spline curvature to be negative (concave)
        * if 0, leaves spline curvature unconstrained

    .. note::

        Currently only one-dimensional lookup tables are fully supported. Support for two-
        dimensional lookup tables is experimental.

    :cvar csv_delimiter: Column delimiter used in CSV files. Default is ``,``.
    :cvar csv_lookup_table_debug: Whether to generate plots of the spline fits. Default is ``false``.
    :cvar csv_lookup_table_debug_points: Number of evaluation points for plots. Default is ``100``.
    """

    #: Column delimiter used in CSV files
    csv_delimiter = ','

    #: Debug settings
    csv_lookup_table_debug = False
    csv_lookup_table_debug_points = 100

    def __init__(self, **kwargs):
        # Check arguments
        if 'input_folder' in kwargs:
            assert ('lookup_table_folder' not in kwargs)

            self.__lookup_table_folder = os.path.join(
                kwargs['input_folder'], 'lookup_tables')
        else:
            self.__lookup_table_folder = kwargs['lookup_table_folder']

        # Call parent
        super().__init__(**kwargs)

    def pre(self):
        """Read the lookup table CSV files and build (or load cached) splines."""
        # Call parent class first for default behaviour.
        super().pre()

        # Get curve fitting options from curvefit_options.ini file.
        # Fix: RawConfigParser.read() silently skips missing files and never
        # raises IOError, so the original try/except fallback was dead code
        # and os.path.getmtime(ini_path) below could hit a missing file.
        # Detect a missing file from read()'s return value instead.
        ini_path = os.path.join(
            self.__lookup_table_folder, 'curvefit_options.ini')
        ini_config = configparser.RawConfigParser()
        if ini_config.read(ini_path):
            no_curvefit_options = False
        else:
            logger.info(
                "CSVLookupTableMixin: No curvefit_options.ini file found. Using default values.")
            no_curvefit_options = True

        def get_curvefit_options(curve_name, no_curvefit_options=no_curvefit_options):
            # Returns (monotonicity, monotonicity2, curvature) for one curve.
            if no_curvefit_options:
                return 0, 0, 0

            curvefit_options = []

            def get_property(prop_name):
                try:
                    prop = int(ini_config.get(curve_name, prop_name))
                except configparser.NoSectionError:
                    prop = 0
                except configparser.NoOptionError:
                    prop = 0
                except ValueError:
                    raise Exception(
                        'CSVLookupTableMixin: Invalid {0} constraint for {1}. {0} should '
                        'be either -1, 0, or 1.'.format(prop_name, curve_name))
                return prop

            for prop_name in ['monotonicity', 'monotonicity2', 'curvature']:
                curvefit_options.append(get_property(prop_name))

            logger.debug("CSVLookupTableMixin: Curve fit option for {}:({},{},{})".format(
                curve_name, *curvefit_options))
            return tuple(curvefit_options)

        # Read CSV files
        logger.info(
            "CSVLookupTableMixin: Generating Splines from lookup table data.")
        self.__lookup_tables = {}
        for filename in glob.glob(os.path.join(self.__lookup_table_folder, "*.csv")):
            logger.debug(
                "CSVLookupTableMixin: Reading lookup table from {}".format(filename))

            csvinput = csv.load(filename, delimiter=self.csv_delimiter)
            output = csvinput.dtype.names[0]
            inputs = csvinput.dtype.names[1:]

            # Get monotonicity and curvature from ini file
            mono, mono2, curv = get_curvefit_options(output)

            logger.debug(
                "CSVLookupTableMixin: Output is {}, inputs are {}.".format(output, inputs))

            tck = None
            function = None
            # If tck file is newer than the csv file, first try to load the cached values from the tck file
            tck_filename = filename.replace('.csv', '.tck')
            valid_cache = False
            if os.path.exists(tck_filename):
                if no_curvefit_options:
                    valid_cache = os.path.getmtime(filename) < os.path.getmtime(tck_filename)
                else:
                    # ini_path is guaranteed to exist on this branch thanks to
                    # the missing-ini detection above.
                    valid_cache = (os.path.getmtime(filename) < os.path.getmtime(tck_filename)) and \
                        (os.path.getmtime(ini_path) < os.path.getmtime(tck_filename))
                if valid_cache:
                    logger.debug(
                        'CSVLookupTableMixin: Attempting to use cached tck values for {}'.format(output))
                    with open(tck_filename, 'rb') as f:
                        try:
                            tck, function = pickle.load(f)
                        except Exception:
                            valid_cache = False
            if not valid_cache:
                logger.info(
                    'CSVLookupTableMixin: Recalculating tck values for {}'.format(output))

            if len(csvinput.dtype.names) == 2:
                # One input column: 1D spline.
                if not valid_cache:
                    k = 3  # default value
                    # 1D spline fitting needs k+1 data points
                    if len(csvinput[output]) >= k + 1:
                        tck = BSpline1D.fit(csvinput[inputs[0]], csvinput[
                            output], k=k, monotonicity=mono, curvature=curv)
                    else:
                        raise Exception(
                            'CSVLookupTableMixin: Too few data points in {} to do spline fitting. '
                            'Need at least {} points.'.format(filename, k + 1))

                if self.csv_lookup_table_debug:
                    import pylab
                    i = np.linspace(csvinput[inputs[0]][0], csvinput[
                        inputs[0]][-1], self.csv_lookup_table_debug_points)
                    o = splev(i, tck)
                    pylab.clf()
                    # TODO: Figure out why cross-section B0607 in NZV does not
                    # conform to constraints!
                    pylab.plot(i, o)
                    pylab.plot(csvinput[inputs[0]], csvinput[
                        output], linestyle='', marker='x', markersize=10)
                    figure_filename = filename.replace('.csv', '.png')
                    pylab.savefig(figure_filename)

                symbols = [ca.SX.sym(inputs[0])]
                if not valid_cache:
                    function = ca.Function('f', symbols, [BSpline1D(*tck)(symbols[0])])
                self.__lookup_tables[output] = LookupTable(symbols, function, tck)

            elif len(csvinput.dtype.names) == 3:
                # Two input columns: 2D spline (experimental).
                if tck is None:
                    kx = 3  # default value
                    ky = 3  # default value

                    # 2D spline fitting needs (kx+1)*(ky+1) data points
                    if len(csvinput[output]) >= (kx + 1) * (ky + 1):
                        # TODO: add curvature parameters from curvefit_options.ini
                        # once 2d fit is implemented
                        tck = bisplrep(csvinput[inputs[0]], csvinput[
                            inputs[1]], csvinput[output], kx=kx, ky=ky)
                    else:
                        raise Exception(
                            'CSVLookupTableMixin: Too few data points in {} to do spline fitting. '
                            'Need at least {} points.'.format(filename, (kx + 1) * (ky + 1)))

                if self.csv_lookup_table_debug:
                    import pylab
                    i1 = np.linspace(csvinput[inputs[0]][0], csvinput[
                        inputs[0]][-1], self.csv_lookup_table_debug_points)
                    i2 = np.linspace(csvinput[inputs[1]][0], csvinput[
                        inputs[1]][-1], self.csv_lookup_table_debug_points)
                    i1, i2 = np.meshgrid(i1, i2)
                    i1 = i1.flatten()
                    i2 = i2.flatten()
                    o = bisplev(i1, i2, tck)
                    pylab.clf()
                    # NOTE(review): pylab has no plot_surface(); 3-D surface
                    # plots require an Axes3D -- this debug branch likely
                    # fails at runtime. Left as-is; confirm before relying on
                    # 2D debug plots.
                    pylab.plot_surface(i1, i2, o)
                    figure_filename = filename.replace('.csv', '.png')
                    pylab.savefig(figure_filename)

                symbols = [ca.SX.sym(inputs[0]), ca.SX.sym(inputs[1])]
                if not valid_cache:
                    function = ca.Function('f', symbols, [BSpline2D(*tck)(symbols[0], symbols[1])])
                self.__lookup_tables[output] = LookupTable(symbols, function, tck)

            else:
                raise Exception(
                    'CSVLookupTableMixin: {}-dimensional lookup tables not implemented yet.'.format(
                        len(csvinput.dtype.names)))

            if not valid_cache:
                # Cache the spline metadata. Fix: use a context manager so
                # the file handle is closed promptly (the original passed an
                # unclosed open() directly to pickle.dump).
                with open(tck_filename, 'wb') as f:
                    pickle.dump((tck, function), f, protocol=pickle.HIGHEST_PROTOCOL)

    def lookup_tables(self, ensemble_member):
        """Lookup tables for the given ensemble member, including the CSV-derived ones."""
        # Call parent class first for default values.
        lookup_tables = super().lookup_tables(ensemble_member)

        # Update lookup_tables with imported csv lookup tables
        lookup_tables.update(self.__lookup_tables)

        return lookup_tables
import logging
from collections import OrderedDict
from typing import Tuple
import rtctools.data.netcdf as netcdf
from rtctools.optimization.io_mixin import IOMixin
logger = logging.getLogger("rtctools")
class NetCDFMixin(IOMixin):
    """
    Adds NetCDF I/O to your optimization problem.

    During preprocessing, a file named timeseries_import.nc is read from the ``input`` subfolder.
    During postprocessing a file named timeseries_export.nc is written to the ``output`` subfolder.

    Both the input and output nc files are expected to follow the FEWS format for
    scalar data in a NetCDF file, i.e.:

    - They must contain a variable with the station ids (location ids) which can
      be recognized by the attribute `cf_role` set to `timeseries_id`.
    - They must contain a time variable with attributes `standard_name` = `time`
      and `axis` = `T`

    From the input file, all 2-D (or 3-D in case of ensembles) variables with dimensions equal
    to the station ids and time variable (and realization) are read.

    To map the NetCDF parameter identifier to and from an RTC-Tools variable name,
    the overridable methods :py:meth:`netcdf_id_to_variable` and
    :py:meth:`netcdf_id_from_variable` are used.

    :cvar netcdf_validate_timeseries:
        Check consistency of timeseries. Default is ``True``
    """

    #: Check consistency of timeseries.
    netcdf_validate_timeseries = True

    def netcdf_id_to_variable(self, station_id: str, parameter: str) -> str:
        """
        Maps the station_id and the parameter name to the variable name to be
        used in RTC-Tools.

        :param station_id: The station (location) id from the NetCDF file.
        :param parameter: The parameter name from the NetCDF file.

        :return: The variable name used in RTC-Tools
        """
        return '{}__{}'.format(station_id, parameter)

    def netcdf_id_from_variable(self, variable_name: str) -> Tuple[str, str]:
        """
        Maps the variable name in RTC-Tools to a station_id and parameter name
        for writing to a NetCDF file.

        :param variable_name: The RTC-Tools variable name.

        :return: A pair of station_id and parameter
        """
        return variable_name.split("__")

    def read(self):
        """
        Read the timeseries_import NetCDF file and store all found timeseries
        in the internal data store.
        """
        # Call parent class first for default behaviour
        super().read()

        dataset = netcdf.ImportDataset(self._input_folder, self.timeseries_import_basename)

        # Although they are not used outside of this method, we add some
        # variables to self for debugging purposes
        self.__timeseries_import = dataset

        # Store the import times; the first time stamp becomes the reference datetime.
        times = self.__timeseries_times = dataset.read_import_times()
        self.io.reference_datetime = self.__timeseries_times[0]

        # Timestamp check: __dt stays the common step if the series is
        # equidistant, and becomes None otherwise.
        self.__dt = times[1] - times[0] if len(times) >= 2 else 0
        for i in range(len(times) - 1):
            if times[i + 1] - times[i] != self.__dt:
                self.__dt = None
                break

        if self.netcdf_validate_timeseries:
            # Check if strictly increasing
            for i in range(len(times) - 1):
                if times[i] >= times[i + 1]:
                    raise Exception('NetCDFMixin: Time stamps must be strictly increasing.')

        # Store the station data for later use (e.g. when writing output)
        self.__stations = dataset.read_station_data()

        # Read all available timeseries from the dataset
        timeseries_var_keys = dataset.find_timeseries_variables()

        for parameter in timeseries_var_keys:
            for i, station_id in enumerate(self.__stations.station_ids):
                name = self.netcdf_id_to_variable(station_id, parameter)

                if dataset.ensemble_member_variable is not None:
                    if dataset.ensemble_member_variable.dimensions[0] in dataset.variable_dimensions(parameter):
                        # Variable carries a realization dimension: read one
                        # series per ensemble member.
                        for ensemble_member_index in range(self.__timeseries_import.ensemble_size):
                            values = dataset.read_timeseries_values(i, parameter, ensemble_member_index)
                            self.io.set_timeseries(name, self.__timeseries_times, values, ensemble_member_index)
                    else:
                        # No realization dimension: share the single series
                        # across all ensemble members.
                        values = dataset.read_timeseries_values(i, parameter, 0)
                        for ensemble_member_index in range(self.__timeseries_import.ensemble_size):
                            self.io.set_timeseries(name, self.__timeseries_times, values, ensemble_member_index)
                else:
                    values = dataset.read_timeseries_values(i, parameter, 0)
                    self.io.set_timeseries(name, self.__timeseries_times, values, 0)

                logger.debug('Read timeseries data for station id "{}" and parameter "{}", '
                             'stored under variable name "{}"'
                             .format(station_id, parameter, name))

        logger.debug("NetCDFMixin: Read timeseries")

    def write(self):
        """
        Write the output variables of all ensemble members to the
        timeseries_export NetCDF file.
        """
        # Call parent class first for default behaviour
        super().write()

        dataset = netcdf.ExportDataset(self._output_folder, self.timeseries_export_basename)

        # BUG FIX: use total_seconds() instead of the timedelta ``seconds``
        # attribute. The latter only returns the seconds *component*
        # (0..86399) and silently drops whole days for horizons > 1 day.
        times = [(dt - self.__timeseries_times[0]).total_seconds() for dt in self.__timeseries_times]
        dataset.write_times(times, self.initial_time, self.io.reference_datetime)

        output_variables = [sym.name() for sym in self.output_variables]
        output_station_ids, output_parameter_ids = zip(*(
            self.netcdf_id_from_variable(var_name) for var_name in output_variables))

        # Make sure that output_station_ids and output_parameter_ids are
        # unique, but make sure to avoid non-deterministic ordering.
        unique_station_ids = list(OrderedDict.fromkeys(output_station_ids))
        unique_parameter_ids = list(OrderedDict.fromkeys(output_parameter_ids))

        dataset.write_station_data(self.__stations, unique_station_ids)
        dataset.write_ensemble_data(self.ensemble_size)
        dataset.create_variables(unique_parameter_ids, self.ensemble_size)

        for ensemble_member in range(self.ensemble_size):
            results = self.extract_results(ensemble_member)

            for var_name, station_id, parameter_id in zip(
                    output_variables, output_station_ids, output_parameter_ids):
                # Determine the output values
                try:
                    values = results[var_name]
                    if len(values) != len(times):
                        # BUG FIX: pass the per-variable interpolation method
                        # via the ``mode`` keyword; positionally, the fourth
                        # argument of interpolate() is ``f_left``, not the mode.
                        values = self.interpolate(
                            times, self.times(var_name), values,
                            mode=self.interpolation_method(var_name))
                except KeyError:
                    try:
                        ts = self.get_timeseries(var_name, ensemble_member)
                        if len(ts.times) != len(times):
                            # BUG FIX: resample ``ts.values``; the original
                            # passed a stale/undefined local ``values`` here.
                            values = self.interpolate(
                                times, ts.times, ts.values)
                        else:
                            values = ts.values
                    except KeyError:
                        logger.error(
                            'NetCDFMixin: Output requested for non-existent variable {}. '
                            'Will not be in output file.'.format(var_name))
                        continue

                dataset.write_output_values(station_id, parameter_id, ensemble_member, values, self.ensemble_size)

        dataset.close()
import logging
from abc import ABCMeta, abstractmethod, abstractproperty
from typing import Any, Dict, Iterator, List, Tuple, Union
import casadi as ca
import numpy as np
from rtctools._internal.alias_tools import AliasDict
from rtctools._internal.debug_check_helpers import DebugLevel, debug_check
from rtctools.data.storage import DataStoreAccessor
from .timeseries import Timeseries
logger = logging.getLogger("rtctools")
# Typical type for a bound on a variable: a scalar, a per-element vector,
# or a (possibly vector) time series.
BT = Union[float, np.ndarray, Timeseries]
class LookupTable:
    """
    Base class for LookupTables.

    Concrete lookup tables are expected to provide :py:attr:`inputs` and
    :py:attr:`function`; the base class raises ``NotImplementedError`` for both.
    """

    @property
    def inputs(self) -> List[ca.MX]:
        """
        List of lookup table input variables.
        """
        raise NotImplementedError

    @property
    def function(self) -> ca.Function:
        """
        Lookup table CasADi :class:`Function`.
        """
        raise NotImplementedError
class OptimizationProblem(DataStoreAccessor, metaclass=ABCMeta):
    """
    Base class for all optimization problems.
    """

    # Defaults for the debug_check helpers (see
    # rtctools._internal.debug_check_helpers); used by the
    # __debug_check_* methods invoked from optimize().
    _debug_check_level = DebugLevel.MEDIUM
    _debug_check_options = {}
    def __init__(self, **kwargs):
        # Call parent class first for default behaviour.
        super().__init__(**kwargs)

        # Whether the transcribed problem contains discrete (integer)
        # variables; updated in optimize() from the transcription result.
        self.__mixed_integer = False
def optimize(self, preprocessing: bool = True, postprocessing: bool = True,
log_solver_failure_as_error: bool = True) -> bool:
"""
Perform one initialize-transcribe-solve-finalize cycle.
:param preprocessing: True to enable a call to ``pre`` preceding the optimization.
:param postprocessing: True to enable a call to ``post`` following the optimization.
:returns: True on success.
"""
# Deprecations / removals
if hasattr(self, 'initial_state'):
raise RuntimeError("Support for `initial_state()` has been removed. Please use `history()` instead.")
logger.info("Entering optimize()")
# Do any preprocessing, which may include changing parameter values on
# the model
if preprocessing:
self.pre()
# Check if control inputs are bounded
self.__check_bounds_control_input()
else:
logger.debug(
'Skipping Preprocessing in OptimizationProblem.optimize()')
# Transcribe problem
discrete, lbx, ubx, lbg, ubg, x0, nlp = self.transcribe()
# Create an NLP solver
logger.debug("Collecting solver options")
self.__mixed_integer = np.any(discrete)
options = {}
options.update(self.solver_options()) # Create a copy
logger.debug("Creating solver")
if options.pop('expand', False):
# NOTE: CasADi only supports the "expand" option for nlpsol. To
# also be able to expand with e.g. qpsol, we do the expansion
# ourselves here.
logger.debug("Expanding objective and constraints to SX")
expand_f_g = ca.Function('f_g', [nlp['x']], [nlp['f'], nlp['g']]).expand()
X_sx = ca.SX.sym('X', *nlp['x'].shape)
f_sx, g_sx = expand_f_g(X_sx)
nlp['f'] = f_sx
nlp['g'] = g_sx
nlp['x'] = X_sx
# Debug check for non-linearity in constraints
self.__debug_check_linearity_constraints(nlp)
# Debug check for linear independence of the constraints
self.__debug_check_linear_independence(lbx, ubx, lbg, ubg, nlp)
# Solver option
my_solver = options['solver']
del options['solver']
# Already consumed
del options['optimized_num_dir']
# Iteration callback
iteration_callback = options.pop('iteration_callback', None)
# CasADi solver to use
casadi_solver = options.pop('casadi_solver')
if isinstance(casadi_solver, str):
casadi_solver = getattr(ca, casadi_solver)
nlpsol_options = {**options}
if self.__mixed_integer:
nlpsol_options['discrete'] = discrete
if iteration_callback:
nlpsol_options['iteration_callback'] = iteration_callback
# Remove ipopt and bonmin defaults if they are not used
if my_solver != 'ipopt':
nlpsol_options.pop('ipopt', None)
if my_solver != 'bonmin':
nlpsol_options.pop('bonmin', None)
solver = casadi_solver('nlp', my_solver, nlp, nlpsol_options)
# Solve NLP
logger.info("Calling solver")
results = solver(x0=x0, lbx=lbx, ubx=ubx, lbg=ca.veccat(*lbg), ubg=ca.veccat(*ubg))
# Extract relevant stats
self.__objective_value = float(results['f'])
self.__solver_output = np.array(results['x']).ravel()
self.__solver_stats = solver.stats()
success, log_level = self.solver_success(self.__solver_stats, log_solver_failure_as_error)
return_status = self.__solver_stats['return_status']
if 'secondary_return_status' in self.__solver_stats:
return_status = "{}: {}".format(return_status, self.__solver_stats['secondary_return_status'])
wall_clock_time = "elapsed time not read"
if 't_wall_total' in self.__solver_stats:
wall_clock_time = "{} seconds".format(self.__solver_stats['t_wall_total'])
elif 't_wall_solver' in self.__solver_stats:
wall_clock_time = "{} seconds".format(self.__solver_stats['t_wall_solver'])
if success:
logger.log(log_level, "Solver succeeded with status {} ({}).".format(
return_status, wall_clock_time))
else:
try:
ii = [y[0] for y in self.loop_over_error].index(self.priority)
loop_error_indicator = self.loop_over_error[ii][1]
try:
loop_error = self.loop_over_error[ii][2]
if loop_error_indicator and loop_error in return_status:
log_level = logging.INFO
except IndexError:
if loop_error_indicator:
log_level = logging.INFO
logger.log(log_level, "Solver succeeded with status {} ({}).".format(
return_status, wall_clock_time))
except AttributeError:
logger.log(log_level, "Solver succeeded with status {} ({}).".format(
return_status, wall_clock_time))
# Do any postprocessing
if postprocessing:
self.post()
else:
logger.debug(
'Skipping Postprocessing in OptimizationProblem.optimize()')
# Done
logger.info("Done with optimize()")
return success
def __check_bounds_control_input(self) -> None:
# Checks if at the control inputs have bounds, log warning when a control input is not bounded.
bounds = self.bounds()
for variable in self.dae_variables['control_inputs']:
variable = variable.name()
if variable not in bounds:
logger.warning(
"OptimizationProblem: control input {} has no bounds.".format(variable))
    @abstractmethod
    def transcribe(self) -> Tuple[
            np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, Dict[str, ca.MX]]:
        """
        Transcribe the continuous optimization problem to a discretized, solver-ready
        optimization problem.

        :returns: A tuple ``(discrete, lbx, ubx, lbg, ubg, x0, nlp)``, as
                  consumed by :py:meth:`optimize`.
        """
        pass
def solver_options(self) -> Dict[str, Union[str, int, float, bool, str]]:
"""
Returns a dictionary of CasADi optimization problem solver options.
The default solver for continuous problems is `Ipopt <https://projects.coin-or.org/Ipopt/>`_.
The default solver for mixed integer problems is `Bonmin <http://projects.coin-or.org/Bonmin/>`_.
:returns: A dictionary of solver options. See the CasADi and
respective solver documentation for details.
"""
options = {'error_on_fail': False,
'optimized_num_dir': 3,
'casadi_solver': ca.nlpsol}
if self.__mixed_integer:
options['solver'] = 'bonmin'
bonmin_options = options['bonmin'] = {}
bonmin_options['algorithm'] = 'B-BB'
bonmin_options['nlp_solver'] = 'Ipopt'
bonmin_options['nlp_log_level'] = 2
bonmin_options['linear_solver'] = 'mumps'
else:
options['solver'] = 'ipopt'
ipopt_options = options['ipopt'] = {}
ipopt_options['linear_solver'] = 'mumps'
return options
def solver_success(self,
solver_stats: Dict[str, Union[str, bool]],
log_solver_failure_as_error: bool) -> Tuple[bool, int]:
"""
Translates the returned solver statistics into a boolean and log level
to indicate whether the solve was succesful, and how to log it.
:param solver_stats: Dictionary containing information about the
solver status. See explanation below.
:param log_solver_failure_as_error: Indicates whether a solve failure
Should be logged as an error or info message.
``solver_stats`` typically consist of three fields:
* return_status: ``str``
* secondary_return_status: ``str``
* success: ``bool``
By default we rely on CasADi's interpretation of the return_status
(and secondary status) to the success variable, with an exception for
IPOPT (see below).
The logging level is typically ``logging.INFO`` for success, and
``logging.ERROR`` for failure. Only for IPOPT an exception is made for
`Not_Enough_Degrees_Of_Freedom`, which returns ``logging.WARNING`` instead.
For example, this can happen when too many goals are specified, and
lower priority goals cannot improve further on the current result.
:returns: A tuple indicating whether or not the solver has succeeded, and what level to log it with.
"""
success = solver_stats['success']
log_level = logging.INFO if success else logging.ERROR
if (self.solver_options()['solver'].lower() in ['bonmin', 'ipopt']
and solver_stats['return_status'] in ['Not_Enough_Degrees_Of_Freedom']):
log_level = logging.WARNING
if log_level == logging.ERROR and not log_solver_failure_as_error:
log_level = logging.INFO
return success, log_level
    @abstractproperty
    def solver_input(self) -> ca.MX:
        """
        The symbolic input to the NLP solver (the stacked vector of all
        optimization variables).
        """
        pass
    @abstractmethod
    def extract_results(self, ensemble_member: int = 0) -> Dict[str, np.ndarray]:
        """
        Extracts state and control input time series from optimizer results.

        :param ensemble_member: The ensemble member index.

        :returns: A dictionary of result time series.
        """
        pass
    @property
    def objective_value(self) -> float:
        """
        The last obtained objective function value.

        Set by :py:meth:`optimize` from the solver results.
        """
        return self.__objective_value
    @property
    def solver_output(self) -> np.ndarray:
        """
        The raw output from the last NLP solver run, as a flattened array
        of decision variable values.
        """
        return self.__solver_output
    @property
    def solver_stats(self) -> Dict[str, Any]:
        """
        The stats from the last NLP solver run, as returned by the CasADi
        solver's ``stats()``.
        """
        return self.__solver_stats
    def pre(self) -> None:
        """
        Preprocessing logic is performed here.

        Called by :py:meth:`optimize` before transcription when
        preprocessing is enabled. The default implementation does nothing.
        """
        pass
    @abstractproperty
    def dae_residual(self) -> ca.MX:
        """
        Symbolic DAE residual of the model.
        """
        pass
    @abstractproperty
    def dae_variables(self) -> Dict[str, List[ca.MX]]:
        """
        Dictionary of symbolic variables for the DAE residual, grouped by
        category (e.g. ``'control_inputs'``).
        """
        pass
    @property
    def path_variables(self) -> List[ca.MX]:
        """
        List of additional, time-dependent optimization variables, not covered by the DAE model.

        The default implementation returns an empty list.
        """
        return []
    @abstractmethod
    def variable(self, variable: str) -> ca.MX:
        """
        Returns an :class:`MX` symbol for the given variable.

        :param variable: Variable name.

        :returns: The associated CasADi :class:`MX` symbol.
        """
        raise NotImplementedError
    @property
    def extra_variables(self) -> List[ca.MX]:
        """
        List of additional, time-independent optimization variables, not covered by the DAE model.

        The default implementation returns an empty list.
        """
        return []
    @property
    def output_variables(self) -> List[ca.MX]:
        """
        List of variables that the user requests to be included in the output files.

        The default implementation returns an empty list.
        """
        return []
    def delayed_feedback(self) -> List[Tuple[str, str, float]]:
        """
        Returns the delayed feedback mappings. These are given as a list of triples :math:`(x, y, \\tau)`,
        to indicate that :math:`y = x(t - \\tau)`.

        :returns: A list of triples. The default implementation returns an
                  empty list.

        Example::

            def delayed_feedback(self):
                fb1 = ['x', 'y', 0.1]
                fb2 = ['x', 'z', 0.2]
                return [fb1, fb2]
        """
        return []
    @property
    def ensemble_size(self) -> int:
        """
        The number of ensemble members.

        The default implementation returns 1 (no ensemble).
        """
        return 1
    def ensemble_member_probability(self, ensemble_member: int) -> float:
        """
        The probability of an ensemble member occurring.

        The base implementation always returns 1.0; overriding
        implementations may raise for out-of-range indices.

        :param ensemble_member: The ensemble member index.

        :returns: The probability of an ensemble member occurring.

        :raises: IndexError
        """
        return 1.0
    def parameters(self, ensemble_member: int) -> AliasDict[str, Union[bool, int, float, ca.MX]]:
        """
        Returns a dictionary of parameters.

        The default implementation returns an empty alias dictionary.

        :param ensemble_member: The ensemble member index.

        :returns: A dictionary of parameter names and values.
        """
        return AliasDict(self.alias_relation)
    def string_parameters(self, ensemble_member: int) -> Dict[str, str]:
        """
        Returns a dictionary of string parameters.

        The default implementation returns an empty dictionary.

        :param ensemble_member: The ensemble member index.

        :returns: A dictionary of string parameter names and values.
        """
        return {}
    def constant_inputs(self, ensemble_member: int) -> AliasDict[str, Timeseries]:
        """
        Returns a dictionary of constant inputs.

        The default implementation returns an empty alias dictionary.

        :param ensemble_member: The ensemble member index.

        :returns: A dictionary of constant input names and time series.
        """
        return AliasDict(self.alias_relation)
    def lookup_tables(self, ensemble_member: int) -> AliasDict[str, LookupTable]:
        """
        Returns a dictionary of lookup tables.

        The default implementation returns an empty alias dictionary.

        :param ensemble_member: The ensemble member index.

        :returns: A dictionary of variable names and lookup tables.
        """
        return AliasDict(self.alias_relation)
@staticmethod
def merge_bounds(a: Tuple[BT, BT], b: Tuple[BT, BT]) -> Tuple[BT, BT]:
"""
Returns a pair of bounds which is the intersection of the two pairs of
bounds given as input.
:param a: First pair ``(upper, lower)`` bounds
:param b: Second pair ``(upper, lower)`` bounds
:returns: A pair of ``(upper, lower)`` bounds which is the
intersection of the two input bounds.
"""
a, A = a
b, B = b
# Make sure we are dealing with the correct types
if __debug__:
for v in (a, A, b, B):
if isinstance(v, np.ndarray):
assert v.ndim == 1
assert np.issubdtype(v.dtype, np.number)
else:
assert isinstance(v, (float, int, Timeseries))
all_bounds = [a, A, b, B]
# First make sure that we treat single element vectors as scalars
for i, v in enumerate(all_bounds):
if isinstance(v, np.ndarray) and np.prod(v.shape) == 1:
all_bounds[i] = v.item()
# Upcast lower bounds to be of equal type, and upper bounds as well.
for i, j in [(0, 2), (2, 0), (1, 3), (3, 1)]:
v1 = all_bounds[i]
v2 = all_bounds[j]
# We only check for v1 being of a "smaller" type than v2, as we
# know we will encounter the reverse as well.
if isinstance(v1, type(v2)):
# Same type, nothing to do.
continue
elif isinstance(v1, (int, float)) and isinstance(v2, Timeseries):
all_bounds[i] = Timeseries(v2.times, np.full_like(v2.values, v1))
elif isinstance(v1, np.ndarray) and isinstance(v2, Timeseries):
if v2.values.ndim != 2 or len(v1) != v2.values.shape[1]:
raise Exception(
"Mismatching vector size when upcasting to Timeseries, {} vs. {}.".format(v1, v2))
all_bounds[i] = Timeseries(v2.times, np.broadcast_to(v1, v2.values.shape))
elif isinstance(v1, (int, float)) and isinstance(v2, np.ndarray):
all_bounds[i] = np.full_like(v2, v1)
a, A, b, B = all_bounds
assert isinstance(a, type(b))
assert isinstance(A, type(B))
# Merge the bounds
m, M = None, None
if isinstance(a, np.ndarray):
if not a.shape == b.shape:
raise Exception("Cannot merge vector minimum bounds of non-equal size")
m = np.maximum(a, b)
elif isinstance(a, Timeseries):
if len(a.times) != len(b.times):
raise Exception("Cannot merge Timeseries minimum bounds with different lengths")
elif not np.all(a.times == b.times):
raise Exception("Cannot merge Timeseries minimum bounds with non-equal times")
elif not a.values.shape == b.values.shape:
raise Exception("Cannot merge vector Timeseries minimum bounds of non-equal size")
m = Timeseries(a.times, np.maximum(a.values, b.values))
else:
m = max(a, b)
if isinstance(A, np.ndarray):
if not A.shape == B.shape:
raise Exception("Cannot merge vector maximum bounds of non-equal size")
M = np.minimum(A, B)
elif isinstance(A, Timeseries):
if len(A.times) != len(B.times):
raise Exception("Cannot merge Timeseries maximum bounds with different lengths")
elif not np.all(A.times == B.times):
raise Exception("Cannot merge Timeseries maximum bounds with non-equal times")
elif not A.values.shape == B.values.shape:
raise Exception("Cannot merge vector Timeseries maximum bounds of non-equal size")
M = Timeseries(A.times, np.minimum(A.values, B.values))
else:
M = min(A, B)
return m, M
    def bounds(self) -> AliasDict[str, Tuple[BT, BT]]:
        """
        Returns variable bounds as a dictionary mapping variable names to a pair of bounds.
        A bound may be a constant, or a time series.

        The default implementation returns an empty alias dictionary.

        :returns: A dictionary of variable names and ``(lower, upper)`` bound pairs.
                  The bounds may be numbers or :class:`.Timeseries` objects.

        Example::

            def bounds(self):
                return {'x': (1.0, 2.0), 'y': (2.0, 3.0)}
        """
        return AliasDict(self.alias_relation)
    def history(self, ensemble_member: int) -> AliasDict[str, Timeseries]:
        """
        Returns the state history.

        The default implementation returns an empty alias dictionary.

        :param ensemble_member: The ensemble member index.

        :returns: A dictionary of variable names and historical time series (up to and including t0).
        """
        return AliasDict(self.alias_relation)
    def variable_is_discrete(self, variable: str) -> bool:
        """
        Returns ``True`` if the provided variable is discrete.

        The default implementation treats every variable as continuous.

        :param variable: Variable name.

        :returns: ``True`` if variable is discrete (integer).
        """
        return False
    def variable_nominal(self, variable: str) -> Union[float, np.ndarray]:
        """
        Returns the nominal value of the variable. Variables are scaled by replacing them with
        their nominal value multiplied by the new variable.

        The default implementation returns 1 (no scaling).

        :param variable: Variable name.

        :returns: The nominal value of the variable.
        """
        return 1
    @property
    def initial_time(self) -> float:
        """
        The initial time in seconds, i.e. the first element of the
        problem's time stamps.
        """
        return self.times()[0]
    @property
    def initial_residual(self) -> ca.MX:
        """
        The initial equation residual.

        Initial equations are used to find consistent initial conditions.

        The default implementation returns a trivial (zero) residual.

        :returns: An :class:`MX` object representing F in the initial equation F = 0.
        """
        return ca.MX(0)
    def seed(self, ensemble_member: int) -> AliasDict[str, Union[float, Timeseries]]:
        """
        Seeding data. The optimization algorithm is seeded with the data returned by this method.

        The default implementation returns an empty alias dictionary.

        :param ensemble_member: The ensemble member index.

        :returns: A dictionary of variable names and seed time series.
        """
        return AliasDict(self.alias_relation)
    def objective(self, ensemble_member: int) -> ca.MX:
        """
        The objective function for the given ensemble member.

        Call :func:`OptimizationProblem.state_at` to return a symbol representing a model variable at a given time.

        The default implementation returns a zero (empty) objective.

        :param ensemble_member: The ensemble member index.

        :returns: An :class:`MX` object representing the objective function.

        Example::

            def objective(self, ensemble_member):
                # Return value of state 'x' at final time:
                times = self.times()
                return self.state_at('x', times[-1], ensemble_member)
        """
        return ca.MX(0)
    def path_objective(self, ensemble_member: int) -> ca.MX:
        """
        Returns a path objective the given ensemble member.

        Path objectives apply to all times and ensemble members simultaneously.
        Call :func:`OptimizationProblem.state` to return a time- and ensemble-member-independent
        symbol representing a model variable.

        The default implementation returns a zero (empty) path objective.

        :param ensemble_member: The ensemble member index. This index is currently unused,
                                and here for future use only.

        :returns: A :class:`MX` object representing the path objective.

        Example::

            def path_objective(self, ensemble_member):
                # Minimize x(t) for all t
                return self.state('x')
        """
        return ca.MX(0)
    def constraints(self, ensemble_member: int) -> List[
            Tuple[ca.MX, Union[float, np.ndarray], Union[float, np.ndarray]]]:
        """
        Returns a list of constraints for the given ensemble member.

        Call :func:`OptimizationProblem.state_at` to return a symbol representing a model variable at a given time.

        The default implementation returns an empty list (no extra constraints).

        :param ensemble_member: The ensemble member index.

        :returns: A list of triples ``(f, m, M)``, with an :class:`MX` object representing
                  the constraint function ``f``, lower bound ``m``, and upper bound ``M``.
                  The bounds must be numbers.

        Example::

            def constraints(self, ensemble_member):
                t = 1.0
                constraint1 = (
                    2 * self.state_at('x', t, ensemble_member),
                    2.0, 4.0)
                constraint2 = (
                    self.state_at('x', t, ensemble_member) + self.state_at('y', t, ensemble_member),
                    2.0, 3.0)
                return [constraint1, constraint2]
        """
        return []
    def path_constraints(self, ensemble_member: int) -> List[
            Tuple[ca.MX, Union[float, np.ndarray], Union[float, np.ndarray]]]:
        """
        Returns a list of path constraints.

        Path constraints apply to all times and ensemble members simultaneously.
        Call :func:`OptimizationProblem.state` to return a time- and ensemble-member-independent
        symbol representing a model variable.

        The default implementation returns an empty list (no path constraints).

        :param ensemble_member: The ensemble member index. This index may only
                                be used to supply member-dependent bounds.

        :returns: A list of triples ``(f, m, M)``, with an :class:`MX` object representing
                  the path constraint function ``f``, lower bound ``m``, and upper bound ``M``.
                  The bounds may be numbers or :class:`.Timeseries` objects.

        Example::

            def path_constraints(self, ensemble_member):
                # 2 * x must lie between 2 and 4 for every time instance.
                path_constraint1 = (2 * self.state('x'), 2.0, 4.0)
                # x + y must lie between 2 and 3 for every time instance
                path_constraint2 = (self.state('x') + self.state('y'), 2.0, 3.0)
                return [path_constraint1, path_constraint2]
        """
        return []
    def post(self) -> None:
        """
        Postprocessing logic is performed here.

        Called by :py:meth:`optimize` after solving when postprocessing is
        enabled. The default implementation does nothing.
        """
        pass
    @property
    def equidistant(self) -> bool:
        """
        ``True`` if all time series are equidistant.

        The default implementation conservatively returns ``False``.
        """
        return False
INTERPOLATION_LINEAR = 0
INTERPOLATION_PIECEWISE_CONSTANT_FORWARD = 1
INTERPOLATION_PIECEWISE_CONSTANT_BACKWARD = 2
def interpolate(
self,
t: Union[float, np.ndarray],
ts: np.ndarray,
fs: np.ndarray,
f_left: float = np.nan,
f_right: float = np.nan,
mode: int = INTERPOLATION_LINEAR) -> Union[float, np.ndarray]:
"""
Linear interpolation over time.
:param t: Time at which to evaluate the interpolant.
:type t: float or vector of floats
:param ts: Time stamps.
:type ts: numpy array
:param fs: Function values at time stamps ts.
:param f_left: Function value left of leftmost time stamp.
:param f_right: Function value right of rightmost time stamp.
:param mode: Interpolation mode.
:returns: The interpolated value.
"""
if isinstance(fs, np.ndarray) and fs.ndim == 2:
# 2-D array of values. Interpolate each column separately.
if len(t) == len(ts) and np.all(t == ts):
# Early termination; nothing to interpolate
return fs.copy()
fs_int = [self.interpolate(t, ts, fs[:, i], f_left, f_right, mode) for i in range(fs.shape[1])]
return np.stack(fs_int, axis=1)
elif hasattr(t, '__iter__'):
if len(t) == len(ts) and np.all(t == ts):
# Early termination; nothing to interpolate
return fs.copy()
return self.__interpolate(t, ts, fs, f_left, f_right, mode)
else:
if ts[0] == t:
# Early termination; nothing to interpolate
return fs[0]
return self.__interpolate(t, ts, fs, f_left, f_right, mode)
    def __interpolate(self, t, ts, fs, f_left=np.nan, f_right=np.nan, mode=INTERPOLATION_LINEAR):
        """
        Linear interpolation over time.

        :param t: Time at which to evaluate the interpolant.
        :type t: float or vector of floats
        :param ts: Time stamps.
        :type ts: numpy array
        :param fs: Function values at time stamps ts.
        :param f_left: Function value left of leftmost time stamp.
            Pass ``None`` to raise instead when t lies left of the range.
        :param f_right: Function value right of rightmost time stamp.
            Pass ``None`` to raise instead when t lies right of the range.
        :param mode: Interpolation mode.

        Note that it is assumed that `ts` is sorted. No such assumption is made for `t`
        .

        :returns: The interpolated value.
        """
        # ``None`` fill values mean out-of-range points are an error.
        if f_left is None:
            if (min(t) if hasattr(t, '__iter__') else t) < ts[0]:
                raise Exception("Interpolation: Point {} left of range".format(t))
        if f_right is None:
            if (max(t) if hasattr(t, '__iter__') else t) > ts[-1]:
                raise Exception("Interpolation: Point {} right of range".format(t))
        if mode == self.INTERPOLATION_LINEAR:
            # No need to handle f_left / f_right; NumPy already does this for us
            return np.interp(t, ts, fs, f_left, f_right)
        elif mode == self.INTERPOLATION_PIECEWISE_CONSTANT_FORWARD:
            # Take the value at the last time stamp <= t (clamped at index 0).
            v = fs[np.maximum(np.searchsorted(ts, t, side='right') - 1, 0)]
        elif mode == self.INTERPOLATION_PIECEWISE_CONSTANT_BACKWARD:
            # Take the value at the first time stamp >= t (clamped at the end).
            v = fs[np.minimum(np.searchsorted(ts, t, side='left'), len(ts) - 1)]
        else:
            raise NotImplementedError
        # Handle f_left / f_right:
        if hasattr(t, "__iter__"):
            v[t < ts[0]] = f_left
            v[t > ts[-1]] = f_right
        else:
            if t < ts[0]:
                v = f_left
            elif t > ts[-1]:
                v = f_right
        return v
    @abstractproperty
    def controls(self) -> List[str]:
        """
        List of names of the control variables (excluding aliases).
        """
        pass
    @abstractmethod
    def discretize_controls(self, resolved_bounds: AliasDict) -> Tuple[
            int, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """
        Performs the discretization of the control inputs, filling lower and upper
        bound vectors for the resulting optimization variables, as well as an initial guess.

        :param resolved_bounds: :class:`AliasDict` of numerical bound values. This is the
                                same dictionary as returned by :func:`bounds`, but with all
                                parameter symbols replaced with their numerical values.

        :returns: The number of control variables in the optimization problem, a lower
                  bound vector, an upper bound vector, a seed vector, and a dictionary
                  of offset values.
        """
        pass
    def dynamic_parameters(self) -> List[ca.MX]:
        """
        Returns a list of parameter symbols that may vary from run to run. The values
        of these parameters are not cached.

        The default implementation returns an empty list.

        :returns: A list of parameter symbols.
        """
        return []
    @abstractmethod
    def extract_controls(self, ensemble_member: int = 0) -> Dict[str, np.ndarray]:
        """
        Extracts control input time series from optimizer results.

        Must return a dictionary of result time series.

        :param ensemble_member: The ensemble member index.

        :returns: A dictionary of control input time series.
        """
        pass
    def control_vector(self, variable: str, ensemble_member: int = 0) -> Union[ca.MX, List[ca.MX]]:
        """
        Return the optimization variables for the entire time horizon of the given state.

        The default implementation delegates to :py:meth:`state_vector`.

        :param variable: Variable name.
        :param ensemble_member: The ensemble member index.

        :returns: A vector of control input symbols for the entire time horizon.

        :raises: KeyError
        """
        return self.state_vector(variable, ensemble_member)
    def control(self, variable: str) -> ca.MX:
        """
        Returns an :class:`MX` symbol for the given control input, not bound to any time.

        The default implementation delegates to :py:meth:`variable`.

        :param variable: Variable name.

        :returns: :class:`MX` symbol for given control input.

        :raises: KeyError
        """
        return self.variable(variable)
    @abstractmethod
    def control_at(self, variable: str, t: float, ensemble_member: int = 0, scaled: bool = False) -> ca.MX:
        """
        Returns an :class:`MX` symbol representing the given control input at the given time.

        :param variable: Variable name.
        :param t: Time.
        :param ensemble_member: The ensemble member index.
        :param scaled: True to return the scaled variable.

        :returns: :class:`MX` symbol representing the control input at the given time.

        :raises: KeyError
        """
        pass
    @abstractproperty
    def differentiated_states(self) -> List[str]:
        """
        List of names of the differentiated state variables (excluding aliases).
        """
        pass
    @abstractproperty
    def algebraic_states(self) -> List[str]:
        """
        List of names of the algebraic state variables (excluding aliases).
        """
        pass
@abstractmethod
def discretize_states(self, resolved_bounds: AliasDict) -> Tuple[
        int, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    Perform the discretization of the states.

    Fills lower and upper bound vectors for the resulting optimization
    variables, as well as an initial guess.

    :param resolved_bounds: :class:`AliasDict` of numerical bound values.
        This is the same dictionary as returned by :func:`bounds`, but
        with all parameter symbols replaced with their numerical values.

    :returns: The number of control variables in the optimization problem,
        a lower bound vector, an upper bound vector, a seed vector,
        and a dictionary of vector offset values.
    """
    # NOTE(review): the docstring says "control variables", which looks like a
    # copy-paste from discretize_controls; presumably "state variables" is
    # meant here — confirm against the implementing mixin.
    pass
@abstractmethod
def extract_states(self, ensemble_member: int = 0) -> Dict[str, np.ndarray]:
    """
    Extracts state time series from optimizer results.

    Must return a dictionary of result time series.

    :param ensemble_member: The ensemble member index.

    :returns: A dictionary of state time series.
    """
    pass
@abstractmethod
def state_vector(self, variable: str, ensemble_member: int = 0) -> Union[ca.MX, List[ca.MX]]:
    """
    Return the optimization variables for the entire time horizon of the given state.

    :param variable: Variable name.
    :param ensemble_member: The ensemble member index.

    :returns: A vector of state symbols for the entire time horizon.

    :raises: KeyError
    """
    pass
def state(self, variable: str) -> ca.MX:
    """
    Returns an :class:`MX` symbol for the given state, not bound to any time.

    :param variable: Variable name.

    :returns: :class:`MX` symbol for given state.

    :raises: KeyError
    """
    # Time-independent symbol; delegate to variable().
    return self.variable(variable)
@abstractmethod
def state_at(self, variable: str, t: float, ensemble_member: int = 0, scaled: bool = False) -> ca.MX:
    """
    Returns an :class:`MX` symbol representing the given variable at the given time.

    Must be implemented by the deriving (discretizing) class.

    :param variable: Variable name.
    :param t: Time.
    :param ensemble_member: The ensemble member index.
    :param scaled: True to return the scaled variable.

    :returns: :class:`MX` symbol representing the state at the given time.

    :raises: KeyError
    """
    pass
@abstractmethod
def extra_variable(self, variable: str, ensemble_member: int = 0) -> ca.MX:
    """
    Returns an :class:`MX` symbol representing the extra variable inside the state vector.

    :param variable: Variable name.
    :param ensemble_member: The ensemble member index.

    :returns: :class:`MX` symbol representing the extra variable.

    :raises: KeyError
    """
    pass
@abstractmethod
def states_in(self, variable: str, t0: float = None, tf: float = None, ensemble_member: int = 0) -> Iterator[ca.MX]:
    """
    Iterates over symbols for states in the interval [t0, tf].

    :param variable: Variable name.
    :param t0: Left bound of interval. If equal to None, the initial time is used.
    :param tf: Right bound of interval. If equal to None, the final time is used.
    :param ensemble_member: The ensemble member index.

    :raises: KeyError
    """
    pass
@abstractmethod
def integral(self, variable: str, t0: float = None, tf: float = None, ensemble_member: int = 0) -> ca.MX:
    """
    Returns an expression for the integral over the interval [t0, tf].

    :param variable: Variable name.
    :param t0: Left bound of interval. If equal to None, the initial time is used.
    :param tf: Right bound of interval. If equal to None, the final time is used.
    :param ensemble_member: The ensemble member index.

    :returns: :class:`MX` object representing the integral.

    :raises: KeyError
    """
    pass
@abstractmethod
def der(self, variable: str) -> ca.MX:
    """
    Returns an :class:`MX` symbol for the time derivative given state, not bound to any time.

    :param variable: Variable name.

    :returns: :class:`MX` symbol for given state.

    :raises: KeyError
    """
    pass
@abstractmethod
def der_at(self, variable: str, t: float, ensemble_member: int = 0) -> ca.MX:
    """
    Returns an expression for the time derivative of the specified variable at time t.

    :param variable: Variable name.
    :param t: Time.
    :param ensemble_member: The ensemble member index.

    :returns: :class:`MX` object representing the derivative.

    :raises: KeyError
    """
    pass
def get_timeseries(self, variable: str, ensemble_member: int = 0) -> Timeseries:
    """
    Looks up a timeseries from the internal data store.

    Must be overridden by a data-store providing mixin.

    :param variable: Variable name.
    :param ensemble_member: The ensemble member index.

    :returns: The requested time series.
    :rtype: :class:`.Timeseries`

    :raises: KeyError
    """
    raise NotImplementedError
def set_timeseries(
        self,
        variable: str,
        timeseries: Timeseries,
        ensemble_member: int = 0,
        output: bool = True,
        check_consistency: bool = True) -> None:
    """
    Sets a timeseries in the internal data store.

    Must be overridden by a data-store providing mixin.

    :param variable: Variable name.
    :param timeseries: Time series data.
    :type timeseries: iterable of floats, or :class:`.Timeseries`
    :param ensemble_member: The ensemble member index.
    :param output: Whether to include this time series in output data files.
    :param check_consistency: Whether to check consistency between the time stamps on
        the new timeseries object and any existing time stamps.
    """
    raise NotImplementedError
def timeseries_at(self, variable: str, t: float, ensemble_member: int = 0) -> float:
    """
    Return the value of a time series at the given time.

    Must be overridden by a data-store providing mixin.

    :param variable: Variable name.
    :param t: Time.
    :param ensemble_member: The ensemble member index.

    :returns: The interpolated value of the time series.

    :raises: KeyError
    """
    raise NotImplementedError
def map_path_expression(self, expr: ca.MX, ensemble_member: int) -> ca.MX:
    """
    Maps the path expression `expr` over the entire time horizon of the optimization problem.

    Must be overridden by the discretizing class.

    :param expr: An :class:`MX` path expression.

    :returns: An :class:`MX` expression evaluating `expr` over the entire time horizon.
    """
    raise NotImplementedError
@debug_check(DebugLevel.HIGH)
def __debug_check_linearity_constraints(self, nlp):
    # Debug helper: classify the constraint set of the NLP as linear,
    # quadratic, or nonlinear, by probing derivatives at NaN.
    x = nlp['x']
    f = nlp['f']
    g = nlp['g']
    # Expand the MX graph to SX so the jacobian becomes an explicit expression.
    expand_f_g = ca.Function('f_g', [x], [f, g]).expand()
    X_sx = ca.SX.sym('X', *x.shape)
    f_sx, g_sx = expand_f_g(X_sx)
    # Evaluating the jacobian at NaN: any entry that still depends on x
    # evaluates to NaN. A NaN-free (regular) result means a constant
    # jacobian, i.e. linear constraints.
    jac = ca.Function('j', [X_sx], [ca.jacobian(g_sx, X_sx)]).expand()
    if jac(np.nan).is_regular():
        logger.info("The constraints are linear")
    else:
        # Same NaN probe one derivative deeper: a constant Hessian means
        # the constraints are (at most) quadratic.
        hes = ca.Function('j', [X_sx], [ca.jacobian(ca.jacobian(g_sx, X_sx), X_sx)]).expand()
        if hes(np.nan).is_regular():
            logger.info("The constraints are quadratic")
        else:
            logger.info("The constraints are nonlinear")
@debug_check(DebugLevel.VERYHIGH)
def __debug_check_linear_independence(self, lbx, ubx, lbg, ubg, nlp):
    """
    Debug helper that reports (linearly dependent) constant assignments.

    It logs:

    1. variables whose value is fixed by more than one single-variable
       linear equality constraint, and
    2. variables that have equal lower/upper bounds *and* an equality
       constraint fixing them (which IPOPT's default
       ``fixed_variable_treatment = make_parameter`` would reduce to a
       trivial ``1 = 1`` constraint).
    """
    # NOTE: the trailing metadata garbage that had been appended to the last
    # logger.info(...) line has been removed; the statement mirrors the
    # duplicate-constraint report above.
    x = nlp['x']
    f = nlp['f']
    g = nlp['g']
    # Expand to SX so the jacobian/sparsity queries below are explicit.
    expand_f_g = ca.Function('f_g', [x], [f, g]).expand()
    x_sx = ca.SX.sym('X', *x.shape)
    f_sx, g_sx = expand_f_g(x_sx)
    x, f, g = x_sx, f_sx, g_sx
    lbg = np.array(ca.vertsplit(ca.veccat(*lbg))).ravel()
    ubg = np.array(ca.vertsplit(ca.veccat(*ubg))).ravel()
    # Find the linear constraints: a jacobian-times-vector product evaluated
    # at NaN stays NaN-free only for rows that are linear in x.
    g_sjac = ca.Function('Af', [x], [ca.jtimes(g, x, x.ones(*x.shape))])
    res = g_sjac(np.nan)
    res = np.array(res).ravel()
    g_is_linear = ~np.isnan(res)
    # Find the rows in the jacobian with only a single entry
    g_jac_csr = ca.DM(ca.Function('tmp', [x], [g]).sparsity_jac(0, 0)).tocsc().tocsr()
    g_single_variable = (np.diff(g_jac_csr.indptr) == 1)
    # Find the rows which are equality constraints
    g_eq_constraint = (lbg == ubg)
    # The intersection of all selections are constraints like we want
    g_constant_assignment = g_is_linear & g_single_variable & g_eq_constraint
    # Map of variable (index) to constraints/row numbers
    var_index_assignment = {}
    for i in range(g.size1()):
        if g_constant_assignment[i]:
            var_ind = g_jac_csr.getrow(i).indices[0]
            var_index_assignment.setdefault(var_ind, []).append(i)
    var_names, named_x, named_f, named_g = self._debug_get_named_nlp(nlp)
    # Report variables whose value is set by more than one constraint.
    for vi, g_inds in var_index_assignment.items():
        if len(g_inds) > 1:
            logger.info("Variable '{}' has duplicate constraints setting its value:".format(var_names[vi]))
            for g_i in g_inds:
                logger.info("row {}: {} = {}".format(g_i, named_g[g_i], lbg[g_i]))
    # Find variables for which the bounds are equal, but also an equality
    # constraint is set. This would result in a constraint `1 = 1` with
    # the default IPOPT option `fixed_variable_treatment = make_parameter`
    x_inds = np.flatnonzero(lbx == ubx)
    for vi in x_inds:
        if vi in var_index_assignment:
            logger.info("Variable '{}' has equal bounds (value = {}), but also the following equality constraints:"
                        .format(var_names[vi], lbx[vi]))
            for g_i in var_index_assignment[vi]:
                logger.info("row {}: {} = {}".format(g_i, named_g[g_i], lbg[g_i]))
import functools
import logging
import sys
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from typing import Callable, Dict, List, Union
import casadi as ca
import numpy as np
from .optimization_problem import OptimizationProblem
from .timeseries import Timeseries
logger = logging.getLogger("rtctools")
class _EmptyEnsembleList(list):
"""
An indexable object containing infinitely many empty lists.
Only to be used as a placeholder.
"""
def __getitem__(self, key):
return []
class _EmptyEnsembleOrderedDict(OrderedDict):
"""
An indexable object containing infinitely many empty OrderedDicts.
Only to be used as a placeholder.
"""
def __getitem__(self, key):
return OrderedDict()
class Goal(metaclass=ABCMeta):
    r"""
    Base class for lexicographic goal programming goals.

    A goal is defined by overriding the :func:`function` method.

    :cvar function_range: Range of goal function. *Required if a target is set*.
    :cvar function_nominal: Nominal value of function. Used for scaling. Default is ``1``.
    :cvar target_min: Desired lower bound for goal function. Default is ``numpy.nan``.
    :cvar target_max: Desired upper bound for goal function. Default is ``numpy.nan``.
    :cvar priority: Integer priority of goal. Default is ``1``.
    :cvar weight: Optional weighting applied to the goal. Default is ``1.0``.
    :cvar order: Penalization order of goal violation. Default is ``2``.
    :cvar critical: If ``True``, the algorithm will abort if this goal cannot be fully met.
        Default is ``False``.
    :cvar relaxation: Amount of slack added to the hard constraints related to the goal.
        Must be a nonnegative value. Default is ``0.0``.

    The target bounds indicate the range within the function should stay, *if possible*. Goals
    are, in that sense, *soft*, as opposed to standard hard constraints.

    Four types of goals can be created:

    1. Minimization goal if no target bounds are set:

       .. math::
            \min f

    2. Lower bound goal if ``target_min`` is set:

       .. math::
            m \leq f

    3. Upper bound goal if ``target_max`` is set:

       .. math::
            f \leq M

    4. Combined lower and upper bound goal if ``target_min`` and ``target_max`` are both set:

       .. math::
            m \leq f \leq M

    Lower priority goals take precedence over higher priority goals.

    Goals with the same priority are weighted off against each other in a single objective
    function.

    In goals where a target is set:

    * The function range interval must be provided as this is used to introduce hard
      constrains on the value that the function can take. If one is unsure about which
      value the function can take, it is recommended to overestimate this interval.
      However, an overestimated interval will negatively influence how accurately the
      target bounds are met.
    * The target provided must be contained in the function range.
    * The function nominal is used to scale the constraints.
    * If both a target_min and a target_max are set, the target maximum must be at least
      equal to minimum one.
    * In a path goal, the target can be a Timeseries.

    In minimization goals:

    * The function range is not used and therefore cannot be set.
    * The function nominal is used to scale the function value in the objective function.
      To ensure that all goals are given a similar importance, it is crucial to provide
      an accurate estimate of this parameter.

    The goal violation value is taken to the order'th power in the objective function of the
    final optimization problem.

    Relaxation is used to loosen the constraints that are set after the
    optimization of the goal's priority. The unit of the relaxation is equal
    to that of the goal function.

    A goal can be written in vector form. In a vector goal:

    * The goal size determines how many goals there are.
    * The goal function has shape ``(goal size, 1)``.
    * The function is either minimized or has, possibly various, targets.
    * Function nominal can either be an array with as many entries as the goal size or
      have a single value.
    * Function ranges can either be an array with as many entries as the goal size or
      have a single value.
    * In a goal, the target can either be an array with as many entries as the goal size
      or have a single value.
    * In a path goal, the target can also be a Timeseries whose values are either a
      1-dimensional vector or have as many columns as the goal size.

    Example definition of the point goal :math:`x(t) \geq 1.1` for :math:`t=1.0` at
    priority 1::

        class MyGoal(Goal):
            def function(self, optimization_problem, ensemble_member):
                # State 'x' at time t = 1.0
                t = 1.0
                return optimization_problem.state_at('x', t, ensemble_member)

            function_range = (1.0, 2.0)
            target_min = 1.1
            priority = 1

    Example definition of the path goal :math:`x(t) \geq 1.1` for all :math:`t` at
    priority 2::

        class MyPathGoal(Goal):
            def function(self, optimization_problem, ensemble_member):
                # State 'x' at any point in time
                return optimization_problem.state('x')

            function_range = (1.0, 2.0)
            target_min = 1.1
            priority = 2

    Note that for path goals, the ensemble member index is not passed to the call
    to :func:`OptimizationProblem.state`. This call returns a time-independent symbol
    that is also independent of the active ensemble member. Path goals are
    applied to all times and all ensemble members simultaneously.
    """

    @abstractmethod
    def function(self, optimization_problem: OptimizationProblem, ensemble_member: int) -> ca.MX:
        """
        This method returns a CasADi :class:`MX` object describing the goal function.

        :returns: A CasADi :class:`MX` object.
        """
        pass

    #: Range of goal function
    function_range = (np.nan, np.nan)
    #: Nominal value of function (used for scaling)
    function_nominal = 1.0
    #: Desired lower bound for goal function
    target_min = np.nan
    #: Desired upper bound for goal function
    target_max = np.nan
    #: Lower priority goals take precedence over higher priority goals.
    priority = 1
    #: Goals with the same priority are weighted off against each other in a
    #: single objective function.
    weight = 1.0
    #: The goal violation value is taken to the order'th power in the objective
    #: function.
    order = 2
    #: The size of the goal if it's a vector goal.
    size = 1
    #: Critical goals must always be fully satisfied.
    critical = False
    #: Absolute relaxation applied to the optimized values of this goal
    relaxation = 0.0
    #: Timeseries ID for function value data (optional)
    function_value_timeseries_id = None
    #: Timeseries ID for goal violation data (optional)
    violation_timeseries_id = None

    @property
    def has_target_min(self) -> bool:
        """
        ``True`` if the user goal has min bounds.
        """
        # A Timeseries target always counts as set; scalars/arrays count only
        # if at least one entry is finite (NaN/-inf mean "not set").
        if isinstance(self.target_min, Timeseries):
            return True
        else:
            return np.any(np.isfinite(self.target_min))

    @property
    def has_target_max(self) -> bool:
        """
        ``True`` if the user goal has max bounds.
        """
        if isinstance(self.target_max, Timeseries):
            return True
        else:
            return np.any(np.isfinite(self.target_max))

    @property
    def has_target_bounds(self) -> bool:
        """
        ``True`` if the user goal has min/max bounds.
        """
        return (self.has_target_min or self.has_target_max)

    @property
    def is_empty(self) -> bool:
        """
        ``True`` if the goal has targets set, but none of the target values is
        finite anywhere. Minimization goals (no targets at all) are never
        considered empty.
        """
        target_min_set = isinstance(self.target_min, Timeseries) or np.any(np.isfinite(self.target_min))
        target_max_set = isinstance(self.target_max, Timeseries) or np.any(np.isfinite(self.target_max))
        if not target_min_set and not target_max_set:
            # A minimization goal
            return False
        target_min = self.target_min
        if isinstance(target_min, Timeseries):
            target_min = target_min.values
        target_max = self.target_max
        if isinstance(target_max, Timeseries):
            target_max = target_max.values
        # Empty when neither side contains any finite value.
        min_empty = not np.any(np.isfinite(target_min))
        max_empty = not np.any(np.isfinite(target_max))
        return min_empty and max_empty

    def get_function_key(self, optimization_problem: OptimizationProblem, ensemble_member: int) -> str:
        """
        Returns a key string uniquely identifying the goal function. This
        is used to eliminate linearly dependent constraints from the optimization problem.
        """
        if hasattr(self, 'function_key'):
            return self.function_key
        # This must be deterministic. See RTCTOOLS-485.
        if not hasattr(Goal, '_function_key_counter'):
            Goal._function_key_counter = 0
        self.function_key = '{}_{}'.format(self.__class__.__name__, Goal._function_key_counter)
        Goal._function_key_counter += 1
        return self.function_key

    def __repr__(self) -> str:
        return '{}(priority={}, target_min={}, target_max={}, function_range={})'.format(
            self.__class__, self.priority, self.target_min, self.target_max, self.function_range)
class StateGoal(Goal):
    r"""
    Convenience base class for lexicographic goal programming path goals that
    act on a single model state.

    A state goal is defined by setting at least the ``state`` class variable.

    :cvar state: State on which the goal acts. *Required*.
    :cvar target_min: Desired lower bound for goal function. Default is ``numpy.nan``.
    :cvar target_max: Desired upper bound for goal function. Default is ``numpy.nan``.
    :cvar priority: Integer priority of goal. Default is ``1``.
    :cvar weight: Optional weighting applied to the goal. Default is ``1.0``.
    :cvar order: Penalization order of goal violation. Default is ``2``.
    :cvar critical: If ``True``, the algorithm will abort if this goal cannot be fully met.
        Default is ``False``.

    Example definition of the goal :math:`x(t) \geq 1.1` for all :math:`t` at priority 2::

        class MyStateGoal(StateGoal):
            state = 'x'
            target_min = 1.1
            priority = 2

    Contrary to ordinary ``Goal`` objects, state goals must be constructed with an
    ``OptimizationProblem`` instance, from which the state's bounds (used as the
    function range) and nominal value are extracted::

        my_state_goal = MyStateGoal(optimization_problem)

    ``StateGoal`` is only a helper. State goals can equally be defined with ``Goal``
    as direct base class, by implementing the ``function`` method and providing the
    ``function_range`` and ``function_nominal`` class variables manually.
    """

    #: The state on which the goal acts.
    state = None

    def __init__(self, optimization_problem):
        """
        Initialize the state goal object.

        :param optimization_problem: ``OptimizationProblem`` instance.
        """
        # A state goal is meaningless without a state to act on.
        if self.state is None:
            raise Exception('Please specify a state.')

        # Only goals with targets carry a function range; derive it from the
        # model bounds of the state.
        if self.has_target_bounds:
            try:
                self.function_range = optimization_problem.bounds()[self.state]
            except KeyError:
                raise Exception('State {} has no bounds or does not exist in the model.'.format(self.state))

            lower, upper = self.function_range
            if lower is None:
                raise Exception('Please provide a lower bound for state {}.'.format(self.state))
            if upper is None:
                raise Exception('Please provide an upper bound for state {}.'.format(self.state))

        # Scale the goal function with the state's nominal value.
        self.function_nominal = optimization_problem.variable_nominal(self.state)

        # Use the canonical (de-aliased, signed) name of the state as the
        # function key, so goals on aliased states are recognized as equal.
        canonical, sign = optimization_problem.alias_relation.canonical_signed(self.state)
        self.function_key = canonical if sign > 0.0 else '-' + canonical

    def function(self, optimization_problem, ensemble_member):
        # Time- and ensemble-independent symbol: this is a path goal.
        return optimization_problem.state(self.state)

    def __repr__(self):
        return f'{self.__class__}(priority={self.priority}, state={self.state}, ' \
               f'target_min={self.target_min}, target_max={self.target_max}, ' \
               f'function_range={self.function_range})'
class _GoalConstraint:
    # Internal helper pairing a goal with the hard constraint derived from
    # it: a constraint function plus lower/upper bounds (floats, arrays for
    # vector goals, or Timeseries for path constraints).

    def __init__(
            self,
            goal: Goal,
            function: Callable[[OptimizationProblem], ca.MX],
            m: Union[float, np.ndarray, Timeseries],
            M: Union[float, np.ndarray, Timeseries],
            optimized: bool):
        assert isinstance(m, (float, np.ndarray, Timeseries))
        assert isinstance(M, (float, np.ndarray, Timeseries))
        # Lower and upper bound must be of the exact same kind.
        assert type(m) == type(M)
        # NumPy arrays only allowed for vector goals
        if isinstance(m, np.ndarray):
            assert len(m) == goal.size
            assert len(M) == goal.size
        self.goal = goal
        self.function = function
        self.min = m
        self.max = M
        self.optimized = optimized

    def update_bounds(self, other, enforce='self'):
        # Merge this constraint's bounds with another constraint's bounds.
        # NOTE: a.update_bounds(b) is _not_ the same as b.update_bounds(a).
        # See how the 'enforce' parameter is used.
        min_, max_ = self.min, self.max
        other_min, other_max = other.min, other.max
        # Work on raw arrays when the bounds are Timeseries.
        if isinstance(min_, Timeseries):
            assert isinstance(max_, Timeseries)
            assert isinstance(other_min, Timeseries)
            assert isinstance(other_max, Timeseries)
            min_ = min_.values
            max_ = max_.values
            other_min = other_min.values
            other_max = other_max.values
        # Tighten to the intersection of both bound sets.
        min_ = np.maximum(min_, other_min)
        max_ = np.minimum(max_, other_max)
        # Ensure new constraint bounds do not loosen or shift
        # previous bounds due to numerical errors.
        # NOTE(review): these clips reuse the already-updated min_/max_ from
        # the intersection above, so the two statements in each branch are
        # order-dependent — confirm against upstream before restructuring.
        if enforce == 'self':
            min_ = np.minimum(max_, other_min)
            max_ = np.maximum(min_, other_max)
        else:
            min_ = np.minimum(min_, other_max)
            max_ = np.maximum(max_, other_min)
        # Ensure consistency of bounds. Bounds may become inconsistent due to
        # small numerical computation errors.
        min_ = np.minimum(min_, max_)
        # Re-wrap into Timeseries when that is what we started with.
        if isinstance(self.min, Timeseries):
            self.min = Timeseries(self.min.times, min_)
            self.max = Timeseries(self.max.times, max_)
        else:
            self.min = min_
            self.max = max_
class _GoalProgrammingMixinBase(OptimizationProblem, metaclass=ABCMeta):
def _gp_n_objectives(self, subproblem_objectives, subproblem_path_objectives, ensemble_member):
    """
    Count the total number of objective entries of a subproblem: the stacked
    row count of all regular plus all path objectives for the given
    ensemble member.
    """
    def _n_rows(objective_funcs):
        # Evaluate each objective and stack them; size1() is the row count.
        return ca.vertcat(*[f(self, ensemble_member) for f in objective_funcs]).size1()

    return _n_rows(subproblem_objectives) + _n_rows(subproblem_path_objectives)
def _gp_objective(self, subproblem_objectives, n_objectives, ensemble_member):
    """
    Sum all regular objectives of a subproblem for the given ensemble
    member, optionally normalized by the number of objectives. Returns a
    zero MX when there are no objectives.
    """
    if not subproblem_objectives:
        return ca.MX(0)
    stacked = ca.vertcat(*[f(self, ensemble_member) for f in subproblem_objectives])
    total = ca.sum1(stacked)
    if self.goal_programming_options()['scale_by_problem_size']:
        total = total / n_objectives
    return total
def _gp_path_objective(self, subproblem_path_objectives, n_objectives, ensemble_member):
    """
    Sum all path objectives of a subproblem for the given ensemble member,
    optionally normalized by the number of objectives. Returns a zero MX
    when there are no path objectives.
    """
    if not subproblem_path_objectives:
        return ca.MX(0)
    stacked = ca.vertcat(*[f(self, ensemble_member) for f in subproblem_path_objectives])
    total = ca.sum1(stacked)
    if self.goal_programming_options()['scale_by_problem_size']:
        # Each path objective is already divided by its number of active
        # time steps when `scale_by_problem_size` is set; here we only
        # normalize by the objective count.
        total = total / n_objectives
    return total
@abstractmethod
def goal_programming_options(self) -> Dict[str, Union[float, bool]]:
    """
    Returns a dictionary of goal programming options.

    Must be implemented by the deriving class.
    """
    raise NotImplementedError()
def goals(self) -> List[Goal]:
    """
    User problem returns list of :class:`Goal` objects.

    :returns: A list of goals.
    """
    # Default: no point goals; user problems override this.
    return []
def path_goals(self) -> List[Goal]:
    """
    User problem returns list of path :class:`Goal` objects.

    :returns: A list of path goals.
    """
    # Default: no path goals; user problems override this.
    return []
def _gp_min_max_arrays(self, g, target_shape=None):
    """
    Broadcasts the goal target minimum and target maximum to arrays of a desired
    target shape.

    Depending on whether g is a vector goal or not, the output shape differs:

    - A 2-D array of size (goal.size, target_shape or 1) if the goal size
      is larger than one, i.e. a vector goal
    - A 1-D array of size (target_shape or 1, ) otherwise
    """
    times = self.times()
    m, M = None, None
    # Lower target: interpolate Timeseries onto the problem's time grid
    # (filling with -inf outside), or broadcast scalars/arrays.
    if isinstance(g.target_min, Timeseries):
        m = self.interpolate(
            times, g.target_min.times, g.target_min.values, -np.inf, -np.inf)
        if m.ndim > 1:
            # Vector goal: make rows correspond to goal entries.
            m = m.transpose()
    elif isinstance(g.target_min, np.ndarray) and target_shape:
        m = np.broadcast_to(g.target_min, (target_shape, g.size)).transpose()
    elif target_shape:
        m = np.full(target_shape, g.target_min)
    else:
        m = np.array([g.target_min]).transpose()
    # Upper target: same treatment, filling with +inf.
    if isinstance(g.target_max, Timeseries):
        M = self.interpolate(
            times, g.target_max.times, g.target_max.values, np.inf, np.inf)
        if M.ndim > 1:
            M = M.transpose()
    elif isinstance(g.target_max, np.ndarray) and target_shape:
        M = np.broadcast_to(g.target_max, (target_shape, g.size)).transpose()
    elif target_shape:
        M = np.full(target_shape, g.target_max)
    else:
        M = np.array([g.target_max]).transpose()
    # Vector goals with 1-D targets: repeat the row for every goal entry.
    if g.size > 1 and m.ndim == 1:
        m = np.broadcast_to(m, (g.size, len(m)))
    if g.size > 1 and M.ndim == 1:
        M = np.broadcast_to(M, (g.size, len(M)))
    # Postcondition: shapes match the contract in the docstring.
    if g.size > 1:
        assert m.shape == (g.size, 1 if target_shape is None else target_shape)
    else:
        assert m.shape == (1 if target_shape is None else target_shape, )
    assert m.shape == M.shape
    return m, M
def _gp_validate_goals(self, goals, is_path_goal):
    # Validate user-specified goals before building the goal programming
    # subproblems. Raises Exception on invalid nominals, ranges, targets,
    # non-monotonic target sequences, and option conflicts.
    goals = sorted(goals, key=lambda x: x.priority)
    options = self.goal_programming_options()
    # Validate goal definitions
    for goal in goals:
        m, M = goal.function_range
        # The function range should not be a symbolic expression
        if isinstance(m, ca.MX):
            assert m.is_constant()
            if m.size1() == 1:
                m = float(m)
            else:
                m = np.array(m.to_DM())
        if isinstance(M, ca.MX):
            assert M.is_constant()
            if M.size1() == 1:
                M = float(M)
            else:
                M = np.array(M.to_DM())
        assert isinstance(m, (float, int, np.ndarray))
        assert isinstance(M, (float, int, np.ndarray))
        if np.any(goal.function_nominal <= 0):
            raise Exception("Nonpositive nominal value specified for goal {}".format(goal))
        if goal.critical and not goal.has_target_bounds:
            raise Exception("Minimization goals cannot be critical")
        if goal.critical:
            # Allow a function range for backwards compatibility reasons.
            # Maybe raise a warning that its not actually used?
            pass
        elif goal.has_target_bounds:
            # Goals with targets need a finite, consistent function range.
            if not np.all(np.isfinite(m)) or not np.all(np.isfinite(M)):
                raise Exception("No function range specified for goal {}".format(goal))
            if np.any(m >= M):
                raise Exception("Invalid function range for goal {}".format(goal))
            if goal.weight <= 0:
                raise Exception("Goal weight should be positive for goal {}".format(goal))
        else:
            # Minimization goals must not set a function range.
            if goal.function_range != (np.nan, np.nan):
                raise Exception("Specifying function range not allowed for goal {}".format(goal))
        if not is_path_goal:
            # Point goals are evaluated at a single time instance.
            if isinstance(goal.target_min, Timeseries):
                raise Exception("Target min cannot be a Timeseries for goal {}".format(goal))
            if isinstance(goal.target_max, Timeseries):
                raise Exception("Target max cannot be a Timeseries for goal {}".format(goal))
        try:
            int(goal.priority)
        except ValueError:
            raise Exception("Priority of not int or castable to int for goal {}".format(goal))
        if options['keep_soft_constraints']:
            if goal.relaxation != 0.0:
                raise Exception("Relaxation not allowed with `keep_soft_constraints` for goal {}".format(goal))
            if goal.violation_timeseries_id is not None:
                raise Exception(
                    "Violation timeseries id not allowed with `keep_soft_constraints` for goal {}".format(goal))
        else:
            # Vector goals are only supported with `keep_soft_constraints`.
            if goal.size > 1:
                raise Exception("Option `keep_soft_constraints` needs to be set for vector goal {}".format(goal))
        if goal.critical and goal.size > 1:
            raise Exception("Vector goal cannot be critical for goal {}".format(goal))
    if is_path_goal:
        target_shape = len(self.times())
    else:
        target_shape = None
    # Check consistency and monotonicity of goals. Scalar target min/max
    # of normal goals are also converted to arrays to unify checks with
    # path goals.
    if options['check_monotonicity']:
        for e in range(self.ensemble_size):
            # Store the previous goal of a certain function key we
            # encountered, such that we can compare to it.
            fk_goal_map = {}
            for goal in goals:
                fk = goal.get_function_key(self, e)
                prev = fk_goal_map.get(fk)
                fk_goal_map[fk] = goal
                if prev is not None:
                    # Goals on the same function must have monotonically
                    # tightening targets with increasing priority.
                    goal_m, goal_M = self._gp_min_max_arrays(goal, target_shape)
                    other_m, other_M = self._gp_min_max_arrays(prev, target_shape)
                    indices = np.where(np.logical_not(np.logical_or(
                        np.isnan(goal_m), np.isnan(other_m))))
                    if goal.has_target_min:
                        if np.any(goal_m[indices] < other_m[indices]):
                            raise Exception(
                                'Target minimum of goal {} must be greater or equal than '
                                'target minimum of goal {}.'.format(goal, prev))
                    indices = np.where(np.logical_not(np.logical_or(
                        np.isnan(goal_M), np.isnan(other_M))))
                    if goal.has_target_max:
                        if np.any(goal_M[indices] > other_M[indices]):
                            raise Exception(
                                'Target maximum of goal {} must be less or equal than '
                                'target maximum of goal {}'.format(goal, prev))
    for goal in goals:
        # Per-goal check: targets must lie strictly inside the function range.
        goal_m, goal_M = self._gp_min_max_arrays(goal, target_shape)
        goal_lb = np.broadcast_to(goal.function_range[0], goal_m.shape[::-1]).transpose()
        goal_ub = np.broadcast_to(goal.function_range[1], goal_M.shape[::-1]).transpose()
        if goal.has_target_min and goal.has_target_max:
            indices = np.where(np.logical_not(np.logical_or(
                np.isnan(goal_m), np.isnan(goal_M))))
            if np.any(goal_m[indices] > goal_M[indices]):
                raise Exception("Target minimum exceeds target maximum for goal {}".format(goal))
        if goal.has_target_min and not goal.critical:
            indices = np.where(np.isfinite(goal_m))
            if np.any(goal_m[indices] <= goal_lb[indices]):
                raise Exception(
                    'Target minimum should be greater than the lower bound of the function range for goal {}'
                    .format(goal))
            if np.any(goal_m[indices] > goal_ub[indices]):
                raise Exception(
                    'Target minimum should not be greater than the upper bound of the function range for goal {}'
                    .format(goal))
        if goal.has_target_max and not goal.critical:
            indices = np.where(np.isfinite(goal_M))
            if np.any(goal_M[indices] >= goal_ub[indices]):
                raise Exception(
                    'Target maximum should be smaller than the upper bound of the function range for goal {}'
                    .format(goal))
            if np.any(goal_M[indices] < goal_lb[indices]):
                raise Exception(
                    'Target maximum should not be smaller than the lower bound of the function range for goal {}'
                    .format(goal))
        if goal.relaxation < 0.0:
            raise Exception('Relaxation of goal {} should be a nonnegative value'.format(goal))
def _gp_goal_constraints(self, goals, sym_index, options, is_path_goal):
"""
There are three ways in which a goal turns into objectives/constraints:
1. A goal with target bounds results in a part for the objective (the
violation variable), and 1 or 2 constraints (target min, max, or both).
2. A goal without target bounds (i.e. minimization goal) results in just a
part for the objective.
3. A critical goal results in just a (pair of) constraint(s). These are hard
constraints, which need to be put in the constraint store to guarantee
linear independence.
"""
epsilons = []
objectives = []
soft_constraints = [[] for ensemble_member in range(self.ensemble_size)]
hard_constraints = [[] for ensemble_member in range(self.ensemble_size)]
extra_constants = []
eps_format = "eps_{}_{}"
min_format = "min_{}_{}"
max_format = "max_{}_{}"
if is_path_goal:
eps_format = "path_" + eps_format
min_format = "path_" + min_format
max_format = "path_" + max_format
for j, goal in enumerate(goals):
if goal.critical:
assert goal.size == 1, "Critical goals cannot be vector goals"
epsilon = np.zeros(len(self.times()) if is_path_goal else 1)
elif goal.has_target_bounds:
epsilon = ca.MX.sym(eps_format.format(sym_index, j), goal.size)
epsilons.append(epsilon)
# Make symbols for the target bounds (if set)
if goal.has_target_min:
min_variable = min_format.format(sym_index, j)
# NOTE: When using a vector goal, we want to be sure that its constraints
# and objective end up _exactly_ equal to its non-vector equivalent. We
# therefore have to get rid of any superfluous/trivial constraints that
# would otherwise be generated by the vector goal.
target_min_slice_inds = np.full(goal.size, True)
if isinstance(goal.target_min, Timeseries):
target_min = Timeseries(goal.target_min.times, goal.target_min.values)
inds = np.logical_or(np.isnan(target_min.values), np.isneginf(target_min.values))
target_min.values[inds] = -sys.float_info.max
n_times = len(goal.target_min.times)
target_min_slice_inds = ~np.all(np.broadcast_to(inds.transpose(), (goal.size, n_times)), axis=1)
elif isinstance(goal.target_min, np.ndarray):
target_min = goal.target_min.copy()
inds = np.logical_or(np.isnan(target_min), np.isneginf(target_min))
target_min[inds] = -sys.float_info.max
target_min_slice_inds = ~inds
else:
target_min = goal.target_min
extra_constants.append((min_variable, target_min))
else:
min_variable = None
if goal.has_target_max:
max_variable = max_format.format(sym_index, j)
target_max_slice_inds = np.full(goal.size, True)
if isinstance(goal.target_max, Timeseries):
target_max = Timeseries(goal.target_max.times, goal.target_max.values)
inds = np.logical_or(np.isnan(target_max.values), np.isposinf(target_max.values))
target_max.values[inds] = sys.float_info.max
n_times = len(goal.target_max.times)
target_max_slice_inds = ~np.all(np.broadcast_to(inds.transpose(), (goal.size, n_times)), axis=1)
elif isinstance(goal.target_max, np.ndarray):
target_max = goal.target_max.copy()
inds = np.logical_or(np.isnan(target_max), np.isposinf(target_max))
target_max[inds] = sys.float_info.max
target_max_slice_inds = ~inds
else:
target_max = goal.target_max
extra_constants.append((max_variable, target_max))
else:
max_variable = None
# Make objective for soft constraints and minimization goals
if not goal.critical:
if hasattr(goal, '_objective_func'):
_objective_func = goal._objective_func
elif goal.has_target_bounds:
if is_path_goal and options['scale_by_problem_size']:
goal_m, goal_M = self._gp_min_max_arrays(goal, target_shape=len(self.times()))
goal_active = np.isfinite(goal_m) | np.isfinite(goal_M)
n_active = np.sum(goal_active.astype(int), axis=-1)
# Avoid possible division by zero if goal is inactive
n_active = np.maximum(n_active, 1)
else:
n_active = 1
def _objective_func(problem, ensemble_member,
goal=goal, epsilon=epsilon, is_path_goal=is_path_goal,
n_active=n_active):
if is_path_goal:
epsilon = problem.variable(epsilon.name())
else:
epsilon = problem.extra_variable(epsilon.name(), ensemble_member)
return goal.weight * ca.constpow(epsilon, goal.order) / n_active
else:
if is_path_goal and options['scale_by_problem_size']:
n_active = len(self.times())
else:
n_active = 1
def _objective_func(problem, ensemble_member, goal=goal, is_path_goal=is_path_goal,
n_active=n_active):
f = goal.function(problem, ensemble_member) / goal.function_nominal
return goal.weight * ca.constpow(f, goal.order) / n_active
objectives.append(_objective_func)
# Make constraints for goals with target bounds
if goal.has_target_bounds:
if goal.critical:
for ensemble_member in range(self.ensemble_size):
constraint = self._gp_goal_hard_constraint(
goal, epsilon, None, ensemble_member, options, is_path_goal)
hard_constraints[ensemble_member].append(constraint)
else:
for ensemble_member in range(self.ensemble_size):
# We use a violation variable formulation, with the violation
# variables epsilon bounded between 0 and 1.
def _soft_constraint_func(problem, target, bound, inds,
goal=goal, epsilon=epsilon, ensemble_member=ensemble_member,
is_path_constraint=is_path_goal):
if is_path_constraint:
target = problem.variable(target)
eps = problem.variable(epsilon.name())
else:
target = problem.parameters(ensemble_member)[target]
eps = problem.extra_variable(epsilon.name(), ensemble_member)
inds = inds.nonzero()[0].astype(int).tolist()
f = goal.function(problem, ensemble_member)
nominal = goal.function_nominal
return ca.if_else(ca.fabs(target) < sys.float_info.max,
(f - eps * (bound - target) - target) / nominal,
0.0)[inds]
if goal.has_target_min and np.any(target_min_slice_inds):
_f = functools.partial(
_soft_constraint_func,
target=min_variable,
bound=goal.function_range[0],
inds=target_min_slice_inds)
constraint = _GoalConstraint(goal, _f, 0.0, np.inf, False)
soft_constraints[ensemble_member].append(constraint)
if goal.has_target_max and np.any(target_max_slice_inds):
_f = functools.partial(
_soft_constraint_func,
target=max_variable,
bound=goal.function_range[1],
inds=target_max_slice_inds)
constraint = _GoalConstraint(goal, _f, -np.inf, 0.0, False)
soft_constraints[ensemble_member].append(constraint)
return epsilons, objectives, soft_constraints, hard_constraints, extra_constants
def _gp_goal_hard_constraint(self, goal, epsilon, existing_constraint, ensemble_member, options, is_path_goal):
    """
    Turn a (previously optimized) goal into a hard constraint for subsequent
    priorities, by fixing the achieved violation variable values.

    :param goal: The goal to constrain.
    :param epsilon: Achieved violation variable values (array; one entry per
                    collocation time for path goals, a single entry otherwise).
    :param existing_constraint: Previously stored constraint for the same
                                function key, whose bounds are merged in, or None.
    :param ensemble_member: Ensemble member index.
    :param options: Goal programming options dictionary.
    :param is_path_goal: Whether the goal applies at every collocation time.
    :returns: A ``_GoalConstraint`` bounding the goal function.
    """
    if not is_path_goal:
        # Non-path goals have a single violation variable value.
        epsilon = epsilon[:1]
    goal_m, goal_M = self._gp_min_max_arrays(goal, target_shape=epsilon.shape[0])
    if goal.has_target_bounds:
        # We use a violation variable formulation, with the violation
        # variables epsilon bounded between 0 and 1.
        m, M = np.full_like(epsilon, -np.inf, dtype=np.float64), np.full_like(epsilon, np.inf, dtype=np.float64)
        # A function range does not have to be specified for critical
        # goals. Avoid multiplying with NaN in that case.
        if goal.has_target_min:
            m = (epsilon * ((goal.function_range[0] - goal_m) if not goal.critical else 0.0)
                 + goal_m - goal.relaxation) / goal.function_nominal
        if goal.has_target_max:
            M = (epsilon * ((goal.function_range[1] - goal_M) if not goal.critical else 0.0)
                 + goal_M + goal.relaxation) / goal.function_nominal
        if goal.has_target_min and goal.has_target_max:
            # Avoid comparing with NaN
            inds = ~(np.isnan(m) | np.isnan(M))
            inds[inds] &= np.abs(m[inds] - M[inds]) < options['equality_threshold']
            if np.any(inds):
                # Near-equal bounds are collapsed to their midpoint (equality constraint).
                avg = 0.5 * (m + M)
                m[inds] = M[inds] = avg[inds]
        # Times at which no (finite) target applies are left unconstrained.
        m[~np.isfinite(goal_m)] = -np.inf
        M[~np.isfinite(goal_M)] = np.inf
        inds = epsilon > options['violation_tolerance']
        if np.any(inds):
            # Where the violation variable exceeds the tolerance, fix the goal
            # function to its achieved value (plus/minus the relaxation) instead.
            if is_path_goal:
                expr = self.map_path_expression(goal.function(self, ensemble_member), ensemble_member)
            else:
                expr = goal.function(self, ensemble_member)
            function = ca.Function('f', [self.solver_input], [expr])
            value = np.array(function(self.solver_output))
            m[inds] = (value - goal.relaxation) / goal.function_nominal
            M[inds] = (value + goal.relaxation) / goal.function_nominal
        m -= options['constraint_relaxation']
        M += options['constraint_relaxation']
    else:
        # Minimization goal: epsilon encodes the achieved objective value.
        # Epsilon encodes the position within the function range.
        if options['fix_minimized_values'] and goal.relaxation == 0.0:
            m = epsilon / goal.function_nominal
            M = epsilon / goal.function_nominal
            # Fixing values introduces an equality on a possibly nonlinear
            # function; the problem can no longer be assumed linear.
            self.check_collocation_linearity = False
            self.linear_collocation = False
        else:
            m = -np.inf * np.ones(epsilon.shape)
            M = (epsilon + goal.relaxation) / goal.function_nominal + options['constraint_relaxation']
    if is_path_goal:
        m = Timeseries(self.times(), m)
        M = Timeseries(self.times(), M)
    else:
        m = m[0]
        M = M[0]
    constraint = _GoalConstraint(
        goal,
        lambda problem, ensemble_member=ensemble_member, goal=goal: (
            goal.function(problem, ensemble_member) / goal.function_nominal),
        m, M, True)
    # Epsilon is fixed. Override previous {min,max} constraints for this
    # state.
    if existing_constraint:
        constraint.update_bounds(existing_constraint, enforce='other')
    return constraint
def _gp_update_constraint_store(self, constraint_store, constraints):
for ensemble_member in range(self.ensemble_size):
for other in constraints[ensemble_member]:
fk = other.goal.get_function_key(self, ensemble_member)
try:
constraint_store[ensemble_member][fk].update_bounds(other)
except KeyError:
constraint_store[ensemble_member][fk] = other
def priority_started(self, priority: int) -> None:
    """
    Called when optimization for goals of certain priority is started.

    :param priority: The priority level that was started.
    """
    # Default implementation is a no-op hook; subclasses may override.
    pass
def priority_completed(self, priority: int) -> None:
    """
    Called after optimization for goals of certain priority is completed.

    :param priority: The priority level that was completed.
    """
    # Default implementation is a no-op hook; subclasses may override.
    # (Fixed: dataset-extraction residue that was fused onto this line.)
    pass
from typing import List, Tuple, Union
from .goal_programming_mixin import Goal, GoalProgrammingMixin
class _MeasurementGoal(Goal):
    """
    Least-squares goal matching an initial model state to a measurement
    time series at the initial time (priority -2).
    """

    order = 2
    priority = -2

    def __init__(self, state, measurement_id, max_deviation=1.0):
        self.__state = state
        self.__measurement_id = measurement_id
        self.function_nominal = max_deviation

    def function(self, optimization_problem, ensemble_member):
        problem = optimization_problem
        modeled = problem.state_at(
            self.__state, problem.initial_time, ensemble_member)
        measured = problem.timeseries_at(
            self.__measurement_id, problem.initial_time, ensemble_member)
        return modeled - measured
class _SmoothingGoal(Goal):
    """
    Least-squares goal minimizing the distance between a pair of states at
    the initial time, to smooth initial guesses (priority -1).
    """

    order = 2
    priority = -1

    def __init__(self, state1, state2, max_deviation=1.0):
        self.__state1 = state1
        self.__state2 = state2
        self.function_nominal = max_deviation

    def function(self, optimization_problem, ensemble_member):
        problem = optimization_problem
        first = problem.state_at(self.__state1, problem.initial_time, ensemble_member)
        second = problem.state_at(self.__state2, problem.initial_time, ensemble_member)
        return first - second
class InitialStateEstimationMixin(GoalProgrammingMixin):
    """
    Adds initial state estimation to your optimization problem *using goal programming*.

    Before any other goals are evaluated, first, the deviation between initial
    state measurements and their respective model states is minimized in the
    least squares sense (1DVAR, priority -2). Secondly, the distance between
    pairs of states is minimized, again in the least squares sense, so that
    "smooth" initial guesses are provided for states without measurements
    (priority -1).

    .. note::

        There are types of problems where, in addition to minimizing
        differences between states and measurements, it is advisable to
        perform a steady-state initialization using additional initial-time
        model equations.  For hydraulic models, for instance, it is often
        helpful to require that the time-derivative of the flow variables
        vanishes at the initial time.
    """

    def initial_state_measurements(self) -> List[Union[Tuple[str, str], Tuple[str, str, float]]]:
        """
        List of pairs ``(state, measurement_id)`` or triples
        ``(state, measurement_id, max_deviation)``, relating states to
        measurement time series IDs.

        The default maximum deviation is ``1.0``.
        """
        return []

    def initial_state_smoothing_pairs(self) -> List[Union[Tuple[str, str], Tuple[str, str, float]]]:
        """
        List of pairs ``(state1, state2)`` or triples
        ``(state1, state2, max_deviation)``, relating states the distance of
        which is to be minimized.

        The default maximum deviation is ``1.0``.
        """
        return []

    def goals(self):
        """Extend the parent goals with measurement and smoothing goals."""
        g = super().goals()

        for measurement in self.initial_state_measurements():
            g.append(_MeasurementGoal(*measurement))

        for smoothing_pair in self.initial_state_smoothing_pairs():
            g.append(_SmoothingGoal(*smoothing_pair))

        # Fixed: dataset-extraction residue that was fused onto this line.
        return g
import itertools
import logging
from typing import Dict, Union
import casadi as ca
import numpy as np
import pkg_resources
import pymoca
import pymoca.backends.casadi.api
from rtctools._internal.alias_tools import AliasDict
from rtctools._internal.caching import cached
from rtctools._internal.casadi_helpers import substitute_in_external
from .optimization_problem import OptimizationProblem
from .timeseries import Timeseries
logger = logging.getLogger("rtctools")
class ModelicaMixin(OptimizationProblem):
    """
    Adds a `Modelica <http://www.modelica.org/>`_ model to your optimization problem.

    During preprocessing, the Modelica files located inside the ``model`` subfolder are loaded.

    :cvar modelica_library_folders:
        Folders in which any referenced Modelica libraries are to be found. Default is an empty list.

    .. note::
        NOTE(review): the indentation of this class was reconstructed from a
        whitespace-mangled source, and dataset-extraction residue was removed
        from the final line.  Verify the nesting inside :meth:`history` and
        :meth:`seed` against the upstream rtc-tools sources.
    """

    # Folders in which the referenced Modelica libraries are found
    modelica_library_folders = []

    def __init__(self, **kwargs):
        # Check arguments
        assert ('model_folder' in kwargs)

        # Log pymoca version
        logger.debug("Using pymoca {}.".format(pymoca.__version__))

        # Transfer model from the Modelica .mo file to CasADi using pymoca
        if 'model_name' in kwargs:
            model_name = kwargs['model_name']
        else:
            if hasattr(self, 'model_name'):
                model_name = self.model_name
            else:
                model_name = self.__class__.__name__

        self.__pymoca_model = pymoca.backends.casadi.api.transfer_model(
            kwargs['model_folder'], model_name, self.compiler_options())

        # Extract the CasADi MX variables used in the model
        self.__mx = {}
        self.__mx['time'] = [self.__pymoca_model.time]
        self.__mx['states'] = [v.symbol for v in self.__pymoca_model.states]
        self.__mx['derivatives'] = [v.symbol for v in self.__pymoca_model.der_states]
        self.__mx['algebraics'] = [v.symbol for v in self.__pymoca_model.alg_states]
        self.__mx['parameters'] = [v.symbol for v in self.__pymoca_model.parameters]
        self.__mx['string_parameters'] = [
            v.name for v in (*self.__pymoca_model.string_parameters,
                             *self.__pymoca_model.string_constants)]
        self.__mx['control_inputs'] = []
        self.__mx['constant_inputs'] = []
        self.__mx['lookup_tables'] = []

        # Merge with user-specified delayed feedback
        for v in self.__pymoca_model.inputs:
            if v.symbol.name() in self.__pymoca_model.delay_states:
                # Delayed feedback variables are local to each ensemble, and
                # therefore belong to the collection of algebraic variables,
                # rather than to the control inputs.
                self.__mx['algebraics'].append(v.symbol)
            else:
                if v.symbol.name() in kwargs.get('lookup_tables', []):
                    self.__mx['lookup_tables'].append(v.symbol)
                elif v.fixed:
                    self.__mx['constant_inputs'].append(v.symbol)
                else:
                    self.__mx['control_inputs'].append(v.symbol)

        # Initialize nominals and types
        # These are not in @cached dictionary properties for backwards compatibility.
        self.__python_types = AliasDict(self.alias_relation)
        for v in itertools.chain(
                self.__pymoca_model.states, self.__pymoca_model.alg_states, self.__pymoca_model.inputs):
            self.__python_types[v.symbol.name()] = v.python_type

        # Initialize dae, initial residuals, as well as delay arguments
        # These are not in @cached dictionary properties so that we need to create the list
        # of function arguments only once.
        variable_lists = ['states', 'der_states', 'alg_states', 'inputs', 'constants', 'parameters']
        function_arguments = [self.__pymoca_model.time] + [
            ca.veccat(*[v.symbol for v in getattr(self.__pymoca_model, variable_list)])
            for variable_list in variable_lists]

        self.__dae_residual = self.__pymoca_model.dae_residual_function(*function_arguments)
        if self.__dae_residual is None:
            self.__dae_residual = ca.MX()

        self.__initial_residual = self.__pymoca_model.initial_residual_function(*function_arguments)
        if self.__initial_residual is None:
            self.__initial_residual = ca.MX()

        # Log variables in debug mode
        if logger.getEffectiveLevel() == logging.DEBUG:
            logger.debug("ModelicaMixin: Found states {}".format(
                ', '.join([var.name() for var in self.__mx['states']])))
            logger.debug("ModelicaMixin: Found derivatives {}".format(
                ', '.join([var.name() for var in self.__mx['derivatives']])))
            logger.debug("ModelicaMixin: Found algebraics {}".format(
                ', '.join([var.name() for var in self.__mx['algebraics']])))
            logger.debug("ModelicaMixin: Found control inputs {}".format(
                ', '.join([var.name() for var in self.__mx['control_inputs']])))
            logger.debug("ModelicaMixin: Found constant inputs {}".format(
                ', '.join([var.name() for var in self.__mx['constant_inputs']])))
            logger.debug("ModelicaMixin: Found parameters {}".format(
                ', '.join([var.name() for var in self.__mx['parameters']])))

        # Call parent class first for default behaviour.
        super().__init__(**kwargs)

    @cached
    def compiler_options(self) -> Dict[str, Union[str, bool]]:
        """
        Subclasses can configure the `pymoca <http://github.com/pymoca/pymoca>`_ compiler options here.

        :returns: A dictionary of pymoca compiler options. See the pymoca documentation for details.
        """
        # Default options
        compiler_options = {}

        # Expand vector states to multiple scalar component states.
        compiler_options['expand_vectors'] = True

        # Where imported model libraries are located.
        library_folders = self.modelica_library_folders.copy()
        for ep in pkg_resources.iter_entry_points(group='rtctools.libraries.modelica'):
            if ep.name == "library_folder":
                library_folders.append(
                    pkg_resources.resource_filename(ep.module_name, ep.attrs[0]))
        compiler_options['library_folders'] = library_folders

        # Eliminate equations of the type 'var = const'.
        compiler_options['eliminate_constant_assignments'] = True

        # Eliminate constant symbols from model, replacing them with the values
        # specified in the model.
        compiler_options['replace_constant_values'] = True

        # Replace any constant expressions into the model.
        compiler_options['replace_constant_expressions'] = True

        # Replace any parameter expressions into the model.
        compiler_options['replace_parameter_expressions'] = True

        # Eliminate variables starting with underscores.
        compiler_options['eliminable_variable_expression'] = r'(.*[.]|^)_\w+(\[[\d,]+\])?\Z'

        # Pymoca currently requires `expand_mx` to be set for
        # `eliminable_variable_expression` to work.
        compiler_options['expand_mx'] = True

        # Automatically detect and eliminate alias variables.
        compiler_options['detect_aliases'] = True

        # Disallow aliasing to derivative states
        compiler_options['allow_derivative_aliases'] = False

        # Cache the model on disk
        compiler_options['cache'] = True

        # Done
        return compiler_options

    def delayed_feedback(self):
        """Extend parent delayed feedback with the model's delay states."""
        delayed_feedback = super().delayed_feedback()
        # Create delayed feedback
        for delay_state, delay_argument in zip(
                self.__pymoca_model.delay_states, self.__pymoca_model.delay_arguments):
            delayed_feedback.append(
                (delay_argument.expr, delay_state, delay_argument.duration))
        return delayed_feedback

    @property
    def dae_residual(self):
        # DAE residual expression extracted from the pymoca model.
        return self.__dae_residual

    @property
    def dae_variables(self):
        # Dictionary of CasADi MX symbols, grouped by variable kind.
        return self.__mx

    @property
    @cached
    def output_variables(self):
        # Model outputs, extended with the control inputs.
        output_variables = [ca.MX.sym(variable)
                            for variable in self.__pymoca_model.outputs]
        output_variables.extend(self.__mx['control_inputs'])
        return output_variables

    @cached
    def parameters(self, ensemble_member):
        """Parameter values, extended with those declared in the pymoca model."""
        # Call parent class first for default values.
        parameters = super().parameters(ensemble_member)

        # Return parameter values from pymoca model
        parameters.update({v.symbol.name(): v.value for v in self.__pymoca_model.parameters})

        # Done
        return parameters

    @cached
    def string_parameters(self, ensemble_member):
        """String parameter values, extended with those from the pymoca model."""
        # Call parent class first for default values.
        parameters = super().string_parameters(ensemble_member)

        # Return parameter values from pymoca model
        parameters.update({v.name: v.value for v in self.__pymoca_model.string_parameters})
        parameters.update({v.name: v.value for v in self.__pymoca_model.string_constants})

        # Done
        return parameters

    @cached
    def history(self, ensemble_member):
        """
        Initial history: single-sample time series built from the ``start``
        attributes of states that are marked ``fixed``.
        """
        history = super().history(ensemble_member)

        initial_time = np.array([self.initial_time])

        # Parameter values
        parameters = self.parameters(ensemble_member)
        parameter_values = [parameters.get(param.name(), param) for param in self.__mx['parameters']]

        # Initial conditions obtained from start attributes.
        for v in self.__pymoca_model.states:
            if v.fixed:
                sym_name = v.symbol.name()
                start = v.start
                if isinstance(start, ca.MX):
                    # If start contains symbolics, try substituting parameter values
                    if isinstance(start, ca.MX) and not start.is_constant():
                        [start] = substitute_in_external(
                            [start], self.__mx['parameters'], parameter_values)
                    if not start.is_constant() or np.isnan(float(start)):
                        raise Exception(
                            'ModelicaMixin: Could not resolve initial value for {}'.format(sym_name))
                start = v.python_type(start)
                history[sym_name] = Timeseries(initial_time, start)
                if logger.getEffectiveLevel() == logging.DEBUG:
                    logger.debug("ModelicaMixin: Initial state variable {} = {}".format(sym_name, start))

        return history

    @property
    def initial_residual(self):
        # Initial residual expression extracted from the pymoca model.
        return self.__initial_residual

    @cached
    def bounds(self):
        """Bounds, intersected with the min/max attributes from the model."""
        # Call parent class first for default values.
        bounds = super().bounds()

        # Parameter values
        parameters = self.parameters(0)
        parameter_values = [parameters.get(param.name(), param) for param in self.__mx['parameters']]

        # Load additional bounds from model
        for v in itertools.chain(
                self.__pymoca_model.states, self.__pymoca_model.alg_states, self.__pymoca_model.inputs):
            sym_name = v.symbol.name()

            try:
                (m, M) = bounds[sym_name]
            except KeyError:
                # Booleans default to [0, 1], everything else is unbounded.
                if self.__python_types.get(sym_name, float) == bool:
                    (m, M) = (0, 1)
                else:
                    (m, M) = (-np.inf, np.inf)

            m_ = v.min
            if isinstance(m_, ca.MX) and not m_.is_constant():
                [m_] = substitute_in_external([m_], self.__mx['parameters'], parameter_values)
                if not m_.is_constant() or np.isnan(float(m_)):
                    raise Exception('Could not resolve lower bound for variable {}'.format(sym_name))
            m_ = float(m_)

            M_ = v.max
            if isinstance(M_, ca.MX) and not M_.is_constant():
                [M_] = substitute_in_external([M_], self.__mx['parameters'], parameter_values)
                if not M_.is_constant() or np.isnan(float(M_)):
                    raise Exception('Could not resolve upper bound for variable {}'.format(sym_name))
            M_ = float(M_)

            # We take the intersection of all provided bounds
            m = max(m, m_)
            M = min(M, M_)

            bounds[sym_name] = (m, M)

        return bounds

    @cached
    def seed(self, ensemble_member):
        """Seed values, extended with the non-zero ``start`` attributes from the model."""
        # Call parent class first for default values.
        seed = super().seed(ensemble_member)

        # Parameter values
        parameters = self.parameters(ensemble_member)
        parameter_values = [parameters.get(param.name(), param) for param in self.__mx['parameters']]

        # Load seeds
        for var in itertools.chain(self.__pymoca_model.states, self.__pymoca_model.alg_states):
            if var.fixed:
                # Values will be set from import timeseries
                continue

            start = var.start
            if isinstance(start, ca.MX) or start != 0.0:
                sym_name = var.symbol.name()

                # If start contains symbolics, try substituting parameter values
                if isinstance(start, ca.MX) and not start.is_constant():
                    [start] = substitute_in_external(
                        [start], self.__mx['parameters'], parameter_values)
                    if not start.is_constant() or np.isnan(float(start)):
                        # Seeding is best-effort: log and skip instead of raising.
                        logger.error('ModelicaMixin: Could not resolve seed value for {}'.format(sym_name))
                        continue

                times = self.times(sym_name)
                start = var.python_type(start)
                s = Timeseries(times, np.full_like(times, start))
                if logger.getEffectiveLevel() == logging.DEBUG:
                    logger.debug("ModelicaMixin: Seeded variable {} = {}".format(sym_name, start))
                seed[sym_name] = s

        return seed

    def variable_is_discrete(self, variable):
        # Any variable whose Modelica type is not Real maps to a non-float
        # Python type and is treated as discrete.
        return self.__python_types.get(variable, float) != float

    @property
    @cached
    def alias_relation(self):
        return self.__pymoca_model.alias_relation

    @property
    @cached
    def __nominals(self):
        """Nominal magnitudes per variable, resolved from the model (lazy, cached)."""
        # Make the dict
        nominal_dict = AliasDict(self.alias_relation)

        # Grab parameters and their values
        parameters = self.parameters(0)
        parameter_values = [parameters.get(param.name(), param) for param in self.__mx['parameters']]

        # Iterate over nominalizable states
        for v in itertools.chain(
                self.__pymoca_model.states, self.__pymoca_model.alg_states, self.__pymoca_model.inputs):
            sym_name = v.symbol.name()
            nominal = v.nominal

            # If nominal contains parameter symbols, substitute them
            if isinstance(nominal, ca.MX) and not nominal.is_constant():
                [nominal] = substitute_in_external(
                    [nominal], self.__mx['parameters'], parameter_values)
                if not nominal.is_constant() or np.isnan(float(nominal)):
                    logger.error('ModelicaMixin: Could not resolve nominal value for {}'.format(sym_name))
                    continue

            nominal = float(nominal)
            if not np.isnan(nominal):
                # Take absolute value (nominal sign is meaningless- a nominal is a magnitude)
                nominal = abs(nominal)

                # If nominal is 0 or 1, we just use the default (1.0)
                if nominal in (0.0, 1.0):
                    continue

                nominal_dict[sym_name] = nominal
                if logger.getEffectiveLevel() == logging.DEBUG:
                    logger.debug("ModelicaMixin: Set nominal value for variable {} to {}".format(
                        sym_name, nominal))
            else:
                logger.warning("ModelicaMixin: Could not set nominal value for {}".format(sym_name))

        return nominal_dict

    def variable_nominal(self, variable):
        """Nominal value from the model, falling back to the parent default."""
        try:
            return self.__nominals[variable]
        except KeyError:
            # Fixed: dataset-extraction residue that was fused onto this line.
            return super().variable_nominal(variable)
from collections.abc import MutableSet
from typing import Generic, Iterator, Mapping, Tuple, TypeVar
from pymoca.backends.casadi.alias_relation import AliasRelation # noqa: F401
class OrderedSet(MutableSet):
    """
    A mutable set that remembers insertion order.

    Adapted from https://code.activestate.com/recipes/576694/
    with some additional methods: __getstate__, __setstate__, __getitem__.

    Implementation: a circular doubly linked list threaded through ``self.map``.
    Each map entry is a 3-item list ``[key, prev, nxt]``; ``self.end`` is the
    sentinel node that closes the ring.
    """

    def __init__(self, iterable=None):
        self.end = end = []
        end += [None, end, end]  # sentinel node for doubly linked list
        self.map = {}  # key --> [key, prev, next]
        if iterable is not None:
            self |= iterable

    def __len__(self):
        return len(self.map)

    def __contains__(self, key):
        # O(1) membership via the backing dict.
        return key in self.map

    def __getstate__(self):
        """ Avoids max depth RecursionError when using pickle """
        return list(self)

    def __setstate__(self, state):
        """ Tells pickle how to restore instance """
        self.__init__(state)

    def __getitem__(self, index):
        """Positional access in O(n); slices return lists. Negative indices unsupported."""
        if isinstance(index, slice):
            start, stop, stride = index.indices(len(self))
            return [self.__getitem__(i) for i in range(start, stop, stride)]
        else:
            end = self.end
            curr = end[2]
            i = 0
            while curr is not end:
                if i == index:
                    return curr[0]
                curr = curr[2]
                i += 1
            raise IndexError('set index {} out of range with length {}'.format(index, len(self)))

    def add(self, key):
        """Append ``key`` at the end of the order, if not already present."""
        if key not in self.map:
            end = self.end
            curr = end[1]
            curr[2] = end[1] = self.map[key] = [key, curr, end]

    def discard(self, key):
        """Remove ``key`` if present by unlinking its node (O(1))."""
        if key in self.map:
            # Renamed 'next' -> 'nxt' to avoid shadowing the builtin.
            key, prev, nxt = self.map.pop(key)
            prev[2] = nxt
            nxt[1] = prev

    def __iter__(self):
        end = self.end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]

    def __reversed__(self):
        end = self.end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]

    def pop(self, last=True):
        """Remove and return the last (or first) element; KeyError when empty."""
        if not self:
            raise KeyError('set is empty')
        key = self.end[1][0] if last else self.end[2][0]
        self.discard(key)
        return key

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self))

    def __eq__(self, other):
        # Order matters only when comparing against another OrderedSet.
        if isinstance(other, OrderedSet):
            return len(self) == len(other) and list(self) == list(other)
        return set(self) == set(other)
    # End snippet
KT = TypeVar('KT')
VT = TypeVar('VT')


class AliasDict(Generic[KT, VT]):
    """
    A dict-like mapping in which keys that are aliases of each other share a
    single entry, stored under the alias relation's canonical key.

    When ``signed_values`` is True, looking up an alias with the opposite sign
    negates the stored value on the way in and out.  Tuples of length two are
    treated as (lower, upper) bounds and are negated *and swapped*; lists are
    negated element-wise.
    """

    def __init__(self, relation, other=None, signed_values=True):
        self.__relation = relation
        self.__d = {}
        self.__signed_values = signed_values
        if other:
            self.update(other)

    def __canonical_signed(self, key: KT):
        # Resolve key to its canonical alias; force sign to +1 when
        # sign handling is disabled.
        var, sign = self.__relation.canonical_signed(key)
        if self.__signed_values:
            return var, sign
        else:
            return var, 1

    def __setitem__(self, key: KT, val: VT):
        var, sign = self.__canonical_signed(key)
        if isinstance(val, tuple):
            assert len(val) == 2
            if sign < 0:
                # Negating an interval flips and swaps its bounds.
                self.__d[var] = (-val[1], -val[0])
            else:
                self.__d[var] = val
        elif isinstance(val, list) and sign < 0:
            self.__d[var] = [-x for x in val]
        else:
            self.__d[var] = -val if sign < 0 else val

    def __getitem__(self, key: KT):
        var, sign = self.__canonical_signed(key)
        val = self.__d[var]
        if isinstance(val, tuple):
            if sign < 0:
                # Negating an interval flips and swaps its bounds.
                return (-val[1], -val[0])
            else:
                return val
        elif isinstance(val, list) and sign < 0:
            return [-x for x in val]
        else:
            return -val if sign < 0 else val

    def __delitem__(self, key: KT):
        var, sign = self.__canonical_signed(key)
        del self.__d[var]

    def __contains__(self, key: KT):
        var, sign = self.__canonical_signed(key)
        return var in self.__d

    def __len__(self) -> int:
        return len(self.__d)

    def __iter__(self) -> Iterator[KT]:
        # Iterates over canonical keys only.
        return iter(self.__d)

    def update(self, other: Mapping[KT, VT]):
        for key, value in other.items():
            self[key] = value

    def get(self, key: KT, default: VT = None):
        if key in self:
            return self[key]
        else:
            return default

    def setdefault(self, key: KT, default: VT):
        if key in self:
            return self[key]
        else:
            self[key] = default
            return default

    def keys(self) -> Iterator[KT]:
        # NOTE: keys/values/items expose the canonical-key, canonical-sign view.
        return self.__d.keys()

    def values(self) -> Iterator[VT]:
        return self.__d.values()

    def items(self) -> Iterator[Tuple[KT, VT]]:
        return self.__d.items()

    def copy(self):
        # Shallow copy sharing the alias relation.
        copy = AliasDict(self.__relation, None, self.__signed_values)
        copy.__d = self.__d.copy()
        return copy

    def __repr__(self):
        return self.__d.__repr__()
import logging
import casadi as ca
logger = logging.getLogger("rtctools")
# Older CasADi versions lack interp1d; degrade gracefully so the rest of this
# module still imports (interpolate() then falls back to block interpolation).
try:
    from casadi import interp1d
except ImportError:
    logger.warning('interp1d not available in this version of CasADi. Linear interpolation will not work.')
    interp1d = None
def is_affine(e, v):
    """
    Return True if the CasADi expression ``e`` is affine in the symbols ``v``.

    The check builds a function computing the Jacobian of ``e`` w.r.t. ``v``;
    ``e`` is affine iff that Jacobian does not itself depend on ``v``, i.e.
    the function's own Jacobian sparsity is empty.
    """
    try:
        Af = ca.Function('f', [v], [ca.jacobian(e, v)]).expand()
    except RuntimeError as exc:
        # Renamed 'e' -> 'exc': the original clause shadowed the parameter 'e',
        # which Python also unbinds after the handler (latent NameError).
        # Some expressions cannot be expanded to SX; fall back to MX.
        if "'eval_sx' not defined for" in str(exc):
            Af = ca.Function('f', [v], [ca.jacobian(e, v)])
        else:
            raise
    return (Af.sparsity_jac(0, 0).nnz() == 0)
def nullvertcat(*L):
    """
    Like vertcat, but creates an MX with consistent dimensions even if L is empty.
    """
    if L:
        return ca.vertcat(*L)
    # No arguments: return an explicit empty (0x1) dense matrix.
    return ca.DM(0, 1)
def reduce_matvec(e, v):
    """
    Reduces the MX graph e of linear operations on p into a matrix-vector product.

    This reduces the number of nodes required to represent the linear operations.
    """
    # Evaluate the Jacobian of e w.r.t. v once with a dummy (empty) input.
    # NOTE(review): this presumably assumes e is linear in v (cf. is_affine),
    # so the Jacobian is a constant matrix — verify at call sites.
    Af = ca.Function('Af', [ca.MX()], [ca.jacobian(e, v)])
    A = Af(ca.DM())
    # Rebuild e as the single product A*v, reshaped back to e's shape.
    return ca.reshape(ca.mtimes(A, v), e.shape)
def substitute_in_external(expr, symbols, values):
    """
    Substitute ``values`` for ``symbols`` in each expression of ``expr``.

    Returns ``expr`` unchanged when there is nothing to substitute: either no
    symbols were given, or every expression is already numeric (DM).
    """
    if not symbols or all(isinstance(x, ca.DM) for x in expr):
        return expr
    f = ca.Function('f', symbols, expr)
    # Positional flags: always_inline=True, never_inline=False.
    return f.call(values, True, False)
def interpolate(ts, xs, t, equidistant, mode=0):
    """
    Interpolate the samples ``xs`` (given at times ``ts``) onto time(s) ``t``.

    :param ts: Sample times.
    :param xs: Sample values.
    :param t: Time(s) to interpolate to.
    :param equidistant: Whether ``ts`` is equidistant (passed on to interp1d).
    :param mode: 0 = linear, 1 = floor (block-forward), otherwise ceil
                 (block-backward).

    When CasADi's ``interp1d`` is unavailable, falls back to a manual block
    interpolation (linear interpolation is then not supported; see the module
    warning emitted at import time).
    """
    if interp1d is not None:
        if mode == 0:
            mode_str = 'linear'
        elif mode == 1:
            mode_str = 'floor'
        else:
            mode_str = 'ceil'
        return interp1d(ts, xs, t, mode_str, equidistant)
    else:
        if mode == 1:
            xs = xs[:-1]  # block-forward
        else:
            xs = xs[1:]  # block-backward
        t = ca.MX(t)
        if t.size1() > 1:
            # Vectorized evaluation: map a scalar interpolant over all times.
            t_ = ca.MX.sym('t')
            xs_ = ca.MX.sym('xs', xs.size1())
            f = ca.Function(
                'interpolant', [t_, xs_],
                [ca.mtimes(ca.transpose((t_ >= ts[:-1]) * (t_ < ts[1:])), xs_)])
            f = f.map(t.size1(), 'serial')
            return ca.transpose(f(ca.transpose(t), ca.repmat(xs, 1, t.size1())))
        else:
            # Fixed: dataset-extraction residue that was fused onto this line.
            return ca.mtimes(ca.transpose((t >= ts[:-1]) * (t < ts[1:])), xs)
from aiohttp import web

routes = web.RouteTableDef()

from rtcbot import RTCConnection, getRTCBotJS, CVCamera

# Capture video from the local camera and stream it over the WebRTC video track.
camera = CVCamera()
# For this example, we use just one global connection
conn = RTCConnection()
conn.video.putSubscription(camera)

import time
import random
import asyncio
from rtcbot.base import ThreadedSubscriptionProducer
def get_sensor_data():
    """Simulate a slow, blocking sensor read: returns a random reading."""
    time.sleep(0.5)  # Represents an operation that takes half a second to complete
    return random.random()
class MySensor(ThreadedSubscriptionProducer):
    """
    Producer that polls get_sensor_data() on a background thread and forwards
    each reading to the asyncio side (retrieved with ``await mysensor.get()``).
    """

    def _producer(self):
        # Runs on the worker thread managed by ThreadedSubscriptionProducer.
        self._setReady(True)  # Notify that ready to start gathering data
        while not self._shouldClose:  # Keep gathering until close is requested
            time.sleep(1)
            data = get_sensor_data()
            # Send the data to the asyncio thread,
            # so it can be retrieved with await mysensor.get()
            self._put_nowait(data)
        self._setReady(False)  # Notify that sensor is no longer operational


mysensor = MySensor()
async def send_sensor_data():
    """Forward each sensor reading to the browser over the data channel."""
    while True:
        data = await mysensor.get()  # we await the output of MySensor in a loop
        conn.put_nowait(data)


# Start the forwarding loop on the asyncio event loop.
asyncio.ensure_future(send_sensor_data())
# Serve the RTCBot javascript library at /rtcbot.js
@routes.get("/rtcbot.js")
async def rtcbotjs(request):
    """Serve the bundled rtcbot.js client library."""
    return web.Response(content_type="application/javascript", text=getRTCBotJS())
# This sets up the connection
@routes.post("/connect")
async def connect(request):
    """WebRTC handshake: accept the browser's offer, reply with our answer."""
    clientOffer = await request.json()
    serverResponse = await conn.getLocalDescription(clientOffer)
    return web.json_response(serverResponse)
@routes.get("/")
async def index(request):
    """Serve the demo page that displays the video stream and logs messages."""
    # NOTE(review): the HTML below lost its original internal indentation in a
    # whitespace-mangled source; it is served verbatim and remains functional.
    return web.Response(
        content_type="text/html",
        text="""
<html>
<head>
<title>RTCBot: Video</title>
<script src="/rtcbot.js"></script>
</head>
<body style="text-align: center;padding-top: 30px;">
<video autoplay playsinline controls></video> <audio autoplay></audio>
<p>
Open the browser's developer tools to see console messages (CTRL+SHIFT+C)
</p>
<script>
var conn = new rtcbot.RTCConnection();
conn.video.subscribe(function(stream) {
document.querySelector("video").srcObject = stream;
});
conn.subscribe(m => console.log("Received from python:", m));
async function connect() {
let offer = await conn.getLocalDescription();
// POST the information to /connect
let response = await fetch("/connect", {
method: "POST",
cache: "no-cache",
body: JSON.stringify(offer)
});
await conn.setRemoteDescription(await response.json());
console.log("Ready!");
}
connect();
</script>
</body>
</html>
""",
    )
async def cleanup(app=None):
    """Release the WebRTC connection, camera, and sensor thread on shutdown."""
    await conn.close()
    camera.close()
    mysensor.close()
# Tear everything down both when the peer connection closes and on server shutdown.
conn.onClose(cleanup)

app = web.Application()
app.add_routes(routes)
app.on_shutdown.append(cleanup)
# Fixed: dataset-extraction residue that was fused onto this line.
web.run_app(app)
# Extraction residue (dataset-viewer UI text), not part of the source code:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.