edited_code
stringlengths
17
978k
original_code
stringlengths
17
978k
"""Qt dialog for discovering, installing, updating and removing napari plugins.

Contains:
- ``Installer``:       queues and runs ``pip``/``mamba`` subprocesses.
- ``PluginListItem``:  one row widget describing a plugin.
- ``QPluginList``:     list widget holding installed/available plugin rows.
- ``QtPluginDialog``:  the top-level dialog wiring everything together.
"""
import os
import sys
from enum import Enum, auto
from importlib.metadata import PackageNotFoundError, metadata
from pathlib import Path
from tempfile import gettempdir
from typing import Callable, Dict, List, Optional, Sequence, Tuple

from npe2 import PackageMetadata, PluginManager
from qtpy.QtCore import (
    QEvent,
    QObject,
    QPoint,
    QProcess,
    QProcessEnvironment,
    QSize,
    Qt,
    Signal,
    Slot,
)
from qtpy.QtGui import QFont, QMovie
from qtpy.QtWidgets import (
    QCheckBox,
    QDialog,
    QFrame,
    QHBoxLayout,
    QLabel,
    QLineEdit,
    QListWidget,
    QListWidgetItem,
    QPushButton,
    QSizePolicy,
    QSplitter,
    QTextEdit,
    QVBoxLayout,
    QWidget,
)
from superqt import QElidingLabel
from typing_extensions import Literal

import napari.resources

from ...plugins import plugin_manager
from ...plugins.hub import iter_hub_plugin_info
from ...plugins.pypi import iter_napari_plugin_info
from ...plugins.utils import normalized_name
from ...settings import get_settings
from ...utils._appdirs import user_plugin_dir, user_site_packages
from ...utils.misc import (
    parse_version,
    running_as_bundled_app,
    running_as_constructor_app,
)
from ...utils.translations import trans
from ..qt_resources import QColoredSVGIcon
from ..qthreading import create_worker
from ..widgets.qt_message_popup import WarnPopup
from ..widgets.qt_tooltip import QtToolTipLabel

InstallerTypes = Literal['pip', 'mamba']


# TODO: add error icon and handle pip install errors
class Installer(QObject):
    """Runs plugin install/uninstall actions as queued QProcess jobs.

    Signals
    -------
    started : emitted when a queued action begins.
    finished : int — emitted when the queue drains; carries the last
        nonzero exit code seen (0 if every process succeeded).
    """

    started = Signal()
    finished = Signal(int)

    def __init__(
        self,
        output_widget: QTextEdit = None,
        installer: InstallerTypes = "pip",
    ):
        super().__init__()
        # FIFO queue of (package tuple, factory that starts the QProcess)
        self._queue: List[Tuple[Tuple[str, ...], Callable[[], QProcess]]] = []
        # running processes keyed by the package tuple they act on
        self._processes: Dict[Tuple[str, ...], QProcess] = {}
        self._exit_code = 0
        self._conda_env_path = None
        self._installer_type = installer
        # a "conda-meta" dir under sys.prefix means we are in a conda env,
        # so conda/mamba actions know which prefix to target
        if installer != "pip" and (Path(sys.prefix) / "conda-meta").is_dir():
            self._conda_env_path = sys.prefix
        # create install process
        self._output_widget = output_widget
        self.process = None

    def _create_process(
        self,
        installer: InstallerTypes = "pip",
    ):
        """Build (but do not start) a QProcess configured for `installer`."""
        process = QProcess()
        process.setProcessChannelMode(QProcess.MergedChannels)
        process.readyReadStandardOutput.connect(
            lambda process=process: self._on_stdout_ready(process)
        )
        env = QProcessEnvironment.systemEnvironment()
        if installer == "pip":
            process.setProgram(self._sys_executable_or_bundled_python())
            # patch process path so user-dir site-packages are importable
            combined_paths = os.pathsep.join(
                [
                    user_site_packages(),
                    env.systemEnvironment().value("PYTHONPATH"),
                ]
            )
            env.insert("PYTHONPATH", combined_paths)
        else:
            process.setProgram(installer)

        if installer == "mamba":
            from ..._version import version_tuple

            # To avoid napari version changing when installing a plugin, we
            # add a pin to the current napari version, that way we can
            # restrict any changes to the actual napari application.
            # Conda/mamba also pin python by default, so we effectively
            # constrain python and napari versions from changing, when
            # installing plugins inside the constructor bundled application.
            # See: https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-pkgs.html#preventing-packages-from-updating-pinning
            napari_version = ".".join(str(v) for v in version_tuple[:3])
            if env.contains("CONDA_PINNED_PACKAGES"):
                # str delimiter is '&'
                # NOTE: single quotes inside the f-string — double quotes
                # here are a SyntaxError on Python < 3.12
                system_pins = f"&{env.value('CONDA_PINNED_PACKAGES')}"
            else:
                system_pins = ""
            env.insert(
                "CONDA_PINNED_PACKAGES",
                f"napari={napari_version}{system_pins}",
            )
            if os.name == "nt":
                # workaround https://github.com/napari/napari/issues/4247, 4484
                if not env.contains("TEMP"):
                    temp = gettempdir()
                    env.insert("TMP", temp)
                    env.insert("TEMP", temp)
                if not env.contains("USERPROFILE"):
                    env.insert("HOME", os.path.expanduser("~"))
                    env.insert("USERPROFILE", os.path.expanduser("~"))

        process.setProcessEnvironment(env)
        self.set_output_widget(self._output_widget)
        process.finished.connect(
            lambda ec, es: self._on_process_finished(process, ec, es)
        )
        # FIXME connecting lambda to finished signal is bug creating and may
        # end with segfault when garbage collection will consume Installer
        # object before process end.
        return process

    def _sys_executable_or_bundled_python(self):
        """Return the python executable to drive ``pip`` with."""
        # Note: is_bundled_app() returns False even if using a Briefcase bundle...
        # Workaround: see if sys.executable is set to something something napari on Mac
        if sys.executable.endswith("napari") and sys.platform == 'darwin':
            # sys.prefix should be <napari.app>/Contents/Resources/Support/Python/Resources
            python = os.path.join(sys.prefix, "bin", "python3")
            if os.path.isfile(python):
                return python
        return sys.executable

    def set_output_widget(self, output_widget: QTextEdit):
        """Attach the QTextEdit that receives subprocess output (if any)."""
        if output_widget:
            self._output_widget = output_widget

    def _on_process_finished(self, process, exit_code, exit_status):
        """Record the exit code, drop the finished process, continue queue."""
        if exit_code != 0:
            # BUGFIX: original set self._exit_code = 0 here, which reported
            # every failure as success; propagate the real exit code instead.
            self._exit_code = exit_code
        process_to_terminate = []
        for pkg_list, proc in self._processes.items():
            if proc == process:
                process_to_terminate.append(pkg_list)
        for pkg_list in process_to_terminate:
            finished_proc = self._processes.pop(pkg_list)
            finished_proc.terminate()
        self._handle_action()

    def _on_stdout_ready(self, process):
        """Forward merged stdout/stderr of `process` to the output widget."""
        if self._output_widget:
            text = process.readAllStandardOutput().data().decode()
            self._output_widget.append(text)

    def _handle_action(self):
        """Start the next queued job; emit `finished` when queue drains."""
        if self._queue:
            pkg_list, func = self._queue.pop()
            self.started.emit()
            process = func()
            self._processes[pkg_list] = process
        if not self._processes:
            from ...plugins import plugin_manager

            plugin_manager.discover()
            plugin_manager.prune()
            self.finished.emit(self._exit_code)

    def install(
        self,
        pkg_list: Sequence[str],
        installer: Optional[InstallerTypes] = None,
        channels: Sequence[str] = ("conda-forge",),
    ):
        """Queue an install of `pkg_list` and kick the queue."""
        installer = installer or self._installer_type
        self._queue.insert(
            0,
            [
                tuple(pkg_list),
                lambda: self._install(pkg_list, installer, channels),
            ],
        )
        self._handle_action()

    def _install(
        self,
        pkg_list: Sequence[str],
        installer: Optional[InstallerTypes] = None,
        channels: Sequence[str] = ("conda-forge",),
    ):
        """Build, start and return the QProcess performing the install."""
        installer = installer or self._installer_type
        process = self._create_process(installer)
        if installer != "pip":
            cmd = [
                'install',
                '-y',
                '--prefix',
                self._conda_env_path,
            ]
            for channel in channels:
                cmd.extend(["-c", channel])
        else:
            cmd = ['-m', 'pip', 'install', '--upgrade']
            if (
                running_as_bundled_app()
                and sys.platform.startswith('linux')
                and not self._conda_env_path
            ):
                cmd += [
                    '--no-warn-script-location',
                    '--prefix',
                    user_plugin_dir(),
                ]
        process.setArguments(cmd + list(pkg_list))
        if self._output_widget and self._queue:
            self._output_widget.clear()
        process.start()
        return process

    def uninstall(
        self,
        pkg_list: Sequence[str],
        installer: Optional[InstallerTypes] = None,
        channels: Sequence[str] = ("conda-forge",),
    ):
        """Queue an uninstall of `pkg_list` and kick the queue."""
        installer = installer or self._installer_type
        self._queue.insert(
            0,
            [
                tuple(pkg_list),
                lambda: self._uninstall(pkg_list, installer, channels),
            ],
        )
        self._handle_action()

    def _uninstall(
        self,
        pkg_list: Sequence[str],
        installer: Optional[InstallerTypes] = None,
        channels: Sequence[str] = ("conda-forge",),
    ):
        """Build, start and return the QProcess performing the uninstall."""
        installer = installer or self._installer_type
        if installer != "pip":
            args = [
                'remove',
                '-y',
                '--prefix',
                self._conda_env_path,
            ]
            for channel in channels:
                args.extend(["-c", channel])
        else:
            args = ['-m', 'pip', 'uninstall', '-y']
        process = self._create_process(installer)
        process.setArguments(args + list(pkg_list))
        if self._output_widget and self._queue:
            self._output_widget.clear()
        process.start()
        # eagerly unregister from the npe1 plugin manager so the UI updates
        for pkg in pkg_list:
            plugin_manager.unregister(pkg)
        return process

    def cancel(
        self,
        pkg_list: Sequence[str] = None,
    ):
        """Terminate the process for `pkg_list`, or all processes if None."""
        if pkg_list is None:
            for _, process in self._processes.items():
                process.terminate()
            self._processes = {}
        else:
            try:
                process = self._processes.pop(tuple(pkg_list))
                process.terminate()
            except KeyError:
                pass

    @staticmethod
    def _is_installed_with_conda():
        """Check if conda was used to install qt and napari.

        Returns True when a matching ``<pkg>-<version>-*.json`` record for
        napari or qt exists in ``<sys.prefix>/conda-meta``, else False.
        """
        from qtpy import QT_VERSION

        from ..._version import version_tuple

        parts = [str(part) for part in version_tuple[:3]]
        # NOTE: single quotes inside the f-string — double quotes here are a
        # SyntaxError on Python < 3.12
        napari_version_string = f"napari-{'.'.join(parts)}-"
        qt_version_string = f"qt-{QT_VERSION}-"
        conda_meta_path = Path(sys.prefix) / "conda-meta"
        if conda_meta_path.is_dir():
            for file in conda_meta_path.iterdir():
                fname = file.parts[-1]
                if fname.startswith(napari_version_string) and fname.endswith(
                    ".json"
                ):
                    return True
                if fname.startswith(qt_version_string) and fname.endswith(
                    ".json"
                ):
                    return True
        # normalized: always return an explicit bool (the original could
        # fall off the end returning None on some paths)
        return False


class PluginListItem(QFrame):
    """One row in a QPluginList describing a single plugin package."""

    def __init__(
        self,
        package_name: str,
        version: str = '',
        url: str = '',
        summary: str = '',
        author: str = '',
        license: str = "UNKNOWN",
        *,
        plugin_name: str = None,
        parent: QWidget = None,
        enabled: bool = True,
        installed: bool = False,
        npe_version=1,
    ):
        super().__init__(parent)
        self.setup_ui(enabled)
        self.plugin_name.setText(package_name)
        self.package_name.setText(version)
        self.summary.setText(summary)
        self.package_author.setText(author)
        self.cancel_btn.setVisible(False)
        self.help_button.setText(trans._("Website"))
        self.help_button.setObjectName("help_button")
        self._handle_npe2_plugin(npe_version)
        if installed:
            self.enabled_checkbox.show()
            self.action_button.setText(trans._("uninstall"))
            self.action_button.setObjectName("remove_button")
        else:
            self.enabled_checkbox.hide()
            self.action_button.setText(trans._("install"))
            self.action_button.setObjectName("install_button")

    def _handle_npe2_plugin(self, npe_version):
        """Add the npe2/adapted badge for npe2 ('shim' = adapted npe1)."""
        if npe_version == 1:
            return
        opacity = 0.4 if npe_version == 'shim' else 1
        lbl = trans._('npe1 (adapted)') if npe_version == 'shim' else 'npe2'
        npe2_icon = QLabel(self)
        icon = QColoredSVGIcon.from_resources('logo_silhouette')
        npe2_icon.setPixmap(
            icon.colored(color='#33F0FF', opacity=opacity).pixmap(20, 20)
        )
        self.row1.insertWidget(2, QLabel(lbl))
        self.row1.insertWidget(2, npe2_icon)

    def _get_dialog(self) -> QDialog:
        """Walk up the parent chain to the enclosing QDialog."""
        p = self.parent()
        while not isinstance(p, QDialog) and p.parent():
            p = p.parent()
        return p

    def set_busy(self, text: str, update: bool = False):
        """Show busy `text` and the cancel button; hide the action/update
        button depending on whether this is an update."""
        self.item_status.setText(text)
        self.cancel_btn.setVisible(True)
        if not update:
            self.action_button.setVisible(False)
        else:
            self.update_btn.setVisible(False)

    def setup_ui(self, enabled=True):
        """Build the two-row layout (name/buttons, summary/author)."""
        self.v_lay = QVBoxLayout(self)
        self.v_lay.setContentsMargins(-1, 6, -1, 6)
        self.v_lay.setSpacing(0)
        self.row1 = QHBoxLayout()
        self.row1.setSpacing(6)
        self.enabled_checkbox = QCheckBox(self)
        self.enabled_checkbox.setChecked(enabled)
        self.enabled_checkbox.stateChanged.connect(self._on_enabled_checkbox)
        self.enabled_checkbox.setToolTip(trans._("enable/disable"))
        sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(
            self.enabled_checkbox.sizePolicy().hasHeightForWidth()
        )
        self.enabled_checkbox.setSizePolicy(sizePolicy)
        self.enabled_checkbox.setMinimumSize(QSize(20, 0))
        self.enabled_checkbox.setText("")
        self.row1.addWidget(self.enabled_checkbox)
        self.plugin_name = QLabel(self)
        sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(
            self.plugin_name.sizePolicy().hasHeightForWidth()
        )
        self.plugin_name.setSizePolicy(sizePolicy)
        font15 = QFont()
        font15.setPointSize(15)
        self.plugin_name.setFont(font15)
        self.row1.addWidget(self.plugin_name)

        icon = QColoredSVGIcon.from_resources("warning")
        self.warning_tooltip = QtToolTipLabel(self)
        # TODO: This color should come from the theme but the theme needs
        # to provide the right color. Default warning should be orange, not
        # red. Code example:
        # theme_name = get_settings().appearance.theme
        # napari.utils.theme.get_theme(theme_name, as_dict=False).warning.as_hex()
        self.warning_tooltip.setPixmap(
            icon.colored(color="#E3B617").pixmap(15, 15)
        )
        self.warning_tooltip.setVisible(False)
        self.row1.addWidget(self.warning_tooltip)

        self.item_status = QLabel(self)
        self.item_status.setObjectName("small_italic_text")
        self.item_status.setSizePolicy(sizePolicy)
        self.row1.addWidget(self.item_status)
        self.row1.addStretch()

        self.package_name = QLabel(self)
        self.package_name.setAlignment(
            Qt.AlignRight | Qt.AlignTrailing | Qt.AlignVCenter
        )
        self.row1.addWidget(self.package_name)

        self.cancel_btn = QPushButton("cancel", self)
        self.cancel_btn.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.cancel_btn.setObjectName("remove_button")
        self.row1.addWidget(self.cancel_btn)

        self.update_btn = QPushButton(self)
        self.update_btn.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.update_btn.setObjectName("install_button")
        self.row1.addWidget(self.update_btn)
        self.update_btn.setVisible(False)

        self.help_button = QPushButton(self)
        self.action_button = QPushButton(self)
        sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(
            self.action_button.sizePolicy().hasHeightForWidth()
        )
        self.help_button.setSizePolicy(sizePolicy)
        self.action_button.setSizePolicy(sizePolicy)
        self.row1.addWidget(self.help_button)
        self.row1.addWidget(self.action_button)
        self.v_lay.addLayout(self.row1)

        self.row2 = QHBoxLayout()
        self.error_indicator = QPushButton()
        self.error_indicator.setObjectName("warning_icon")
        self.error_indicator.setCursor(Qt.PointingHandCursor)
        self.error_indicator.hide()
        self.row2.addWidget(self.error_indicator)
        self.row2.setContentsMargins(-1, 4, 0, -1)
        self.summary = QElidingLabel(parent=self)
        sizePolicy = QSizePolicy(
            QSizePolicy.MinimumExpanding, QSizePolicy.Preferred
        )
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(
            self.summary.sizePolicy().hasHeightForWidth()
        )
        self.summary.setSizePolicy(sizePolicy)
        self.summary.setObjectName("small_text")
        self.row2.addWidget(self.summary)
        self.package_author = QLabel(self)
        sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(
            self.package_author.sizePolicy().hasHeightForWidth()
        )
        self.package_author.setSizePolicy(sizePolicy)
        self.package_author.setObjectName("small_text")
        self.row2.addWidget(self.package_author)
        self.v_lay.addLayout(self.row2)

    def _on_enabled_checkbox(self, state: int):
        """Called with `state` when checkbox is clicked."""
        enabled = bool(state)
        plugin_name = self.plugin_name.text()
        pm2 = PluginManager.instance()
        if plugin_name in pm2:
            pm2.enable(plugin_name) if state else pm2.disable(plugin_name)
            return
        # fall back to the npe1 plugin manager's block list
        for npe1_name, _, distname in plugin_manager.iter_available():
            if distname and (distname == plugin_name):
                plugin_manager.set_blocked(npe1_name, not enabled)

    def show_warning(self, message: str = ""):
        """Show warning icon and tooltip."""
        self.warning_tooltip.setVisible(bool(message))
        self.warning_tooltip.setToolTip(message)


class QPluginList(QListWidget):
    """Sorted list of PluginListItem rows bound to an Installer."""

    def __init__(self, parent: QWidget, installer: Installer):
        super().__init__(parent)
        self.installer = installer
        self.setSortingEnabled(True)
        self._remove_list = []

    def _count_visible(self) -> int:
        """Return the number of visible items.

        Visible items are the result of the normal `count` method minus
        any hidden items.
        """
        hidden = 0
        count = self.count()
        for i in range(count):
            item = self.item(i)
            hidden += item.isHidden()
        return count - hidden

    @Slot(PackageMetadata)
    def addItem(
        self,
        project_info: PackageMetadata,
        installed=False,
        plugin_name=None,
        enabled=True,
        npe_version=1,
    ):
        """Add one plugin row built from `project_info` (skips duplicates)."""
        pkg_name = project_info.name
        # don't add duplicates
        if self.findItems(pkg_name, Qt.MatchFixedString) and not plugin_name:
            return

        # including summary here for sake of filtering below.
        searchable_text = f"{pkg_name} {project_info.summary}"
        item = QListWidgetItem(searchable_text, self)
        item.version = project_info.version
        super().addItem(item)
        widg = PluginListItem(
            package_name=pkg_name,
            version=project_info.version,
            url=project_info.home_page,
            summary=project_info.summary,
            author=project_info.author,
            license=project_info.license,
            parent=self,
            plugin_name=plugin_name,
            enabled=enabled,
            installed=installed,
            npe_version=npe_version,
        )
        item.widget = widg
        item.npe_version = npe_version
        action_name = 'uninstall' if installed else 'install'
        if project_info.home_page:
            import webbrowser

            widg.help_button.clicked.connect(
                lambda: webbrowser.open(project_info.home_page)
            )
        else:
            widg.help_button.setVisible(False)
        widg.action_button.clicked.connect(
            lambda: self.handle_action(item, pkg_name, action_name)
        )
        widg.update_btn.clicked.connect(
            lambda: self.handle_action(item, pkg_name, "install", update=True)
        )
        widg.cancel_btn.clicked.connect(
            lambda: self.handle_action(item, pkg_name, "cancel")
        )
        # BUGFIX: the original registered the widget twice (setSizeHint +
        # setItemWidget before and after the signal hookups); once suffices.
        item.setSizeHint(widg.sizeHint())
        self.setItemWidget(item, widg)

    def handle_action(self, item, pkg_name, action_name, update=False):
        """Dispatch install/uninstall/cancel for a row and update its UI."""
        widget = item.widget
        # prefix forces in-progress items to sort to the top of the list
        item.setText("0-" + item.text())
        method = getattr(self.installer, action_name)
        self._remove_list.append((pkg_name, item))
        self._warn_dialog = None
        if item.npe_version != 1:
            # show warning pop up dialog
            message = trans._(
                'When installing/uninstalling npe2 plugins, you must restart napari for UI changes to take effect.'
            )
            self._warn_dialog = WarnPopup(
                text=message,
            )
            delta_x = 75
            global_point = widget.action_button.mapToGlobal(
                widget.action_button.rect().topLeft()
            )
            global_point = QPoint(global_point.x() - delta_x, global_point.y())
            self._warn_dialog.move(global_point)

        if action_name == "install":
            if update:
                if hasattr(item, 'latest_version'):
                    pkg_name += f"=={item.latest_version}"
                widget.set_busy(trans._("updating..."), update)
                widget.action_button.setDisabled(True)
            else:
                widget.set_busy(trans._("installing..."), update)
            method([pkg_name])
            if self._warn_dialog:
                self._warn_dialog.exec_()
            self.scrollToTop()
        elif action_name == "uninstall":
            widget.set_busy(trans._("uninstalling..."), update)
            widget.update_btn.setDisabled(True)
            method([pkg_name])
            if self._warn_dialog:
                self._warn_dialog.exec_()
            self.scrollToTop()
        elif action_name == "cancel":
            widget.set_busy(trans._("cancelling..."), update)
            method((pkg_name,))

    @Slot(PackageMetadata, bool)
    def tag_outdated(self, project_info: PackageMetadata, is_available: bool):
        """Show the update button on rows whose installed version is older."""
        if not is_available:
            return
        for item in self.findItems(project_info.name, Qt.MatchStartsWith):
            current = item.version
            latest = project_info.version
            if parse_version(current) >= parse_version(latest):
                continue
            if hasattr(item, 'outdated'):
                # already tagged it
                continue
            item.outdated = True
            item.latest_version = latest
            widg = self.itemWidget(item)
            widg.update_btn.setVisible(True)
            widg.update_btn.setText(
                trans._("update (v{latest})", latest=latest)
            )

    def tag_unavailable(self, project_info: PackageMetadata):
        """Tag list items as unavailable for install with conda-forge.

        This will disable the item and the install button and add a warning
        icon with a hover tooltip.
        """
        for item in self.findItems(project_info.name, Qt.MatchStartsWith):
            widget = self.itemWidget(item)
            widget.show_warning(
                trans._(
                    "Plugin not yet available for installation within the bundle application"
                )
            )
            widget.setObjectName("unavailable")
            widget.style().unpolish(widget)
            widget.style().polish(widget)
            widget.action_button.setEnabled(False)
            widget.warning_tooltip.setVisible(True)

    def filter(self, text: str):
        """Filter items to those containing `text`."""
        if text:
            # PySide has some issues, so we compare using id
            # See: https://bugreports.qt.io/browse/PYSIDE-74
            shown = [id(it) for it in self.findItems(text, Qt.MatchContains)]
            for i in range(self.count()):
                item = self.item(i)
                item.setHidden(id(item) not in shown)
        else:
            for i in range(self.count()):
                item = self.item(i)
                item.setHidden(False)


class RefreshState(Enum):
    """Tracks whether a refresh is running, queued, or idle."""

    REFRESHING = auto()
    OUTDATED = auto()
    DONE = auto()


class QtPluginDialog(QDialog):
    """Main plugin-manager dialog: installed list, available list, status."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.refresh_state = RefreshState.DONE
        self.already_installed = set()
        installer_type = "mamba" if running_as_constructor_app() else "pip"
        self.installer = Installer(installer=installer_type)
        self.setup_ui()
        self.installer.set_output_widget(self.stdout_text)
        self.installer.started.connect(self._on_installer_start)
        self.installer.finished.connect(self._on_installer_done)
        self.refresh()

    def _on_installer_start(self):
        """Disable closing and show progress UI while an action runs."""
        self.cancel_all_btn.setVisible(True)
        self.working_indicator.show()
        self.process_error_indicator.hide()
        self.close_btn.setDisabled(True)

    def _on_installer_done(self, exit_code):
        """Restore UI after the installer queue drains; flag errors."""
        self.working_indicator.hide()
        if exit_code:
            self.process_error_indicator.show()
        self.cancel_all_btn.setVisible(False)
        self.close_btn.setDisabled(False)
        self.refresh()

    def closeEvent(self, event):
        if self.close_btn.isEnabled():
            super().closeEvent(event)
        # NOTE(review): event.ignore() runs unconditionally, even after the
        # accept path above — looks intentional (dialog closes via the Close
        # button's accept()), but confirm the window X button still works.
        event.ignore()

    def refresh(self):
        """Repopulate both lists; coalesces concurrent refresh requests."""
        if self.refresh_state != RefreshState.DONE:
            self.refresh_state = RefreshState.OUTDATED
            return
        self.refresh_state = RefreshState.REFRESHING
        self.installed_list.clear()
        self.available_list.clear()

        # fetch installed
        from npe2 import PluginManager

        from ...plugins import plugin_manager

        plugin_manager.discover()  # since they might not be loaded yet

        self.already_installed = set()

        def _add_to_installed(distname, enabled, npe_version=1):
            norm_name = normalized_name(distname or '')
            if distname:
                try:
                    meta = metadata(distname)
                except PackageNotFoundError:
                    self.refresh_state = RefreshState.OUTDATED
                    # a race condition has occurred and the package is
                    # uninstalled by another thread
                    return
                if len(meta) == 0:
                    # will not add builtins.
                    return
                self.already_installed.add(norm_name)
            else:
                meta = {}
            self.installed_list.addItem(
                PackageMetadata(
                    metadata_version="1.0",
                    name=norm_name,
                    version=meta.get('version', ''),
                    summary=meta.get('summary', ''),
                    home_page=meta.get('url', ''),
                    author=meta.get('author', ''),
                    license=meta.get('license', ''),
                ),
                installed=True,
                enabled=enabled,
                npe_version=npe_version,
            )

        pm2 = PluginManager.instance()
        for manifest in pm2.iter_manifests():
            distname = normalized_name(manifest.name or '')
            if distname in self.already_installed or distname == 'napari':
                continue
            enabled = not pm2.is_disabled(manifest.name)
            # npe1 adaptors get the 'shim' marker; true npe2 plugins get 2
            npev = 'shim' if 'npe1' in type(manifest).__name__.lower() else 2
            _add_to_installed(distname, enabled, npe_version=npev)

        for (
            plugin_name,
            _mod_name,
            distname,
        ) in plugin_manager.iter_available():
            # not showing these in the plugin dialog
            if plugin_name in ('napari_plugin_engine',):
                continue
            if distname in self.already_installed:
                continue
            _add_to_installed(
                distname, not plugin_manager.is_blocked(plugin_name)
            )
        self.installed_label.setText(
            trans._(
                "Installed Plugins ({amount})",
                amount=len(self.already_installed),
            )
        )

        # fetch available plugins
        settings = get_settings()
        use_hub = (
            running_as_bundled_app()
            or running_as_constructor_app()
            or settings.plugins.plugin_api.name == "napari_hub"
        )
        if use_hub:
            conda_forge = running_as_constructor_app()
            self.worker = create_worker(
                iter_hub_plugin_info, conda_forge=conda_forge
            )
        else:
            self.worker = create_worker(iter_napari_plugin_info)

        self.worker.yielded.connect(self._handle_yield)
        self.worker.finished.connect(self.working_indicator.hide)
        self.worker.finished.connect(self._update_count_in_label)
        self.worker.finished.connect(self._end_refresh)
        self.worker.start()

    def setup_ui(self):
        """Build the dialog: splitters, lists, status area, button row."""
        self.resize(1080, 640)
        vlay_1 = QVBoxLayout(self)
        self.h_splitter = QSplitter(self)
        vlay_1.addWidget(self.h_splitter)
        self.h_splitter.setOrientation(Qt.Horizontal)
        self.v_splitter = QSplitter(self.h_splitter)
        self.v_splitter.setOrientation(Qt.Vertical)
        self.v_splitter.setMinimumWidth(500)

        installed = QWidget(self.v_splitter)
        lay = QVBoxLayout(installed)
        lay.setContentsMargins(0, 2, 0, 2)
        self.installed_label = QLabel(trans._("Installed Plugins"))
        self.packages_filter = QLineEdit()
        self.packages_filter.setPlaceholderText(trans._("filter..."))
        self.packages_filter.setMaximumWidth(350)
        self.packages_filter.setClearButtonEnabled(True)
        mid_layout = QVBoxLayout()
        mid_layout.addWidget(self.packages_filter)
        mid_layout.addWidget(self.installed_label)
        lay.addLayout(mid_layout)

        self.installed_list = QPluginList(installed, self.installer)
        self.packages_filter.textChanged.connect(self.installed_list.filter)
        lay.addWidget(self.installed_list)

        uninstalled = QWidget(self.v_splitter)
        lay = QVBoxLayout(uninstalled)
        lay.setContentsMargins(0, 2, 0, 2)
        self.avail_label = QLabel(trans._("Available Plugins"))
        mid_layout = QHBoxLayout()
        mid_layout.addWidget(self.avail_label)
        mid_layout.addStretch()
        lay.addLayout(mid_layout)
        self.available_list = QPluginList(uninstalled, self.installer)
        self.packages_filter.textChanged.connect(self.available_list.filter)
        lay.addWidget(self.available_list)

        self.stdout_text = QTextEdit(self.v_splitter)
        self.stdout_text.setReadOnly(True)
        self.stdout_text.setObjectName("pip_install_status")
        self.stdout_text.hide()

        buttonBox = QHBoxLayout()
        self.working_indicator = QLabel(trans._("loading ..."), self)
        sp = self.working_indicator.sizePolicy()
        sp.setRetainSizeWhenHidden(True)
        self.working_indicator.setSizePolicy(sp)
        self.process_error_indicator = QLabel(self)
        self.process_error_indicator.setObjectName("error_label")
        self.process_error_indicator.hide()
        load_gif = str(Path(napari.resources.__file__).parent / "loading.gif")
        mov = QMovie(load_gif)
        mov.setScaledSize(QSize(18, 18))
        self.working_indicator.setMovie(mov)
        mov.start()

        visibility_direct_entry = not running_as_constructor_app()
        self.direct_entry_edit = QLineEdit(self)
        self.direct_entry_edit.installEventFilter(self)
        self.direct_entry_edit.setPlaceholderText(
            trans._('install by name/url, or drop file...')
        )
        self.direct_entry_edit.setVisible(visibility_direct_entry)
        self.direct_entry_btn = QPushButton(trans._("Install"), self)
        self.direct_entry_btn.setVisible(visibility_direct_entry)
        self.direct_entry_btn.clicked.connect(self._install_packages)

        self.show_status_btn = QPushButton(trans._("Show Status"), self)
        self.show_status_btn.setFixedWidth(100)

        self.cancel_all_btn = QPushButton(trans._("cancel all actions"), self)
        self.cancel_all_btn.setObjectName("remove_button")
        self.cancel_all_btn.setVisible(False)
        self.cancel_all_btn.clicked.connect(lambda: self.installer.cancel())

        self.close_btn = QPushButton(trans._("Close"), self)
        self.close_btn.clicked.connect(self.accept)
        self.close_btn.setObjectName("close_button")
        buttonBox.addWidget(self.show_status_btn)
        buttonBox.addWidget(self.working_indicator)
        buttonBox.addWidget(self.direct_entry_edit)
        buttonBox.addWidget(self.direct_entry_btn)
        if not visibility_direct_entry:
            buttonBox.addStretch()
        buttonBox.addWidget(self.process_error_indicator)
        buttonBox.addSpacing(20)
        buttonBox.addWidget(self.cancel_all_btn)
        buttonBox.addSpacing(20)
        buttonBox.addWidget(self.close_btn)
        buttonBox.setContentsMargins(0, 0, 4, 0)
        vlay_1.addLayout(buttonBox)

        self.show_status_btn.setCheckable(True)
        self.show_status_btn.setChecked(False)
        self.show_status_btn.toggled.connect(self._toggle_status)

        self.v_splitter.setStretchFactor(1, 2)
        self.h_splitter.setStretchFactor(0, 2)
        self.packages_filter.setFocus()

    def _update_count_in_label(self):
        """Show the available-plugin count in the section label."""
        count = self.available_list.count()
        self.avail_label.setText(
            trans._("Available Plugins ({count})", count=count)
        )

    def _end_refresh(self):
        """Finish a refresh; rerun immediately if one was queued meanwhile."""
        refresh_state = self.refresh_state
        self.refresh_state = RefreshState.DONE
        if refresh_state == RefreshState.OUTDATED:
            self.refresh()

    def eventFilter(self, watched, event):
        """Accept drag/drop of files onto the direct-entry field."""
        if event.type() == QEvent.DragEnter:
            # we need to accept this event explicitly to be able
            # to receive QDropEvents!
            event.accept()
        if event.type() == QEvent.Drop:
            md = event.mimeData()
            if md.hasUrls():
                files = [url.toLocalFile() for url in md.urls()]
                self.direct_entry_edit.setText(files[0])
                return True
        return super().eventFilter(watched, event)

    def _toggle_status(self, show):
        """Show or hide the subprocess output pane."""
        if show:
            self.show_status_btn.setText(trans._("Hide Status"))
            self.stdout_text.show()
        else:
            self.show_status_btn.setText(trans._("Show Status"))
            self.stdout_text.hide()

    def _install_packages(self, packages: Sequence[str] = ()):
        """Install `packages`, or whatever is typed in the direct-entry box."""
        if not packages:
            _packages = self.direct_entry_edit.text()
            if os.path.exists(_packages):
                packages = [_packages]
            else:
                packages = _packages.split()
            self.direct_entry_edit.clear()
        if packages:
            self.installer.install(packages)

    def _handle_yield(self, data: Tuple[PackageMetadata, bool]):
        """Route one fetched plugin record to the right list."""
        project_info, is_available = data
        if project_info.name in self.already_installed:
            self.installed_list.tag_outdated(project_info, is_available)
        else:
            self.available_list.addItem(project_info)
            if not is_available:
                self.available_list.tag_unavailable(project_info)
        self.filter()

    def filter(self, text: str = None) -> None:
        """Filter by text or set current text as filter."""
        if text is None:
            text = self.packages_filter.text()
        else:
            self.packages_filter.setText(text)
        self.installed_list.filter(text)
        self.available_list.filter(text)


if __name__ == "__main__":
    from qtpy.QtWidgets import QApplication

    app = QApplication([])
    w = QtPluginDialog()
    w.show()
    app.exec_()
import os import sys from enum import Enum, auto from importlib.metadata import PackageNotFoundError, metadata from pathlib import Path from tempfile import gettempdir from typing import Callable, Dict, List, Optional, Sequence, Tuple from npe2 import PackageMetadata, PluginManager from qtpy.QtCore import ( QEvent, QObject, QPoint, QProcess, QProcessEnvironment, QSize, Qt, Signal, Slot, ) from qtpy.QtGui import QFont, QMovie from qtpy.QtWidgets import ( QCheckBox, QDialog, QFrame, QHBoxLayout, QLabel, QLineEdit, QListWidget, QListWidgetItem, QPushButton, QSizePolicy, QSplitter, QTextEdit, QVBoxLayout, QWidget, ) from superqt import QElidingLabel from typing_extensions import Literal import napari.resources from ...plugins import plugin_manager from ...plugins.hub import iter_hub_plugin_info from ...plugins.pypi import iter_napari_plugin_info from ...plugins.utils import normalized_name from ...settings import get_settings from ...utils._appdirs import user_plugin_dir, user_site_packages from ...utils.misc import ( parse_version, running_as_bundled_app, running_as_constructor_app, ) from ...utils.translations import trans from ..qt_resources import QColoredSVGIcon from ..qthreading import create_worker from ..widgets.qt_message_popup import WarnPopup from ..widgets.qt_tooltip import QtToolTipLabel InstallerTypes = Literal['pip', 'mamba'] # TODO: add error icon and handle pip install errors class Installer(QObject): started = Signal() finished = Signal(int) def __init__( self, output_widget: QTextEdit = None, installer: InstallerTypes = "pip", ): super().__init__() self._queue: List[Tuple[Tuple[str, ...], Callable[[], QProcess]]] = [] self._processes: Dict[Tuple[str, ...], QProcess] = {} self._exit_code = 0 self._conda_env_path = None self._installer_type = installer if installer != "pip" and (Path(sys.prefix) / "conda-meta").is_dir(): self._conda_env_path = sys.prefix # create install process self._output_widget = output_widget self.process = None def 
_create_process( self, installer: InstallerTypes = "pip", ): process = QProcess() process.setProcessChannelMode(QProcess.MergedChannels) process.readyReadStandardOutput.connect( lambda process=process: self._on_stdout_ready(process) ) env = QProcessEnvironment.systemEnvironment() if installer == "pip": process.setProgram(self._sys_executable_or_bundled_python()) # patch process path combined_paths = os.pathsep.join( [ user_site_packages(), env.systemEnvironment().value("PYTHONPATH"), ] ) env.insert("PYTHONPATH", combined_paths) else: process.setProgram(installer) if installer == "mamba": from ..._version import version_tuple # To avoid napari version changing when installing a plugin, we # add a pin to the current napari version, that way we can # restrict any changes to the actual napari application. # Conda/mamba also pin python by default, so we effectively # constrain python and napari versions from changing, when # installing plugins inside the constructor bundled application. # See: https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-pkgs.html#preventing-packages-from-updating-pinning napari_version = ".".join(str(v) for v in version_tuple[:3]) if env.contains("CONDA_PINNED_PACKAGES"): # str delimiter is '&' system_pins = f"&{env.value('CONDA_PINNED_PACKAGES')}" else: system_pins = "" env.insert( "CONDA_PINNED_PACKAGES", f"napari={napari_version}{system_pins}", ) if os.name == "nt": # workaround https://github.com/napari/napari/issues/4247, 4484 if not env.contains("TEMP"): temp = gettempdir() env.insert("TMP", temp) env.insert("TEMP", temp) if not env.contains("USERPROFILE"): env.insert("HOME", os.path.expanduser("~")) env.insert("USERPROFILE", os.path.expanduser("~")) process.setProcessEnvironment(env) self.set_output_widget(self._output_widget) process.finished.connect( lambda ec, es: self._on_process_finished(process, ec, es) ) # FIXME connecting lambda to finished signal is bug creating and may end with segfault when garbage # 
collection will consume Installer object before process end. return process def _sys_executable_or_bundled_python(self): # Note: is_bundled_app() returns False even if using a Briefcase bundle... # Workaround: see if sys.executable is set to something something napari on Mac if sys.executable.endswith("napari") and sys.platform == 'darwin': # sys.prefix should be <napari.app>/Contents/Resources/Support/Python/Resources python = os.path.join(sys.prefix, "bin", "python3") if os.path.isfile(python): return python return sys.executable def set_output_widget(self, output_widget: QTextEdit): if output_widget: self._output_widget = output_widget def _on_process_finished(self, process, exit_code, exit_status): if exit_code != 0: self._exit_code = 0 process_to_terminate = [] for pkg_list, proc in self._processes.items(): if proc == process: process_to_terminate.append(pkg_list) for pkg_list in process_to_terminate: process = self._processes.pop(pkg_list) process.terminate() self._handle_action() def _on_stdout_ready(self, process): if self._output_widget: text = process.readAllStandardOutput().data().decode() self._output_widget.append(text) def _handle_action(self): if self._queue: pkg_list, func = self._queue.pop() self.started.emit() process = func() self._processes[pkg_list] = process if not self._processes: from ...plugins import plugin_manager plugin_manager.discover() plugin_manager.prune() self.finished.emit(self._exit_code) def install( self, pkg_list: Sequence[str], installer: Optional[InstallerTypes] = None, channels: Sequence[str] = ("conda-forge",), ): installer = installer or self._installer_type self._queue.insert( 0, [ tuple(pkg_list), lambda: self._install(pkg_list, installer, channels), ], ) self._handle_action() def _install( self, pkg_list: Sequence[str], installer: Optional[InstallerTypes] = None, channels: Sequence[str] = ("conda-forge",), ): installer = installer or self._installer_type process = self._create_process(installer) if installer != "pip": 
cmd = [ 'install', '-y', '--prefix', self._conda_env_path, ] for channel in channels: cmd.extend(["-c", channel]) else: cmd = ['-m', 'pip', 'install', '--upgrade'] if ( running_as_bundled_app() and sys.platform.startswith('linux') and not self._conda_env_path ): cmd += [ '--no-warn-script-location', '--prefix', user_plugin_dir(), ] process.setArguments(cmd + list(pkg_list)) if self._output_widget and self._queue: self._output_widget.clear() process.start() return process def uninstall( self, pkg_list: Sequence[str], installer: Optional[InstallerTypes] = None, channels: Sequence[str] = ("conda-forge",), ): installer = installer or self._installer_type self._queue.insert( 0, [ tuple(pkg_list), lambda: self._uninstall(pkg_list, installer, channels), ], ) self._handle_action() def _uninstall( self, pkg_list: Sequence[str], installer: Optional[InstallerTypes] = None, channels: Sequence[str] = ("conda-forge",), ): installer = installer or self._installer_type if installer != "pip": args = [ 'remove', '-y', '--prefix', self._conda_env_path, ] for channel in channels: args.extend(["-c", channel]) else: args = ['-m', 'pip', 'uninstall', '-y'] process = self._create_process(installer) process.setArguments(args + list(pkg_list)) if self._output_widget and self._queue: self._output_widget.clear() process.start() for pkg in pkg_list: plugin_manager.unregister(pkg) return process def cancel( self, pkg_list: Sequence[str] = None, ): if pkg_list is None: for _, process in self._processes.items(): process.terminate() self._processes = {} else: try: process = self._processes.pop(tuple(pkg_list)) process.terminate() except KeyError: pass @staticmethod def _is_installed_with_conda(): """ Check if conda was used to install qt and napari. 
""" from qtpy import QT_VERSION from ..._version import version_tuple parts = [str(part) for part in version_tuple[:3]] napari_version_string = f"napari-{'.'.join(parts)}-" qt_version_string = f"qt-{QT_VERSION}-" conda_meta_path = Path(sys.prefix) / "conda-meta" if conda_meta_path.is_dir(): for file in conda_meta_path.iterdir(): fname = file.parts[-1] if fname.startswith(napari_version_string) and fname.endswith( ".json" ): return True elif fname.startswith(qt_version_string) and fname.endswith( ".json" ): return True else: return False class PluginListItem(QFrame): def __init__( self, package_name: str, version: str = '', url: str = '', summary: str = '', author: str = '', license: str = "UNKNOWN", *, plugin_name: str = None, parent: QWidget = None, enabled: bool = True, installed: bool = False, npe_version=1, ): super().__init__(parent) self.setup_ui(enabled) self.plugin_name.setText(package_name) self.package_name.setText(version) self.summary.setText(summary) self.package_author.setText(author) self.cancel_btn.setVisible(False) self.help_button.setText(trans._("Website")) self.help_button.setObjectName("help_button") self._handle_npe2_plugin(npe_version) if installed: self.enabled_checkbox.show() self.action_button.setText(trans._("uninstall")) self.action_button.setObjectName("remove_button") else: self.enabled_checkbox.hide() self.action_button.setText(trans._("install")) self.action_button.setObjectName("install_button") def _handle_npe2_plugin(self, npe_version): if npe_version == 1: return opacity = 0.4 if npe_version == 'shim' else 1 lbl = trans._('npe1 (adapted)') if npe_version == 'shim' else 'npe2' npe2_icon = QLabel(self) icon = QColoredSVGIcon.from_resources('logo_silhouette') npe2_icon.setPixmap( icon.colored(color='#33F0FF', opacity=opacity).pixmap(20, 20) ) self.row1.insertWidget(2, QLabel(lbl)) self.row1.insertWidget(2, npe2_icon) def _get_dialog(self) -> QDialog: p = self.parent() while not isinstance(p, QDialog) and p.parent(): p = p.parent() 
return p def set_busy(self, text: str, update: bool = False): self.item_status.setText(text) self.cancel_btn.setVisible(True) if not update: self.action_button.setVisible(False) else: self.update_btn.setVisible(False) def setup_ui(self, enabled=True): self.v_lay = QVBoxLayout(self) self.v_lay.setContentsMargins(-1, 6, -1, 6) self.v_lay.setSpacing(0) self.row1 = QHBoxLayout() self.row1.setSpacing(6) self.enabled_checkbox = QCheckBox(self) self.enabled_checkbox.setChecked(enabled) self.enabled_checkbox.stateChanged.connect(self._on_enabled_checkbox) self.enabled_checkbox.setToolTip(trans._("enable/disable")) sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.enabled_checkbox.sizePolicy().hasHeightForWidth() ) self.enabled_checkbox.setSizePolicy(sizePolicy) self.enabled_checkbox.setMinimumSize(QSize(20, 0)) self.enabled_checkbox.setText("") self.row1.addWidget(self.enabled_checkbox) self.plugin_name = QLabel(self) sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Minimum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.plugin_name.sizePolicy().hasHeightForWidth() ) self.plugin_name.setSizePolicy(sizePolicy) font15 = QFont() font15.setPointSize(15) self.plugin_name.setFont(font15) self.row1.addWidget(self.plugin_name) icon = QColoredSVGIcon.from_resources("warning") self.warning_tooltip = QtToolTipLabel(self) # TODO: This color should come from the theme but the theme needs # to provide the right color. Default warning should be orange, not # red. 
Code example: # theme_name = get_settings().appearance.theme # napari.utils.theme.get_theme(theme_name, as_dict=False).warning.as_hex() self.warning_tooltip.setPixmap( icon.colored(color="#E3B617").pixmap(15, 15) ) self.warning_tooltip.setVisible(False) self.row1.addWidget(self.warning_tooltip) self.item_status = QLabel(self) self.item_status.setObjectName("small_italic_text") self.item_status.setSizePolicy(sizePolicy) self.row1.addWidget(self.item_status) self.row1.addStretch() self.package_name = QLabel(self) self.package_name.setAlignment( Qt.AlignRight | Qt.AlignTrailing | Qt.AlignVCenter ) self.row1.addWidget(self.package_name) self.cancel_btn = QPushButton("cancel", self) self.cancel_btn.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) self.cancel_btn.setObjectName("remove_button") self.row1.addWidget(self.cancel_btn) self.update_btn = QPushButton(self) self.update_btn.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) self.update_btn.setObjectName("install_button") self.row1.addWidget(self.update_btn) self.update_btn.setVisible(False) self.help_button = QPushButton(self) self.action_button = QPushButton(self) sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.action_button.sizePolicy().hasHeightForWidth() ) self.help_button.setSizePolicy(sizePolicy) self.action_button.setSizePolicy(sizePolicy) self.row1.addWidget(self.help_button) self.row1.addWidget(self.action_button) self.v_lay.addLayout(self.row1) self.row2 = QHBoxLayout() self.error_indicator = QPushButton() self.error_indicator.setObjectName("warning_icon") self.error_indicator.setCursor(Qt.PointingHandCursor) self.error_indicator.hide() self.row2.addWidget(self.error_indicator) self.row2.setContentsMargins(-1, 4, 0, -1) self.summary = QElidingLabel(parent=self) sizePolicy = QSizePolicy( QSizePolicy.MinimumExpanding, QSizePolicy.Preferred ) sizePolicy.setHorizontalStretch(0) 
sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.summary.sizePolicy().hasHeightForWidth() ) self.summary.setSizePolicy(sizePolicy) self.summary.setObjectName("small_text") self.row2.addWidget(self.summary) self.package_author = QLabel(self) sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.package_author.sizePolicy().hasHeightForWidth() ) self.package_author.setSizePolicy(sizePolicy) self.package_author.setObjectName("small_text") self.row2.addWidget(self.package_author) self.v_lay.addLayout(self.row2) def _on_enabled_checkbox(self, state: int): """Called with `state` when checkbox is clicked.""" enabled = bool(state) plugin_name = self.plugin_name.text() pm2 = PluginManager.instance() if plugin_name in pm2: pm2.enable(plugin_name) if state else pm2.disable(plugin_name) return for npe1_name, _, distname in plugin_manager.iter_available(): if distname and (distname == plugin_name): plugin_manager.set_blocked(npe1_name, not enabled) def show_warning(self, message: str = ""): """Show warning icon and tooltip.""" self.warning_tooltip.setVisible(bool(message)) self.warning_tooltip.setToolTip(message) class QPluginList(QListWidget): def __init__(self, parent: QWidget, installer: Installer): super().__init__(parent) self.installer = installer self.setSortingEnabled(True) self._remove_list = [] def _count_visible(self) -> int: """Return the number of visible items. Visible items are the result of the normal `count` method minus any hidden items. 
""" hidden = 0 count = self.count() for i in range(count): item = self.item(i) hidden += item.isHidden() return count - hidden @Slot(PackageMetadata) def addItem( self, project_info: PackageMetadata, installed=False, plugin_name=None, enabled=True, npe_version=1, ): pkg_name = project_info.name # don't add duplicates if self.findItems(pkg_name, Qt.MatchFixedString) and not plugin_name: return # including summary here for sake of filtering below. searchable_text = f"{pkg_name} {project_info.summary}" item = QListWidgetItem(searchable_text, self) item.version = project_info.version super().addItem(item) widg = PluginListItem( package_name=pkg_name, version=project_info.version, url=project_info.home_page, summary=project_info.summary, author=project_info.author, license=project_info.license, parent=self, plugin_name=plugin_name, enabled=enabled, installed=installed, npe_version=npe_version, ) item.widget = widg item.npe_version = npe_version action_name = 'uninstall' if installed else 'install' item.setSizeHint(widg.sizeHint()) self.setItemWidget(item, widg) if project_info.home_page: import webbrowser widg.help_button.clicked.connect( lambda: webbrowser.open(project_info.home_page) ) else: widg.help_button.setVisible(False) widg.action_button.clicked.connect( lambda: self.handle_action(item, pkg_name, action_name) ) widg.update_btn.clicked.connect( lambda: self.handle_action(item, pkg_name, "install", update=True) ) widg.cancel_btn.clicked.connect( lambda: self.handle_action(item, pkg_name, "cancel") ) item.setSizeHint(widg.sizeHint()) self.setItemWidget(item, widg) def handle_action(self, item, pkg_name, action_name, update=False): widget = item.widget item.setText("0-" + item.text()) method = getattr(self.installer, action_name) self._remove_list.append((pkg_name, item)) self._warn_dialog = None if item.npe_version != 1: # show warning pop up dialog message = trans._( 'When installing/uninstalling npe2 plugins, you must restart napari for UI changes to take 
effect.' ) self._warn_dialog = WarnPopup( text=message, ) delta_x = 75 global_point = widget.action_button.mapToGlobal( widget.action_button.rect().topLeft() ) global_point = QPoint(global_point.x() - delta_x, global_point.y()) self._warn_dialog.move(global_point) if action_name == "install": if update: if hasattr(item, 'latest_version'): pkg_name += f"=={item.latest_version}" widget.set_busy(trans._("updating..."), update) widget.action_button.setDisabled(True) else: widget.set_busy(trans._("installing..."), update) method([pkg_name]) if self._warn_dialog: self._warn_dialog.exec_() self.scrollToTop() elif action_name == "uninstall": widget.set_busy(trans._("uninstalling..."), update) widget.update_btn.setDisabled(True) method([pkg_name]) if self._warn_dialog: self._warn_dialog.exec_() self.scrollToTop() elif action_name == "cancel": widget.set_busy(trans._("cancelling..."), update) method((pkg_name,)) @Slot(PackageMetadata, bool) def tag_outdated(self, project_info: PackageMetadata, is_available: bool): if not is_available: return for item in self.findItems(project_info.name, Qt.MatchStartsWith): current = item.version latest = project_info.version if parse_version(current) >= parse_version(latest): continue if hasattr(item, 'outdated'): # already tagged it continue item.outdated = True item.latest_version = latest widg = self.itemWidget(item) widg.update_btn.setVisible(True) widg.update_btn.setText( trans._("update (v{latest})", latest=latest) ) def tag_unavailable(self, project_info: PackageMetadata): """ Tag list items as unavailable for install with conda-forge. This will disable the item and the install button and add a warning icon with a hover tooltip. 
""" for item in self.findItems(project_info.name, Qt.MatchStartsWith): widget = self.itemWidget(item) widget.show_warning( trans._( "Plugin not yet available for installation within the bundle application" ) ) widget.setObjectName("unavailable") widget.style().unpolish(widget) widget.style().polish(widget) widget.action_button.setEnabled(False) widget.warning_tooltip.setVisible(True) def filter(self, text: str): """Filter items to those containing `text`.""" if text: # PySide has some issues, so we compare using id # See: https://bugreports.qt.io/browse/PYSIDE-74 shown = [id(it) for it in self.findItems(text, Qt.MatchContains)] for i in range(self.count()): item = self.item(i) item.setHidden(id(item) not in shown) else: for i in range(self.count()): item = self.item(i) item.setHidden(False) class RefreshState(Enum): REFRESHING = auto() OUTDATED = auto() DONE = auto() class QtPluginDialog(QDialog): def __init__(self, parent=None): super().__init__(parent) self.refresh_state = RefreshState.DONE self.already_installed = set() installer_type = "mamba" if running_as_constructor_app() else "pip" self.installer = Installer(installer=installer_type) self.setup_ui() self.installer.set_output_widget(self.stdout_text) self.installer.started.connect(self._on_installer_start) self.installer.finished.connect(self._on_installer_done) self.refresh() def _on_installer_start(self): self.cancel_all_btn.setVisible(True) self.working_indicator.show() self.process_error_indicator.hide() self.close_btn.setDisabled(True) def _on_installer_done(self, exit_code): self.working_indicator.hide() if exit_code: self.process_error_indicator.show() self.cancel_all_btn.setVisible(False) self.close_btn.setDisabled(False) self.refresh() def closeEvent(self, event): if self.close_btn.isEnabled(): super().closeEvent(event) event.ignore() def refresh(self): if self.refresh_state != RefreshState.DONE: self.refresh_state = RefreshState.OUTDATED return self.refresh_state = RefreshState.REFRESHING 
self.installed_list.clear() self.available_list.clear() # fetch installed from npe2 import PluginManager from ...plugins import plugin_manager plugin_manager.discover() # since they might not be loaded yet self.already_installed = set() def _add_to_installed(distname, enabled, npe_version=1): norm_name = normalized_name(distname or '') if distname: try: meta = metadata(distname) except PackageNotFoundError: self.refresh_state = RefreshState.OUTDATED return # a race condition has occurred and the package is uninstalled by another thread if len(meta) == 0: # will not add builtins. return self.already_installed.add(norm_name) else: meta = {} self.installed_list.addItem( PackageMetadata( metadata_version="1.0", name=norm_name, version=meta.get('version', ''), summary=meta.get('summary', ''), home_page=meta.get('url', ''), author=meta.get('author', ''), license=meta.get('license', ''), ), installed=True, enabled=enabled, npe_version=npe_version, ) pm2 = PluginManager.instance() for manifest in pm2.iter_manifests(): distname = normalized_name(manifest.name or '') if distname in self.already_installed or distname == 'napari': continue enabled = not pm2.is_disabled(manifest.name) # if it's an Npe1 adaptor, call it v1 npev = 'shim' if 'npe1' in type(manifest).__name__.lower() else 2 _add_to_installed(distname, enabled, npe_version=npev) for ( plugin_name, _mod_name, distname, ) in plugin_manager.iter_available(): # not showing these in the plugin dialog if plugin_name in ('napari_plugin_engine',): continue if distname in self.already_installed: continue _add_to_installed( distname, not plugin_manager.is_blocked(plugin_name) ) self.installed_label.setText( trans._( "Installed Plugins ({amount})", amount=len(self.already_installed), ) ) # fetch available plugins settings = get_settings() use_hub = ( running_as_bundled_app() or running_as_constructor_app() or settings.plugins.plugin_api.name == "napari_hub" ) if use_hub: conda_forge = running_as_constructor_app() self.worker = 
create_worker( iter_hub_plugin_info, conda_forge=conda_forge ) else: self.worker = create_worker(iter_napari_plugin_info) self.worker.yielded.connect(self._handle_yield) self.worker.finished.connect(self.working_indicator.hide) self.worker.finished.connect(self._update_count_in_label) self.worker.finished.connect(self._end_refresh) self.worker.start() def setup_ui(self): self.resize(1080, 640) vlay_1 = QVBoxLayout(self) self.h_splitter = QSplitter(self) vlay_1.addWidget(self.h_splitter) self.h_splitter.setOrientation(Qt.Horizontal) self.v_splitter = QSplitter(self.h_splitter) self.v_splitter.setOrientation(Qt.Vertical) self.v_splitter.setMinimumWidth(500) installed = QWidget(self.v_splitter) lay = QVBoxLayout(installed) lay.setContentsMargins(0, 2, 0, 2) self.installed_label = QLabel(trans._("Installed Plugins")) self.packages_filter = QLineEdit() self.packages_filter.setPlaceholderText(trans._("filter...")) self.packages_filter.setMaximumWidth(350) self.packages_filter.setClearButtonEnabled(True) mid_layout = QVBoxLayout() mid_layout.addWidget(self.packages_filter) mid_layout.addWidget(self.installed_label) lay.addLayout(mid_layout) self.installed_list = QPluginList(installed, self.installer) self.packages_filter.textChanged.connect(self.installed_list.filter) lay.addWidget(self.installed_list) uninstalled = QWidget(self.v_splitter) lay = QVBoxLayout(uninstalled) lay.setContentsMargins(0, 2, 0, 2) self.avail_label = QLabel(trans._("Available Plugins")) mid_layout = QHBoxLayout() mid_layout.addWidget(self.avail_label) mid_layout.addStretch() lay.addLayout(mid_layout) self.available_list = QPluginList(uninstalled, self.installer) self.packages_filter.textChanged.connect(self.available_list.filter) lay.addWidget(self.available_list) self.stdout_text = QTextEdit(self.v_splitter) self.stdout_text.setReadOnly(True) self.stdout_text.setObjectName("pip_install_status") self.stdout_text.hide() buttonBox = QHBoxLayout() self.working_indicator = QLabel(trans._("loading 
..."), self) sp = self.working_indicator.sizePolicy() sp.setRetainSizeWhenHidden(True) self.working_indicator.setSizePolicy(sp) self.process_error_indicator = QLabel(self) self.process_error_indicator.setObjectName("error_label") self.process_error_indicator.hide() load_gif = str(Path(napari.resources.__file__).parent / "loading.gif") mov = QMovie(load_gif) mov.setScaledSize(QSize(18, 18)) self.working_indicator.setMovie(mov) mov.start() visibility_direct_entry = not running_as_constructor_app() self.direct_entry_edit = QLineEdit(self) self.direct_entry_edit.installEventFilter(self) self.direct_entry_edit.setPlaceholderText( trans._('install by name/url, or drop file...') ) self.direct_entry_edit.setVisible(visibility_direct_entry) self.direct_entry_btn = QPushButton(trans._("Install"), self) self.direct_entry_btn.setVisible(visibility_direct_entry) self.direct_entry_btn.clicked.connect(self._install_packages) self.show_status_btn = QPushButton(trans._("Show Status"), self) self.show_status_btn.setFixedWidth(100) self.cancel_all_btn = QPushButton(trans._("cancel all actions"), self) self.cancel_all_btn.setObjectName("remove_button") self.cancel_all_btn.setVisible(False) self.cancel_all_btn.clicked.connect(lambda: self.installer.cancel()) self.close_btn = QPushButton(trans._("Close"), self) self.close_btn.clicked.connect(self.accept) self.close_btn.setObjectName("close_button") buttonBox.addWidget(self.show_status_btn) buttonBox.addWidget(self.working_indicator) buttonBox.addWidget(self.direct_entry_edit) buttonBox.addWidget(self.direct_entry_btn) if not visibility_direct_entry: buttonBox.addStretch() buttonBox.addWidget(self.process_error_indicator) buttonBox.addSpacing(20) buttonBox.addWidget(self.cancel_all_btn) buttonBox.addSpacing(20) buttonBox.addWidget(self.close_btn) buttonBox.setContentsMargins(0, 0, 4, 0) vlay_1.addLayout(buttonBox) self.show_status_btn.setCheckable(True) self.show_status_btn.setChecked(False) 
self.show_status_btn.toggled.connect(self._toggle_status) self.v_splitter.setStretchFactor(1, 2) self.h_splitter.setStretchFactor(0, 2) self.packages_filter.setFocus() def _update_count_in_label(self): count = self.available_list.count() self.avail_label.setText( trans._("Available Plugins ({count})", count=count) ) def _end_refresh(self): refresh_state = self.refresh_state self.refresh_state = RefreshState.DONE if refresh_state == RefreshState.OUTDATED: self.refresh() def eventFilter(self, watched, event): if event.type() == QEvent.DragEnter: # we need to accept this event explicitly to be able # to receive QDropEvents! event.accept() if event.type() == QEvent.Drop: md = event.mimeData() if md.hasUrls(): files = [url.toLocalFile() for url in md.urls()] self.direct_entry_edit.setText(files[0]) return True return super().eventFilter(watched, event) def _toggle_status(self, show): if show: self.show_status_btn.setText(trans._("Hide Status")) self.stdout_text.show() else: self.show_status_btn.setText(trans._("Show Status")) self.stdout_text.hide() def _install_packages(self, packages: Sequence[str] = ()): if not packages: _packages = self.direct_entry_edit.text() if os.path.exists(_packages): packages = [_packages] else: packages = _packages.split() self.direct_entry_edit.clear() if packages: self.installer.install(packages) def _handle_yield(self, data: Tuple[PackageMetadata, bool]): project_info, is_available = data if project_info.name in self.already_installed: self.installed_list.tag_outdated(project_info, is_available) else: self.available_list.addItem(project_info) if not is_available: self.available_list.tag_unavailable(project_info) self.filter() def filter(self, text: str = None) -> None: """Filter by text or set current text as filter.""" if text is None: text = self.packages_filter.text() else: self.packages_filter.setText(text) self.installed_list.filter(text) self.available_list.filter(text) if __name__ == "__main__": from qtpy.QtWidgets import 
QApplication app = QApplication([]) w = QtPluginDialog() w.show() app.exec_()
import collections
import sys

from conda.base.context import context
from conda_build import utils
from conda_build.config import get_or_merge_config
from conda_build.variants import find_config_files, parse_config_file
from conda_build import __version__ as cb_version
from rich.console import Console

console = Console()

cb_split_version = tuple(int(x) for x in cb_version.split("."))

# Pick the shell used to run build scripts for the current platform.
if "bsd" in sys.platform:
    shell_path = "/bin/sh"
elif utils.on_win:
    shell_path = "bash"
else:
    shell_path = "/bin/bash"


def get_config(folder, variant=None):
    """Load conda_build_config.yaml files for `folder`.

    Parameters
    ----------
    folder : path-like
        Recipe folder to search for variant config files.
    variant : dict, optional
        Variant overrides merged into the base config.

    Returns
    -------
    tuple(dict, Config)
        The parsed variant config (last file wins) and the merged
        conda-build Config object.
    """
    if not variant:
        variant = {}
    config = get_or_merge_config(None, variant)
    # find_config_files grew a `config` argument in conda-build 3.20.5.
    if cb_split_version >= (3, 20, 5):
        config_files = find_config_files(folder, config)
    else:
        config_files = find_config_files(folder)

    # BUGFIX: nested double quotes inside a double-quoted f-string are a
    # SyntaxError on Python < 3.12 (PEP 701 only relaxes this in 3.12+),
    # so use single quotes for the join separator.
    console.print(f"\nLoading config files: [green]{', '.join(config_files)}\n")
    parsed_cfg = collections.OrderedDict()

    for f in config_files:
        parsed_cfg[f] = parse_config_file(f, config)

    # TODO just using latest config here, should merge!
    if len(config_files):
        cbc = parsed_cfg[config_files[-1]]
    else:
        cbc = {}

    return cbc, config


def normalize_subdir(subdir):
    """Map the "noarch" subdir to the native platform subdir.

    BUGFIX: the original assigned ``subdir = context.subdir`` for
    "noarch" but only returned in the ``else`` branch, so "noarch"
    fell off the end and returned None.  Always return the value.
    """
    if subdir == "noarch":
        subdir = context.subdir
    return subdir
import collections
import sys

from conda.base.context import context
from conda_build import utils
from conda_build.config import get_or_merge_config
from conda_build.variants import find_config_files, parse_config_file
from conda_build import __version__ as cb_version
from rich.console import Console

console = Console()

cb_split_version = tuple(int(x) for x in cb_version.split("."))

# Pick the shell used to run build scripts for the current platform.
if "bsd" in sys.platform:
    shell_path = "/bin/sh"
elif utils.on_win:
    shell_path = "bash"
else:
    shell_path = "/bin/bash"


def get_config(folder, variant=None):
    """Load conda_build_config.yaml files for `folder`.

    Parameters
    ----------
    folder : path-like
        Recipe folder to search for variant config files.
    variant : dict, optional
        Variant overrides merged into the base config.

    Returns
    -------
    tuple(dict, Config)
        The parsed variant config (last file wins) and the merged
        conda-build Config object.
    """
    if not variant:
        variant = {}
    config = get_or_merge_config(None, variant)
    # find_config_files grew a `config` argument in conda-build 3.20.5.
    if cb_split_version >= (3, 20, 5):
        config_files = find_config_files(folder, config)
    else:
        config_files = find_config_files(folder)

    console.print(f"\nLoading config files: [green]{', '.join(config_files)}\n")
    parsed_cfg = collections.OrderedDict()

    for f in config_files:
        parsed_cfg[f] = parse_config_file(f, config)

    # TODO just using latest config here, should merge!
    if len(config_files):
        cbc = parsed_cfg[config_files[-1]]
    else:
        cbc = {}

    return cbc, config


def normalize_subdir(subdir):
    """Map the "noarch" subdir to the native platform subdir.

    BUGFIX: the original assigned ``subdir = context.subdir`` for
    "noarch" but only returned in the ``else`` branch, so "noarch"
    fell off the end and returned None.  Always return the value.
    """
    if subdir == "noarch":
        subdir = context.subdir
    return subdir
import uuid
from textwrap import dedent

from jumpscale.loader import j
from jumpscale.sals.chatflows.chatflows import GedisChatBot, chatflow_step, StopChatFlow
from jumpscale.sals.reservation_chatflow import deployer, solutions, deployment_context, DeploymentFailed

# Lister callable per solution kind, used to enumerate what can be exposed.
kinds = {
    "minio": solutions.list_minio_solutions,
    "kubernetes": solutions.list_kubernetes_solutions,
    "ubuntu": solutions.list_ubuntu_solutions,
    "flist": solutions.list_flist_solutions,
    "gitea": solutions.list_gitea_solutions,
}

# Default service port per kind; 443/80 fallbacks are applied when absent.
ports = {"minio": 9000, "kubernetes": 6443, "gitea": 3000}


class SolutionExpose(GedisChatBot):
    """Chatflow that exposes an existing solution behind a gateway domain."""

    steps = [
        "solution_type",
        "exposed_solution",
        "expose_type",
        "exposed_ports",
        "domain_selection",
        "reservation",
        "success",
    ]
    title = "Solution Expose"

    def _deployment_start(self):
        # Fresh deployment identity/state for this chatflow session.
        self.solution_id = uuid.uuid4().hex
        self.user_form_data = {}
        self.solution_metadata = {}
        self.email = self.user_info()["email"]
        self.username = self.user_info()["username"]
        self.threebot_name = j.data.text.removesuffix(self.username, ".3bot")

    @chatflow_step(title="Solution type")
    def solution_type(self):
        """Ask which kind of solution to expose, then cache its instances."""
        self.md_show_update("Initializing chatflow....")
        self._deployment_start()
        available_solutions = {}
        for kind in list(kinds.keys()):
            # Renamed local: the original bound ``solutions``, shadowing the
            # imported ``solutions`` module inside this method.
            kind_solutions = kinds[kind]()
            if kind_solutions:
                available_solutions.update({kind: kind_solutions})
        if not available_solutions:
            # Plain string: the original used an f-string with no placeholders.
            raise StopChatFlow("You don't have any solutions to expose")
        self.kind = self.single_choice(
            "Please choose the solution type", list(available_solutions.keys()), required=True
        )
        self.sols = {}
        for sol in available_solutions[self.kind]:
            name = sol["Name"]
            self.sols[name] = sol

    @chatflow_step(title="Solution to be exposed")
    def exposed_solution(self):
        """Pick the concrete solution instance and remember its pool."""
        self.solution_name = self.single_choice(
            "Please choose the solution to expose", list(self.sols.keys()), required=True
        )
        self.solution = self.sols[self.solution_name]
        # The pool hosting the workload is stored under a kind-specific key.
        if self.kind == "kubernetes":
            self.pool_id = self.solution["Master Pool"]
        elif self.kind == "minio":
            self.pool_id = self.solution["Primary Pool"]
        else:
            self.pool_id = self.solution["Pool"]

    @chatflow_step(title="Expose Type")
    def expose_type(self):
        """Choose TRC (raw forwarding) or NGINX (reverse proxy) exposure."""
        choices = ["TRC", "NGINX"]
        self.proxy_type = self.single_choice(
            "Select how you want to expose your solution (TRC forwards the traffic as is to the specified HTTP/HTTPS ports while NGINX reverse proxies the HTTP/HTTPS requests to an HTTP port)",
            choices,
            default="TRC",
        )
        if self.proxy_type == "NGINX":
            force_https = self.single_choice("Do you want to force HTTPS?", ["YES", "NO"], default="NO")
            self.force_https = force_https == "YES"

    @chatflow_step(title="Ports")
    def exposed_ports(self):
        """Collect the port(s) to expose and resolve the solution's IP."""
        port = ports.get(self.kind)
        if self.proxy_type == "TRC":
            form = self.new_form()
            tlsport = form.int_ask("Which tls port you want to expose", default=port or 443, required=True, min=1)
            port = form.int_ask("Which port you want to expose", default=port or 80, required=True, min=1)
            form.ask()
            self.port = port.value
            self.tls_port = tlsport.value
        elif self.proxy_type == "NGINX":
            self.port = self.int_ask("Which port you want to expose", default=port or 80, required=True, min=1)
        # The in-network address to forward traffic to is kind-specific.
        if self.kind == "kubernetes":
            self.solution_ip = self.solution["Master IP"]
        elif self.kind == "minio":
            self.solution_ip = self.solution["Primary IPv4"]
        else:
            self.solution_ip = self.solution["IPv4 Address"]

    @chatflow_step(title="Domain")
    def domain_selection(self):
        """Select a managed/delegated domain or configure a custom one."""
        # {"domain": {"gateway": gw, "pool": p}}
        gateways = deployer.list_all_gateways()
        if not gateways:
            raise StopChatFlow("There are no available gateways in the farms bound to your pools")

        # add managed domains
        gateway_id_dict = {}
        pool_id_dict = {}
        messages = {}
        for gw_dict in gateways.values():
            gateway_id_dict[gw_dict["gateway"].node_id] = gw_dict["gateway"]
            pool_id_dict[gw_dict["pool"].pool_id] = gw_dict["pool"]
            for dom in gw_dict["gateway"].managed_domains:
                location_list = [
                    gw_dict["gateway"].location.continent,
                    gw_dict["gateway"].location.country,
                    gw_dict["gateway"].location.city,
                ]
                location = " - ".join([info for info in location_list if info and info != "Unknown"])
                if location:
                    location = f" Location: {location}"
                messages[f"Managed {dom}{location}"] = gw_dict

        # add delegate domains
        delegated_domains = solutions.list_delegated_domain_solutions()
        for dom in delegated_domains:
            if dom["Pool"] not in pool_id_dict:
                pool_id_dict[dom["Pool"]] = gateway_id_dict[dom["Gateway"]]
            gw_dict = {"gateway": gateway_id_dict[dom["Gateway"]], "pool": pool_id_dict[dom["Pool"]]}
            # Fix: the original nested double quotes ({dom["Name"]}) inside a
            # double-quoted f-string — a SyntaxError before Python 3.12.
            messages[f"Delegated {dom['Name']}"] = gw_dict

        domain_ask_list = list(messages.keys())
        # add custom_domain
        domain_ask_list.append("Custom Domain")
        chosen_domain = self.single_choice("Please choose the domain you wish to use", domain_ask_list, required=True)
        if chosen_domain != "Custom Domain":
            self.domain_gateway = messages[chosen_domain]["gateway"]
            self.domain_pool = messages[chosen_domain]["pool"]
            # Keys look like "Managed <domain> ..." / "Delegated <domain>".
            splits = chosen_domain.split()
            self.domain_type = splits[0]
            self.domain = splits[1]
            retry = False
            while True:
                domain = self.string_ask(
                    f"Please specify the sub domain name you wish to bind to. will be (subdomain).{self.domain}",
                    retry=retry,
                    required=True,
                    is_identifier=True,
                )
                domain = j.sals.zos.get().gateway.correct_domain(domain)
                if "." in domain:
                    retry = True
                    self.md_show("You can't nest domains. please click next to try again.")
                else:
                    if j.tools.dnstool.is_free(domain + "." + self.domain):
                        break
                    else:
                        # Same pre-3.12 quote-reuse fix as above; the rendered
                        # message text is unchanged.
                        self.md_show(f"domain {domain}.{self.domain} is not available.")
            self.domain = domain + "." + self.domain
        else:
            self.domain = self.string_ask("Please specify the domain name you wish to bind to:", required=True)
            self.domain = j.sals.zos.get().gateway.correct_domain(self.domain)
            self.domain_gateway, self.domain_pool = deployer.select_gateway(self)
            self.domain_type = "Custom Domain"
            res = """\
            Please create a `CNAME` record in your DNS manager for domain: `{{domain}}` pointing to:
            {% for dns in gateway.dns_nameserver -%}
            - {{dns}}
            {% endfor %}
            """
            res = j.tools.jinja2.render_template(template_text=res, gateway=self.domain_gateway, domain=self.domain)
            self.md_show(dedent(res), md=True)
        self.name_server = self.domain_gateway.dns_nameserver[0]
        self.secret = f"{j.core.identity.me.tid}:{uuid.uuid4().hex}"

    @chatflow_step(title="Reservation", disable_previous=True)
    @deployment_context()
    def reservation(self):
        """Reserve the network node, subdomain, proxy and TCP-router workloads."""
        metadata = {"name": self.domain, "form_info": {"Solution name": self.domain, "chatflow": "exposed"}}
        self.solution_metadata.update(metadata)
        query = {"mru": 1, "cru": 1, "sru": 1}
        self.selected_node = deployer.schedule_container(self.pool_id, **query)
        self.network_name = self.solution["Network"]

        result = deployer.add_network_node(
            self.network_name, self.selected_node, self.pool_id, bot=self, owner=self.solution_metadata.get("owner")
        )
        if result:
            for wid in result["ids"]:
                success = deployer.wait_workload(wid, self, breaking_node_id=self.selected_node.node_id)
                if not success:
                    raise DeploymentFailed(f"Failed to add node to network {wid}", wid=wid)

        self.network_view = deployer.get_network_view(self.network_name)
        self.tcprouter_ip = self.network_view.get_free_ip(self.selected_node)
        if not self.tcprouter_ip:
            raise StopChatFlow(
                f"No available ips one for network {self.network_view.name} node {self.selected_node.node_id}"
            )

        if self.domain_type != "Custom Domain":
            # Managed/delegated domains need an explicit subdomain workload.
            self.dom_id = deployer.create_subdomain(
                pool_id=self.domain_pool.pool_id,
                gateway_id=self.domain_gateway.node_id,
                subdomain=self.domain,
                **self.solution_metadata,
                solution_uuid=self.solution_id,
            )
            success = deployer.wait_workload(self.dom_id, self)
            if not success:
                raise DeploymentFailed(
                    f"Failed to reserve sub-domain workload {self.dom_id}", solution_uuid=self.solution_id
                )

        if self.proxy_type == "TRC":
            self.proxy_id = deployer.create_proxy(
                pool_id=self.domain_pool.pool_id,
                gateway_id=self.domain_gateway.node_id,
                domain_name=self.domain,
                trc_secret=self.secret,
                **self.solution_metadata,
                solution_uuid=self.solution_id,
            )
            success = deployer.wait_workload(self.proxy_id, self)
            if not success:
                raise DeploymentFailed(
                    f"Failed to reserve reverse proxy workload {self.proxy_id}", solution_uuid=self.solution_id
                )

        trc_log_config = j.core.config.get("LOGGING_SINK", {})
        if trc_log_config:
            trc_log_config["channel_name"] = f"{self.threebot_name}-{self.solution_name}-trc".lower()

        if self.proxy_type == "NGINX":
            self.tcprouter_id = deployer.expose_and_create_certificate(
                domain=self.domain,
                email=self.email,
                pool_id=self.pool_id,
                gateway_id=self.domain_gateway.node_id,
                network_name=self.network_name,
                solution_ip=self.solution_ip,
                solution_port=self.port,
                trc_secret=self.secret,
                bot=self,
                enforce_https=self.force_https,
                log_config=trc_log_config,
                **self.solution_metadata,
                solution_uuid=self.solution_id,
            )
        else:
            self.tcprouter_id, _ = deployer.expose_address(
                pool_id=self.pool_id,
                gateway_id=self.domain_gateway.node_id,
                network_name=self.network_name,
                local_ip=self.solution_ip,
                port=self.port,
                tls_port=self.tls_port,
                trc_secret=self.secret,
                bot=self,
                log_config=trc_log_config,
                **self.solution_metadata,
                solution_uuid=self.solution_id,
            )
        success = deployer.wait_workload(self.tcprouter_id, self)
        if not success:
            raise DeploymentFailed(
                f"Failed to reserve TCP Router container workload {self.tcprouter_id}",
                solution_uuid=self.solution_id,
                wid=self.tcprouter_id,
            )

    def _determine_solution_protocol(self, timeout=60):
        """Probe the exposed domain and return "https" or "http".

        Falls back to "https" when nothing answers within *timeout* seconds.
        """

        def _get_protocol():
            # Prefer https; wait_http_test gives each protocol up to 5 seconds.
            prots = ["https", "http"]
            for prot in prots:
                if j.sals.nettools.wait_http_test(f"{prot}://{self.domain}", 5, verify=False):
                    return prot
            return None

        start_time = j.data.time.now()
        while (j.data.time.now() - start_time).seconds < timeout:
            # Fix: probe once per iteration — the original called
            # _get_protocol() twice (test, then again for the return value),
            # doubling the slow, network-bound checks.
            protocol = _get_protocol()
            if protocol is not None:
                return protocol
        return "https"

    @chatflow_step(title="Success", disable_previous=True, final_step=True)
    def success(self):
        """Show the final access URL for the exposed solution."""
        protocol = self._determine_solution_protocol()
        message = f"""\
        # Congratulations! Your solution has been exposed successfully:
        <br />\n
        - You can access it via the browser using: <a href="{protocol}://{self.domain}" target="_blank">{protocol}://{self.domain}</a>
        """
        self.md_show(dedent(message), md=True)


chat = SolutionExpose
import uuid
from textwrap import dedent

from jumpscale.loader import j
from jumpscale.sals.chatflows.chatflows import GedisChatBot, chatflow_step, StopChatFlow
from jumpscale.sals.reservation_chatflow import deployer, solutions, deployment_context, DeploymentFailed

# Lister callable per solution kind, used to enumerate what can be exposed.
kinds = {
    "minio": solutions.list_minio_solutions,
    "kubernetes": solutions.list_kubernetes_solutions,
    "ubuntu": solutions.list_ubuntu_solutions,
    "flist": solutions.list_flist_solutions,
    "gitea": solutions.list_gitea_solutions,
}

# Default service port per kind; 443/80 fallbacks are applied when absent.
ports = {"minio": 9000, "kubernetes": 6443, "gitea": 3000}


class SolutionExpose(GedisChatBot):
    """Chatflow that exposes an existing solution behind a gateway domain."""

    steps = [
        "solution_type",
        "exposed_solution",
        "expose_type",
        "exposed_ports",
        "domain_selection",
        "reservation",
        "success",
    ]
    title = "Solution Expose"

    def _deployment_start(self):
        # Fresh deployment identity/state for this chatflow session.
        self.solution_id = uuid.uuid4().hex
        self.user_form_data = {}
        self.solution_metadata = {}
        self.email = self.user_info()["email"]
        self.username = self.user_info()["username"]
        self.threebot_name = j.data.text.removesuffix(self.username, ".3bot")

    @chatflow_step(title="Solution type")
    def solution_type(self):
        """Ask which kind of solution to expose, then cache its instances."""
        self.md_show_update("Initializing chatflow....")
        self._deployment_start()
        available_solutions = {}
        for kind in list(kinds.keys()):
            # Renamed local: the original bound ``solutions``, shadowing the
            # imported ``solutions`` module inside this method.
            kind_solutions = kinds[kind]()
            if kind_solutions:
                available_solutions.update({kind: kind_solutions})
        if not available_solutions:
            # Plain string: the original used an f-string with no placeholders.
            raise StopChatFlow("You don't have any solutions to expose")
        self.kind = self.single_choice(
            "Please choose the solution type", list(available_solutions.keys()), required=True
        )
        self.sols = {}
        for sol in available_solutions[self.kind]:
            name = sol["Name"]
            self.sols[name] = sol

    @chatflow_step(title="Solution to be exposed")
    def exposed_solution(self):
        """Pick the concrete solution instance and remember its pool."""
        self.solution_name = self.single_choice(
            "Please choose the solution to expose", list(self.sols.keys()), required=True
        )
        self.solution = self.sols[self.solution_name]
        # The pool hosting the workload is stored under a kind-specific key.
        if self.kind == "kubernetes":
            self.pool_id = self.solution["Master Pool"]
        elif self.kind == "minio":
            self.pool_id = self.solution["Primary Pool"]
        else:
            self.pool_id = self.solution["Pool"]

    @chatflow_step(title="Expose Type")
    def expose_type(self):
        """Choose TRC (raw forwarding) or NGINX (reverse proxy) exposure."""
        choices = ["TRC", "NGINX"]
        self.proxy_type = self.single_choice(
            "Select how you want to expose your solution (TRC forwards the traffic as is to the specified HTTP/HTTPS ports while NGINX reverse proxies the HTTP/HTTPS requests to an HTTP port)",
            choices,
            default="TRC",
        )
        if self.proxy_type == "NGINX":
            force_https = self.single_choice("Do you want to force HTTPS?", ["YES", "NO"], default="NO")
            self.force_https = force_https == "YES"

    @chatflow_step(title="Ports")
    def exposed_ports(self):
        """Collect the port(s) to expose and resolve the solution's IP."""
        port = ports.get(self.kind)
        if self.proxy_type == "TRC":
            form = self.new_form()
            tlsport = form.int_ask("Which tls port you want to expose", default=port or 443, required=True, min=1)
            port = form.int_ask("Which port you want to expose", default=port or 80, required=True, min=1)
            form.ask()
            self.port = port.value
            self.tls_port = tlsport.value
        elif self.proxy_type == "NGINX":
            self.port = self.int_ask("Which port you want to expose", default=port or 80, required=True, min=1)
        # The in-network address to forward traffic to is kind-specific.
        if self.kind == "kubernetes":
            self.solution_ip = self.solution["Master IP"]
        elif self.kind == "minio":
            self.solution_ip = self.solution["Primary IPv4"]
        else:
            self.solution_ip = self.solution["IPv4 Address"]

    @chatflow_step(title="Domain")
    def domain_selection(self):
        """Select a managed/delegated domain or configure a custom one."""
        # {"domain": {"gateway": gw, "pool": p}}
        gateways = deployer.list_all_gateways()
        if not gateways:
            raise StopChatFlow("There are no available gateways in the farms bound to your pools")

        # add managed domains
        gateway_id_dict = {}
        pool_id_dict = {}
        messages = {}
        for gw_dict in gateways.values():
            gateway_id_dict[gw_dict["gateway"].node_id] = gw_dict["gateway"]
            pool_id_dict[gw_dict["pool"].pool_id] = gw_dict["pool"]
            for dom in gw_dict["gateway"].managed_domains:
                location_list = [
                    gw_dict["gateway"].location.continent,
                    gw_dict["gateway"].location.country,
                    gw_dict["gateway"].location.city,
                ]
                location = " - ".join([info for info in location_list if info and info != "Unknown"])
                if location:
                    location = f" Location: {location}"
                messages[f"Managed {dom}{location}"] = gw_dict

        # add delegate domains
        delegated_domains = solutions.list_delegated_domain_solutions()
        for dom in delegated_domains:
            if dom["Pool"] not in pool_id_dict:
                pool_id_dict[dom["Pool"]] = gateway_id_dict[dom["Gateway"]]
            gw_dict = {"gateway": gateway_id_dict[dom["Gateway"]], "pool": pool_id_dict[dom["Pool"]]}
            messages[f"Delegated {dom['Name']}"] = gw_dict

        domain_ask_list = list(messages.keys())
        # add custom_domain
        domain_ask_list.append("Custom Domain")
        chosen_domain = self.single_choice("Please choose the domain you wish to use", domain_ask_list, required=True)
        if chosen_domain != "Custom Domain":
            self.domain_gateway = messages[chosen_domain]["gateway"]
            self.domain_pool = messages[chosen_domain]["pool"]
            # Keys look like "Managed <domain> ..." / "Delegated <domain>".
            splits = chosen_domain.split()
            self.domain_type = splits[0]
            self.domain = splits[1]
            retry = False
            while True:
                domain = self.string_ask(
                    f"Please specify the sub domain name you wish to bind to. will be (subdomain).{self.domain}",
                    retry=retry,
                    required=True,
                    is_identifier=True,
                )
                domain = j.sals.zos.get().gateway.correct_domain(domain)
                if "." in domain:
                    retry = True
                    self.md_show("You can't nest domains. please click next to try again.")
                else:
                    if j.tools.dnstool.is_free(domain + "." + self.domain):
                        break
                    else:
                        self.md_show(f"domain {domain + '.' + self.domain} is not available.")
            self.domain = domain + "." + self.domain
        else:
            self.domain = self.string_ask("Please specify the domain name you wish to bind to:", required=True)
            self.domain = j.sals.zos.get().gateway.correct_domain(self.domain)
            self.domain_gateway, self.domain_pool = deployer.select_gateway(self)
            self.domain_type = "Custom Domain"
            res = """\
            Please create a `CNAME` record in your DNS manager for domain: `{{domain}}` pointing to:
            {% for dns in gateway.dns_nameserver -%}
            - {{dns}}
            {% endfor %}
            """
            res = j.tools.jinja2.render_template(template_text=res, gateway=self.domain_gateway, domain=self.domain)
            self.md_show(dedent(res), md=True)
        self.name_server = self.domain_gateway.dns_nameserver[0]
        self.secret = f"{j.core.identity.me.tid}:{uuid.uuid4().hex}"

    @chatflow_step(title="Reservation", disable_previous=True)
    @deployment_context()
    def reservation(self):
        """Reserve the network node, subdomain, proxy and TCP-router workloads."""
        metadata = {"name": self.domain, "form_info": {"Solution name": self.domain, "chatflow": "exposed"}}
        self.solution_metadata.update(metadata)
        query = {"mru": 1, "cru": 1, "sru": 1}
        self.selected_node = deployer.schedule_container(self.pool_id, **query)
        self.network_name = self.solution["Network"]

        result = deployer.add_network_node(
            self.network_name, self.selected_node, self.pool_id, bot=self, owner=self.solution_metadata.get("owner")
        )
        if result:
            for wid in result["ids"]:
                success = deployer.wait_workload(wid, self, breaking_node_id=self.selected_node.node_id)
                if not success:
                    raise DeploymentFailed(f"Failed to add node to network {wid}", wid=wid)

        self.network_view = deployer.get_network_view(self.network_name)
        self.tcprouter_ip = self.network_view.get_free_ip(self.selected_node)
        if not self.tcprouter_ip:
            raise StopChatFlow(
                f"No available ips one for network {self.network_view.name} node {self.selected_node.node_id}"
            )

        if self.domain_type != "Custom Domain":
            # Managed/delegated domains need an explicit subdomain workload.
            self.dom_id = deployer.create_subdomain(
                pool_id=self.domain_pool.pool_id,
                gateway_id=self.domain_gateway.node_id,
                subdomain=self.domain,
                **self.solution_metadata,
                solution_uuid=self.solution_id,
            )
            success = deployer.wait_workload(self.dom_id, self)
            if not success:
                raise DeploymentFailed(
                    f"Failed to reserve sub-domain workload {self.dom_id}", solution_uuid=self.solution_id
                )

        if self.proxy_type == "TRC":
            self.proxy_id = deployer.create_proxy(
                pool_id=self.domain_pool.pool_id,
                gateway_id=self.domain_gateway.node_id,
                domain_name=self.domain,
                trc_secret=self.secret,
                **self.solution_metadata,
                solution_uuid=self.solution_id,
            )
            success = deployer.wait_workload(self.proxy_id, self)
            if not success:
                raise DeploymentFailed(
                    f"Failed to reserve reverse proxy workload {self.proxy_id}", solution_uuid=self.solution_id
                )

        trc_log_config = j.core.config.get("LOGGING_SINK", {})
        if trc_log_config:
            trc_log_config["channel_name"] = f"{self.threebot_name}-{self.solution_name}-trc".lower()

        if self.proxy_type == "NGINX":
            self.tcprouter_id = deployer.expose_and_create_certificate(
                domain=self.domain,
                email=self.email,
                pool_id=self.pool_id,
                gateway_id=self.domain_gateway.node_id,
                network_name=self.network_name,
                solution_ip=self.solution_ip,
                solution_port=self.port,
                trc_secret=self.secret,
                bot=self,
                enforce_https=self.force_https,
                log_config=trc_log_config,
                **self.solution_metadata,
                solution_uuid=self.solution_id,
            )
        else:
            self.tcprouter_id, _ = deployer.expose_address(
                pool_id=self.pool_id,
                gateway_id=self.domain_gateway.node_id,
                network_name=self.network_name,
                local_ip=self.solution_ip,
                port=self.port,
                tls_port=self.tls_port,
                trc_secret=self.secret,
                bot=self,
                log_config=trc_log_config,
                **self.solution_metadata,
                solution_uuid=self.solution_id,
            )
        success = deployer.wait_workload(self.tcprouter_id, self)
        if not success:
            raise DeploymentFailed(
                f"Failed to reserve TCP Router container workload {self.tcprouter_id}",
                solution_uuid=self.solution_id,
                wid=self.tcprouter_id,
            )

    def _determine_solution_protocol(self, timeout=60):
        """Probe the exposed domain and return "https" or "http".

        Falls back to "https" when nothing answers within *timeout* seconds.
        """

        def _get_protocol():
            # Prefer https; wait_http_test gives each protocol up to 5 seconds.
            prots = ["https", "http"]
            for prot in prots:
                if j.sals.nettools.wait_http_test(f"{prot}://{self.domain}", 5, verify=False):
                    return prot
            return None

        start_time = j.data.time.now()
        while (j.data.time.now() - start_time).seconds < timeout:
            # Fix: probe once per iteration — the original called
            # _get_protocol() twice (test, then again for the return value),
            # doubling the slow, network-bound checks.
            protocol = _get_protocol()
            if protocol is not None:
                return protocol
        return "https"

    @chatflow_step(title="Success", disable_previous=True, final_step=True)
    def success(self):
        """Show the final access URL for the exposed solution."""
        protocol = self._determine_solution_protocol()
        message = f"""\
        # Congratulations! Your solution has been exposed successfully:
        <br />\n
        - You can access it via the browser using: <a href="{protocol}://{self.domain}" target="_blank">{protocol}://{self.domain}</a>
        """
        self.md_show(dedent(message), md=True)


chat = SolutionExpose
"""utilities to handle markdown """ import re from d2lbook import common from typing import List, Dict import logging def split_markdown(source: str) -> List[Dict[str, str]]: """Split markdown into a list of text and code cells. A cell has three fields: 1. type: either code or markdown 2. class: code class or tab class 3. source: single string for the source """ cells: List[Dict] = [] in_code = False in_tab = False cur_code_mark = None cur_tag = None cur_src = [] def _add_cell(cur_src: List[str], cells: List[Dict]): if cur_src: src = '\n'.join(cur_src).strip() if in_code: cells.append({'type':'code', 'fence':cur_code_mark, 'class':cur_tag, 'source':src}) else: if not src and not cur_tag: return cells.append({'type':'markdown', 'source':src}) if cur_tag: cells[-1]['class'] = cur_tag for l in source.splitlines(): code = common.md_code_fence.match(l) tab = common.md_mark_pattern.match(l) if code: # code can be nested if in_tab or (in_code and code.groups()[0] != cur_code_mark): cur_src.append(l) else: _add_cell(cur_src, cells) cur_src = [] cur_code_mark, cur_tag = code.groups() in_code ^= True elif tab: begin = tab.groups()[0] == 'begin_tab' end = tab.groups()[0] == 'end_tab' if in_code or (not begin and not end): cur_src.append(l) else: _add_cell(cur_src, cells) cur_src = [] if begin: cur_tag = tab.groups()[1] else: cur_tag = None in_tab = begin else: cur_src.append(l) _add_cell(cur_src, cells) return cells def join_markdown_cells(cells: List[Dict]) -> str: """Join a list of cells into a markdown string""" src = [] for c in cells: cell_src = [] if c['type'] == 'markdown': if 'class' in c: cell_src.append(f':begin_tab:{c['class']}') cell_src.append(c['source']) if 'class' in c: if cell_src[-1].endswith('\n'): cell_src[-1] = cell_src[-1][:-1] cell_src.append(':end_tab:') else: cell_src += [c['fence']+c['class'], c['source'], c['fence']] src.append('\n'.join(cell_src).strip()) return '\n\n'.join(src)+'\n' basic_token = r'[\ \*-\/\\\._\w\d\:/]+' token = 
r'[\|\'\:\;\<\>\^\(\)\{\}\[\]\ \*-\/\\\.,_=\w\d]+' def _is_mark(lines): if isinstance(lines, str): lines = [lines] for l in lines: l = l.strip() if l: m = re.match(rf':{token}:(`{token}`)?', l) if m is None or m.span() != (0, len(l)): return False return True def _list(line, prev_prefix): m = re.match(r' *[-\*\+] *', line) or re.match(r' *[\d]+\. *', line) if m: if prev_prefix is not None and len(prev_prefix.split('__')) == 2: p = int(prev_prefix.split('__')[1]) + 1 else: p = 0 return m[0] + '__' + str(p) if prev_prefix == '': return '' if prev_prefix is not None and len( re.match(r' *', line)[0]) > len(re.match(r' *', prev_prefix)[0]): return prev_prefix return '' def split_text(text: str) -> List[Dict[str, str]]: """Split text into a list of paragraphs 1. type: text, list, image, title, equation, table 1. source: 1. prefix: 1. mark: """ # split into paragraphs lines = text.splitlines() groups = common.group_list(lines, lambda a, _: a.strip()=='') paras = ['\n'.join(item) for empty_line, item in groups if not empty_line] def _fallback(p, type): logging.warn(f'Wrong {type} format:\n'+p) cells.append({'type':'text', 'source':p}) cells = [] for p in paras: lines = p.splitlines() + [''] p += '\n' if p.startswith('#'): # parse title if not _is_mark(lines[1:]): _fallback(p, 'title') else: m = re.match(r'#+ *', lines[0]) cells.append( {'type':'title', 'prefix':m[0], 'source':lines[0][m.span()[1]:], 'mark':'\n'.join(lines[1:])}) elif p.startswith('$$'): # parse equations m = re.findall(r'\$\$', p) if len(m) != 2: _fallback(p, 'equation') else: cells.append( {'type':'equation', 'source':p}) elif p.startswith('!['): # parse images if not lines[0].strip().endswith(')') or not _is_mark(lines[1:]): _fallback(p, 'image') else: cells.append( {'type':'image', 'source':p}) elif p.startswith('|'): # parse table for i, l in enumerate(lines): if not l.startswith('|'): break if not _is_mark(lines[i:]): _fallback(p, 'equation') else: cells.append( {'type':'table', 'source':p}) else: 
groups = common.group_list(lines, _list) for prefix, item in groups: if len(prefix.split('__')) == 2: prefix = prefix.split('__')[0] source = '\n'.join(item)[len(prefix):] if prefix == '': cells.append({'type':'text', 'source':source}) else: cells.append({'type':'list', 'prefix':prefix, 'source':source}) return cells def join_text(cells) -> str: paras = [] for cell in cells: l = cell['source'] if 'prefix' in cell: l = cell['prefix'] + l if 'mark' in cell: l += '\n' + cell['mark'] paras.append(l) return '\n'.join(paras)
"""utilities to handle markdown """ import re from d2lbook import common from typing import List, Dict import logging def split_markdown(source: str) -> List[Dict[str, str]]: """Split markdown into a list of text and code cells. A cell has three fields: 1. type: either code or markdown 2. class: code class or tab class 3. source: single string for the source """ cells: List[Dict] = [] in_code = False in_tab = False cur_code_mark = None cur_tag = None cur_src = [] def _add_cell(cur_src: List[str], cells: List[Dict]): if cur_src: src = '\n'.join(cur_src).strip() if in_code: cells.append({'type':'code', 'fence':cur_code_mark, 'class':cur_tag, 'source':src}) else: if not src and not cur_tag: return cells.append({'type':'markdown', 'source':src}) if cur_tag: cells[-1]['class'] = cur_tag for l in source.splitlines(): code = common.md_code_fence.match(l) tab = common.md_mark_pattern.match(l) if code: # code can be nested if in_tab or (in_code and code.groups()[0] != cur_code_mark): cur_src.append(l) else: _add_cell(cur_src, cells) cur_src = [] cur_code_mark, cur_tag = code.groups() in_code ^= True elif tab: begin = tab.groups()[0] == 'begin_tab' end = tab.groups()[0] == 'end_tab' if in_code or (not begin and not end): cur_src.append(l) else: _add_cell(cur_src, cells) cur_src = [] if begin: cur_tag = tab.groups()[1] else: cur_tag = None in_tab = begin else: cur_src.append(l) _add_cell(cur_src, cells) return cells def join_markdown_cells(cells: List[Dict]) -> str: """Join a list of cells into a markdown string""" src = [] for c in cells: cell_src = [] if c['type'] == 'markdown': if 'class' in c: cell_src.append(f':begin_tab:{c["class"]}') cell_src.append(c['source']) if 'class' in c: if cell_src[-1].endswith('\n'): cell_src[-1] = cell_src[-1][:-1] cell_src.append(':end_tab:') else: cell_src += [c['fence']+c['class'], c['source'], c['fence']] src.append('\n'.join(cell_src).strip()) return '\n\n'.join(src)+'\n' basic_token = r'[\ \*-\/\\\._\w\d\:/]+' token = 
r'[\|\'\:\;\<\>\^\(\)\{\}\[\]\ \*-\/\\\.,_=\w\d]+' def _is_mark(lines): if isinstance(lines, str): lines = [lines] for l in lines: l = l.strip() if l: m = re.match(rf':{token}:(`{token}`)?', l) if m is None or m.span() != (0, len(l)): return False return True def _list(line, prev_prefix): m = re.match(r' *[-\*\+] *', line) or re.match(r' *[\d]+\. *', line) if m: if prev_prefix is not None and len(prev_prefix.split('__')) == 2: p = int(prev_prefix.split('__')[1]) + 1 else: p = 0 return m[0] + '__' + str(p) if prev_prefix == '': return '' if prev_prefix is not None and len( re.match(r' *', line)[0]) > len(re.match(r' *', prev_prefix)[0]): return prev_prefix return '' def split_text(text: str) -> List[Dict[str, str]]: """Split text into a list of paragraphs 1. type: text, list, image, title, equation, table 1. source: 1. prefix: 1. mark: """ # split into paragraphs lines = text.splitlines() groups = common.group_list(lines, lambda a, _: a.strip()=='') paras = ['\n'.join(item) for empty_line, item in groups if not empty_line] def _fallback(p, type): logging.warn(f'Wrong {type} format:\n'+p) cells.append({'type':'text', 'source':p}) cells = [] for p in paras: lines = p.splitlines() + [''] p += '\n' if p.startswith('#'): # parse title if not _is_mark(lines[1:]): _fallback(p, 'title') else: m = re.match(r'#+ *', lines[0]) cells.append( {'type':'title', 'prefix':m[0], 'source':lines[0][m.span()[1]:], 'mark':'\n'.join(lines[1:])}) elif p.startswith('$$'): # parse equations m = re.findall(r'\$\$', p) if len(m) != 2: _fallback(p, 'equation') else: cells.append( {'type':'equation', 'source':p}) elif p.startswith('!['): # parse images if not lines[0].strip().endswith(')') or not _is_mark(lines[1:]): _fallback(p, 'image') else: cells.append( {'type':'image', 'source':p}) elif p.startswith('|'): # parse table for i, l in enumerate(lines): if not l.startswith('|'): break if not _is_mark(lines[i:]): _fallback(p, 'equation') else: cells.append( {'type':'table', 'source':p}) else: 
groups = common.group_list(lines, _list) for prefix, item in groups: if len(prefix.split('__')) == 2: prefix = prefix.split('__')[0] source = '\n'.join(item)[len(prefix):] if prefix == '': cells.append({'type':'text', 'source':source}) else: cells.append({'type':'list', 'prefix':prefix, 'source':source}) return cells def join_text(cells) -> str: paras = [] for cell in cells: l = cell['source'] if 'prefix' in cell: l = cell['prefix'] + l if 'mark' in cell: l += '\n' + cell['mark'] paras.append(l) return '\n'.join(paras)
# encoding: utf-8 """ @author: l1aoxingyu @contact: sherlockliao01@gmail.com """ import os import shutil import time import random import numpy as np import torch import torch.nn.functional as F from torch import nn # Changed by Xinchen Liu from data import get_dataloader_mask from data.datasets.eval_reid import evaluate from data.prefetcher import data_prefetcher_mask from modeling import build_model_selfgcn from modeling.losses import TripletLoss from solver.build import make_lr_scheduler, make_optimizer from utils.meters import AverageMeter def L_Matrix(adj_npy, adj_size): D =np.zeros((adj_size, adj_size)) for i in range(adj_size): tmp = adj_npy[i,:] count = np.sum(tmp==1) if count>0: number = count ** (-1/2) D[i,i] = number x = np.matmul(D,adj_npy) L = np.matmul(x,D) return L coarse_adj_list = [ # 1 2 3 4 5 6 7 8 9 [ 1, 1, 0, 1, 0, 1, 0, 1, 0], #1 [ 1, 1, 1, 1, 0, 1, 0, 0, 0], #2 [ 0, 1, 1, 0, 1, 0, 1, 0, 0], #3 [ 1, 1, 0, 1, 1, 0, 0, 1, 0], #4 [ 0, 0, 1, 1, 1, 0, 0, 0, 1], #5 [ 1, 1, 0, 0, 0, 1, 1, 1, 0], #6 [ 0, 0, 1, 0, 0, 1, 1, 0, 1], #7 [ 1, 0, 0, 1, 0, 1, 0, 1, 1], #8 [ 0, 0, 0, 0, 1, 0, 1, 1, 1] #9 ] coarse_adj_npy = np.array(coarse_adj_list) coarse_adj_npy = L_Matrix(coarse_adj_npy, len(coarse_adj_npy)) class ReidSystem(): def __init__(self, cfg, logger, writer): self.cfg, self.logger, self.writer = cfg, logger, writer # Define dataloader self.tng_dataloader, self.val_dataloader_collection, self.num_classes, self.num_query_len_collection = get_dataloader_mask(cfg) # networks self.use_part_erasing = False self.num_parts = cfg.MODEL.NUM_PARTS self.model = build_model_selfgcn(cfg, self.num_classes) self.adj = torch.from_numpy(coarse_adj_npy).float() # loss function self.ce_loss = nn.CrossEntropyLoss() self.triplet = TripletLoss(cfg.SOLVER.MARGIN) self.mse_loss = nn.MSELoss() # optimizer and scheduler self.opt = make_optimizer(self.cfg, self.model) self.lr_sched = make_lr_scheduler(self.cfg, self.opt) self.loss_weight = [1.0, 1.0, 0.5, 0.5, 0.5, 0.5, 0.4] 
self.logger.info(f"Loss weights: {self.loss_weight}, use_pe: {self.use_part_erasing}, use_bnfeat: {True}") self._construct() def _construct(self): self.global_step = 0 self.current_epoch = 0 self.batch_nb = 0 self.max_epochs = self.cfg.SOLVER.MAX_EPOCHS self.log_interval = self.cfg.SOLVER.LOG_INTERVAL self.eval_period = self.cfg.SOLVER.EVAL_PERIOD self.use_dp = False self.use_ddp = False def loss_fns(self, outputs, labels_global, labels_gcn): loss_dict = {} if 'softmax' in list(self.cfg.SOLVER.LOSSTYPE): loss_dict['ce_g'] = self.ce_loss(outputs[0], labels_global)*self.loss_weight[0] loss_dict['ce_l1'] = self.ce_loss(outputs[2], labels_gcn)*self.loss_weight[2] loss_dict['ce_l2'] = self.ce_loss(outputs[4], labels_gcn)*self.loss_weight[4] if 'triplet' in list(self.cfg.SOLVER.LOSSTYPE): loss_dict['tr_g'] = self.triplet(outputs[1], labels_global)[0]*self.loss_weight[1] loss_dict['tr_l1'] = self.triplet(outputs[3], labels_gcn)[0]*self.loss_weight[3] loss_dict['tr_l2'] = self.triplet(outputs[5], labels_gcn)[0]*self.loss_weight[5] # target_gcn_feat = outputs[6].clone().detach().requires_grad_(False) # loss_dict['mse'] = self.mse_loss(target_gcn_feat, outputs[7])*self.loss_weight[6] loss_dict['mse'] = self.mse_loss(outputs[6], outputs[7])*self.loss_weight[6] return loss_dict def on_train_begin(self): self.best_mAP = -np.inf self.running_loss = AverageMeter() log_save_dir = os.path.join(self.cfg.OUTPUT_DIR, '-'.join(self.cfg.DATASETS.TEST_NAMES), self.cfg.MODEL.VERSION) self.model_save_dir = os.path.join(log_save_dir, 'ckpts') if not os.path.exists(self.model_save_dir): os.makedirs(self.model_save_dir) self.gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',') self.use_dp = (len(self.gpus) > 0) and (self.cfg.MODEL.DIST_BACKEND == 'dp') if self.use_dp: self.model = nn.DataParallel(self.model) self.model = self.model.cuda() self.model.train() self.adj = self.adj.cuda() def on_epoch_begin(self): self.batch_nb = 0 self.current_epoch += 1 self.t0 = time.time() 
self.running_loss.reset() self.tng_prefetcher = data_prefetcher_mask(self.tng_dataloader) def training_step(self, batch): inputs, masks, labels, _ = batch adj_batch = self.adj.repeat(inputs.size(0), 1, 1) inputs_global = inputs inputs_selfgcn = inputs labels_global = labels labels_selfgcn = labels if self.use_part_erasing: # inputs_masked = torch.zeros(inputs.size(), dtype=inputs.dtype) # random part erasing for i in range(inputs.size(0)): input = inputs[i] mask = masks[i] part_list = [] for c in range(1, self.num_parts): part = (mask.long() == c) if part.any(): part_list.append(c) drop_part = random.choice(part_list) mask = (mask.long() != drop_part) if random.uniform(0, 1) > 0.5: inputs_selfgcn[i] = mask.float()*input # inputs_masked[i] = mask.float()*input # inputs_masked = inputs_masked.cuda() # inputs_global = torch.cat([inputs, inputs_masked], dim=0) # labels_global = torch.cat([labels, labels], dim=0) outputs = self.model(inputs_global, inputs_selfgcn, masks, adj_batch) loss_dict = self.loss_fns(outputs, labels_global, labels_selfgcn) total_loss = 0 print_str = f'\r Epoch {self.current_epoch} Iter {self.batch_nb}/{len(self.tng_dataloader)} ' for loss_name, loss_value in loss_dict.items(): total_loss += loss_value print_str += (loss_name+f': {loss_value.item():.3f} ') loss_dict['total_loss'] = total_loss.item() print_str += f'Total loss: {total_loss.item():.3f} ' print(print_str, end=' ') if (self.global_step+1) % self.log_interval == 0: for loss_name, loss_value in loss_dict.items(): self.writer.add_scalar(loss_name, loss_value, self.global_step) self.writer.add_scalar('total_loss', loss_dict['total_loss'], self.global_step) self.running_loss.update(total_loss.item()) self.opt.zero_grad() total_loss.backward() self.opt.step() self.global_step += 1 self.batch_nb += 1 def on_epoch_end(self): elapsed = time.time() - self.t0 mins = int(elapsed) // 60 seconds = int(elapsed - mins * 60) print('') self.logger.info(f'Epoch {self.current_epoch} Total loss: 
{self.running_loss.avg:.3f} ' f'lr: {self.opt.param_groups[0]['lr']:.2e} During {mins:d}min:{seconds:d}s') # update learning rate self.lr_sched.step() def test(self): # convert to eval mode self.model.eval() metric_dict = list() for val_dataset_name, val_dataloader, num_query in zip(self.cfg.DATASETS.TEST_NAMES, self.val_dataloader_collection, self.num_query_len_collection): feats, pids, camids = [], [], [] val_prefetcher = data_prefetcher_mask(val_dataloader) batch = val_prefetcher.next() while batch[0] is not None: img, mask, pid, camid = batch adj_batch = self.adj.repeat(img.size(0), 1, 1) with torch.no_grad(): output = self.model(img, img, mask, adj_batch) # feat = output[1] feat = torch.cat([output[1], output[3]], dim=1) feats.append(feat) pids.extend(pid.cpu().numpy()) camids.extend(np.asarray(camid)) batch = val_prefetcher.next() feats = torch.cat(feats, dim=0) if self.cfg.TEST.NORM: feats = F.normalize(feats, p=2, dim=1) # query qf = feats[:num_query] q_pids = np.asarray(pids[:num_query]) q_camids = np.asarray(camids[:num_query]) # gallery gf = feats[num_query:] g_pids = np.asarray(pids[num_query:]) g_camids = np.asarray(camids[num_query:]) # m, n = qf.shape[0], gf.shape[0] distmat = torch.mm(qf, gf.t()).cpu().numpy() # distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \ # torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t() # distmat.addmm_(1, -2, qf, gf.t()) # distmat = distmat.numpy() cmc, mAP = evaluate(-distmat, q_pids, g_pids, q_camids, g_camids) self.logger.info(f"Test Results on {val_dataset_name} - Epoch: {self.current_epoch}") self.logger.info(f"mAP: {mAP:.1%}") for r in [1, 5, 10]: self.logger.info(f"CMC curve, Rank-{r:<3}:{cmc[r - 1]:.1%}") self.writer.add_scalar('rank1', cmc[0], self.global_step) self.writer.add_scalar('mAP', mAP, self.global_step) metric_dict.append({'rank1': cmc[0], 'mAP': mAP}) # convert to train mode self.model.train() return metric_dict[0] def train(self): self.on_train_begin() # self.test() for 
epoch in range(self.max_epochs): self.on_epoch_begin() batch = self.tng_prefetcher.next() while batch[0] is not None: self.training_step(batch) batch = self.tng_prefetcher.next() self.on_epoch_end() if (epoch+1) % self.eval_period == 0: metric_dict = self.test() if metric_dict['mAP'] > self.best_mAP: is_best = True self.best_mAP = metric_dict['mAP'] else: is_best = False self.save_checkpoints(is_best) torch.cuda.empty_cache() def save_checkpoints(self, is_best): if self.use_dp: state_dict = self.model.module.state_dict() else: state_dict = self.model.state_dict() # TODO: add optimizer state dict and lr scheduler filepath = os.path.join(self.model_save_dir, f'model_epoch{self.current_epoch}.pth') torch.save(state_dict, filepath) if is_best: best_filepath = os.path.join(self.model_save_dir, 'model_best.pth') shutil.copyfile(filepath, best_filepath)
# encoding: utf-8 """ @author: l1aoxingyu @contact: sherlockliao01@gmail.com """ import os import shutil import time import random import numpy as np import torch import torch.nn.functional as F from torch import nn # Changed by Xinchen Liu from data import get_dataloader_mask from data.datasets.eval_reid import evaluate from data.prefetcher import data_prefetcher_mask from modeling import build_model_selfgcn from modeling.losses import TripletLoss from solver.build import make_lr_scheduler, make_optimizer from utils.meters import AverageMeter def L_Matrix(adj_npy, adj_size): D =np.zeros((adj_size, adj_size)) for i in range(adj_size): tmp = adj_npy[i,:] count = np.sum(tmp==1) if count>0: number = count ** (-1/2) D[i,i] = number x = np.matmul(D,adj_npy) L = np.matmul(x,D) return L coarse_adj_list = [ # 1 2 3 4 5 6 7 8 9 [ 1, 1, 0, 1, 0, 1, 0, 1, 0], #1 [ 1, 1, 1, 1, 0, 1, 0, 0, 0], #2 [ 0, 1, 1, 0, 1, 0, 1, 0, 0], #3 [ 1, 1, 0, 1, 1, 0, 0, 1, 0], #4 [ 0, 0, 1, 1, 1, 0, 0, 0, 1], #5 [ 1, 1, 0, 0, 0, 1, 1, 1, 0], #6 [ 0, 0, 1, 0, 0, 1, 1, 0, 1], #7 [ 1, 0, 0, 1, 0, 1, 0, 1, 1], #8 [ 0, 0, 0, 0, 1, 0, 1, 1, 1] #9 ] coarse_adj_npy = np.array(coarse_adj_list) coarse_adj_npy = L_Matrix(coarse_adj_npy, len(coarse_adj_npy)) class ReidSystem(): def __init__(self, cfg, logger, writer): self.cfg, self.logger, self.writer = cfg, logger, writer # Define dataloader self.tng_dataloader, self.val_dataloader_collection, self.num_classes, self.num_query_len_collection = get_dataloader_mask(cfg) # networks self.use_part_erasing = False self.num_parts = cfg.MODEL.NUM_PARTS self.model = build_model_selfgcn(cfg, self.num_classes) self.adj = torch.from_numpy(coarse_adj_npy).float() # loss function self.ce_loss = nn.CrossEntropyLoss() self.triplet = TripletLoss(cfg.SOLVER.MARGIN) self.mse_loss = nn.MSELoss() # optimizer and scheduler self.opt = make_optimizer(self.cfg, self.model) self.lr_sched = make_lr_scheduler(self.cfg, self.opt) self.loss_weight = [1.0, 1.0, 0.5, 0.5, 0.5, 0.5, 0.4] 
self.logger.info(f"Loss weights: {self.loss_weight}, use_pe: {self.use_part_erasing}, use_bnfeat: {True}") self._construct() def _construct(self): self.global_step = 0 self.current_epoch = 0 self.batch_nb = 0 self.max_epochs = self.cfg.SOLVER.MAX_EPOCHS self.log_interval = self.cfg.SOLVER.LOG_INTERVAL self.eval_period = self.cfg.SOLVER.EVAL_PERIOD self.use_dp = False self.use_ddp = False def loss_fns(self, outputs, labels_global, labels_gcn): loss_dict = {} if 'softmax' in list(self.cfg.SOLVER.LOSSTYPE): loss_dict['ce_g'] = self.ce_loss(outputs[0], labels_global)*self.loss_weight[0] loss_dict['ce_l1'] = self.ce_loss(outputs[2], labels_gcn)*self.loss_weight[2] loss_dict['ce_l2'] = self.ce_loss(outputs[4], labels_gcn)*self.loss_weight[4] if 'triplet' in list(self.cfg.SOLVER.LOSSTYPE): loss_dict['tr_g'] = self.triplet(outputs[1], labels_global)[0]*self.loss_weight[1] loss_dict['tr_l1'] = self.triplet(outputs[3], labels_gcn)[0]*self.loss_weight[3] loss_dict['tr_l2'] = self.triplet(outputs[5], labels_gcn)[0]*self.loss_weight[5] # target_gcn_feat = outputs[6].clone().detach().requires_grad_(False) # loss_dict['mse'] = self.mse_loss(target_gcn_feat, outputs[7])*self.loss_weight[6] loss_dict['mse'] = self.mse_loss(outputs[6], outputs[7])*self.loss_weight[6] return loss_dict def on_train_begin(self): self.best_mAP = -np.inf self.running_loss = AverageMeter() log_save_dir = os.path.join(self.cfg.OUTPUT_DIR, '-'.join(self.cfg.DATASETS.TEST_NAMES), self.cfg.MODEL.VERSION) self.model_save_dir = os.path.join(log_save_dir, 'ckpts') if not os.path.exists(self.model_save_dir): os.makedirs(self.model_save_dir) self.gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',') self.use_dp = (len(self.gpus) > 0) and (self.cfg.MODEL.DIST_BACKEND == 'dp') if self.use_dp: self.model = nn.DataParallel(self.model) self.model = self.model.cuda() self.model.train() self.adj = self.adj.cuda() def on_epoch_begin(self): self.batch_nb = 0 self.current_epoch += 1 self.t0 = time.time() 
self.running_loss.reset() self.tng_prefetcher = data_prefetcher_mask(self.tng_dataloader) def training_step(self, batch): inputs, masks, labels, _ = batch adj_batch = self.adj.repeat(inputs.size(0), 1, 1) inputs_global = inputs inputs_selfgcn = inputs labels_global = labels labels_selfgcn = labels if self.use_part_erasing: # inputs_masked = torch.zeros(inputs.size(), dtype=inputs.dtype) # random part erasing for i in range(inputs.size(0)): input = inputs[i] mask = masks[i] part_list = [] for c in range(1, self.num_parts): part = (mask.long() == c) if part.any(): part_list.append(c) drop_part = random.choice(part_list) mask = (mask.long() != drop_part) if random.uniform(0, 1) > 0.5: inputs_selfgcn[i] = mask.float()*input # inputs_masked[i] = mask.float()*input # inputs_masked = inputs_masked.cuda() # inputs_global = torch.cat([inputs, inputs_masked], dim=0) # labels_global = torch.cat([labels, labels], dim=0) outputs = self.model(inputs_global, inputs_selfgcn, masks, adj_batch) loss_dict = self.loss_fns(outputs, labels_global, labels_selfgcn) total_loss = 0 print_str = f'\r Epoch {self.current_epoch} Iter {self.batch_nb}/{len(self.tng_dataloader)} ' for loss_name, loss_value in loss_dict.items(): total_loss += loss_value print_str += (loss_name+f': {loss_value.item():.3f} ') loss_dict['total_loss'] = total_loss.item() print_str += f'Total loss: {total_loss.item():.3f} ' print(print_str, end=' ') if (self.global_step+1) % self.log_interval == 0: for loss_name, loss_value in loss_dict.items(): self.writer.add_scalar(loss_name, loss_value, self.global_step) self.writer.add_scalar('total_loss', loss_dict['total_loss'], self.global_step) self.running_loss.update(total_loss.item()) self.opt.zero_grad() total_loss.backward() self.opt.step() self.global_step += 1 self.batch_nb += 1 def on_epoch_end(self): elapsed = time.time() - self.t0 mins = int(elapsed) // 60 seconds = int(elapsed - mins * 60) print('') self.logger.info(f'Epoch {self.current_epoch} Total loss: 
{self.running_loss.avg:.3f} ' f'lr: {self.opt.param_groups[0]["lr"]:.2e} During {mins:d}min:{seconds:d}s') # update learning rate self.lr_sched.step() def test(self): # convert to eval mode self.model.eval() metric_dict = list() for val_dataset_name, val_dataloader, num_query in zip(self.cfg.DATASETS.TEST_NAMES, self.val_dataloader_collection, self.num_query_len_collection): feats, pids, camids = [], [], [] val_prefetcher = data_prefetcher_mask(val_dataloader) batch = val_prefetcher.next() while batch[0] is not None: img, mask, pid, camid = batch adj_batch = self.adj.repeat(img.size(0), 1, 1) with torch.no_grad(): output = self.model(img, img, mask, adj_batch) # feat = output[1] feat = torch.cat([output[1], output[3]], dim=1) feats.append(feat) pids.extend(pid.cpu().numpy()) camids.extend(np.asarray(camid)) batch = val_prefetcher.next() feats = torch.cat(feats, dim=0) if self.cfg.TEST.NORM: feats = F.normalize(feats, p=2, dim=1) # query qf = feats[:num_query] q_pids = np.asarray(pids[:num_query]) q_camids = np.asarray(camids[:num_query]) # gallery gf = feats[num_query:] g_pids = np.asarray(pids[num_query:]) g_camids = np.asarray(camids[num_query:]) # m, n = qf.shape[0], gf.shape[0] distmat = torch.mm(qf, gf.t()).cpu().numpy() # distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \ # torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t() # distmat.addmm_(1, -2, qf, gf.t()) # distmat = distmat.numpy() cmc, mAP = evaluate(-distmat, q_pids, g_pids, q_camids, g_camids) self.logger.info(f"Test Results on {val_dataset_name} - Epoch: {self.current_epoch}") self.logger.info(f"mAP: {mAP:.1%}") for r in [1, 5, 10]: self.logger.info(f"CMC curve, Rank-{r:<3}:{cmc[r - 1]:.1%}") self.writer.add_scalar('rank1', cmc[0], self.global_step) self.writer.add_scalar('mAP', mAP, self.global_step) metric_dict.append({'rank1': cmc[0], 'mAP': mAP}) # convert to train mode self.model.train() return metric_dict[0] def train(self): self.on_train_begin() # self.test() for 
epoch in range(self.max_epochs): self.on_epoch_begin() batch = self.tng_prefetcher.next() while batch[0] is not None: self.training_step(batch) batch = self.tng_prefetcher.next() self.on_epoch_end() if (epoch+1) % self.eval_period == 0: metric_dict = self.test() if metric_dict['mAP'] > self.best_mAP: is_best = True self.best_mAP = metric_dict['mAP'] else: is_best = False self.save_checkpoints(is_best) torch.cuda.empty_cache() def save_checkpoints(self, is_best): if self.use_dp: state_dict = self.model.module.state_dict() else: state_dict = self.model.state_dict() # TODO: add optimizer state dict and lr scheduler filepath = os.path.join(self.model_save_dir, f'model_epoch{self.current_epoch}.pth') torch.save(state_dict, filepath) if is_best: best_filepath = os.path.join(self.model_save_dir, 'model_best.pth') shutil.copyfile(filepath, best_filepath)
import tkinter as tk
import threading
import time
import asyncio
from notifications_provider import NotificationsProvider
from load_configuration import getConfig, updateValue
from overlay_components import *

fg_colour = '#FEFEFE'

#Transparency will only work on windows
class App():
    """In charge of displaying information to the user and getting input from the user"""

    def __init__(self, size='standard', main=None):
        self.inspector = None
        self.setupWindow(size=size, main=main)

    def setupWindow(self, size='standard', main=None):
        """Builds the borderless always-on-top overlay window and its option widgets."""
        if not size in presets['size'].keys():
            # NOTE(review): the key lookups inside this f-string used double quotes
            # inside a double-quoted f-string — a SyntaxError before Python 3.12;
            # single quotes are used for the inner keys throughout this file now.
            raise ValueError(f"Did not recognise size: {size}. Expected one of: {presets['size'].keys()}")
        else:
            self._size = size
        self.gatherable_labels = {}

        #Root window:
        self._window = tk.Tk()
        self._window.configure(background="white")
        self._window.overrideredirect(True) #Makes the window borderless
        self._window.wm_attributes("-topmost", True) #Window is always on top
        self._window.wm_attributes("-transparentcolor", "white") #Where there was once white there is now transparency

        #Primary display frame
        self._root = tk.Frame(self._window, bg='white')
        self._root.grid(sticky='nw')

        #Options Panel
        self._options_labels = []
        self._toggle_panel_button_image = tk.PhotoImage(file=presets['size'][self._size]['mainButton'])
        self._settings_button_image = tk.PhotoImage(file='../res/black_dot_32.png') #TODO Temporary
        self._settings = Settings(self, main=main)
        self._options_panel = tk.Frame(self._root, bg='white')
        self._options_panel.grid(row=0, column=0, sticky='nw')
        self._options_panel_removed = True
        self._toggle_panel_button = tk.Label(self._options_panel, image=self._toggle_panel_button_image, bg='white', borderwidth=0)
        self._toggle_panel_button.grid(row=0, column=0, sticky='nw')
        self._toggle_panel_button.bind('<Button-1>', self.toggleOptionsPanel)
        self._toggle_panel_button.bind('<Enter>', self.hover)
        self._toggle_panel_button.bind('<Leave>', self.unhover)
        self._toggle_panel_button_padding = tk.Label(self._options_panel, height=1, font=('Helvetica', 8), bg='white')
        self._toggle_panel_button_padding.grid(row=1, column=0)
        self._settings_button = tk.Label(self._options_panel, image=self._settings_button_image)
        self._settings_button.bind('<Button-1>', self._settings.showSettings)
        self._options_labels.append(self._settings_button)
        self._settings_button.grid(row=0, column=1, rowspan=2, sticky='w')
        self._settings_button.grid_remove()

    def mainloop(self):
        self._root.mainloop()

    def destroyWindow(self):
        self._window.destroy()

    def freezeWindow(self):
        self._window.wm_attributes("-disabled", True)

    def unfreezeWindow(self):
        self._window.wm_attributes("-disabled", False)

    def hover(self, event):
        print("Main button moused over") #TODO Will change main button image

    def unhover(self, event):
        print("Mouse moved off of main button") #TODO Will change main button image

    def toggleOptionsPanel(self, event):
        """Displays the options panel if it is currently hidden, hides it if it is currently visible"""
        if self._options_panel_removed:
            self._options_panel_removed = False
            for i in range(len(self._options_labels)):
                self._options_labels[i].grid(row=0,column=i+1)
        else:
            self._options_panel_removed = True
            for l in self._options_labels:
                l.grid_remove()

    def showInspector(self, label):
        """Displays the label as inspector. Hides the previous inspector if one was already shown"""
        self.hideInspector()
        print("Showing: " + str(label))
        self.inspector = label
        self.inspector.grid(row=0, column=1)

    def hideInspector(self):
        """Hides the currently displayed inspector if one is displayed"""
        try:
            self.inspector.grid_remove()
            self.inspector = None
        except AttributeError as e:#If self.inspector does not exist or has already been destroyed it will be None
            print(f"Unable to destroy inspector: {repr(e)}")
            if self.inspector is not None:
                raise e

    def setGatherableLabels(self, *args:(str, tk.Label)):
        """Empties the gatherableLabels dictionary, adds the provided labels to it, then displays those labels"""
        self.gatherable_labels = {k:v for k,v in args}
        print(self.gatherable_labels)
        self.redrawGatherableLabels()

    def removeAllGatherableLabels(self):
        """Destroys all labels present in the gatherableLabels dictionary then empties the gatherableLabels dictionary"""
        for l in self.gatherable_labels.values():
            l.destroy()
        self.gatherable_labels = {}

    def redrawGatherableLabels(self):
        """Displays all labels in gatherableLabels"""
        i = 2 # rows 0/1 are taken by the options panel
        for l in self.gatherable_labels.values():
            l.grid(row=i, column=0, columnspan=10, sticky='nw')
            i+=1

    async def removeGatherableLabel(self, key):
        """Removes the gatherable label named 'key' from the dictionary and destroys it"""
        self.gatherable_labels[key].destroy()
        self.gatherable_labels.pop(key)

    async def addGatherableLabel(self, name, item_info):
        """Creates an InspectableLabel with an associated panel providing additional information (inspectPanel),
        adds it to the list of displayed gatherable labels, then redraws the list of gatherable labels"""
        INSPECT_PANEL_BG_COLOUR = '#6F7066'#Background colour for the Inspect Panel
        inspect_panel = tk.Frame(self._window, bg=INSPECT_PANEL_BG_COLOUR)
        grid_number = 0 #Iterator for column Number
        if item_info['itemValues']:
            location_label = tk.Label(inspect_panel, fg=fg_colour, bg=INSPECT_PANEL_BG_COLOUR, text=f"Location: {item_info['itemValues']['map']} ({item_info['itemValues']['x']}, {item_info['itemValues']['y']})")
            location_label.grid(row=grid_number, sticky='w')
            grid_number+=1
        if item_info['spawnTime']:
            spawn_time_label = tk.Label(inspect_panel, fg=fg_colour, bg=INSPECT_PANEL_BG_COLOUR, text=f"Spawn Time: {item_info['spawnTime']}:00")#Will need changing if something ever spawns not on the hour
            spawn_time_label.grid(row=grid_number, sticky='w')
            grid_number+=1
        if item_info['despawnTime']:
            despawn_time_label = tk.Label(inspect_panel, fg=fg_colour, bg=INSPECT_PANEL_BG_COLOUR, text=f"Despawn Time: {item_info['despawnTime']}:00")
            despawn_time_label.grid(row=grid_number, sticky='w')
            grid_number+=1
        #TODO priceOnEachServerAndLastUpdateTime
        #TODO buttonToOpenInGamerescape
        label = InspectableLabel(self, self._root, inspect_panel, text=f"{name} | {item_info['price']}gil", font=('Helvetica', presets['size'][self._size]['font-size']), bg='#565356', fg=fg_colour)
        self.gatherable_labels[name] = label
        self.redrawGatherableLabels()


class Main():
    """Sets up the App object and provides it with the information it needs to display to the user"""

    def start(self):
        """Starts the application"""
        self.gatherable_labels = {}
        self.config_values = getConfig()
        self.app = App(size=self.config_values['general']['size'], main=self)
        self.notifications_provider_thread = threading.Thread(target = self.setupNotificationsProvider)
        self.notifications_provider_thread.start()
        self.app.mainloop()

    def restart(self):
        """Checks if the settings have been changed. If they have, the new settings are applied"""
        new_config_values = getConfig()
        if new_config_values == self.config_values:
            return #Don't want to waste time restarting the program if none of the settings changed
        #Don't necessarily need to restart the program if the datacenter changed but the size didn't, but this seems easier to code and it's fast enough
        self.app.destroyWindow()
        self.config_values['general']['size'] = new_config_values['general']['size']
        self.app.setupWindow(size=self.config_values['general']['size'], main=self)
        if new_config_values['general']['datacenter'] != self.config_values['general']['datacenter']:
            self.gatherable_labels = {}
            self.notifications_provider.stopGatherAlerts()
            # NOTE(review): Thread.isAlive() was removed in Python 3.9 (use is_alive()),
            # and the original condition was inverted — it looped while the old thread
            # was already dead. Wait while the old provider thread is still running.
            while self.notifications_provider_thread.is_alive():
                time.sleep(0.25) #TODO there must be a better way of doing this
            self.config_values['general']['datacenter'] = new_config_values['general']['datacenter']
            self.notifications_provider_thread = threading.Thread(target = self.setupNotificationsProvider)
            self.notifications_provider_thread.start()
        asyncio.run(self.redrawLabels())
        self.app.mainloop()

    def setupNotificationsProvider(self):
        """
        Creates and runs a NotificationsProvider object.
        \n (Should always be run in a seperate thread because it will not stop on its own)
        """
        # gatherable_items_location is expected to come from overlay_components — TODO confirm
        self.notifications_provider = NotificationsProvider(gatherable_items_location, self.config_values['general']['datacenter'], self.nodeSpawn, self.nodeDespawn)
        self.notifications_provider.beginGatherAlerts()

    async def nodeSpawn(self, name=None, price=None, item_values=None, spawn_time=None, despawn_time=None, market_data=None):
        """Records the information about this node spawn and then tells the app to display a label for it"""
        self.gatherable_labels[name] = {'price':price,'itemValues':item_values,'spawnTime':spawn_time,'despawnTime':despawn_time,'marketData':market_data}
        await self.app.addGatherableLabel(name, self.gatherable_labels[name])

    async def nodeDespawn(self, name=None):
        """Removes the information about this node spawn and tells the app to remove the label for it"""
        self.gatherable_labels.pop(name)
        await self.app.removeGatherableLabel(name)

    async def redrawLabels(self):
        """Tells the app to create labels for all node spawns that this object has information for"""
        for key in self.gatherable_labels.keys():
            await self.app.addGatherableLabel(key, self.gatherable_labels[key])


if __name__ == "__main__":
    main = Main()
    main.start()
import tkinter as tk
import threading
import time
import asyncio
from notifications_provider import NotificationsProvider
from load_configuration import getConfig, updateValue
from overlay_components import *

fg_colour = '#FEFEFE'

#Transparency will only work on windows
class App():
    """In charge of displaying information to the user and getting input from the user"""

    def __init__(self, size='standard', main=None):
        self.inspector = None
        self.setupWindow(size=size, main=main)

    def setupWindow(self, size='standard', main=None):
        """Builds the borderless always-on-top overlay window and its option widgets."""
        if not size in presets['size'].keys():
            raise ValueError(f"Did not recognise size: {size}. Expected one of: {presets['size'].keys()}")
        else:
            self._size = size
        self.gatherable_labels = {}

        #Root window:
        self._window = tk.Tk()
        self._window.configure(background="white")
        self._window.overrideredirect(True) #Makes the window borderless
        self._window.wm_attributes("-topmost", True) #Window is always on top
        self._window.wm_attributes("-transparentcolor", "white") #Where there was once white there is now transparency

        #Primary display frame
        self._root = tk.Frame(self._window, bg='white')
        self._root.grid(sticky='nw')

        #Options Panel
        self._options_labels = []
        self._toggle_panel_button_image = tk.PhotoImage(file=presets['size'][self._size]['mainButton'])
        self._settings_button_image = tk.PhotoImage(file='../res/black_dot_32.png') #TODO Temporary
        self._settings = Settings(self, main=main)
        self._options_panel = tk.Frame(self._root, bg='white')
        self._options_panel.grid(row=0, column=0, sticky='nw')
        self._options_panel_removed = True
        self._toggle_panel_button = tk.Label(self._options_panel, image=self._toggle_panel_button_image, bg='white', borderwidth=0)
        self._toggle_panel_button.grid(row=0, column=0, sticky='nw')
        self._toggle_panel_button.bind('<Button-1>', self.toggleOptionsPanel)
        self._toggle_panel_button.bind('<Enter>', self.hover)
        self._toggle_panel_button.bind('<Leave>', self.unhover)
        self._toggle_panel_button_padding = tk.Label(self._options_panel, height=1, font=('Helvetica', 8), bg='white')
        self._toggle_panel_button_padding.grid(row=1, column=0)
        self._settings_button = tk.Label(self._options_panel, image=self._settings_button_image)
        self._settings_button.bind('<Button-1>', self._settings.showSettings)
        self._options_labels.append(self._settings_button)
        self._settings_button.grid(row=0, column=1, rowspan=2, sticky='w')
        self._settings_button.grid_remove()

    def mainloop(self):
        self._root.mainloop()

    def destroyWindow(self):
        self._window.destroy()

    def freezeWindow(self):
        self._window.wm_attributes("-disabled", True)

    def unfreezeWindow(self):
        self._window.wm_attributes("-disabled", False)

    def hover(self, event):
        print("Main button moused over") #TODO Will change main button image

    def unhover(self, event):
        print("Mouse moved off of main button") #TODO Will change main button image

    def toggleOptionsPanel(self, event):
        """Displays the options panel if it is currently hidden, hides it if it is currently visible"""
        if self._options_panel_removed:
            self._options_panel_removed = False
            for i in range(len(self._options_labels)):
                self._options_labels[i].grid(row=0,column=i+1)
        else:
            self._options_panel_removed = True
            for l in self._options_labels:
                l.grid_remove()

    def showInspector(self, label):
        """Displays the label as inspector. Hides the previous inspector if one was already shown"""
        self.hideInspector()
        print("Showing: " + str(label))
        self.inspector = label
        self.inspector.grid(row=0, column=1)

    def hideInspector(self):
        """Hides the currently displayed inspector if one is displayed"""
        try:
            self.inspector.grid_remove()
            self.inspector = None
        except AttributeError as e:#If self.inspector does not exist or has already been destroyed it will be None
            print(f"Unable to destroy inspector: {repr(e)}")
            if self.inspector is not None:
                raise e

    def setGatherableLabels(self, *args:(str, tk.Label)):
        """Empties the gatherableLabels dictionary, adds the provided labels to it, then displays those labels"""
        self.gatherable_labels = {k:v for k,v in args}
        print(self.gatherable_labels)
        self.redrawGatherableLabels()

    def removeAllGatherableLabels(self):
        """Destroys all labels present in the gatherableLabels dictionary then empties the gatherableLabels dictionary"""
        for l in self.gatherable_labels.values():
            l.destroy()
        self.gatherable_labels = {}

    def redrawGatherableLabels(self):
        """Displays all labels in gatherableLabels"""
        i = 2 # rows 0/1 are taken by the options panel
        for l in self.gatherable_labels.values():
            l.grid(row=i, column=0, columnspan=10, sticky='nw')
            i+=1

    async def removeGatherableLabel(self, key):
        """Removes the gatherable label named 'key' from the dictionary and destroys it"""
        self.gatherable_labels[key].destroy()
        self.gatherable_labels.pop(key)

    async def addGatherableLabel(self, name, item_info):
        """Creates an InspectableLabel with an associated panel providing additional information (inspectPanel),
        adds it to the list of displayed gatherable labels, then redraws the list of gatherable labels"""
        INSPECT_PANEL_BG_COLOUR = '#6F7066'#Background colour for the Inspect Panel
        inspect_panel = tk.Frame(self._window, bg=INSPECT_PANEL_BG_COLOUR)
        grid_number = 0 #Iterator for column Number
        if item_info['itemValues']:
            location_label = tk.Label(inspect_panel, fg=fg_colour, bg=INSPECT_PANEL_BG_COLOUR, text=f"Location: {item_info['itemValues']['map']} ({item_info['itemValues']['x']}, {item_info['itemValues']['y']})")
            location_label.grid(row=grid_number, sticky='w')
            grid_number+=1
        if item_info['spawnTime']:
            spawn_time_label = tk.Label(inspect_panel, fg=fg_colour, bg=INSPECT_PANEL_BG_COLOUR, text=f"Spawn Time: {item_info['spawnTime']}:00")#Will need changing if something ever spawns not on the hour
            spawn_time_label.grid(row=grid_number, sticky='w')
            grid_number+=1
        if item_info['despawnTime']:
            despawn_time_label = tk.Label(inspect_panel, fg=fg_colour, bg=INSPECT_PANEL_BG_COLOUR, text=f"Despawn Time: {item_info['despawnTime']}:00")
            despawn_time_label.grid(row=grid_number, sticky='w')
            grid_number+=1
        #TODO priceOnEachServerAndLastUpdateTime
        #TODO buttonToOpenInGamerescape
        label = InspectableLabel(self, self._root, inspect_panel, text=f"{name} | {item_info['price']}gil", font=('Helvetica', presets['size'][self._size]['font-size']), bg='#565356', fg=fg_colour)
        self.gatherable_labels[name] = label
        self.redrawGatherableLabels()


class Main():
    """Sets up the App object and provides it with the information it needs to display to the user"""

    def start(self):
        """Starts the application"""
        self.gatherable_labels = {}
        self.config_values = getConfig()
        self.app = App(size=self.config_values['general']['size'], main=self)
        self.notifications_provider_thread = threading.Thread(target = self.setupNotificationsProvider)
        self.notifications_provider_thread.start()
        self.app.mainloop()

    def restart(self):
        """Checks if the settings have been changed. If they have, the new settings are applied"""
        new_config_values = getConfig()
        if new_config_values == self.config_values:
            return #Don't want to waste time restarting the program if none of the settings changed
        #Don't necessarily need to restart the program if the datacenter changed but the size didn't, but this seems easier to code and it's fast enough
        self.app.destroyWindow()
        self.config_values['general']['size'] = new_config_values['general']['size']
        self.app.setupWindow(size=self.config_values['general']['size'], main=self)
        if new_config_values['general']['datacenter'] != self.config_values['general']['datacenter']:
            self.gatherable_labels = {}
            self.notifications_provider.stopGatherAlerts()
            # NOTE(review): Thread.isAlive() was removed in Python 3.9 (use is_alive()),
            # and the original condition was inverted — it looped while the old thread
            # was already dead. Wait while the old provider thread is still running.
            while self.notifications_provider_thread.is_alive():
                time.sleep(0.25) #TODO there must be a better way of doing this
            self.config_values['general']['datacenter'] = new_config_values['general']['datacenter']
            self.notifications_provider_thread = threading.Thread(target = self.setupNotificationsProvider)
            self.notifications_provider_thread.start()
        asyncio.run(self.redrawLabels())
        self.app.mainloop()

    def setupNotificationsProvider(self):
        """
        Creates and runs a NotificationsProvider object.
        \n (Should always be run in a seperate thread because it will not stop on its own)
        """
        # gatherable_items_location is expected to come from overlay_components — TODO confirm
        self.notifications_provider = NotificationsProvider(gatherable_items_location, self.config_values['general']['datacenter'], self.nodeSpawn, self.nodeDespawn)
        self.notifications_provider.beginGatherAlerts()

    async def nodeSpawn(self, name=None, price=None, item_values=None, spawn_time=None, despawn_time=None, market_data=None):
        """Records the information about this node spawn and then tells the app to display a label for it"""
        self.gatherable_labels[name] = {'price':price,'itemValues':item_values,'spawnTime':spawn_time,'despawnTime':despawn_time,'marketData':market_data}
        await self.app.addGatherableLabel(name, self.gatherable_labels[name])

    async def nodeDespawn(self, name=None):
        """Removes the information about this node spawn and tells the app to remove the label for it"""
        self.gatherable_labels.pop(name)
        await self.app.removeGatherableLabel(name)

    async def redrawLabels(self):
        """Tells the app to create labels for all node spawns that this object has information for"""
        for key in self.gatherable_labels.keys():
            await self.app.addGatherableLabel(key, self.gatherable_labels[key])


if __name__ == "__main__":
    main = Main()
    main.start()
import hashlib import logging import os import os.path as osp import sys cur_dir = osp.dirname(osp.abspath(__file__)) PROJ_ROOT = osp.normpath(osp.join(cur_dir, "../../..")) sys.path.insert(0, PROJ_ROOT) import time from collections import OrderedDict import mmcv import numpy as np from tqdm import tqdm from transforms3d.quaternions import mat2quat, quat2mat import ref from detectron2.data import DatasetCatalog, MetadataCatalog from detectron2.structures import BoxMode from lib.pysixd import inout, misc from lib.utils.mask_utils import binary_mask_to_rle, cocosegm2mask from lib.utils.utils import dprint, iprint, lazy_property logger = logging.getLogger(__name__) DATASETS_ROOT = osp.normpath(osp.join(PROJ_ROOT, "datasets")) class YCBV_BOP_TEST_Dataset: """ycbv bop test.""" def __init__(self, data_cfg): """ Set with_depth and with_masks default to True, and decide whether to load them into dataloader/network later with_masks: """ self.name = data_cfg["name"] self.data_cfg = data_cfg self.objs = data_cfg["objs"] # selected objects # all classes are self.objs, but this enables us to evaluate on selected objs self.select_objs = data_cfg.get("select_objs", self.objs) self.ann_file = data_cfg["ann_file"] # json file with scene_id and im_id items self.dataset_root = data_cfg["dataset_root"] # BOP_DATASETS/ycbv/test self.models_root = data_cfg["models_root"] # BOP_DATASETS/ycbv/models self.scale_to_meter = data_cfg["scale_to_meter"] # 0.001 self.with_masks = data_cfg["with_masks"] # True (load masks but may not use it) self.with_depth = data_cfg["with_depth"] # True (load depth path here, but may not use it) self.height = data_cfg["height"] # 480 self.width = data_cfg["width"] # 640 self.cache_dir = data_cfg.get("cache_dir", osp.join(PROJ_ROOT, ".cache")) # .cache self.use_cache = data_cfg.get("use_cache", True) self.num_to_load = data_cfg["num_to_load"] # -1 self.filter_invalid = data_cfg["filter_invalid"] ################################################## # NOTE: careful! 
Only the selected objects self.cat_ids = [cat_id for cat_id, obj_name in ref.ycbv.id2obj.items() if obj_name in self.objs] # map selected objs to [0, num_objs-1] self.cat2label = {v: i for i, v in enumerate(self.cat_ids)} # id_map self.label2cat = {label: cat for cat, label in self.cat2label.items()} self.obj2label = OrderedDict((obj, obj_id) for obj_id, obj in enumerate(self.objs)) ########################################################## def __call__(self): """Load light-weight instance annotations of all images into a list of dicts in Detectron2 format. Do not load heavy data into memory in this file, since we will load the annotations of all images into memory. """ # cache the dataset_dicts to avoid loading masks from files hashed_file_name = hashlib.md5( ( "".join([str(fn) for fn in self.objs]) + "dataset_dicts_{}_{}_{}_{}_{}".format( self.name, self.dataset_root, self.with_masks, self.with_depth, __name__, ) ).encode("utf-8") ).hexdigest() cache_path = osp.join( self.cache_dir, "dataset_dicts_{}_{}.pkl".format(self.name, hashed_file_name), ) if osp.exists(cache_path) and self.use_cache: logger.info("load cached dataset dicts from {}".format(cache_path)) return mmcv.load(cache_path) t_start = time.perf_counter() logger.info("loading dataset dicts: {}".format(self.name)) self.num_instances_without_valid_segmentation = 0 self.num_instances_without_valid_box = 0 dataset_dicts = [] # ###################################################### im_id_global = 0 if True: targets = mmcv.load(self.ann_file) scene_im_ids = [(item["scene_id"], item["im_id"]) for item in targets] scene_im_ids = sorted(list(set(scene_im_ids))) # load infos for each scene gt_dicts = {} gt_info_dicts = {} cam_dicts = {} for scene_id, im_id in scene_im_ids: scene_root = osp.join(self.dataset_root, f"{scene_id:06d}") if scene_id not in gt_dicts: gt_dicts[scene_id] = mmcv.load(osp.join(scene_root, "scene_gt.json")) if scene_id not in gt_info_dicts: gt_info_dicts[scene_id] = mmcv.load( 
osp.join(scene_root, "scene_gt_info.json") ) # bbox_obj, bbox_visib if scene_id not in cam_dicts: cam_dicts[scene_id] = mmcv.load(osp.join(scene_root, "scene_camera.json")) for scene_id, im_id in tqdm(scene_im_ids): str_im_id = str(im_id) scene_root = osp.join(self.dataset_root, f"{scene_id:06d}") rgb_path = osp.join(scene_root, "rgb/{:06d}.png").format(im_id) assert osp.exists(rgb_path), rgb_path depth_path = osp.join(scene_root, "depth/{:06d}.png".format(im_id)) scene_id = int(rgb_path.split("/")[-3]) cam = np.array(cam_dicts[scene_id][str_im_id]["cam_K"], dtype=np.float32).reshape(3, 3) depth_factor = 1000.0 / cam_dicts[scene_id][str_im_id]["depth_scale"] record = { "dataset_name": self.name, "file_name": osp.relpath(rgb_path, PROJ_ROOT), "depth_file": osp.relpath(depth_path, PROJ_ROOT), "depth_factor": depth_factor, "height": self.height, "width": self.width, "image_id": im_id_global, # unique image_id in the dataset, for coco evaluation "scene_im_id": "{}/{}".format(scene_id, im_id), # for evaluation "cam": cam, "img_type": "real", } im_id_global += 1 insts = [] for anno_i, anno in enumerate(gt_dicts[scene_id][str_im_id]): obj_id = anno["obj_id"] if ref.ycbv.id2obj[obj_id] not in self.select_objs: continue cur_label = self.cat2label[obj_id] # 0-based label R = np.array(anno["cam_R_m2c"], dtype="float32").reshape(3, 3) t = np.array(anno["cam_t_m2c"], dtype="float32") / 1000.0 pose = np.hstack([R, t.reshape(3, 1)]) quat = mat2quat(R).astype("float32") proj = (record["cam"] @ t.T).T proj = proj[:2] / proj[2] bbox_visib = gt_info_dicts[scene_id][str_im_id][anno_i]["bbox_visib"] bbox_obj = gt_info_dicts[scene_id][str_im_id][anno_i]["bbox_obj"] x1, y1, w, h = bbox_visib if self.filter_invalid: if h <= 1 or w <= 1: self.num_instances_without_valid_box += 1 continue mask_file = osp.join( scene_root, "mask/{:06d}_{:06d}.png".format(im_id, anno_i), ) mask_visib_file = osp.join( scene_root, "mask_visib/{:06d}_{:06d}.png".format(im_id, anno_i), ) assert 
osp.exists(mask_file), mask_file assert osp.exists(mask_visib_file), mask_visib_file # load mask visib mask_single = mmcv.imread(mask_visib_file, "unchanged") area = mask_single.sum() if area < 3: # filter out too small or nearly invisible instances self.num_instances_without_valid_segmentation += 1 continue mask_rle = binary_mask_to_rle(mask_single, compressed=True) # load mask full mask_full = mmcv.imread(mask_file, "unchanged") mask_full = mask_full.astype("bool") mask_full_rle = binary_mask_to_rle(mask_full, compressed=True) inst = { "category_id": cur_label, # 0-based label "bbox": bbox_visib, # TODO: load both bbox_obj and bbox_visib "bbox_mode": BoxMode.XYWH_ABS, "pose": pose, "quat": quat, "trans": t, "centroid_2d": proj, # absolute (cx, cy) "segmentation": mask_rle, "mask_full": mask_full_rle, # TODO: load as mask_full, rle } model_info = self.models_info[str(obj_id)] inst["model_info"] = model_info # TODO: using full mask and full xyz for key in ["bbox3d_and_center"]: inst[key] = self.models[cur_label][key] insts.append(inst) if len(insts) == 0: # filter im without anno continue record["annotations"] = insts dataset_dicts.append(record) if self.num_instances_without_valid_segmentation > 0: logger.warning( "Filtered out {} instances without valid segmentation. " "There might be issues in your dataset generation process.".format( self.num_instances_without_valid_segmentation ) ) if self.num_instances_without_valid_box > 0: logger.warning( "Filtered out {} instances without valid box. 
" "There might be issues in your dataset generation process.".format(self.num_instances_without_valid_box) ) ########################################################################## if self.num_to_load > 0: self.num_to_load = min(int(self.num_to_load), len(dataset_dicts)) dataset_dicts = dataset_dicts[: self.num_to_load] logger.info("loaded {} dataset dicts, using {}s".format(len(dataset_dicts), time.perf_counter() - t_start)) mmcv.mkdir_or_exist(osp.dirname(cache_path)) mmcv.dump(dataset_dicts, cache_path, protocol=4) logger.info("Dumped dataset_dicts to {}".format(cache_path)) return dataset_dicts @lazy_property def models_info(self): models_info_path = osp.join(self.models_root, "models_info.json") assert osp.exists(models_info_path), models_info_path models_info = mmcv.load(models_info_path) # key is str(obj_id) return models_info @lazy_property def models(self): """Load models into a list.""" cache_path = osp.join(self.models_root, f"models_{self.name}.pkl") if osp.exists(cache_path) and self.use_cache: # dprint("{}: load cached object models from {}".format(self.name, cache_path)) return mmcv.load(cache_path) models = [] for obj_name in self.objs: model = inout.load_ply( osp.join( self.models_root, f"obj_{ref.ycbv.obj2id[obj_name]:06d}.ply", ), vertex_scale=self.scale_to_meter, ) # NOTE: the bbox3d_and_center is not obtained from centered vertices # for BOP models, not a big problem since they had been centered model["bbox3d_and_center"] = misc.get_bbox3d_and_center(model["pts"]) models.append(model) logger.info("cache models to {}".format(cache_path)) mmcv.dump(models, cache_path, protocol=4) return models def image_aspect_ratio(self): return self.width / self.height # 4/3 ########### register datasets ############################################################ def get_ycbv_metadata(obj_names, ref_key): """task specific metadata.""" data_ref = ref.__dict__[ref_key] cur_sym_infos = {} # label based key loaded_models_info = data_ref.get_models_info() for i, 
obj_name in enumerate(obj_names): obj_id = data_ref.obj2id[obj_name] model_info = loaded_models_info[str(obj_id)] if "symmetries_discrete" in model_info or "symmetries_continuous" in model_info: sym_transforms = misc.get_symmetry_transformations(model_info, max_sym_disc_step=0.01) sym_info = np.array([sym["R"] for sym in sym_transforms], dtype=np.float32) else: sym_info = None cur_sym_infos[i] = sym_info meta = {"thing_classes": obj_names, "sym_infos": cur_sym_infos} return meta ################################################################################ SPLITS_YCBV = dict( ycbv_bop_test=dict( name="ycbv_bop_test", dataset_root=osp.join(DATASETS_ROOT, "BOP_DATASETS/ycbv/test"), models_root=osp.join(DATASETS_ROOT, "BOP_DATASETS/ycbv/models"), objs=ref.ycbv.objects, # selected objects ann_file=osp.join(DATASETS_ROOT, "BOP_DATASETS/ycbv/test_targets_bop19.json"), scale_to_meter=0.001, with_masks=True, # (load masks but may not use it) with_depth=True, # (load depth path here, but may not use it) height=480, width=640, cache_dir=osp.join(PROJ_ROOT, ".cache"), use_cache=True, num_to_load=-1, filter_invalid=False, ref_key="ycbv", ) ) # single objs (num_class is from all objs) for obj in ref.ycbv.objects: name = "ycbv_bop_{}_test".format(obj) select_objs = [obj] if name not in SPLITS_YCBV: SPLITS_YCBV[name] = dict( name=name, dataset_root=osp.join(DATASETS_ROOT, "BOP_DATASETS/ycbv/test"), models_root=osp.join(DATASETS_ROOT, "BOP_DATASETS/ycbv/models"), objs=ref.ycbv.objects, select_objs=select_objs, # selected objects ann_file=osp.join(DATASETS_ROOT, "BOP_DATASETS/ycbv/test_targets_bop19.json"), scale_to_meter=0.001, with_masks=True, # (load masks but may not use it) with_depth=True, # (load depth path here, but may not use it) height=480, width=640, cache_dir=osp.join(PROJ_ROOT, ".cache"), use_cache=True, num_to_load=-1, filter_invalid=False, ref_key="ycbv", ) def register_with_name_cfg(name, data_cfg=None): """Assume pre-defined datasets live in `./datasets`. 
Args: name: datasnet_name, data_cfg: if name is in existing SPLITS, use pre-defined data_cfg otherwise requires data_cfg data_cfg can be set in cfg.DATA_CFG.name """ dprint("register dataset: {}".format(name)) if name in SPLITS_YCBV: used_cfg = SPLITS_YCBV[name] else: assert data_cfg is not None, f"dataset name {name} is not registered" used_cfg = data_cfg DatasetCatalog.register(name, YCBV_BOP_TEST_Dataset(used_cfg)) # something like eval_types MetadataCatalog.get(name).set( id="ycbv", # NOTE: for pvnet to determine module ref_key=used_cfg["ref_key"], objs=used_cfg["objs"], eval_error_types=["ad", "rete", "proj"], evaluator_type="bop", **get_ycbv_metadata(obj_names=used_cfg["objs"], ref_key=used_cfg["ref_key"]), ) def get_available_datasets(): return list(SPLITS_YCBV.keys()) #### tests ############################################### def test_vis(): dset_name = sys.argv[1] assert dset_name in DatasetCatalog.list() meta = MetadataCatalog.get(dset_name) dprint("MetadataCatalog: ", meta) objs = meta.objs t_start = time.perf_counter() dicts = DatasetCatalog.get(dset_name) logger.info("Done loading {} samples with {:.3f}s.".format(len(dicts), time.perf_counter() - t_start)) dirname = "output/{}-data-vis".format(dset_name) os.makedirs(dirname, exist_ok=True) for d in dicts: img = read_image_mmcv(d["file_name"], format="BGR") depth = mmcv.imread(d["depth_file"], "unchanged") / 1000.0 imH, imW = img.shape[:2] annos = d["annotations"] masks = [cocosegm2mask(anno["segmentation"], imH, imW) for anno in annos] bboxes = [anno["bbox"] for anno in annos] bbox_modes = [anno["bbox_mode"] for anno in annos] bboxes_xyxy = np.array( [BoxMode.convert(box, box_mode, BoxMode.XYXY_ABS) for box, box_mode in zip(bboxes, bbox_modes)] ) kpts_3d_list = [anno["bbox3d_and_center"] for anno in annos] quats = [anno["quat"] for anno in annos] transes = [anno["trans"] for anno in annos] Rs = [quat2mat(quat) for quat in quats] # 0-based label cat_ids = [anno["category_id"] for anno in annos] K = 
d["cam"] kpts_2d = [misc.project_pts(kpt3d, K, R, t) for kpt3d, R, t in zip(kpts_3d_list, Rs, transes)] # # TODO: visualize pose and keypoints labels = [objs[cat_id] for cat_id in cat_ids] # img_vis = vis_image_bboxes_cv2(img, bboxes=bboxes_xyxy, labels=labels) img_vis = vis_image_mask_bbox_cv2(img, masks, bboxes=bboxes_xyxy, labels=labels) img_vis_kpts2d = img.copy() for anno_i in range(len(annos)): img_vis_kpts2d = misc.draw_projected_box3d(img_vis_kpts2d, kpts_2d[anno_i]) grid_show( [ img[:, :, [2, 1, 0]], img_vis[:, :, [2, 1, 0]], img_vis_kpts2d[:, :, [2, 1, 0]], depth, ], [f"img:{d["file_name"]}", "vis_img", "img_vis_kpts2d", "depth"], row=2, col=2, ) if __name__ == "__main__": """Test the dataset loader. Usage: python -m core.datasets.ycbv_bop_test dataset_name """ from lib.vis_utils.image import grid_show from lib.utils.setup_logger import setup_my_logger import detectron2.data.datasets # noqa # add pre-defined metadata from core.utils.data_utils import read_image_mmcv from lib.vis_utils.image import vis_image_mask_bbox_cv2 print("sys.argv:", sys.argv) logger = setup_my_logger(name="core") register_with_name_cfg(sys.argv[1]) print("dataset catalog: ", DatasetCatalog.list()) test_vis()
import hashlib import logging import os import os.path as osp import sys cur_dir = osp.dirname(osp.abspath(__file__)) PROJ_ROOT = osp.normpath(osp.join(cur_dir, "../../..")) sys.path.insert(0, PROJ_ROOT) import time from collections import OrderedDict import mmcv import numpy as np from tqdm import tqdm from transforms3d.quaternions import mat2quat, quat2mat import ref from detectron2.data import DatasetCatalog, MetadataCatalog from detectron2.structures import BoxMode from lib.pysixd import inout, misc from lib.utils.mask_utils import binary_mask_to_rle, cocosegm2mask from lib.utils.utils import dprint, iprint, lazy_property logger = logging.getLogger(__name__) DATASETS_ROOT = osp.normpath(osp.join(PROJ_ROOT, "datasets")) class YCBV_BOP_TEST_Dataset: """ycbv bop test.""" def __init__(self, data_cfg): """ Set with_depth and with_masks default to True, and decide whether to load them into dataloader/network later with_masks: """ self.name = data_cfg["name"] self.data_cfg = data_cfg self.objs = data_cfg["objs"] # selected objects # all classes are self.objs, but this enables us to evaluate on selected objs self.select_objs = data_cfg.get("select_objs", self.objs) self.ann_file = data_cfg["ann_file"] # json file with scene_id and im_id items self.dataset_root = data_cfg["dataset_root"] # BOP_DATASETS/ycbv/test self.models_root = data_cfg["models_root"] # BOP_DATASETS/ycbv/models self.scale_to_meter = data_cfg["scale_to_meter"] # 0.001 self.with_masks = data_cfg["with_masks"] # True (load masks but may not use it) self.with_depth = data_cfg["with_depth"] # True (load depth path here, but may not use it) self.height = data_cfg["height"] # 480 self.width = data_cfg["width"] # 640 self.cache_dir = data_cfg.get("cache_dir", osp.join(PROJ_ROOT, ".cache")) # .cache self.use_cache = data_cfg.get("use_cache", True) self.num_to_load = data_cfg["num_to_load"] # -1 self.filter_invalid = data_cfg["filter_invalid"] ################################################## # NOTE: careful! 
Only the selected objects self.cat_ids = [cat_id for cat_id, obj_name in ref.ycbv.id2obj.items() if obj_name in self.objs] # map selected objs to [0, num_objs-1] self.cat2label = {v: i for i, v in enumerate(self.cat_ids)} # id_map self.label2cat = {label: cat for cat, label in self.cat2label.items()} self.obj2label = OrderedDict((obj, obj_id) for obj_id, obj in enumerate(self.objs)) ########################################################## def __call__(self): """Load light-weight instance annotations of all images into a list of dicts in Detectron2 format. Do not load heavy data into memory in this file, since we will load the annotations of all images into memory. """ # cache the dataset_dicts to avoid loading masks from files hashed_file_name = hashlib.md5( ( "".join([str(fn) for fn in self.objs]) + "dataset_dicts_{}_{}_{}_{}_{}".format( self.name, self.dataset_root, self.with_masks, self.with_depth, __name__, ) ).encode("utf-8") ).hexdigest() cache_path = osp.join( self.cache_dir, "dataset_dicts_{}_{}.pkl".format(self.name, hashed_file_name), ) if osp.exists(cache_path) and self.use_cache: logger.info("load cached dataset dicts from {}".format(cache_path)) return mmcv.load(cache_path) t_start = time.perf_counter() logger.info("loading dataset dicts: {}".format(self.name)) self.num_instances_without_valid_segmentation = 0 self.num_instances_without_valid_box = 0 dataset_dicts = [] # ###################################################### im_id_global = 0 if True: targets = mmcv.load(self.ann_file) scene_im_ids = [(item["scene_id"], item["im_id"]) for item in targets] scene_im_ids = sorted(list(set(scene_im_ids))) # load infos for each scene gt_dicts = {} gt_info_dicts = {} cam_dicts = {} for scene_id, im_id in scene_im_ids: scene_root = osp.join(self.dataset_root, f"{scene_id:06d}") if scene_id not in gt_dicts: gt_dicts[scene_id] = mmcv.load(osp.join(scene_root, "scene_gt.json")) if scene_id not in gt_info_dicts: gt_info_dicts[scene_id] = mmcv.load( 
osp.join(scene_root, "scene_gt_info.json") ) # bbox_obj, bbox_visib if scene_id not in cam_dicts: cam_dicts[scene_id] = mmcv.load(osp.join(scene_root, "scene_camera.json")) for scene_id, im_id in tqdm(scene_im_ids): str_im_id = str(im_id) scene_root = osp.join(self.dataset_root, f"{scene_id:06d}") rgb_path = osp.join(scene_root, "rgb/{:06d}.png").format(im_id) assert osp.exists(rgb_path), rgb_path depth_path = osp.join(scene_root, "depth/{:06d}.png".format(im_id)) scene_id = int(rgb_path.split("/")[-3]) cam = np.array(cam_dicts[scene_id][str_im_id]["cam_K"], dtype=np.float32).reshape(3, 3) depth_factor = 1000.0 / cam_dicts[scene_id][str_im_id]["depth_scale"] record = { "dataset_name": self.name, "file_name": osp.relpath(rgb_path, PROJ_ROOT), "depth_file": osp.relpath(depth_path, PROJ_ROOT), "depth_factor": depth_factor, "height": self.height, "width": self.width, "image_id": im_id_global, # unique image_id in the dataset, for coco evaluation "scene_im_id": "{}/{}".format(scene_id, im_id), # for evaluation "cam": cam, "img_type": "real", } im_id_global += 1 insts = [] for anno_i, anno in enumerate(gt_dicts[scene_id][str_im_id]): obj_id = anno["obj_id"] if ref.ycbv.id2obj[obj_id] not in self.select_objs: continue cur_label = self.cat2label[obj_id] # 0-based label R = np.array(anno["cam_R_m2c"], dtype="float32").reshape(3, 3) t = np.array(anno["cam_t_m2c"], dtype="float32") / 1000.0 pose = np.hstack([R, t.reshape(3, 1)]) quat = mat2quat(R).astype("float32") proj = (record["cam"] @ t.T).T proj = proj[:2] / proj[2] bbox_visib = gt_info_dicts[scene_id][str_im_id][anno_i]["bbox_visib"] bbox_obj = gt_info_dicts[scene_id][str_im_id][anno_i]["bbox_obj"] x1, y1, w, h = bbox_visib if self.filter_invalid: if h <= 1 or w <= 1: self.num_instances_without_valid_box += 1 continue mask_file = osp.join( scene_root, "mask/{:06d}_{:06d}.png".format(im_id, anno_i), ) mask_visib_file = osp.join( scene_root, "mask_visib/{:06d}_{:06d}.png".format(im_id, anno_i), ) assert 
osp.exists(mask_file), mask_file assert osp.exists(mask_visib_file), mask_visib_file # load mask visib mask_single = mmcv.imread(mask_visib_file, "unchanged") area = mask_single.sum() if area < 3: # filter out too small or nearly invisible instances self.num_instances_without_valid_segmentation += 1 continue mask_rle = binary_mask_to_rle(mask_single, compressed=True) # load mask full mask_full = mmcv.imread(mask_file, "unchanged") mask_full = mask_full.astype("bool") mask_full_rle = binary_mask_to_rle(mask_full, compressed=True) inst = { "category_id": cur_label, # 0-based label "bbox": bbox_visib, # TODO: load both bbox_obj and bbox_visib "bbox_mode": BoxMode.XYWH_ABS, "pose": pose, "quat": quat, "trans": t, "centroid_2d": proj, # absolute (cx, cy) "segmentation": mask_rle, "mask_full": mask_full_rle, # TODO: load as mask_full, rle } model_info = self.models_info[str(obj_id)] inst["model_info"] = model_info # TODO: using full mask and full xyz for key in ["bbox3d_and_center"]: inst[key] = self.models[cur_label][key] insts.append(inst) if len(insts) == 0: # filter im without anno continue record["annotations"] = insts dataset_dicts.append(record) if self.num_instances_without_valid_segmentation > 0: logger.warning( "Filtered out {} instances without valid segmentation. " "There might be issues in your dataset generation process.".format( self.num_instances_without_valid_segmentation ) ) if self.num_instances_without_valid_box > 0: logger.warning( "Filtered out {} instances without valid box. 
" "There might be issues in your dataset generation process.".format(self.num_instances_without_valid_box) ) ########################################################################## if self.num_to_load > 0: self.num_to_load = min(int(self.num_to_load), len(dataset_dicts)) dataset_dicts = dataset_dicts[: self.num_to_load] logger.info("loaded {} dataset dicts, using {}s".format(len(dataset_dicts), time.perf_counter() - t_start)) mmcv.mkdir_or_exist(osp.dirname(cache_path)) mmcv.dump(dataset_dicts, cache_path, protocol=4) logger.info("Dumped dataset_dicts to {}".format(cache_path)) return dataset_dicts @lazy_property def models_info(self): models_info_path = osp.join(self.models_root, "models_info.json") assert osp.exists(models_info_path), models_info_path models_info = mmcv.load(models_info_path) # key is str(obj_id) return models_info @lazy_property def models(self): """Load models into a list.""" cache_path = osp.join(self.models_root, f"models_{self.name}.pkl") if osp.exists(cache_path) and self.use_cache: # dprint("{}: load cached object models from {}".format(self.name, cache_path)) return mmcv.load(cache_path) models = [] for obj_name in self.objs: model = inout.load_ply( osp.join( self.models_root, f"obj_{ref.ycbv.obj2id[obj_name]:06d}.ply", ), vertex_scale=self.scale_to_meter, ) # NOTE: the bbox3d_and_center is not obtained from centered vertices # for BOP models, not a big problem since they had been centered model["bbox3d_and_center"] = misc.get_bbox3d_and_center(model["pts"]) models.append(model) logger.info("cache models to {}".format(cache_path)) mmcv.dump(models, cache_path, protocol=4) return models def image_aspect_ratio(self): return self.width / self.height # 4/3 ########### register datasets ############################################################ def get_ycbv_metadata(obj_names, ref_key): """task specific metadata.""" data_ref = ref.__dict__[ref_key] cur_sym_infos = {} # label based key loaded_models_info = data_ref.get_models_info() for i, 
obj_name in enumerate(obj_names): obj_id = data_ref.obj2id[obj_name] model_info = loaded_models_info[str(obj_id)] if "symmetries_discrete" in model_info or "symmetries_continuous" in model_info: sym_transforms = misc.get_symmetry_transformations(model_info, max_sym_disc_step=0.01) sym_info = np.array([sym["R"] for sym in sym_transforms], dtype=np.float32) else: sym_info = None cur_sym_infos[i] = sym_info meta = {"thing_classes": obj_names, "sym_infos": cur_sym_infos} return meta ################################################################################ SPLITS_YCBV = dict( ycbv_bop_test=dict( name="ycbv_bop_test", dataset_root=osp.join(DATASETS_ROOT, "BOP_DATASETS/ycbv/test"), models_root=osp.join(DATASETS_ROOT, "BOP_DATASETS/ycbv/models"), objs=ref.ycbv.objects, # selected objects ann_file=osp.join(DATASETS_ROOT, "BOP_DATASETS/ycbv/test_targets_bop19.json"), scale_to_meter=0.001, with_masks=True, # (load masks but may not use it) with_depth=True, # (load depth path here, but may not use it) height=480, width=640, cache_dir=osp.join(PROJ_ROOT, ".cache"), use_cache=True, num_to_load=-1, filter_invalid=False, ref_key="ycbv", ) ) # single objs (num_class is from all objs) for obj in ref.ycbv.objects: name = "ycbv_bop_{}_test".format(obj) select_objs = [obj] if name not in SPLITS_YCBV: SPLITS_YCBV[name] = dict( name=name, dataset_root=osp.join(DATASETS_ROOT, "BOP_DATASETS/ycbv/test"), models_root=osp.join(DATASETS_ROOT, "BOP_DATASETS/ycbv/models"), objs=ref.ycbv.objects, select_objs=select_objs, # selected objects ann_file=osp.join(DATASETS_ROOT, "BOP_DATASETS/ycbv/test_targets_bop19.json"), scale_to_meter=0.001, with_masks=True, # (load masks but may not use it) with_depth=True, # (load depth path here, but may not use it) height=480, width=640, cache_dir=osp.join(PROJ_ROOT, ".cache"), use_cache=True, num_to_load=-1, filter_invalid=False, ref_key="ycbv", ) def register_with_name_cfg(name, data_cfg=None): """Assume pre-defined datasets live in `./datasets`. 
    Args:
        name: dataset_name
        data_cfg: if name is in existing SPLITS, use pre-defined data_cfg;
            otherwise requires data_cfg.
            data_cfg can be set in cfg.DATA_CFG.name
    """
    dprint("register dataset: {}".format(name))
    # Prefer the pre-defined split config; fall back to the caller-supplied one.
    if name in SPLITS_YCBV:
        used_cfg = SPLITS_YCBV[name]
    else:
        assert data_cfg is not None, f"dataset name {name} is not registered"
        used_cfg = data_cfg
    DatasetCatalog.register(name, YCBV_BOP_TEST_Dataset(used_cfg))
    # something like eval_types
    MetadataCatalog.get(name).set(
        id="ycbv",  # NOTE: for pvnet to determine module
        ref_key=used_cfg["ref_key"],
        objs=used_cfg["objs"],
        eval_error_types=["ad", "rete", "proj"],
        evaluator_type="bop",
        **get_ycbv_metadata(obj_names=used_cfg["objs"], ref_key=used_cfg["ref_key"]),
    )


def get_available_datasets():
    """Return the names of all pre-defined ycbv splits."""
    return list(SPLITS_YCBV.keys())


#### tests ###############################################
def test_vis():
    """Visualize images and annotations of the dataset named on the command line."""
    dset_name = sys.argv[1]
    assert dset_name in DatasetCatalog.list()

    meta = MetadataCatalog.get(dset_name)
    dprint("MetadataCatalog: ", meta)
    objs = meta.objs

    t_start = time.perf_counter()
    dicts = DatasetCatalog.get(dset_name)
    logger.info("Done loading {} samples with {:.3f}s.".format(len(dicts), time.perf_counter() - t_start))

    dirname = "output/{}-data-vis".format(dset_name)
    os.makedirs(dirname, exist_ok=True)
    for d in dicts:
        img = read_image_mmcv(d["file_name"], format="BGR")
        # depth maps are stored in millimeters; convert to meters
        depth = mmcv.imread(d["depth_file"], "unchanged") / 1000.0

        imH, imW = img.shape[:2]
        annos = d["annotations"]
        masks = [cocosegm2mask(anno["segmentation"], imH, imW) for anno in annos]
        bboxes = [anno["bbox"] for anno in annos]
        bbox_modes = [anno["bbox_mode"] for anno in annos]
        bboxes_xyxy = np.array(
            [BoxMode.convert(box, box_mode, BoxMode.XYXY_ABS) for box, box_mode in zip(bboxes, bbox_modes)]
        )
        kpts_3d_list = [anno["bbox3d_and_center"] for anno in annos]
        quats = [anno["quat"] for anno in annos]
        transes = [anno["trans"] for anno in annos]
        Rs = [quat2mat(quat) for quat in quats]
        # 0-based label
        cat_ids = [anno["category_id"] for anno in annos]
        K = d["cam"]
        # project the 3D box corners + center into the image for each instance
        kpts_2d = [misc.project_pts(kpt3d, K, R, t) for kpt3d, R, t in zip(kpts_3d_list, Rs, transes)]
        # # TODO: visualize pose and keypoints
        labels = [objs[cat_id] for cat_id in cat_ids]
        # img_vis = vis_image_bboxes_cv2(img, bboxes=bboxes_xyxy, labels=labels)
        img_vis = vis_image_mask_bbox_cv2(img, masks, bboxes=bboxes_xyxy, labels=labels)
        img_vis_kpts2d = img.copy()
        for anno_i in range(len(annos)):
            img_vis_kpts2d = misc.draw_projected_box3d(img_vis_kpts2d, kpts_2d[anno_i])
        # BGR -> RGB for display
        grid_show(
            [
                img[:, :, [2, 1, 0]],
                img_vis[:, :, [2, 1, 0]],
                img_vis_kpts2d[:, :, [2, 1, 0]],
                depth,
            ],
            [f"img:{d['file_name']}", "vis_img", "img_vis_kpts2d", "depth"],
            row=2,
            col=2,
        )


if __name__ == "__main__":
    """Test the dataset loader.

    Usage:
        python -m core.datasets.ycbv_bop_test dataset_name
    """
    from lib.vis_utils.image import grid_show
    from lib.utils.setup_logger import setup_my_logger

    import detectron2.data.datasets  # noqa # add pre-defined metadata
    from core.utils.data_utils import read_image_mmcv
    from lib.vis_utils.image import vis_image_mask_bbox_cv2

    print("sys.argv:", sys.argv)
    logger = setup_my_logger(name="core")
    register_with_name_cfg(sys.argv[1])
    print("dataset catalog: ", DatasetCatalog.list())
    test_vis()
# Copyright 2020-present NAVER Corp. Under BSD 3-clause license """ OpenSfM to kapture import functions. """ import logging import os import os.path as path import numpy as np import quaternion import gzip import pickle import json from tqdm import tqdm from typing import Any, Dict, Optional, Tuple # kapture import kapture from kapture.io.csv import kapture_to_dir import kapture.io.features from kapture.io.records import TransferAction, import_record_data_from_dir_auto from kapture.io.structure import delete_existing_kapture_files logger = logging.getLogger('opensfm') """ opensfm_project/ ├── config.yaml ├── images/ ├── masks/ ├── gcp_list.txt ├── exif/ ├── camera_models.json ├── features/ ├── matches/ ├── tracks.csv ├── reconstruction.json ├── reconstruction.meshed.json └── undistorted/ ├── images/ ├── masks/ ├── tracks.csv ├── reconstruction.json └── depthmaps/ └── merged.ply """ """ reconstruction.json: [RECONSTRUCTION, ...] RECONSTRUCTION: { "cameras": { CAMERA_ID: CAMERA, ... }, "shots": { SHOT_ID: SHOT, ... }, "points": { POINT_ID: POINT, ... } } CAMERA: { "projection_type": "perspective", # Can be perspective, brown, fisheye or equirectangular "width": NUMBER, # Image width in pixels "height": NUMBER, # Image height in pixels # Depending on the projection type more parameters are stored. # These are the parameters of the perspective camera. 
"focal": NUMBER, # Estimated focal length "k1": NUMBER, # Estimated distortion coefficient "k2": NUMBER, # Estimated distortion coefficient } SHOT: { "camera": CAMERA_ID, "rotation": [X, Y, Z], # Estimated rotation as an angle-axis vector "translation": [X, Y, Z], # Estimated translation "gps_position": [X, Y, Z], # GPS coordinates in the reconstruction reference frame "gps_dop": METERS, # GPS accuracy in meters "orientation": NUMBER, # EXIF orientation tag (can be 1, 3, 6 or 8) "capture_time": SECONDS # Capture time as a UNIX timestamp } POINT: { "coordinates": [X, Y, Z], # Estimated position of the point "color": [R, G, B], # Color of the point } """ def import_camera( opensfm_camera: Dict[str, Any], name: Optional[str] = None ) -> kapture.Camera: """ Converts OpenSfM camera to kapture. :param opensfm_camera: openSfM camera definition in a dictionary :param name: camera name :return: kapture camera definition """ # opensfm_camera['projection_type'] can be perspective, brown, fisheye or equirectangular if 'perspective' == opensfm_camera['projection_type']: # convert to CameraType.RADIAL [w, h, f, cx, cy, k1, k2] # missing principal point, just fake it at image center largest_side_in_pixel = float(max(opensfm_camera['width'], opensfm_camera['height'])) camera_params = [ # w, h: opensfm_camera['width'], opensfm_camera['height'], # f: The focal length provided by the EXIF metadata divided by the sensor width opensfm_camera['focal'] * largest_side_in_pixel, # cx, cy: no principal point, guess one at image center opensfm_camera['width'] / 2, opensfm_camera['height'] / 2, # k1, k2 opensfm_camera.get('k1', 0.0), opensfm_camera.get('k2', 0.0), ] return kapture.Camera(kapture.CameraType.RADIAL, camera_params, name) else: raise ValueError(f'unable to convert camera of type {opensfm_camera['projection_type']}') def _import_gnss(opensfm_root_dir, kapture_sensors, image_sensors, image_timestamps, disable_tqdm) \ -> Optional[kapture.RecordsGnss]: """ Imports the GNSS info from 
the images exif. """ # gps from pre-extracted exif, in exif/image_name.jpg.exif kapture_gnss = None opensfm_exif_dir_path = path.join(opensfm_root_dir, 'exif') opensfm_exif_suffix = '.exif' if path.isdir(opensfm_exif_dir_path): logger.info('importing GNSS from exif ...') camera_ids = set(image_sensors.values()) # add a gps sensor for each camera map_cam_to_gnss_sensor = {cam_id: 'GPS_' + cam_id for cam_id in camera_ids} for gnss_id in map_cam_to_gnss_sensor.values(): kapture_sensors[gnss_id] = kapture.Sensor(kapture.SensorType.gnss.name, ['EPSG:4326']) # build epsg_code for all cameras kapture_gnss = kapture.RecordsGnss() opensfm_exif_filepath_list = (path.join(dir_path, filename) for dir_path, _, filename_list in os.walk(opensfm_exif_dir_path) for filename in filename_list if filename.endswith(opensfm_exif_suffix)) for opensfm_exif_filepath in tqdm(opensfm_exif_filepath_list, disable=disable_tqdm): image_filename = path.relpath(opensfm_exif_filepath, opensfm_exif_dir_path)[:-len(opensfm_exif_suffix)] image_timestamp = image_timestamps[image_filename] image_sensor_id = image_sensors[image_filename] gnss_timestamp = image_timestamp gnss_sensor_id = map_cam_to_gnss_sensor[image_sensor_id] with open(opensfm_exif_filepath, 'rt') as f: js_root = json.load(f) if 'gps' not in js_root: logger.warning(f'NO GPS data in "{opensfm_exif_filepath}"') continue gps_coords = { 'x': js_root['gps']['longitude'], 'y': js_root['gps']['latitude'], 'z': js_root['gps'].get('altitude', 0.0), 'dop': js_root['gps'].get('dop', 0), 'utc': 0, } logger.debug(f'found GPS data for ({gnss_timestamp}, {gnss_sensor_id}) in "{opensfm_exif_filepath}"') kapture_gnss[gnss_timestamp, gnss_sensor_id] = kapture.RecordGnss(**gps_coords) return kapture_gnss def _import_features_and_matches(opensfm_root_dir, kapture_root_dir, disable_tqdm, keypoints_type: str = 'HessianAffine', descriptors_type: str = 'HOG')\ -> Tuple[Optional[kapture.Descriptors], Optional[kapture.Keypoints], Optional[kapture.Matches]]: # 
import features (keypoints + descriptors) kapture_keypoints = None # kapture.Keypoints(type_name='opensfm', dsize=4, dtype=np.float64) kapture_descriptors = None # kapture.Descriptors(type_name='opensfm', dsize=128, dtype=np.uint8) opensfm_features_dir_path = path.join(opensfm_root_dir, 'features') opensfm_features_suffix = '.features.npz' if path.isdir(opensfm_features_dir_path): logger.info('importing keypoints and descriptors ...') opensfm_features_file_list = (path.join(dp, fn) for dp, _, fs in os.walk(opensfm_features_dir_path) for fn in fs) opensfm_features_file_list = (filepath for filepath in opensfm_features_file_list if filepath.endswith(opensfm_features_suffix)) for opensfm_feature_filename in tqdm(opensfm_features_file_list, disable=disable_tqdm): image_filename = path.relpath(opensfm_feature_filename, opensfm_features_dir_path)[ :-len(opensfm_features_suffix)] opensfm_image_features = np.load(opensfm_feature_filename) opensfm_image_keypoints = opensfm_image_features['points'] opensfm_image_descriptors = opensfm_image_features['descriptors'] logger.debug(f'parsing keypoints and descriptors in {opensfm_feature_filename}') if kapture_keypoints is None: # print(type(opensfm_image_keypoints.dtype)) # HAHOG = Hessian Affine feature point detector + HOG descriptor kapture_keypoints = kapture.Keypoints( type_name=keypoints_type, dsize=opensfm_image_keypoints.shape[1], dtype=opensfm_image_keypoints.dtype) if kapture_descriptors is None: kapture_descriptors = kapture.Descriptors( type_name=descriptors_type, dsize=opensfm_image_descriptors.shape[1], dtype=opensfm_image_descriptors.dtype, keypoints_type=keypoints_type, metric_type='L2') # convert keypoints file keypoint_file_path = kapture.io.features.get_features_fullpath( data_type=kapture.Keypoints, feature_type=keypoints_type, kapture_dirpath=kapture_root_dir, image_filename=image_filename) kapture.io.features.image_keypoints_to_file(keypoint_file_path, opensfm_image_keypoints) # register the file 
kapture_keypoints.add(image_filename) # convert descriptors file descriptor_file_path = kapture.io.features.get_features_fullpath( data_type=kapture.Descriptors, feature_type=descriptors_type, kapture_dirpath=kapture_root_dir, image_filename=image_filename) kapture.io.features.image_descriptors_to_file( filepath=descriptor_file_path, image_descriptors=opensfm_image_descriptors) # register the file kapture_descriptors.add(image_filename) # import matches kapture_matches = kapture.Matches() opensfm_matches_suffix = '_matches.pkl.gz' opensfm_matches_dir_path = path.join(opensfm_root_dir, 'matches') if path.isdir(opensfm_matches_dir_path): logger.info('importing matches ...') opensfm_matches_file_list = (path.join(dp, fn) for dp, _, fs in os.walk(opensfm_matches_dir_path) for fn in fs) opensfm_matches_file_list = (filepath for filepath in opensfm_matches_file_list if filepath.endswith(opensfm_matches_suffix)) for opensfm_matches_filename in tqdm(opensfm_matches_file_list, disable=disable_tqdm): image_filename_1 = path.relpath(opensfm_matches_filename, opensfm_matches_dir_path)[ :-len(opensfm_matches_suffix)] logger.debug(f'parsing matches in {image_filename_1}') with gzip.open(opensfm_matches_filename, 'rb') as f: opensfm_matches = pickle.load(f) for image_filename_2, opensfm_image_matches in opensfm_matches.items(): image_pair = (image_filename_1, image_filename_2) # register the pair to kapture kapture_matches.add(*image_pair) # convert the bin file to kapture kapture_matches_filepath = kapture.io.features.get_matches_fullpath( image_filename_pair=image_pair, keypoints_type=keypoints_type, kapture_dirpath=kapture_root_dir) kapture_image_matches = np.hstack([ opensfm_image_matches.astype(np.float64), # no matches scoring = assume all to one np.ones(shape=(opensfm_image_matches.shape[0], 1), dtype=np.float64)]) kapture.io.features.image_matches_to_file(kapture_matches_filepath, kapture_image_matches) return kapture_descriptors, kapture_keypoints, kapture_matches def 
import_opensfm(opensfm_root_dir: str, kapture_root_dir: str, force_overwrite_existing: bool = False, images_import_method: TransferAction = TransferAction.copy, keypoints_type: str = 'HessianAffine', descriptors_type: str = 'HOG'): """ Convert an openSfM structure to a kapture on disk. Also copy, move or link the images files if necessary. :param opensfm_root_dir: the openSfM top directory :param kapture_root_dir: top directory of kapture created :param force_overwrite_existing: if true, will remove existing kapture data without prompting the user :param images_import_method: action to apply on images: link, copy, move or do nothing. :param keypoints_type: keypoints type :param descriptors_type: descriptors type :return: the constructed kapture object """ disable_tqdm = logger.getEffectiveLevel() != logging.INFO # load reconstruction opensfm_reconstruction_filepath = path.join(opensfm_root_dir, 'reconstruction.json') with open(opensfm_reconstruction_filepath, 'rt') as f: opensfm_reconstruction = json.load(f) # remove the single list @ root opensfm_reconstruction = opensfm_reconstruction[0] # prepare space for output os.makedirs(kapture_root_dir, exist_ok=True) delete_existing_kapture_files(kapture_root_dir, force_erase=force_overwrite_existing) # import cameras kapture_sensors = kapture.Sensors() assert 'cameras' in opensfm_reconstruction # import cameras for osfm_camera_id, osfm_camera in opensfm_reconstruction['cameras'].items(): camera = import_camera(osfm_camera, name=osfm_camera_id) kapture_sensors[osfm_camera_id] = camera # import shots logger.info('importing images and trajectories ...') kapture_images = kapture.RecordsCamera() kapture_trajectories = kapture.Trajectories() opensfm_image_dir_path = path.join(opensfm_root_dir, 'images') assert 'shots' in opensfm_reconstruction image_timestamps, image_sensors = {}, {} # used later to retrieve the timestamp of an image. 
for timestamp, (image_filename, shot) in enumerate(opensfm_reconstruction['shots'].items()): sensor_id = shot['camera'] image_timestamps[image_filename] = timestamp image_sensors[image_filename] = sensor_id # in OpenSfm, (sensor, timestamp) is not unique. rotation_vector = shot['rotation'] q = quaternion.from_rotation_vector(rotation_vector) translation = shot['translation'] # capture_time = shot['capture_time'] # may be invalid # gps_position = shot['gps_position'] kapture_images[timestamp, sensor_id] = image_filename kapture_trajectories[timestamp, sensor_id] = kapture.PoseTransform(r=q, t=translation) # copy image files filename_list = [f for _, _, f in kapture.flatten(kapture_images)] import_record_data_from_dir_auto( source_record_dirpath=opensfm_image_dir_path, destination_kapture_dirpath=kapture_root_dir, filename_list=filename_list, copy_strategy=images_import_method) # Imports Gnss kapture_gnss = _import_gnss(opensfm_root_dir, kapture_sensors, image_sensors, image_timestamps, disable_tqdm) # Imports descriptors, keypoints and matches kapture_descriptors, kapture_keypoints, kapture_matches = _import_features_and_matches(opensfm_root_dir, kapture_root_dir, disable_tqdm, keypoints_type, descriptors_type) # import 3-D points if 'points' in opensfm_reconstruction: logger.info('importing points 3-D') opensfm_points = opensfm_reconstruction['points'] points_data = [] for point_id in sorted(opensfm_points): point_data = opensfm_points[point_id] point_data = point_data['coordinates'] + point_data['color'] points_data.append(point_data) kapture_points = kapture.Points3d(points_data) else: kapture_points = None # saving kapture csv files logger.info('saving kapture files') kapture_data = kapture.Kapture( sensors=kapture_sensors, records_camera=kapture_images, records_gnss=kapture_gnss, trajectories=kapture_trajectories, keypoints={keypoints_type: kapture_keypoints} if kapture_keypoints is not None else None, descriptors={descriptors_type: kapture_descriptors} if 
kapture_descriptors is not None else None, matches={keypoints_type: kapture_matches} if kapture_matches is not None else None, points3d=kapture_points ) kapture_to_dir(kapture_root_dir, kapture_data)
# Copyright 2020-present NAVER Corp. Under BSD 3-clause license

"""
OpenSfM to kapture import functions.
"""

import logging
import os
import os.path as path
import numpy as np
import quaternion
import gzip
import pickle
import json
from tqdm import tqdm
from typing import Any, Dict, Optional, Tuple
# kapture
import kapture
from kapture.io.csv import kapture_to_dir
import kapture.io.features
from kapture.io.records import TransferAction, import_record_data_from_dir_auto
from kapture.io.structure import delete_existing_kapture_files

logger = logging.getLogger('opensfm')

# Expected layout of an openSfM project directory (for reference):
"""
opensfm_project/
├── config.yaml
├── images/
├── masks/
├── gcp_list.txt
├── exif/
├── camera_models.json
├── features/
├── matches/
├── tracks.csv
├── reconstruction.json
├── reconstruction.meshed.json
└── undistorted/
    ├── images/
    ├── masks/
    ├── tracks.csv
    ├── reconstruction.json
    └── depthmaps/
        └── merged.ply
"""

# Schema of reconstruction.json (for reference):
"""
reconstruction.json: [RECONSTRUCTION, ...]

RECONSTRUCTION: {
    "cameras": {
        CAMERA_ID: CAMERA,
        ...
    },
    "shots": {
        SHOT_ID: SHOT,
        ...
    },
    "points": {
        POINT_ID: POINT,
        ...
    }
}

CAMERA: {
    "projection_type": "perspective",  # Can be perspective, brown, fisheye or equirectangular
    "width": NUMBER,                   # Image width in pixels
    "height": NUMBER,                  # Image height in pixels

    # Depending on the projection type more parameters are stored.
    # These are the parameters of the perspective camera.
    "focal": NUMBER,                   # Estimated focal length
    "k1": NUMBER,                      # Estimated distortion coefficient
    "k2": NUMBER,                      # Estimated distortion coefficient
}

SHOT: {
    "camera": CAMERA_ID,
    "rotation": [X, Y, Z],             # Estimated rotation as an angle-axis vector
    "translation": [X, Y, Z],          # Estimated translation
    "gps_position": [X, Y, Z],         # GPS coordinates in the reconstruction reference frame
    "gps_dop": METERS,                 # GPS accuracy in meters
    "orientation": NUMBER,             # EXIF orientation tag (can be 1, 3, 6 or 8)
    "capture_time": SECONDS            # Capture time as a UNIX timestamp
}

POINT: {
    "coordinates": [X, Y, Z],          # Estimated position of the point
    "color": [R, G, B],                # Color of the point
}
"""


def import_camera(
        opensfm_camera: Dict[str, Any],
        name: Optional[str] = None
) -> kapture.Camera:
    """
    Converts OpenSfM camera to kapture.

    :param opensfm_camera: openSfM camera definition in a dictionary
    :param name: camera name
    :return: kapture camera definition
    """
    # opensfm_camera['projection_type'] can be perspective, brown, fisheye or equirectangular
    if 'perspective' == opensfm_camera['projection_type']:
        # convert to CameraType.RADIAL [w, h, f, cx, cy, k1, k2]
        # missing principal point, just fake it at image center
        largest_side_in_pixel = float(max(opensfm_camera['width'], opensfm_camera['height']))
        camera_params = [
            # w, h:
            opensfm_camera['width'], opensfm_camera['height'],
            # f: The focal length provided by the EXIF metadata divided by the sensor width
            opensfm_camera['focal'] * largest_side_in_pixel,
            # cx, cy: no principal point, guess one at image center
            opensfm_camera['width'] / 2, opensfm_camera['height'] / 2,
            # k1, k2
            opensfm_camera.get('k1', 0.0),
            opensfm_camera.get('k2', 0.0),
        ]
        return kapture.Camera(kapture.CameraType.RADIAL, camera_params, name)
    else:
        raise ValueError(f'unable to convert camera of type {opensfm_camera["projection_type"]}')


def _import_gnss(opensfm_root_dir, kapture_sensors, image_sensors, image_timestamps, disable_tqdm) \
        -> Optional[kapture.RecordsGnss]:
    """
    Imports the GNSS info from the images exif.

    :param opensfm_root_dir: the openSfM top directory
    :param kapture_sensors: kapture sensors collection, updated in place (one GPS sensor added per camera)
    :param image_sensors: mapping image filename -> camera id
    :param image_timestamps: mapping image filename -> timestamp
    :param disable_tqdm: if True, do not display the progress bar
    :return: the GNSS records, or None when no exif directory exists
    """
    # gps from pre-extracted exif, in exif/image_name.jpg.exif
    kapture_gnss = None
    opensfm_exif_dir_path = path.join(opensfm_root_dir, 'exif')
    opensfm_exif_suffix = '.exif'
    if path.isdir(opensfm_exif_dir_path):
        logger.info('importing GNSS from exif ...')
        camera_ids = set(image_sensors.values())
        # add a gps sensor for each camera
        map_cam_to_gnss_sensor = {cam_id: 'GPS_' + cam_id for cam_id in camera_ids}
        for gnss_id in map_cam_to_gnss_sensor.values():
            kapture_sensors[gnss_id] = kapture.Sensor(kapture.SensorType.gnss.name, ['EPSG:4326'])
        # build epsg_code for all cameras
        kapture_gnss = kapture.RecordsGnss()
        opensfm_exif_filepath_list = (path.join(dir_path, filename)
                                      for dir_path, _, filename_list in os.walk(opensfm_exif_dir_path)
                                      for filename in filename_list
                                      if filename.endswith(opensfm_exif_suffix))
        for opensfm_exif_filepath in tqdm(opensfm_exif_filepath_list, disable=disable_tqdm):
            image_filename = path.relpath(opensfm_exif_filepath, opensfm_exif_dir_path)[:-len(opensfm_exif_suffix)]
            image_timestamp = image_timestamps[image_filename]
            image_sensor_id = image_sensors[image_filename]
            gnss_timestamp = image_timestamp
            gnss_sensor_id = map_cam_to_gnss_sensor[image_sensor_id]
            with open(opensfm_exif_filepath, 'rt') as f:
                js_root = json.load(f)
                if 'gps' not in js_root:
                    logger.warning(f'NO GPS data in "{opensfm_exif_filepath}"')
                    continue
                gps_coords = {
                    'x': js_root['gps']['longitude'],
                    'y': js_root['gps']['latitude'],
                    'z': js_root['gps'].get('altitude', 0.0),
                    'dop': js_root['gps'].get('dop', 0),
                    'utc': 0,
                }
                logger.debug(f'found GPS data for ({gnss_timestamp}, {gnss_sensor_id}) in "{opensfm_exif_filepath}"')
                kapture_gnss[gnss_timestamp, gnss_sensor_id] = kapture.RecordGnss(**gps_coords)
    return kapture_gnss


def _import_features_and_matches(opensfm_root_dir, kapture_root_dir, disable_tqdm,
                                 keypoints_type: str = 'HessianAffine',
                                 descriptors_type: str = 'HOG')\
        -> Tuple[Optional[kapture.Descriptors], Optional[kapture.Keypoints], Optional[kapture.Matches]]:
    """
    Imports openSfM features (keypoints + descriptors) and matches into the kapture directory.

    :param opensfm_root_dir: the openSfM top directory
    :param kapture_root_dir: top directory of the kapture being created
    :param disable_tqdm: if True, do not display the progress bar
    :param keypoints_type: keypoints type
    :param descriptors_type: descriptors type
    :return: (descriptors, keypoints, matches); descriptors/keypoints are None when no features dir exists
    """
    # import features (keypoints + descriptors)
    kapture_keypoints = None  # kapture.Keypoints(type_name='opensfm', dsize=4, dtype=np.float64)
    kapture_descriptors = None  # kapture.Descriptors(type_name='opensfm', dsize=128, dtype=np.uint8)
    opensfm_features_dir_path = path.join(opensfm_root_dir, 'features')
    opensfm_features_suffix = '.features.npz'
    if path.isdir(opensfm_features_dir_path):
        logger.info('importing keypoints and descriptors ...')
        opensfm_features_file_list = (path.join(dp, fn)
                                      for dp, _, fs in os.walk(opensfm_features_dir_path)
                                      for fn in fs)
        opensfm_features_file_list = (filepath
                                      for filepath in opensfm_features_file_list
                                      if filepath.endswith(opensfm_features_suffix))
        for opensfm_feature_filename in tqdm(opensfm_features_file_list, disable=disable_tqdm):
            image_filename = path.relpath(opensfm_feature_filename, opensfm_features_dir_path)[
                             :-len(opensfm_features_suffix)]
            opensfm_image_features = np.load(opensfm_feature_filename)
            opensfm_image_keypoints = opensfm_image_features['points']
            opensfm_image_descriptors = opensfm_image_features['descriptors']
            logger.debug(f'parsing keypoints and descriptors in {opensfm_feature_filename}')
            if kapture_keypoints is None:
                # print(type(opensfm_image_keypoints.dtype))
                # HAHOG = Hessian Affine feature point detector + HOG descriptor
                kapture_keypoints = kapture.Keypoints(
                    type_name=keypoints_type,
                    dsize=opensfm_image_keypoints.shape[1],
                    dtype=opensfm_image_keypoints.dtype)
            if kapture_descriptors is None:
                kapture_descriptors = kapture.Descriptors(
                    type_name=descriptors_type,
                    dsize=opensfm_image_descriptors.shape[1],
                    dtype=opensfm_image_descriptors.dtype,
                    keypoints_type=keypoints_type,
                    metric_type='L2')
            # convert keypoints file
            keypoint_file_path = kapture.io.features.get_features_fullpath(
                data_type=kapture.Keypoints,
                feature_type=keypoints_type,
                kapture_dirpath=kapture_root_dir,
                image_filename=image_filename)
            kapture.io.features.image_keypoints_to_file(keypoint_file_path, opensfm_image_keypoints)
            # register the file
            kapture_keypoints.add(image_filename)
            # convert descriptors file
            descriptor_file_path = kapture.io.features.get_features_fullpath(
                data_type=kapture.Descriptors,
                feature_type=descriptors_type,
                kapture_dirpath=kapture_root_dir,
                image_filename=image_filename)
            kapture.io.features.image_descriptors_to_file(
                filepath=descriptor_file_path, image_descriptors=opensfm_image_descriptors)
            # register the file
            kapture_descriptors.add(image_filename)

    # import matches
    kapture_matches = kapture.Matches()
    opensfm_matches_suffix = '_matches.pkl.gz'
    opensfm_matches_dir_path = path.join(opensfm_root_dir, 'matches')
    if path.isdir(opensfm_matches_dir_path):
        logger.info('importing matches ...')
        opensfm_matches_file_list = (path.join(dp, fn)
                                     for dp, _, fs in os.walk(opensfm_matches_dir_path)
                                     for fn in fs)
        opensfm_matches_file_list = (filepath
                                     for filepath in opensfm_matches_file_list
                                     if filepath.endswith(opensfm_matches_suffix))
        for opensfm_matches_filename in tqdm(opensfm_matches_file_list, disable=disable_tqdm):
            image_filename_1 = path.relpath(opensfm_matches_filename, opensfm_matches_dir_path)[
                               :-len(opensfm_matches_suffix)]
            logger.debug(f'parsing matches in {image_filename_1}')
            with gzip.open(opensfm_matches_filename, 'rb') as f:
                opensfm_matches = pickle.load(f)
                for image_filename_2, opensfm_image_matches in opensfm_matches.items():
                    image_pair = (image_filename_1, image_filename_2)
                    # register the pair to kapture
                    kapture_matches.add(*image_pair)
                    # convert the bin file to kapture
                    kapture_matches_filepath = kapture.io.features.get_matches_fullpath(
                        image_filename_pair=image_pair,
                        keypoints_type=keypoints_type,
                        kapture_dirpath=kapture_root_dir)
                    kapture_image_matches = np.hstack([
                        opensfm_image_matches.astype(np.float64),
                        # no matches scoring = assume all to one
                        np.ones(shape=(opensfm_image_matches.shape[0], 1), dtype=np.float64)])
                    kapture.io.features.image_matches_to_file(kapture_matches_filepath, kapture_image_matches)
    return kapture_descriptors, kapture_keypoints, kapture_matches


def import_opensfm(opensfm_root_dir: str,
                   kapture_root_dir: str,
                   force_overwrite_existing: bool = False,
                   images_import_method: TransferAction = TransferAction.copy,
                   keypoints_type: str = 'HessianAffine',
                   descriptors_type: str = 'HOG'):
    """
    Convert an openSfM structure to a kapture on disk. Also copy, move or link the images files if necessary.

    :param opensfm_root_dir: the openSfM top directory
    :param kapture_root_dir: top directory of kapture created
    :param force_overwrite_existing: if true, will remove existing kapture data without prompting the user
    :param images_import_method: action to apply on images: link, copy, move or do nothing.
    :param keypoints_type: keypoints type
    :param descriptors_type: descriptors type
    :return: the constructed kapture object
    """
    disable_tqdm = logger.getEffectiveLevel() != logging.INFO
    # load reconstruction
    opensfm_reconstruction_filepath = path.join(opensfm_root_dir, 'reconstruction.json')
    with open(opensfm_reconstruction_filepath, 'rt') as f:
        opensfm_reconstruction = json.load(f)
    # remove the single list @ root
    opensfm_reconstruction = opensfm_reconstruction[0]

    # prepare space for output
    os.makedirs(kapture_root_dir, exist_ok=True)
    delete_existing_kapture_files(kapture_root_dir, force_erase=force_overwrite_existing)

    # import cameras
    kapture_sensors = kapture.Sensors()
    assert 'cameras' in opensfm_reconstruction
    # import cameras
    for osfm_camera_id, osfm_camera in opensfm_reconstruction['cameras'].items():
        camera = import_camera(osfm_camera, name=osfm_camera_id)
        kapture_sensors[osfm_camera_id] = camera

    # import shots
    logger.info('importing images and trajectories ...')
    kapture_images = kapture.RecordsCamera()
    kapture_trajectories = kapture.Trajectories()
    opensfm_image_dir_path = path.join(opensfm_root_dir, 'images')
    assert 'shots' in opensfm_reconstruction
    image_timestamps, image_sensors = {}, {}  # used later to retrieve the timestamp of an image.
    for timestamp, (image_filename, shot) in enumerate(opensfm_reconstruction['shots'].items()):
        sensor_id = shot['camera']
        image_timestamps[image_filename] = timestamp
        image_sensors[image_filename] = sensor_id
        # in OpenSfm, (sensor, timestamp) is not unique.
        rotation_vector = shot['rotation']
        q = quaternion.from_rotation_vector(rotation_vector)
        translation = shot['translation']
        # capture_time = shot['capture_time']  # may be invalid
        # gps_position = shot['gps_position']
        kapture_images[timestamp, sensor_id] = image_filename
        kapture_trajectories[timestamp, sensor_id] = kapture.PoseTransform(r=q, t=translation)

    # copy image files
    filename_list = [f for _, _, f in kapture.flatten(kapture_images)]
    import_record_data_from_dir_auto(
        source_record_dirpath=opensfm_image_dir_path,
        destination_kapture_dirpath=kapture_root_dir,
        filename_list=filename_list,
        copy_strategy=images_import_method)

    # Imports Gnss
    kapture_gnss = _import_gnss(opensfm_root_dir, kapture_sensors, image_sensors,
                                image_timestamps, disable_tqdm)
    # Imports descriptors, keypoints and matches
    kapture_descriptors, kapture_keypoints, kapture_matches = _import_features_and_matches(opensfm_root_dir,
                                                                                           kapture_root_dir,
                                                                                           disable_tqdm,
                                                                                           keypoints_type,
                                                                                           descriptors_type)

    # import 3-D points
    if 'points' in opensfm_reconstruction:
        logger.info('importing points 3-D')
        opensfm_points = opensfm_reconstruction['points']
        points_data = []
        for point_id in sorted(opensfm_points):
            point_data = opensfm_points[point_id]
            point_data = point_data['coordinates'] + point_data['color']
            points_data.append(point_data)
        kapture_points = kapture.Points3d(points_data)
    else:
        kapture_points = None

    # saving kapture csv files
    logger.info('saving kapture files')
    kapture_data = kapture.Kapture(
        sensors=kapture_sensors,
        records_camera=kapture_images,
        records_gnss=kapture_gnss,
        trajectories=kapture_trajectories,
        keypoints={keypoints_type: kapture_keypoints} if kapture_keypoints is not None else None,
        descriptors={descriptors_type: kapture_descriptors} if kapture_descriptors is not None else None,
        matches={keypoints_type: kapture_matches} if kapture_matches is not None else None,
        points3d=kapture_points)
    kapture_to_dir(kapture_root_dir, kapture_data)
# -*- coding: utf-8 -*- # ------------------------------------------------------------------------------ # # Copyright 2018-2019 Fetch.AI Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------------ """This module contains the protocol generator.""" import itertools import os import shutil # pylint: skip-file from datetime import date from pathlib import Path from typing import Optional, Tuple # pylint: skip-file from aea.__version__ import __version__ as __aea_version__ from aea.configurations.base import ProtocolSpecificationParseError from aea.configurations.constants import ( PROTOCOL_LANGUAGE_PYTHON, SUPPORTED_PROTOCOL_LANGUAGES, ) from aea.configurations.data_types import PublicId from aea.protocols.generator.common import ( CUSTOM_TYPES_DOT_PY_FILE_NAME, DIALOGUE_DOT_PY_FILE_NAME, INIT_FILE_NAME, MESSAGE_DOT_PY_FILE_NAME, MESSAGE_IMPORT, PATH_TO_PACKAGES, PROTOCOL_YAML_FILE_NAME, PYTHON_TYPE_TO_PROTO_TYPE, SERIALIZATION_DOT_PY_FILE_NAME, SERIALIZER_IMPORT, _camel_case_to_snake_case, _create_protocol_file, _get_sub_types_of_compositional_types, _includes_custom_type, _python_pt_or_ct_type_to_proto_type, _to_camel_case, _union_sub_type_to_protobuf_variable_name, apply_protolint, check_prerequisites, compile_protobuf_using_protoc, get_protoc_version, load_protocol_specification, try_run_black_formatting, try_run_isort_formatting, ) from aea.protocols.generator.extract_specification import extract 
from aea.protocols.generator.validate import validate PYLINT_DISABLE_SERIALIZATION_PY = [ "too-many-statements", "too-many-locals", "no-member", "too-few-public-methods", "redefined-builtin", ] PYLINT_DISABLE_MESSAGE_PY = [ "too-many-statements", "too-many-locals", "no-member", "too-few-public-methods", "too-many-branches", "not-an-iterable", "unidiomatic-typecheck", "unsubscriptable-object", ] def _type_check(variable_name: str, variable_type: str) -> str: """ Return the type check Python instruction. If variable_type == int: type(variable_name) == int else: isinstance(variable_name, variable_type) :param variable_name: the variable name. :param variable_type: the variable type. :return: the Python instruction to check the type, in string form. """ if variable_type != "int": return f"isinstance({variable_name}, {variable_type})" else: return f"type({variable_name}) is {variable_type}" def _copyright_header_str(author: str) -> str: """ Produce the copyright header text for a protocol. :param author: the author of the protocol. :return: The copyright header text. 
""" copy_right_str = ( "# -*- coding: utf-8 -*-\n" "# ------------------------------------------------------------------------------\n" "#\n" ) copy_right_str += "# Copyright {} {}\n".format(date.today().year, author) copy_right_str += ( "#\n" '# Licensed under the Apache License, Version 2.0 (the "License");\n' "# you may not use this file except in compliance with the License.\n" "# You may obtain a copy of the License at\n" "#\n" "# http://www.apache.org/licenses/LICENSE-2.0\n" "#\n" "# Unless required by applicable law or agreed to in writing, software\n" '# distributed under the License is distributed on an "AS IS" BASIS,\n' "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" "# See the License for the specific language governing permissions and\n" "# limitations under the License.\n" "#\n" "# ------------------------------------------------------------------------------\n" ) return copy_right_str class ProtocolGenerator: """This class generates a protocol_verification package from a ProtocolTemplate object.""" def __init__( self, path_to_protocol_specification: str, output_path: str = ".", dotted_path_to_protocol_package: Optional[str] = None, ) -> None: """ Instantiate a protocol generator. :param path_to_protocol_specification: path to protocol specification file :param output_path: the path to the location in which the protocol module is to be generated. 
:param dotted_path_to_protocol_package: the path to the protocol package :raises FileNotFoundError if any prerequisite application is not installed :raises yaml.YAMLError if yaml parser encounters an error condition :raises ProtocolSpecificationParseError if specification fails generator's validation """ # Check the prerequisite applications are installed try: check_prerequisites() except FileNotFoundError: raise self.protoc_version = get_protoc_version() # Load protocol specification yaml file self.protocol_specification = load_protocol_specification( path_to_protocol_specification ) # Validate the specification result_bool, result_msg = validate(self.protocol_specification) if not result_bool: raise ProtocolSpecificationParseError(result_msg) # Extract specification fields self.spec = extract(self.protocol_specification) # Helper fields self.path_to_protocol_specification = path_to_protocol_specification self.protocol_specification_in_camel_case = _to_camel_case( self.protocol_specification.name ) self.path_to_generated_protocol_package = os.path.join( output_path, self.protocol_specification.name ) self.dotted_path_to_protocol_package = ( dotted_path_to_protocol_package + self.protocol_specification.name if dotted_path_to_protocol_package is not None else "{}.{}.protocols.{}".format( PATH_TO_PACKAGES, self.protocol_specification.author, self.protocol_specification.name, ) ) self.indent = "" def _change_indent(self, number: int, mode: str = None) -> None: """ Update the value of 'indent' global variable. This function controls the indentation of the code produced throughout the generator. There are two modes: - Setting the indent to a desired 'number' level. In this case, 'mode' has to be set to "s". - Updating the incrementing/decrementing the indentation level by 'number' amounts. In this case 'mode' is None. 
:param number: the number of indentation levels to set/increment/decrement :param mode: the mode of indentation change """ if mode and mode == "s": if number >= 0: self.indent = number * " " else: raise ValueError("Error: setting indent to be a negative number.") else: if number >= 0: for _ in itertools.repeat(None, number): self.indent += " " else: if abs(number) <= len(self.indent) / 4: self.indent = self.indent[abs(number) * 4 :] else: raise ValueError( "Not enough spaces in the 'indent' variable to remove." ) def _import_from_typing_module(self) -> str: """ Manage import statement for the typing package. :return: import statement for the typing package """ ordered_packages = [ "Dict", "FrozenSet", "Optional", "Set", "Tuple", "Union", "cast", ] import_str = "from typing import Any, " for package in ordered_packages: if self.spec.typing_imports[package]: import_str += "{}, ".format(package) import_str = import_str[:-2] return import_str def _import_from_custom_types_module(self) -> str: """ Manage import statement from custom_types module. :return: import statement for the custom_types module """ import_str = "" if len(self.spec.all_custom_types) == 0: pass else: for custom_class in self.spec.all_custom_types: import_str += "from {}.custom_types import {} as Custom{}\n".format( self.dotted_path_to_protocol_package, custom_class, custom_class, ) import_str = import_str[:-1] return import_str def _performatives_str(self) -> str: """ Generate the performatives instance property string, a set containing all valid performatives of this protocol. :return: the performatives set string """ performatives_str = "{" for performative in self.spec.all_performatives: performatives_str += '"{}", '.format(performative) performatives_str = performatives_str[:-2] performatives_str += "}" return performatives_str def _performatives_enum_str(self) -> str: """ Generate the performatives Enum class. 
def _to_custom_custom(self, content_type: str) -> str:
    """
    Rewrite custom type names inside a (possibly compositional) content type.

    Each custom type occurring in ``content_type`` is replaced by its
    mapped name from ``self.spec.custom_custom_types`` (e.g. ``DataModel``
    -> ``CustomDataModel``); non-custom types pass through unchanged.

    :param content_name: n/a
    :param content_type: the content type.
    :return: the content type string with every custom type replaced by its
        mapped ("custom custom") name.
    """
    new_content_type = content_type
    if _includes_custom_type(content_type):
        # Plain substring replacement over all known custom types;
        # compositional wrappers (Optional[...], FrozenSet[...], ...) are preserved.
        for custom_type in self.spec.all_custom_types:
            new_content_type = new_content_type.replace(
                custom_type, self.spec.custom_custom_types[custom_type]
            )
    return new_content_type

def _check_content_type_str(self, content_name: str, content_type: str) -> str:
    """
    Produce the checks of elements of compositional types.

    Emits Python source (as a string) that validates, at message-consistency
    time, that content ``content_name`` matches ``content_type``.  Handles
    Optional[...], Union[...], FrozenSet[...], Tuple[...], Dict[...] and
    plain/custom types.  Emission is stateful: ``self.indent`` /
    ``self._change_indent`` track the indentation of the generated code, so
    statement order here is load-bearing.

    :param content_name: the name of the content to be checked
    :param content_type: the type of the content to be checked
    :return: the string containing the checks.
    """
    check_str = ""
    if content_type.startswith("Optional["):
        # Optional contents are only counted/checked when actually set.
        optional = True
        check_str += self.indent + 'if self.is_set("{}"):\n'.format(content_name)
        self._change_indent(1)
        check_str += self.indent + "expected_nb_of_contents += 1\n"
        # Unwrap the Optional and validate against the inner type.
        content_type = _get_sub_types_of_compositional_types(content_type)[0]
        check_str += self.indent + "{} = cast({}, self.{})\n".format(
            content_name, self._to_custom_custom(content_type), content_name
        )
        content_variable = content_name
    else:
        optional = False
        content_variable = "self." + content_name
    if content_type.startswith("Union["):
        element_types = _get_sub_types_of_compositional_types(content_type)
        # Collapse the union members to their top-level runtime types
        # (frozenset/tuple/dict or the member type itself) for the isinstance check.
        unique_standard_types_set = set()
        for typing_content_type in element_types:
            if typing_content_type.startswith("FrozenSet"):
                unique_standard_types_set.add("frozenset")
            elif typing_content_type.startswith("Tuple"):
                unique_standard_types_set.add("tuple")
            elif typing_content_type.startswith("Dict"):
                unique_standard_types_set.add("dict")
            else:
                unique_standard_types_set.add(typing_content_type)
        unique_standard_types_list = sorted(unique_standard_types_set)
        check_str += self.indent
        check_str += "enforce("
        for unique_type in unique_standard_types_list:
            check_str += "{} or ".format(
                _type_check(content_variable, self._to_custom_custom(unique_type))
            )
        # Drop the trailing " or " (4 chars) left by the loop above.
        check_str = check_str[:-4]
        check_str += ", \"Invalid type for content '{}'. Expected either of '{}'. Found '{{}}'.\".format(type({})))\n".format(
            content_name,
            [
                unique_standard_type
                for unique_standard_type in unique_standard_types_list
            ],
            content_variable,
        )
        if "frozenset" in unique_standard_types_list:
            # Element-level check for the frozenset member(s) of the union.
            check_str += self.indent + "if isinstance({}, frozenset):\n".format(
                content_variable
            )
            self._change_indent(1)
            check_str += self.indent + "enforce(\n"
            self._change_indent(1)
            frozen_set_element_types_set = set()
            for element_type in element_types:
                if element_type.startswith("FrozenSet"):
                    frozen_set_element_types_set.add(
                        _get_sub_types_of_compositional_types(element_type)[0]
                    )
            frozen_set_element_types = sorted(frozen_set_element_types_set)
            for frozen_set_element_type in frozen_set_element_types:
                check_str += self.indent + "all({} for element in {}) or\n".format(
                    _type_check(
                        "element", self._to_custom_custom(frozen_set_element_type)
                    ),
                    content_variable,
                )
            # Strip the trailing " or\n" (4 chars) from the last alternative.
            check_str = check_str[:-4]
            check_str += "\n"
            self._change_indent(-1)
            if len(frozen_set_element_types) == 1:
                check_str += (
                    self.indent
                    + ", \"Invalid type for elements of content '{}'. Expected ".format(
                        content_name
                    )
                )
                for frozen_set_element_type in frozen_set_element_types:
                    check_str += "'{}'".format(
                        self._to_custom_custom(frozen_set_element_type)
                    )
                check_str += '.")\n'
            else:
                check_str += (
                    self.indent
                    + ", \"Invalid type for frozenset elements in content '{}'. Expected either ".format(
                        content_name
                    )
                )
                for frozen_set_element_type in frozen_set_element_types:
                    check_str += "'{}' or ".format(
                        self._to_custom_custom(frozen_set_element_type)
                    )
                check_str = check_str[:-4]
                check_str += '.")\n'
            self._change_indent(-1)
        if "tuple" in unique_standard_types_list:
            # Element-level check for the tuple member(s) of the union.
            check_str += self.indent + "if isinstance({}, tuple):\n".format(
                content_variable
            )
            self._change_indent(1)
            check_str += self.indent + "enforce(\n"
            self._change_indent(1)
            tuple_element_types_set = set()
            for element_type in element_types:
                if element_type.startswith("Tuple"):
                    tuple_element_types_set.add(
                        _get_sub_types_of_compositional_types(element_type)[0]
                    )
            tuple_element_types = sorted(tuple_element_types_set)
            for tuple_element_type in tuple_element_types:
                # NOTE(review): ") or \n" here has a space before "\n", unlike the
                # frozenset branch's ") or\n"; after [:-4] this leaves a trailing
                # space in the generated line. Cosmetic inconsistency — confirm
                # before changing, since generated-file bytes may be fingerprinted.
                check_str += self.indent + "all({} for element in {}) or \n".format(
                    _type_check(
                        "element", self._to_custom_custom(tuple_element_type)
                    ),
                    content_variable,
                )
            check_str = check_str[:-4]
            check_str += "\n"
            self._change_indent(-1)
            if len(tuple_element_types) == 1:
                check_str += (
                    self.indent
                    + ", \"Invalid type for tuple elements in content '{}'. Expected ".format(
                        content_name
                    )
                )
                for tuple_element_type in tuple_element_types:
                    check_str += "'{}'".format(
                        self._to_custom_custom(tuple_element_type)
                    )
                check_str += '.")\n'
            else:
                check_str += (
                    self.indent
                    + ", \"Invalid type for tuple elements in content '{}'. Expected either ".format(
                        content_name
                    )
                )
                for tuple_element_type in tuple_element_types:
                    check_str += "'{}' or ".format(
                        self._to_custom_custom(tuple_element_type)
                    )
                check_str = check_str[:-4]
                check_str += '.")\n'
            self._change_indent(-1)
        if "dict" in unique_standard_types_list:
            # Key/value-level check for the dict member(s) of the union.
            check_str += self.indent + "if isinstance({}, dict):\n".format(
                content_variable
            )
            self._change_indent(1)
            check_str += (
                self.indent
                + "for key_of_{}, value_of_{} in {}.items():\n".format(
                    content_name, content_name, content_variable
                )
            )
            self._change_indent(1)
            check_str += self.indent + "enforce(\n"
            self._change_indent(1)
            dict_key_value_types = dict()
            for element_type in element_types:
                if element_type.startswith("Dict"):
                    dict_key_value_types[
                        _get_sub_types_of_compositional_types(element_type)[0]
                    ] = _get_sub_types_of_compositional_types(element_type)[1]
            for element1_type in sorted(dict_key_value_types.keys()):
                check_str += self.indent + "({} and {}) or\n".format(
                    _type_check(
                        "key_of_" + content_name,
                        self._to_custom_custom(element1_type),
                    ),
                    _type_check(
                        "value_of_" + content_name,
                        self._to_custom_custom(dict_key_value_types[element1_type]),
                    ),
                )
            check_str = check_str[:-4]
            check_str += "\n"
            self._change_indent(-1)
            if len(dict_key_value_types) == 1:
                check_str += (
                    self.indent
                    + ", \"Invalid type for dictionary key, value in content '{}'. Expected ".format(
                        content_name
                    )
                )
                for key in sorted(dict_key_value_types.keys()):
                    check_str += "'{}', '{}'".format(key, dict_key_value_types[key])
                check_str += '.")\n'
            else:
                check_str += (
                    self.indent
                    + ", \"Invalid type for dictionary key, value in content '{}'. Expected ".format(
                        content_name
                    )
                )
                for key in sorted(dict_key_value_types.keys()):
                    check_str += "'{}','{}' or ".format(
                        key, dict_key_value_types[key]
                    )
                check_str = check_str[:-4]
                check_str += '.")\n'
            # Close both the for-loop and the isinstance-if of the generated code.
            self._change_indent(-2)
    elif content_type.startswith("FrozenSet["):
        # check the type
        check_str += (
            self.indent
            + "enforce(isinstance({}, frozenset), \"Invalid type for content '{}'. Expected 'frozenset'. Found '{{}}'.\".format(type({})))\n".format(
                content_variable, content_name, content_variable
            )
        )
        element_type = _get_sub_types_of_compositional_types(content_type)[0]
        check_str += self.indent + "enforce(all(\n"
        self._change_indent(1)
        check_str += self.indent + "{} for element in {}\n".format(
            _type_check("element", self._to_custom_custom(element_type)),
            content_variable,
        )
        self._change_indent(-1)
        check_str += (
            self.indent
            + "), \"Invalid type for frozenset elements in content '{}'. Expected '{}'.\")\n".format(
                content_name, element_type
            )
        )
    elif content_type.startswith("Tuple["):
        # check the type
        check_str += (
            self.indent
            + "enforce(isinstance({}, tuple), \"Invalid type for content '{}'. Expected 'tuple'. Found '{{}}'.\".format(type({})))\n".format(
                content_variable, content_name, content_variable
            )
        )
        element_type = _get_sub_types_of_compositional_types(content_type)[0]
        check_str += self.indent + "enforce(all(\n"
        self._change_indent(1)
        check_str += self.indent + "{} for element in {}\n".format(
            _type_check("element", self._to_custom_custom(element_type)),
            content_variable,
        )
        self._change_indent(-1)
        check_str += (
            self.indent
            + "), \"Invalid type for tuple elements in content '{}'. Expected '{}'.\")\n".format(
                content_name, element_type
            )
        )
    elif content_type.startswith("Dict["):
        # check the type
        check_str += (
            self.indent
            + "enforce(isinstance({}, dict), \"Invalid type for content '{}'. Expected 'dict'. Found '{{}}'.\".format(type({})))\n".format(
                content_variable, content_name, content_variable
            )
        )
        element_type_1 = _get_sub_types_of_compositional_types(content_type)[0]
        element_type_2 = _get_sub_types_of_compositional_types(content_type)[1]
        # check the keys type then check the values type
        check_str += (
            self.indent
            + "for key_of_{}, value_of_{} in {}.items():\n".format(
                content_name, content_name, content_variable
            )
        )
        self._change_indent(1)
        check_str += self.indent + "enforce(\n"
        self._change_indent(1)
        check_str += self.indent + "{}\n".format(
            _type_check(
                "key_of_" + content_name, self._to_custom_custom(element_type_1)
            )
        )
        self._change_indent(-1)
        check_str += (
            self.indent
            + ", \"Invalid type for dictionary keys in content '{}'. Expected '{}'. Found '{{}}'.\".format(type(key_of_{})))\n".format(
                content_name, element_type_1, content_name
            )
        )
        check_str += self.indent + "enforce(\n"
        self._change_indent(1)
        check_str += self.indent + "{}\n".format(
            _type_check(
                "value_of_" + content_name, self._to_custom_custom(element_type_2)
            )
        )
        self._change_indent(-1)
        check_str += (
            self.indent
            + ", \"Invalid type for dictionary values in content '{}'. Expected '{}'. Found '{{}}'.\".format(type(value_of_{})))\n".format(
                content_name, element_type_2, content_name
            )
        )
        self._change_indent(-1)
    else:
        # Plain or custom type: a single isinstance-style enforce.
        check_str += (
            self.indent
            + "enforce({}, \"Invalid type for content '{}'. Expected '{}'. Found '{{}}'.\".format(type({})))\n".format(
                _type_check(content_variable, self._to_custom_custom(content_type)),
                content_name,
                content_type,
                content_variable,
            )
        )
    if optional:
        # Close the 'if self.is_set(...)' block opened for Optional contents.
        self._change_indent(-1)
    return check_str
:return: the message.py file content """ self._change_indent(0, "s") # Header cls_str = _copyright_header_str(self.protocol_specification.author) + "\n" # Module docstring cls_str += ( self.indent + '"""This module contains {}\'s message definition."""\n\n'.format( self.protocol_specification.name ) ) cls_str += f"# pylint: disable={",".join(PYLINT_DISABLE_MESSAGE_PY)}\n" # Imports cls_str += self.indent + "import logging\n" cls_str += self._import_from_typing_module() + "\n\n" cls_str += self.indent + "from aea.configurations.base import PublicId\n" cls_str += self.indent + "from aea.exceptions import AEAEnforceError, enforce\n" cls_str += MESSAGE_IMPORT + "\n" if self._import_from_custom_types_module() != "": cls_str += "\n" + self._import_from_custom_types_module() + "\n" else: cls_str += self._import_from_custom_types_module() cls_str += ( self.indent + '\n_default_logger = logging.getLogger("aea.packages.{}.protocols.{}.message")\n'.format( self.protocol_specification.author, self.protocol_specification.name ) ) cls_str += self.indent + "\nDEFAULT_BODY_SIZE = 4\n" # Class Header cls_str += self.indent + "\n\nclass {}Message(Message):\n".format( self.protocol_specification_in_camel_case ) self._change_indent(1) cls_str += self.indent + '"""{}"""\n\n'.format( self.protocol_specification.description ) # Class attributes cls_str += self.indent + 'protocol_id = PublicId.from_str("{}/{}:{}")\n'.format( self.protocol_specification.author, self.protocol_specification.name, self.protocol_specification.version, ) cls_str += ( self.indent + 'protocol_specification_id = PublicId.from_str("{}/{}:{}")\n'.format( self.protocol_specification.protocol_specification_id.author, self.protocol_specification.protocol_specification_id.name, self.protocol_specification.protocol_specification_id.version, ) ) for custom_type in self.spec.all_custom_types: cls_str += "\n" cls_str += self.indent + "{} = Custom{}\n".format(custom_type, custom_type) # Performatives Enum cls_str += "\n" + 
self._performatives_enum_str() cls_str += self.indent + "_performatives = {}\n".format( self._performatives_str() ) # slots cls_str += self.indent + "__slots__: Tuple[str, ...] = tuple()\n" cls_str += self.indent + "class _SlotsCls():\n" self._change_indent(1) cls_str += self.indent + "__slots__ = (\n" self._change_indent(1) # default fields default_slots = ["performative", "dialogue_reference", "message_id", "target"] slots = list(self.spec.all_unique_contents.keys()) + default_slots for field_name in sorted(slots): cls_str += self.indent + f'"{field_name}",' self._change_indent(-1) cls_str += self.indent + ")\n" self._change_indent(-1) # __init__ cls_str += self.indent + "def __init__(\n" self._change_indent(1) cls_str += self.indent + "self,\n" cls_str += self.indent + "performative: Performative,\n" cls_str += self.indent + 'dialogue_reference: Tuple[str, str] = ("", ""),\n' cls_str += self.indent + "message_id: int = 1,\n" cls_str += self.indent + "target: int = 0,\n" cls_str += self.indent + "**kwargs: Any,\n" self._change_indent(-1) cls_str += self.indent + "):\n" self._change_indent(1) cls_str += self.indent + '"""\n' cls_str += self.indent + "Initialise an instance of {}Message.\n\n".format( self.protocol_specification_in_camel_case ) cls_str += self.indent + ":param message_id: the message id.\n" cls_str += self.indent + ":param dialogue_reference: the dialogue reference.\n" cls_str += self.indent + ":param target: the message target.\n" cls_str += self.indent + ":param performative: the message performative.\n" cls_str += self.indent + ":param **kwargs: extra options.\n" cls_str += self.indent + '"""\n' cls_str += self.indent + "super().__init__(\n" self._change_indent(1) cls_str += self.indent + "dialogue_reference=dialogue_reference,\n" cls_str += self.indent + "message_id=message_id,\n" cls_str += self.indent + "target=target,\n" cls_str += ( self.indent + "performative={}Message.Performative(performative),\n".format( 
self.protocol_specification_in_camel_case ) ) cls_str += self.indent + "**kwargs,\n" self._change_indent(-1) cls_str += self.indent + ")\n" self._change_indent(-1) # Instance properties cls_str += self.indent + "@property\n" cls_str += self.indent + "def valid_performatives(self) -> Set[str]:\n" self._change_indent(1) cls_str += self.indent + '"""Get valid performatives."""\n' cls_str += self.indent + "return self._performatives\n\n" self._change_indent(-1) cls_str += self.indent + "@property\n" cls_str += self.indent + "def dialogue_reference(self) -> Tuple[str, str]:\n" self._change_indent(1) cls_str += self.indent + '"""Get the dialogue_reference of the message."""\n' cls_str += ( self.indent + 'enforce(self.is_set("dialogue_reference"), "dialogue_reference is not set.")\n' ) cls_str += ( self.indent + 'return cast(Tuple[str, str], self.get("dialogue_reference"))\n\n' ) self._change_indent(-1) cls_str += self.indent + "@property\n" cls_str += self.indent + "def message_id(self) -> int:\n" self._change_indent(1) cls_str += self.indent + '"""Get the message_id of the message."""\n' cls_str += ( self.indent + 'enforce(self.is_set("message_id"), "message_id is not set.")\n' ) cls_str += self.indent + 'return cast(int, self.get("message_id"))\n\n' self._change_indent(-1) cls_str += self.indent + "@property\n" cls_str += ( self.indent + "def performative(self) -> Performative: # type: ignore # noqa: F821\n" ) self._change_indent(1) cls_str += self.indent + '"""Get the performative of the message."""\n' cls_str += ( self.indent + 'enforce(self.is_set("performative"), "performative is not set.")\n' ) cls_str += ( self.indent + 'return cast({}Message.Performative, self.get("performative"))\n\n'.format( self.protocol_specification_in_camel_case ) ) self._change_indent(-1) cls_str += self.indent + "@property\n" cls_str += self.indent + "def target(self) -> int:\n" self._change_indent(1) cls_str += self.indent + '"""Get the target of the message."""\n' cls_str += ( 
self.indent + 'enforce(self.is_set("target"), "target is not set.")\n' ) cls_str += self.indent + 'return cast(int, self.get("target"))\n\n' self._change_indent(-1) for content_name in sorted(self.spec.all_unique_contents.keys()): content_type = self.spec.all_unique_contents[content_name] cls_str += self.indent + "@property\n" cls_str += self.indent + "def {}(self) -> {}:\n".format( content_name, self._to_custom_custom(content_type) ) self._change_indent(1) cls_str += ( self.indent + '"""Get the \'{}\' content from the message."""\n'.format( content_name ) ) if not content_type.startswith("Optional"): cls_str += ( self.indent + 'enforce(self.is_set("{}"), "\'{}\' content is not set.")\n'.format( content_name, content_name ) ) cls_str += self.indent + 'return cast({}, self.get("{}"))\n\n'.format( self._to_custom_custom(content_type), content_name ) self._change_indent(-1) # check_consistency method cls_str += self.indent + "def _is_consistent(self) -> bool:\n" self._change_indent(1) cls_str += ( self.indent + '"""Check that the message follows the {} protocol."""\n'.format( self.protocol_specification.name ) ) cls_str += self.indent + "try:\n" self._change_indent(1) cls_str += ( self.indent + "enforce(isinstance(self.dialogue_reference, tuple), \"Invalid type for 'dialogue_reference'. Expected 'tuple'. Found '{}'.\"" ".format(type(self.dialogue_reference)))\n" ) cls_str += ( self.indent + "enforce(isinstance(self.dialogue_reference[0], str), \"Invalid type for 'dialogue_reference[0]'. Expected 'str'. Found '{}'.\"" ".format(type(self.dialogue_reference[0])))\n" ) cls_str += ( self.indent + "enforce(isinstance(self.dialogue_reference[1], str), \"Invalid type for 'dialogue_reference[1]'. Expected 'str'. Found '{}'.\"" ".format(type(self.dialogue_reference[1])))\n" ) cls_str += ( self.indent + "enforce(" + _type_check("self.message_id", "int") + ", \"Invalid type for 'message_id'. Expected 'int'. 
Found '{}'.\"" ".format(type(self.message_id)))\n" ) cls_str += ( self.indent + "enforce(" + _type_check("self.target", "int") + ", \"Invalid type for 'target'. Expected 'int'. Found '{}'.\"" ".format(type(self.target)))\n\n" ) cls_str += self.indent + "# Light Protocol Rule 2\n" cls_str += self.indent + "# Check correct performative\n" cls_str += ( self.indent + "enforce(isinstance(self.performative, {}Message.Performative)".format( self.protocol_specification_in_camel_case ) ) cls_str += ( ", \"Invalid 'performative'. Expected either of '{}'. Found '{}'.\".format(" ) cls_str += "self.valid_performatives, self.performative" cls_str += "))\n\n" cls_str += self.indent + "# Check correct contents\n" cls_str += ( self.indent + "actual_nb_of_contents = len(self._body) - DEFAULT_BODY_SIZE\n" ) cls_str += self.indent + "expected_nb_of_contents = 0\n" counter = 1 for performative, contents in self.spec.speech_acts.items(): if counter == 1: cls_str += self.indent + "if " else: cls_str += self.indent + "elif " cls_str += "self.performative == {}Message.Performative.{}:\n".format( self.protocol_specification_in_camel_case, performative.upper(), ) self._change_indent(1) nb_of_non_optional_contents = 0 for content_type in contents.values(): if not content_type.startswith("Optional"): nb_of_non_optional_contents += 1 cls_str += self.indent + "expected_nb_of_contents = {}\n".format( nb_of_non_optional_contents ) for content_name, content_type in contents.items(): cls_str += self._check_content_type_str(content_name, content_type) counter += 1 self._change_indent(-1) cls_str += "\n" cls_str += self.indent + "# Check correct content count\n" cls_str += ( self.indent + "enforce(expected_nb_of_contents == actual_nb_of_contents, " '"Incorrect number of contents. Expected {}. 
def _valid_replies_str(self) -> str:
    """
    Generate the `valid replies` dictionary.

    Emits the source of a ``VALID_REPLIES`` class constant mapping each
    performative to the frozenset of performatives that may follow it,
    driven by ``self.spec.reply``.

    :return: the `valid replies` dictionary string
    """
    valid_replies_str = (
        self.indent
        + "VALID_REPLIES: Dict[Message.Performative, FrozenSet[Message.Performative]] = {\n"
    )
    self._change_indent(1)
    for performative in sorted(self.spec.reply.keys()):
        valid_replies_str += (
            self.indent
            + "{}Message.Performative.{}: frozenset(".format(
                self.protocol_specification_in_camel_case, performative.upper()
            )
        )
        if len(self.spec.reply[performative]) > 0:
            # Non-empty reply set: emit a set literal inside frozenset(...).
            valid_replies_str += "\n"
            self._change_indent(1)
            valid_replies_str += self.indent + "{"
            for reply in self.spec.reply[performative]:
                valid_replies_str += "{}Message.Performative.{}, ".format(
                    self.protocol_specification_in_camel_case, reply.upper()
                )
            # Drop the trailing ", " (2 chars) after the last member.
            valid_replies_str = valid_replies_str[:-2]
            valid_replies_str += "}\n"
            self._change_indent(-1)
        valid_replies_str += self.indent + "),\n"
    self._change_indent(-1)
    valid_replies_str += self.indent + "}"
    return valid_replies_str

def _end_state_enum_str(self) -> str:
    """
    Generate the end state Enum class.

    Each end state from the spec becomes an integer-valued Enum member,
    numbered in ``self.spec.end_states`` order starting at 0.

    :return: the end state Enum string
    """
    enum_str = self.indent + "class EndState(Dialogue.EndState):\n"
    self._change_indent(1)
    enum_str += (
        self.indent
        + '"""This class defines the end states of a {} dialogue."""\n\n'.format(
            self.protocol_specification.name
        )
    )
    # NOTE(review): manual counter — enumerate(self.spec.end_states) would be
    # the idiomatic, behavior-identical form.
    tag = 0
    for end_state in self.spec.end_states:
        enum_str += self.indent + "{} = {}\n".format(end_state.upper(), tag)
        tag += 1
    self._change_indent(-1)
    return enum_str

def _agent_role_enum_str(self) -> str:
    """
    Generate the agent role Enum class.

    Each role from the spec becomes a string-valued Enum member
    (``ROLE = "role"``).

    :return: the agent role Enum string
    """
    enum_str = self.indent + "class Role(Dialogue.Role):\n"
    self._change_indent(1)
    enum_str += (
        self.indent
        + '"""This class defines the agent\'s role in a {} dialogue."""\n\n'.format(
            self.protocol_specification.name
        )
    )
    for role in self.spec.roles:
        enum_str += self.indent + '{} = "{}"\n'.format(role.upper(), role)
    self._change_indent(-1)
    return enum_str
aea.protocols.dialogue.base import Dialogue, DialogueLabel, Dialogues\n\n" ) cls_str += self.indent + "from {}.message import {}Message\n".format( self.dotted_path_to_protocol_package, self.protocol_specification_in_camel_case, ) # Class Header cls_str += "\nclass {}Dialogue(Dialogue):\n".format( self.protocol_specification_in_camel_case ) self._change_indent(1) cls_str += ( self.indent + '"""The {} dialogue class maintains state of a dialogue and manages it."""\n'.format( self.protocol_specification.name ) ) # Class Constants initial_performatives_str = ", ".join( [ "{}Message.Performative.{}".format( self.protocol_specification_in_camel_case, initial_performative ) for initial_performative in self.spec.initial_performatives ] ) terminal_performatives_str = ", ".join( [ "{}Message.Performative.{}".format( self.protocol_specification_in_camel_case, terminal_performative ) for terminal_performative in self.spec.terminal_performatives ] ) cls_str += ( self.indent + "INITIAL_PERFORMATIVES: FrozenSet[Message.Performative] = frozenset({" + initial_performatives_str + "})\n" + self.indent + "TERMINAL_PERFORMATIVES: FrozenSet[Message.Performative] = frozenset({" + terminal_performatives_str + "})\n" + self._valid_replies_str() ) # Enums cls_str += "\n" + self._agent_role_enum_str() cls_str += "\n" + self._end_state_enum_str() cls_str += "\n" # initializer cls_str += self.indent + "def __init__(\n" self._change_indent(1) cls_str += self.indent + "self,\n" cls_str += self.indent + "dialogue_label: DialogueLabel,\n" cls_str += self.indent + "self_address: Address,\n" cls_str += self.indent + "role: Dialogue.Role,\n" cls_str += self.indent + "message_class: Type[{}Message] = {}Message,\n".format( self.protocol_specification_in_camel_case, self.protocol_specification_in_camel_case, ) self._change_indent(-1) cls_str += self.indent + ") -> None:\n" self._change_indent(1) cls_str += self.indent + '"""\n' cls_str += self.indent + "Initialize a dialogue.\n\n" cls_str += ( 
self.indent + ":param dialogue_label: the identifier of the dialogue\n" ) cls_str += ( self.indent + ":param self_address: the address of the entity for whom this dialogue is maintained\n" ) cls_str += ( self.indent + ":param role: the role of the agent this dialogue is maintained for\n" ) cls_str += self.indent + ":param message_class: the message class used\n" cls_str += self.indent + '"""\n' cls_str += self.indent + "Dialogue.__init__(\n" cls_str += self.indent + "self,\n" cls_str += self.indent + "dialogue_label=dialogue_label,\n" cls_str += self.indent + "message_class=message_class,\n" cls_str += self.indent + "self_address=self_address,\n" cls_str += self.indent + "role=role,\n" cls_str += self.indent + ")\n" self._change_indent(-2) # dialogues class cls_str += self.indent + "class {}Dialogues(Dialogues, ABC):\n".format( self.protocol_specification_in_camel_case ) self._change_indent(1) cls_str += ( self.indent + '"""This class keeps track of all {} dialogues."""\n\n'.format( self.protocol_specification.name ) ) end_states_str = ", ".join( [ "{}Dialogue.EndState.{}".format( self.protocol_specification_in_camel_case, end_state.upper() ) for end_state in self.spec.end_states ] ) cls_str += self.indent + "END_STATES = frozenset(\n" cls_str += self.indent + "{" + end_states_str + "}" cls_str += self.indent + ")\n\n" cls_str += ( self.indent + f"_keep_terminal_state_dialogues = {repr(self.spec.keep_terminal_state_dialogues)}\n\n" ) cls_str += self.indent + "def __init__(\n" self._change_indent(1) cls_str += self.indent + "self,\n" cls_str += self.indent + "self_address: Address,\n" cls_str += ( self.indent + "role_from_first_message: Callable[[Message, Address], Dialogue.Role],\n" ) cls_str += ( self.indent + "dialogue_class: Type[{}Dialogue] = {}Dialogue,\n".format( self.protocol_specification_in_camel_case, self.protocol_specification_in_camel_case, ) ) self._change_indent(-1) cls_str += self.indent + ") -> None:\n" self._change_indent(1) cls_str += self.indent 
+ '"""\n' cls_str += self.indent + "Initialize dialogues.\n\n" cls_str += ( self.indent + ":param self_address: the address of the entity for whom dialogues are maintained\n" ) cls_str += self.indent + ":param dialogue_class: the dialogue class used\n" cls_str += ( self.indent + ":param role_from_first_message: the callable determining role from first message\n" ) cls_str += self.indent + '"""\n' cls_str += self.indent + "Dialogues.__init__(\n" self._change_indent(1) cls_str += self.indent + "self,\n" cls_str += self.indent + "self_address=self_address,\n" cls_str += ( self.indent + "end_states=cast(FrozenSet[Dialogue.EndState], self.END_STATES),\n" ) cls_str += self.indent + "message_class={}Message,\n".format( self.protocol_specification_in_camel_case ) cls_str += self.indent + "dialogue_class=dialogue_class,\n" cls_str += self.indent + "role_from_first_message=role_from_first_message,\n" self._change_indent(-1) cls_str += self.indent + ")\n" self._change_indent(-2) cls_str += self.indent + "\n" return cls_str def _custom_types_module_str(self) -> str: """ Produce the contents of the custom_types module, containing classes corresponding to every custom type in the protocol specification. 
:return: the custom_types.py file content """ self._change_indent(0, "s") # Header cls_str = _copyright_header_str(self.protocol_specification.author) + "\n" # Module docstring cls_str += '"""This module contains class representations corresponding to every custom type in the protocol specification."""\n' # class code per custom type for custom_type in self.spec.all_custom_types: cls_str += self.indent + "\n\nclass {}:\n".format(custom_type) self._change_indent(1) cls_str += ( self.indent + '"""This class represents an instance of {}."""\n\n'.format( custom_type ) ) cls_str += self.indent + "def __init__(self):\n" self._change_indent(1) cls_str += self.indent + '"""Initialise an instance of {}."""\n'.format( custom_type ) cls_str += self.indent + "raise NotImplementedError\n\n" self._change_indent(-1) cls_str += self.indent + "@staticmethod\n" cls_str += ( self.indent + 'def encode({}_protobuf_object, {}_object: "{}") -> None:\n'.format( _camel_case_to_snake_case(custom_type), _camel_case_to_snake_case(custom_type), custom_type, ) ) self._change_indent(1) cls_str += self.indent + '"""\n' cls_str += ( self.indent + "Encode an instance of this class into the protocol buffer object.\n\n" ) cls_str += ( self.indent + "The protocol buffer object in the {}_protobuf_object argument is matched with the instance of this class in the '{}_object' argument.\n\n".format( _camel_case_to_snake_case(custom_type), _camel_case_to_snake_case(custom_type), ) ) cls_str += ( self.indent + ":param {}_protobuf_object: the protocol buffer object whose type corresponds with this class.\n".format( _camel_case_to_snake_case(custom_type) ) ) cls_str += ( self.indent + ":param {}_object: an instance of this class to be encoded in the protocol buffer object.\n".format( _camel_case_to_snake_case(custom_type) ) ) cls_str += self.indent + '"""\n' cls_str += self.indent + "raise NotImplementedError\n\n" self._change_indent(-1) cls_str += self.indent + "@classmethod\n" cls_str += ( self.indent + 'def 
def _encoding_message_content_from_python_to_protobuf(
    self,
    content_name: str,
    content_type: str,
) -> str:
    """
    Produce the encoding of message contents for the serialisation class.

    Emits the source lines that copy content ``content_name`` from the
    message object into the protobuf ``performative`` object, dispatching
    on the top-level type.  Recurses for Union/Optional sub-types.

    :param content_name: the name of the content to be encoded
    :param content_type: the type of the content to be encoded
    :return: the encoding string
    """
    encoding_str = ""
    if content_type in PYTHON_TYPE_TO_PROTO_TYPE.keys():
        # Primitive: plain field assignment.
        encoding_str += self.indent + "{} = msg.{}\n".format(
            content_name, content_name
        )
        encoding_str += self.indent + "performative.{} = {}\n".format(
            content_name, content_name
        )
    elif content_type.startswith("FrozenSet") or content_type.startswith("Tuple"):
        # Repeated field: use .extend().
        encoding_str += self.indent + "{} = msg.{}\n".format(
            content_name, content_name
        )
        encoding_str += self.indent + "performative.{}.extend({})\n".format(
            content_name, content_name
        )
    elif content_type.startswith("Dict"):
        # Map field: use .update().
        encoding_str += self.indent + "{} = msg.{}\n".format(
            content_name, content_name
        )
        encoding_str += self.indent + "performative.{}.update({})\n".format(
            content_name, content_name
        )
    elif content_type.startswith("Union"):
        # One guarded branch per union member, each with an *_is_set flag.
        sub_types = _get_sub_types_of_compositional_types(content_type)
        for sub_type in sub_types:
            sub_type_name_in_protobuf = _union_sub_type_to_protobuf_variable_name(
                content_name, sub_type
            )
            encoding_str += self.indent + 'if msg.is_set("{}"):\n'.format(
                sub_type_name_in_protobuf
            )
            self._change_indent(1)
            encoding_str += self.indent + "performative.{}_is_set = True\n".format(
                sub_type_name_in_protobuf
            )
            encoding_str += self._encoding_message_content_from_python_to_protobuf(
                sub_type_name_in_protobuf, sub_type
            )
            self._change_indent(-1)
    elif content_type.startswith("Optional"):
        sub_type = _get_sub_types_of_compositional_types(content_type)[0]
        # For non-Union sub-types, wrap in an is_set guard; Optional[Union[...]]
        # recurses directly since the Union branch emits its own guards
        # (hence the matching second guard around the dedent below).
        if not sub_type.startswith("Union"):
            encoding_str += self.indent + 'if msg.is_set("{}"):\n'.format(
                content_name
            )
            self._change_indent(1)
            encoding_str += self.indent + "performative.{}_is_set = True\n".format(
                content_name
            )
        encoding_str += self._encoding_message_content_from_python_to_protobuf(
            content_name, sub_type
        )
        if not sub_type.startswith("Union"):
            self._change_indent(-1)
    else:
        # Custom type: delegate to the custom type's own encode().
        encoding_str += self.indent + "{} = msg.{}\n".format(
            content_name, content_name
        )
        encoding_str += self.indent + "{}.encode(performative.{}, {})\n".format(
            content_type, content_name, content_name
        )
    return encoding_str

def _decoding_message_content_from_protobuf_to_python(
    self,
    performative: str,
    content_name: str,
    content_type: str,
    variable_name_in_protobuf: str = "",
) -> str:
    """
    Produce the decoding of message contents for the serialisation class.

    Emits the source lines that read content ``content_name`` from the
    protobuf object into ``performative_content``, dispatching on the
    top-level type and converting repeated/map fields back to
    frozenset/tuple/dict.  Recurses for Union/Optional sub-types.

    :param performative: the performative to which the content belongs
    :param content_name: the name of the content to be decoded
    :param content_type: the type of the content to be decoded
    :param variable_name_in_protobuf: the name of the variable in the protobuf schema
        ("" means: use content_name)
    :return: the decoding string
    """
    decoding_str = ""
    # "" is the sentinel for "no override"; Union recursion passes the
    # union-member-specific protobuf variable name instead.
    variable_name = (
        content_name
        if variable_name_in_protobuf == ""
        else variable_name_in_protobuf
    )
    if content_type in PYTHON_TYPE_TO_PROTO_TYPE.keys():
        # Primitive: direct read.
        decoding_str += self.indent + "{} = {}_pb.{}.{}\n".format(
            content_name,
            self.protocol_specification.name,
            performative,
            variable_name,
        )
        decoding_str += self.indent + 'performative_content["{}"] = {}\n'.format(
            content_name, content_name
        )
    elif content_type.startswith("FrozenSet"):
        # Repeated field -> frozenset.
        decoding_str += self.indent + "{} = {}_pb.{}.{}\n".format(
            content_name,
            self.protocol_specification.name,
            performative,
            content_name,
        )
        decoding_str += self.indent + "{}_frozenset = frozenset({})\n".format(
            content_name, content_name
        )
        decoding_str += (
            self.indent
            + 'performative_content["{}"] = {}_frozenset\n'.format(
                content_name, content_name
            )
        )
    elif content_type.startswith("Tuple"):
        # Repeated field -> tuple.
        decoding_str += self.indent + "{} = {}_pb.{}.{}\n".format(
            content_name,
            self.protocol_specification.name,
            performative,
            content_name,
        )
        decoding_str += self.indent + "{}_tuple = tuple({})\n".format(
            content_name, content_name
        )
        decoding_str += (
            self.indent
            + 'performative_content["{}"] = {}_tuple\n'.format(
                content_name, content_name
            )
        )
    elif content_type.startswith("Dict"):
        # Map field -> dict.
        decoding_str += self.indent + "{} = {}_pb.{}.{}\n".format(
            content_name,
            self.protocol_specification.name,
            performative,
            content_name,
        )
        decoding_str += self.indent + "{}_dict = dict({})\n".format(
            content_name, content_name
        )
        decoding_str += (
            self.indent
            + 'performative_content["{}"] = {}_dict\n'.format(
                content_name, content_name
            )
        )
    elif content_type.startswith("Union"):
        # One *_is_set-guarded branch per union member.
        sub_types = _get_sub_types_of_compositional_types(content_type)
        for sub_type in sub_types:
            sub_type_name_in_protobuf = _union_sub_type_to_protobuf_variable_name(
                content_name, sub_type
            )
            decoding_str += self.indent + "if {}_pb.{}.{}_is_set:\n".format(
                self.protocol_specification.name,
                performative,
                sub_type_name_in_protobuf,
            )
            self._change_indent(1)
            decoding_str += self._decoding_message_content_from_protobuf_to_python(
                performative=performative,
                content_name=content_name,
                content_type=sub_type,
                variable_name_in_protobuf=sub_type_name_in_protobuf,
            )
            self._change_indent(-1)
    elif content_type.startswith("Optional"):
        sub_type = _get_sub_types_of_compositional_types(content_type)[0]
        # Mirrors the encoder: guard non-Union sub-types with *_is_set;
        # Optional[Union[...]] recurses directly (Union emits its own guards).
        if not sub_type.startswith("Union"):
            decoding_str += self.indent + "if {}_pb.{}.{}_is_set:\n".format(
                self.protocol_specification.name, performative, content_name
            )
            self._change_indent(1)
        decoding_str += self._decoding_message_content_from_protobuf_to_python(
            performative, content_name, sub_type
        )
        if not sub_type.startswith("Union"):
            self._change_indent(-1)
    else:
        # Custom type: delegate to the custom type's own decode().
        decoding_str += self.indent + "pb2_{} = {}_pb.{}.{}\n".format(
            variable_name,
            self.protocol_specification.name,
            performative,
            variable_name,
        )
        decoding_str += self.indent + "{} = {}.decode(pb2_{})\n".format(
            content_name,
            content_type,
            variable_name,
        )
        decoding_str += self.indent + 'performative_content["{}"] = {}\n'.format(
            content_name, content_name
        )
    return decoding_str
:return: the serialization.py file content """ self._change_indent(0, "s") # Header cls_str = _copyright_header_str(self.protocol_specification.author) + "\n" # Module docstring cls_str += ( self.indent + '"""Serialization module for {} protocol."""\n\n'.format( self.protocol_specification.name ) ) cls_str += f"# pylint: disable={",".join(PYLINT_DISABLE_SERIALIZATION_PY)}\n" # Imports cls_str += self.indent + "from typing import Any, Dict, cast\n\n" cls_str += ( self.indent + "from aea.mail.base_pb2 import DialogueMessage, Message as ProtobufMessage\n" ) cls_str += MESSAGE_IMPORT + "\n" cls_str += SERIALIZER_IMPORT + "\n\n" cls_str += self.indent + "from {} import (\n {}_pb2,\n)\n".format( self.dotted_path_to_protocol_package, self.protocol_specification.name, ) for custom_type in self.spec.all_custom_types: cls_str += ( self.indent + "from {}.custom_types import (\n {},\n)\n".format( self.dotted_path_to_protocol_package, custom_type, ) ) cls_str += self.indent + "from {}.message import (\n {}Message,\n)\n".format( self.dotted_path_to_protocol_package, self.protocol_specification_in_camel_case, ) # Class Header cls_str += self.indent + "\n\nclass {}Serializer(Serializer):\n".format( self.protocol_specification_in_camel_case, ) self._change_indent(1) cls_str += ( self.indent + '"""Serialization for the \'{}\' protocol."""\n\n'.format( self.protocol_specification.name, ) ) # encoder cls_str += self.indent + "@staticmethod\n" cls_str += self.indent + "def encode(msg: Message) -> bytes:\n" self._change_indent(1) cls_str += self.indent + '"""\n' cls_str += self.indent + "Encode a '{}' message into bytes.\n\n".format( self.protocol_specification_in_camel_case, ) cls_str += self.indent + ":param msg: the message object.\n" cls_str += self.indent + ":return: the bytes.\n" cls_str += self.indent + '"""\n' cls_str += self.indent + "msg = cast({}Message, msg)\n".format( self.protocol_specification_in_camel_case ) cls_str += self.indent + "message_pb = ProtobufMessage()\n" 
cls_str += self.indent + "dialogue_message_pb = DialogueMessage()\n" cls_str += self.indent + "{}_msg = {}_pb2.{}Message()\n\n".format( self.protocol_specification.name, self.protocol_specification.name, self.protocol_specification_in_camel_case, ) cls_str += self.indent + "dialogue_message_pb.message_id = msg.message_id\n" cls_str += self.indent + "dialogue_reference = msg.dialogue_reference\n" cls_str += ( self.indent + "dialogue_message_pb.dialogue_starter_reference = dialogue_reference[0]\n" ) cls_str += ( self.indent + "dialogue_message_pb.dialogue_responder_reference = dialogue_reference[1]\n" ) cls_str += self.indent + "dialogue_message_pb.target = msg.target\n\n" cls_str += self.indent + "performative_id = msg.performative\n" counter = 1 for performative, contents in self.spec.speech_acts.items(): if counter == 1: cls_str += self.indent + "if " else: cls_str += self.indent + "elif " cls_str += "performative_id == {}Message.Performative.{}:\n".format( self.protocol_specification_in_camel_case, performative.upper() ) self._change_indent(1) cls_str += ( self.indent + "performative = {}_pb2.{}Message.{}_Performative() # type: ignore\n".format( self.protocol_specification.name, self.protocol_specification_in_camel_case, performative.title(), ) ) for content_name, content_type in contents.items(): cls_str += self._encoding_message_content_from_python_to_protobuf( content_name, content_type ) cls_str += self.indent + "{}_msg.{}.CopyFrom(performative)\n".format( self.protocol_specification.name, performative ) counter += 1 self._change_indent(-1) cls_str += self.indent + "else:\n" self._change_indent(1) cls_str += ( self.indent + 'raise ValueError("Performative not valid: {}".format(performative_id))\n\n' ) self._change_indent(-1) cls_str += ( self.indent + "dialogue_message_pb.content = {}_msg.SerializeToString()\n\n".format( self.protocol_specification.name, ) ) cls_str += ( self.indent + "message_pb.dialogue_message.CopyFrom(dialogue_message_pb)\n" ) cls_str += 
self.indent + "message_bytes = message_pb.SerializeToString()\n" cls_str += self.indent + "return message_bytes\n" self._change_indent(-1) # decoder cls_str += self.indent + "@staticmethod\n" cls_str += self.indent + "def decode(obj: bytes) -> Message:\n" self._change_indent(1) cls_str += self.indent + '"""\n' cls_str += self.indent + "Decode bytes into a '{}' message.\n\n".format( self.protocol_specification_in_camel_case, ) cls_str += self.indent + ":param obj: the bytes object.\n" cls_str += self.indent + ":return: the '{}' message.\n".format( self.protocol_specification_in_camel_case ) cls_str += self.indent + '"""\n' cls_str += self.indent + "message_pb = ProtobufMessage()\n" cls_str += self.indent + "{}_pb = {}_pb2.{}Message()\n".format( self.protocol_specification.name, self.protocol_specification.name, self.protocol_specification_in_camel_case, ) cls_str += self.indent + "message_pb.ParseFromString(obj)\n" cls_str += self.indent + "message_id = message_pb.dialogue_message.message_id\n" cls_str += ( self.indent + "dialogue_reference = (message_pb.dialogue_message.dialogue_starter_reference, message_pb.dialogue_message.dialogue_responder_reference)\n" ) cls_str += self.indent + "target = message_pb.dialogue_message.target\n\n" cls_str += ( self.indent + "{}_pb.ParseFromString(message_pb.dialogue_message.content)\n".format( self.protocol_specification.name ) ) cls_str += ( self.indent + 'performative = {}_pb.WhichOneof("performative")\n'.format( self.protocol_specification.name ) ) cls_str += ( self.indent + "performative_id = {}Message.Performative(str(performative))\n".format( self.protocol_specification_in_camel_case ) ) cls_str += ( self.indent + "performative_content = dict() # type: Dict[str, Any]\n" ) counter = 1 for performative, contents in self.spec.speech_acts.items(): if counter == 1: cls_str += self.indent + "if " else: cls_str += self.indent + "elif " cls_str += "performative_id == {}Message.Performative.{}:\n".format( 
self.protocol_specification_in_camel_case, performative.upper() ) self._change_indent(1) if len(contents.keys()) == 0: cls_str += self.indent + "pass\n" else: for content_name, content_type in contents.items(): cls_str += self._decoding_message_content_from_protobuf_to_python( performative, content_name, content_type ) counter += 1 self._change_indent(-1) cls_str += self.indent + "else:\n" self._change_indent(1) cls_str += ( self.indent + 'raise ValueError("Performative not valid: {}.".format(performative_id))\n\n' ) self._change_indent(-1) cls_str += self.indent + "return {}Message(\n".format( self.protocol_specification_in_camel_case, ) self._change_indent(1) cls_str += self.indent + "message_id=message_id,\n" cls_str += self.indent + "dialogue_reference=dialogue_reference,\n" cls_str += self.indent + "target=target,\n" cls_str += self.indent + "performative=performative,\n" cls_str += self.indent + "**performative_content\n" self._change_indent(-1) cls_str += self.indent + ")\n" self._change_indent(-2) return cls_str def _content_to_proto_field_str( self, content_name: str, content_type: str, tag_no: int, ) -> Tuple[str, int]: """ Convert a message content to its representation in a protocol buffer schema. 
:param content_name: the name of the content :param content_type: the type of the content :param tag_no: the tag number :return: the content in protocol buffer schema and the next tag number to be used """ entry = "" if content_type.startswith("FrozenSet") or content_type.startswith( "Tuple" ): # it is a <PCT> element_type = _get_sub_types_of_compositional_types(content_type)[0] proto_type = _python_pt_or_ct_type_to_proto_type(element_type) entry = self.indent + "repeated {} {} = {};\n".format( proto_type, content_name, tag_no ) tag_no += 1 elif content_type.startswith("Dict"): # it is a <PMT> key_type = _get_sub_types_of_compositional_types(content_type)[0] value_type = _get_sub_types_of_compositional_types(content_type)[1] proto_key_type = _python_pt_or_ct_type_to_proto_type(key_type) proto_value_type = _python_pt_or_ct_type_to_proto_type(value_type) entry = self.indent + "map<{}, {}> {} = {};\n".format( proto_key_type, proto_value_type, content_name, tag_no ) tag_no += 1 elif content_type.startswith("Union"): # it is an <MT> sub_types = _get_sub_types_of_compositional_types(content_type) for sub_type in sub_types: sub_type_name = _union_sub_type_to_protobuf_variable_name( content_name, sub_type ) content_to_proto_field_str, tag_no = self._content_to_proto_field_str( sub_type_name, sub_type, tag_no ) entry += content_to_proto_field_str elif content_type.startswith("Optional"): # it is an <O> sub_type = _get_sub_types_of_compositional_types(content_type)[0] content_to_proto_field_str, tag_no = self._content_to_proto_field_str( content_name, sub_type, tag_no ) entry = content_to_proto_field_str entry += self.indent + "bool {}_is_set = {};\n".format(content_name, tag_no) tag_no += 1 else: # it is a <CT> or <PT> proto_type = _python_pt_or_ct_type_to_proto_type(content_type) entry = self.indent + "{} {} = {};\n".format( proto_type, content_name, tag_no ) tag_no += 1 return entry, tag_no def _protocol_buffer_schema_str(self) -> str: """ Produce the content of the 
Protocol Buffers schema. :return: the protocol buffers schema content """ self._change_indent(0, "s") # heading proto_buff_schema_str = self.indent + 'syntax = "proto3";\n\n' proto_buff_schema_str += self.indent + "package {};\n\n".format( public_id_to_package_name( self.protocol_specification.protocol_specification_id ) ) proto_buff_schema_str += self.indent + "message {}Message{{\n\n".format( self.protocol_specification_in_camel_case ) self._change_indent(1) # custom types if ( (len(self.spec.all_custom_types) != 0) and (self.protocol_specification.protobuf_snippets is not None) and (self.protocol_specification.protobuf_snippets != "") ): proto_buff_schema_str += self.indent + "// Custom Types\n" for custom_type in self.spec.all_custom_types: proto_buff_schema_str += self.indent + "message {}{{\n".format( custom_type ) self._change_indent(1) # formatting and adding the custom type protobuf entry specification_custom_type = "ct:" + custom_type proto_part = self.protocol_specification.protobuf_snippets[ specification_custom_type ] number_of_new_lines = proto_part.count("\n") if number_of_new_lines != 0: formatted_proto_part = proto_part.replace( "\n", "\n" + self.indent, number_of_new_lines - 1 ) else: formatted_proto_part = proto_part proto_buff_schema_str += self.indent + formatted_proto_part self._change_indent(-1) proto_buff_schema_str += self.indent + "}\n\n" proto_buff_schema_str += "\n" # performatives proto_buff_schema_str += self.indent + "// Performatives and contents\n" for performative, contents in self.spec.speech_acts.items(): proto_buff_schema_str += self.indent + "message {}_Performative{{".format( performative.title() ) self._change_indent(1) tag_no = 1 if len(contents) == 0: proto_buff_schema_str += "}\n\n" self._change_indent(-1) else: proto_buff_schema_str += "\n" for content_name, content_type in contents.items(): ( content_to_proto_field_str, tag_no, ) = self._content_to_proto_field_str( content_name, content_type, tag_no ) 
proto_buff_schema_str += content_to_proto_field_str
                # all contents of this performative emitted: dedent and close
                # its message block
                self._change_indent(-1)
                proto_buff_schema_str += self.indent + "}\n\n"
        proto_buff_schema_str += "\n"

        # The 'performative' oneof selects exactly one performative message.
        # NOTE(review): oneof field tags start at 5 — presumably lower tag
        # numbers are reserved in this schema layout; confirm against the
        # framework's protobuf conventions.
        proto_buff_schema_str += self.indent + "oneof performative{\n"
        self._change_indent(1)
        tag_no = 5
        for performative in self.spec.all_performatives:
            proto_buff_schema_str += self.indent + "{}_Performative {} = {};\n".format(
                performative.title(), performative, tag_no
            )
            tag_no += 1
        self._change_indent(-1)
        proto_buff_schema_str += self.indent + "}\n"
        self._change_indent(-1)
        # close the top-level <Name>Message message
        proto_buff_schema_str += self.indent + "}\n"
        return proto_buff_schema_str

    def _protocol_yaml_str(self) -> str:
        """
        Produce the content of the protocol.yaml file.

        :return: the protocol.yaml content
        """
        # Identity fields are copied verbatim from the loaded specification.
        protocol_yaml_str = "name: {}\n".format(self.protocol_specification.name)
        protocol_yaml_str += "author: {}\n".format(self.protocol_specification.author)
        protocol_yaml_str += "version: {}\n".format(self.protocol_specification.version)
        protocol_yaml_str += "protocol_specification_id: {}\n".format(
            str(self.protocol_specification.protocol_specification_id)
        )
        protocol_yaml_str += "type: {}\n".format(
            self.protocol_specification.component_type
        )
        protocol_yaml_str += "description: {}\n".format(
            self.protocol_specification.description
        )
        protocol_yaml_str += "license: {}\n".format(self.protocol_specification.license)
        # aea_version is quoted so YAML keeps it a string
        protocol_yaml_str += "aea_version: '{}'\n".format(
            self.protocol_specification.aea_version
        )
        # The literal '{}' below is YAML (an empty map), NOT a str.format
        # placeholder — no .format() is applied to these lines.
        protocol_yaml_str += "fingerprint: {}\n"
        protocol_yaml_str += "fingerprint_ignore_patterns: []\n"
        protocol_yaml_str += "dependencies:\n"
        protocol_yaml_str += " protobuf: {}\n"
        return protocol_yaml_str

    def _init_str(self) -> str:
        """
        Produce the content of the __init__.py file.
:return: the __init__.py content """ init_str = _copyright_header_str(self.protocol_specification.author) init_str += "\n" init_str += '"""\nThis module contains the support resources for the {} protocol.\n\nIt was created with protocol buffer compiler version `{}` and aea version `{}`.\n"""\n\n'.format( self.protocol_specification.name, self.protoc_version, __aea_version__ ) init_str += "from {}.message import {}Message\n".format( self.dotted_path_to_protocol_package, self.protocol_specification_in_camel_case, ) init_str += "from {}.serialization import {}Serializer\n".format( self.dotted_path_to_protocol_package, self.protocol_specification_in_camel_case, ) init_str += "{}Message.serializer = {}Serializer\n".format( self.protocol_specification_in_camel_case, self.protocol_specification_in_camel_case, ) return init_str def generate_protobuf_only_mode( self, language: str = PROTOCOL_LANGUAGE_PYTHON, run_protolint: bool = True, ) -> Optional[str]: """ Run the generator in "protobuf only" mode: a) validate the protocol specification. b) create the protocol buffer schema file. c) create the protocol buffer implementation file via 'protoc'. :param language: the target language in which to generate the package. :param run_protolint: whether to run protolint or not. :return: None """ if language not in SUPPORTED_PROTOCOL_LANGUAGES: raise ValueError( f"Unsupported language. Expected one of {SUPPORTED_PROTOCOL_LANGUAGES}. Found {language}." 
) protobuf_output = None # type: Optional[str] # Create the output folder output_folder = Path(self.path_to_generated_protocol_package) if not output_folder.exists(): os.mkdir(output_folder) # Generate protocol buffer schema file _create_protocol_file( self.path_to_generated_protocol_package, "{}.proto".format(self.protocol_specification.name), self._protocol_buffer_schema_str(), ) # Try to compile protobuf schema file is_compiled, msg = compile_protobuf_using_protoc( self.path_to_generated_protocol_package, self.protocol_specification.name, language, ) if not is_compiled: # Remove the generated folder and files shutil.rmtree(output_folder) raise SyntaxError( "Error when trying to compile the protocol buffer schema file:\n" + msg ) # Run protolint if run_protolint: is_correctly_formatted, protolint_output = apply_protolint( self.path_to_generated_protocol_package, self.protocol_specification.name, ) if not is_correctly_formatted and protolint_output != "": protobuf_output = "Protolint warnings:\n" + protolint_output # Run black and isort formatting for python if language == PROTOCOL_LANGUAGE_PYTHON: try_run_black_formatting(self.path_to_generated_protocol_package) try_run_isort_formatting(self.path_to_generated_protocol_package) return protobuf_output def generate_full_mode(self, language: str) -> Optional[str]: """ Run the generator in "full" mode: Runs the generator in protobuf only mode: a) validate the protocol specification. b) create the protocol buffer schema file. c) create the protocol buffer implementation file via 'protoc'. Additionally: d) generates python modules. e) applies black formatting f) applies isort formatting :param language: the language for which to create protobuf files :return: optional warning message """ if language != PROTOCOL_LANGUAGE_PYTHON: raise ValueError( f"Unsupported language. Expected 'python' because currently the framework supports full generation of protocols only in Python. Found {language}." 
) # Run protobuf only mode full_mode_output = self.generate_protobuf_only_mode( language=PROTOCOL_LANGUAGE_PYTHON ) # Generate Python protocol package _create_protocol_file( self.path_to_generated_protocol_package, INIT_FILE_NAME, self._init_str() ) _create_protocol_file( self.path_to_generated_protocol_package, PROTOCOL_YAML_FILE_NAME, self._protocol_yaml_str(), ) _create_protocol_file( self.path_to_generated_protocol_package, MESSAGE_DOT_PY_FILE_NAME, self._message_class_str(), ) if ( self.protocol_specification.dialogue_config is not None and self.protocol_specification.dialogue_config != {} ): _create_protocol_file( self.path_to_generated_protocol_package, DIALOGUE_DOT_PY_FILE_NAME, self._dialogue_class_str(), ) if len(self.spec.all_custom_types) > 0: _create_protocol_file( self.path_to_generated_protocol_package, CUSTOM_TYPES_DOT_PY_FILE_NAME, self._custom_types_module_str(), ) _create_protocol_file( self.path_to_generated_protocol_package, SERIALIZATION_DOT_PY_FILE_NAME, self._serialization_class_str(), ) # Run black formatting try_run_black_formatting(self.path_to_generated_protocol_package) # Run isort formatting try_run_isort_formatting(self.path_to_generated_protocol_package) # Warn if specification has custom types if len(self.spec.all_custom_types) > 0: incomplete_generation_warning_msg = "The generated protocol is incomplete, because the protocol specification contains the following custom types: {}. Update the generated '{}' file with the appropriate implementations of these custom types.".format( self.spec.all_custom_types, CUSTOM_TYPES_DOT_PY_FILE_NAME ) if full_mode_output is not None: full_mode_output += incomplete_generation_warning_msg else: full_mode_output = incomplete_generation_warning_msg return full_mode_output def generate( self, protobuf_only: bool = False, language: str = PROTOCOL_LANGUAGE_PYTHON ) -> Optional[str]: """ Run the generator either in "full" or "protobuf only" mode. :param protobuf_only: mode of running the generator. 
:param language: the target language in which to generate the protocol package. :return: optional warning message. """ if protobuf_only: output = self.generate_protobuf_only_mode(language) # type: Optional[str] # Warn about the protobuf only mode protobuf_mode_warning_msg = ( "The generated protocol is incomplete. It only includes the protocol buffer definitions. " + "You must implement and add other definitions (e.g. messages, serialisation, dialogue, etc) to this package." ) if output is not None: output += protobuf_mode_warning_msg else: output = protobuf_mode_warning_msg else: output = self.generate_full_mode(language) return output def public_id_to_package_name(public_id: PublicId) -> str: """Make package name string from public_id provided.""" return f'aea.{public_id.author}.{public_id.name}.v{public_id.version.replace('.', '_')}'
# -*- coding: utf-8 -*- # ------------------------------------------------------------------------------ # # Copyright 2018-2019 Fetch.AI Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------------ """This module contains the protocol generator.""" import itertools import os import shutil # pylint: skip-file from datetime import date from pathlib import Path from typing import Optional, Tuple # pylint: skip-file from aea.__version__ import __version__ as __aea_version__ from aea.configurations.base import ProtocolSpecificationParseError from aea.configurations.constants import ( PROTOCOL_LANGUAGE_PYTHON, SUPPORTED_PROTOCOL_LANGUAGES, ) from aea.configurations.data_types import PublicId from aea.protocols.generator.common import ( CUSTOM_TYPES_DOT_PY_FILE_NAME, DIALOGUE_DOT_PY_FILE_NAME, INIT_FILE_NAME, MESSAGE_DOT_PY_FILE_NAME, MESSAGE_IMPORT, PATH_TO_PACKAGES, PROTOCOL_YAML_FILE_NAME, PYTHON_TYPE_TO_PROTO_TYPE, SERIALIZATION_DOT_PY_FILE_NAME, SERIALIZER_IMPORT, _camel_case_to_snake_case, _create_protocol_file, _get_sub_types_of_compositional_types, _includes_custom_type, _python_pt_or_ct_type_to_proto_type, _to_camel_case, _union_sub_type_to_protobuf_variable_name, apply_protolint, check_prerequisites, compile_protobuf_using_protoc, get_protoc_version, load_protocol_specification, try_run_black_formatting, try_run_isort_formatting, ) from aea.protocols.generator.extract_specification import extract 
from aea.protocols.generator.validate import validate

# pylint messages disabled at the top of each generated serialization.py
# (written into the module header by _serialization_class_str)
PYLINT_DISABLE_SERIALIZATION_PY = [
    "too-many-statements",
    "too-many-locals",
    "no-member",
    "too-few-public-methods",
    "redefined-builtin",
]

# NOTE(review): counterpart list — by name, for the generated message.py;
# its use site is outside this view, confirm before relying on it.
PYLINT_DISABLE_MESSAGE_PY = [
    "too-many-statements",
    "too-many-locals",
    "no-member",
    "too-few-public-methods",
    "too-many-branches",
    "not-an-iterable",
    "unidiomatic-typecheck",
    "unsubscriptable-object",
]


def _type_check(variable_name: str, variable_type: str) -> str:
    """
    Return the type check Python instruction (as source text, not evaluated).

    If variable_type is the string "int": type(variable_name) is int
    else: isinstance(variable_name, variable_type)

    :param variable_name: the variable name.
    :param variable_type: the variable type.
    :return: the Python instruction to check the type, in string form.
    """
    if variable_type != "int":
        return f"isinstance({variable_name}, {variable_type})"
    else:
        # Emits an exact-type check for int rather than isinstance.
        # NOTE(review): presumably to reject bool (a subclass of int, so
        # isinstance(True, int) is True) — confirm with the generator tests.
        return f"type({variable_name}) is {variable_type}"


def _copyright_header_str(author: str) -> str:
    """
    Produce the copyright header text for a protocol.

    :param author: the author of the protocol.

    :return: The copyright header text.
""" copy_right_str = ( "# -*- coding: utf-8 -*-\n" "# ------------------------------------------------------------------------------\n" "#\n" ) copy_right_str += "# Copyright {} {}\n".format(date.today().year, author) copy_right_str += ( "#\n" '# Licensed under the Apache License, Version 2.0 (the "License");\n' "# you may not use this file except in compliance with the License.\n" "# You may obtain a copy of the License at\n" "#\n" "# http://www.apache.org/licenses/LICENSE-2.0\n" "#\n" "# Unless required by applicable law or agreed to in writing, software\n" '# distributed under the License is distributed on an "AS IS" BASIS,\n' "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" "# See the License for the specific language governing permissions and\n" "# limitations under the License.\n" "#\n" "# ------------------------------------------------------------------------------\n" ) return copy_right_str class ProtocolGenerator: """This class generates a protocol_verification package from a ProtocolTemplate object.""" def __init__( self, path_to_protocol_specification: str, output_path: str = ".", dotted_path_to_protocol_package: Optional[str] = None, ) -> None: """ Instantiate a protocol generator. :param path_to_protocol_specification: path to protocol specification file :param output_path: the path to the location in which the protocol module is to be generated. 
:param dotted_path_to_protocol_package: the path to the protocol package :raises FileNotFoundError if any prerequisite application is not installed :raises yaml.YAMLError if yaml parser encounters an error condition :raises ProtocolSpecificationParseError if specification fails generator's validation """ # Check the prerequisite applications are installed try: check_prerequisites() except FileNotFoundError: raise self.protoc_version = get_protoc_version() # Load protocol specification yaml file self.protocol_specification = load_protocol_specification( path_to_protocol_specification ) # Validate the specification result_bool, result_msg = validate(self.protocol_specification) if not result_bool: raise ProtocolSpecificationParseError(result_msg) # Extract specification fields self.spec = extract(self.protocol_specification) # Helper fields self.path_to_protocol_specification = path_to_protocol_specification self.protocol_specification_in_camel_case = _to_camel_case( self.protocol_specification.name ) self.path_to_generated_protocol_package = os.path.join( output_path, self.protocol_specification.name ) self.dotted_path_to_protocol_package = ( dotted_path_to_protocol_package + self.protocol_specification.name if dotted_path_to_protocol_package is not None else "{}.{}.protocols.{}".format( PATH_TO_PACKAGES, self.protocol_specification.author, self.protocol_specification.name, ) ) self.indent = "" def _change_indent(self, number: int, mode: str = None) -> None: """ Update the value of 'indent' global variable. This function controls the indentation of the code produced throughout the generator. There are two modes: - Setting the indent to a desired 'number' level. In this case, 'mode' has to be set to "s". - Updating the incrementing/decrementing the indentation level by 'number' amounts. In this case 'mode' is None. 
:param number: the number of indentation levels to set/increment/decrement :param mode: the mode of indentation change """ if mode and mode == "s": if number >= 0: self.indent = number * " " else: raise ValueError("Error: setting indent to be a negative number.") else: if number >= 0: for _ in itertools.repeat(None, number): self.indent += " " else: if abs(number) <= len(self.indent) / 4: self.indent = self.indent[abs(number) * 4 :] else: raise ValueError( "Not enough spaces in the 'indent' variable to remove." ) def _import_from_typing_module(self) -> str: """ Manage import statement for the typing package. :return: import statement for the typing package """ ordered_packages = [ "Dict", "FrozenSet", "Optional", "Set", "Tuple", "Union", "cast", ] import_str = "from typing import Any, " for package in ordered_packages: if self.spec.typing_imports[package]: import_str += "{}, ".format(package) import_str = import_str[:-2] return import_str def _import_from_custom_types_module(self) -> str: """ Manage import statement from custom_types module. :return: import statement for the custom_types module """ import_str = "" if len(self.spec.all_custom_types) == 0: pass else: for custom_class in self.spec.all_custom_types: import_str += "from {}.custom_types import {} as Custom{}\n".format( self.dotted_path_to_protocol_package, custom_class, custom_class, ) import_str = import_str[:-1] return import_str def _performatives_str(self) -> str: """ Generate the performatives instance property string, a set containing all valid performatives of this protocol. :return: the performatives set string """ performatives_str = "{" for performative in self.spec.all_performatives: performatives_str += '"{}", '.format(performative) performatives_str = performatives_str[:-2] performatives_str += "}" return performatives_str def _performatives_enum_str(self) -> str: """ Generate the performatives Enum class. 
        :return: the performatives Enum string
        """
        enum_str = self.indent + "class Performative(Message.Performative):\n"
        self._change_indent(1)
        enum_str += self.indent + '"""Performatives for the {} protocol."""\n\n'.format(
            self.protocol_specification.name
        )
        # one enum member per performative declared in the specification
        for performative in self.spec.all_performatives:
            enum_str += self.indent + '{} = "{}"\n'.format(
                performative.upper(), performative
            )
        enum_str += "\n"
        enum_str += self.indent + "def __str__(self) -> str:\n"
        self._change_indent(1)
        enum_str += self.indent + '"""Get the string representation."""\n'
        enum_str += self.indent + "return str(self.value)\n"
        self._change_indent(-1)
        enum_str += "\n"
        self._change_indent(-1)
        return enum_str

    def _to_custom_custom(self, content_type: str) -> str:
        """
        Replace any custom type occurring in a content type with its 'custom custom' counterpart.

        :param content_type: the content type.
        :return: the content type string with every custom type substituted.
        """
        new_content_type = content_type
        if _includes_custom_type(content_type):
            for custom_type in self.spec.all_custom_types:
                new_content_type = new_content_type.replace(
                    custom_type, self.spec.custom_custom_types[custom_type]
                )
        return new_content_type

    def _check_content_type_str(self, content_name: str, content_type: str) -> str:
        """
        Produce the checks of elements of compositional types.

        :param content_name: the name of the content to be checked
        :param content_type: the type of the content to be checked
        :return: the string containing the checks.
        """
        check_str = ""
        if content_type.startswith("Optional["):
            # optional contents are checked inside an `if self.is_set(...)` guard
            optional = True
            check_str += self.indent + 'if self.is_set("{}"):\n'.format(content_name)
            self._change_indent(1)
            check_str += self.indent + "expected_nb_of_contents += 1\n"
            content_type = _get_sub_types_of_compositional_types(content_type)[0]
            check_str += self.indent + "{} = cast({}, self.{})\n".format(
                content_name, self._to_custom_custom(content_type), content_name
            )
            content_variable = content_name
        else:
            optional = False
            content_variable = "self." + content_name
        if content_type.startswith("Union["):
            element_types = _get_sub_types_of_compositional_types(content_type)
            # collapse composite typing names to their runtime types for isinstance checks
            unique_standard_types_set = set()
            for typing_content_type in element_types:
                if typing_content_type.startswith("FrozenSet"):
                    unique_standard_types_set.add("frozenset")
                elif typing_content_type.startswith("Tuple"):
                    unique_standard_types_set.add("tuple")
                elif typing_content_type.startswith("Dict"):
                    unique_standard_types_set.add("dict")
                else:
                    unique_standard_types_set.add(typing_content_type)
            unique_standard_types_list = sorted(unique_standard_types_set)
            check_str += self.indent
            check_str += "enforce("
            for unique_type in unique_standard_types_list:
                check_str += "{} or ".format(
                    _type_check(content_variable, self._to_custom_custom(unique_type))
                )
            check_str = check_str[:-4]  # strip the trailing " or "
            check_str += ", \"Invalid type for content '{}'. Expected either of '{}'. Found '{{}}'.\".format(type({})))\n".format(
                content_name,
                [
                    unique_standard_type
                    for unique_standard_type in unique_standard_types_list
                ],
                content_variable,
            )
            if "frozenset" in unique_standard_types_list:
                # element-type checks for the frozenset alternative of the union
                check_str += self.indent + "if isinstance({}, frozenset):\n".format(
                    content_variable
                )
                self._change_indent(1)
                check_str += self.indent + "enforce(\n"
                self._change_indent(1)
                frozen_set_element_types_set = set()
                for element_type in element_types:
                    if element_type.startswith("FrozenSet"):
                        frozen_set_element_types_set.add(
                            _get_sub_types_of_compositional_types(element_type)[0]
                        )
                frozen_set_element_types = sorted(frozen_set_element_types_set)
                for frozen_set_element_type in frozen_set_element_types:
                    check_str += self.indent + "all({} for element in {}) or\n".format(
                        _type_check(
                            "element", self._to_custom_custom(frozen_set_element_type)
                        ),
                        content_variable,
                    )
                check_str = check_str[:-4]  # strip the trailing " or\n"
                check_str += "\n"
                self._change_indent(-1)
                if len(frozen_set_element_types) == 1:
                    check_str += (
                        self.indent
                        + ", \"Invalid type for elements of content '{}'. Expected ".format(
                            content_name
                        )
                    )
                    for frozen_set_element_type in frozen_set_element_types:
                        check_str += "'{}'".format(
                            self._to_custom_custom(frozen_set_element_type)
                        )
                    check_str += '.")\n'
                else:
                    check_str += (
                        self.indent
                        + ", \"Invalid type for frozenset elements in content '{}'. Expected either ".format(
                            content_name
                        )
                    )
                    for frozen_set_element_type in frozen_set_element_types:
                        check_str += "'{}' or ".format(
                            self._to_custom_custom(frozen_set_element_type)
                        )
                    check_str = check_str[:-4]
                    check_str += '.")\n'
                self._change_indent(-1)
            if "tuple" in unique_standard_types_list:
                # element-type checks for the tuple alternative of the union
                check_str += self.indent + "if isinstance({}, tuple):\n".format(
                    content_variable
                )
                self._change_indent(1)
                check_str += self.indent + "enforce(\n"
                self._change_indent(1)
                tuple_element_types_set = set()
                for element_type in element_types:
                    if element_type.startswith("Tuple"):
                        tuple_element_types_set.add(
                            _get_sub_types_of_compositional_types(element_type)[0]
                        )
                tuple_element_types = sorted(tuple_element_types_set)
                for tuple_element_type in tuple_element_types:
                    check_str += self.indent + "all({} for element in {}) or \n".format(
                        _type_check(
                            "element", self._to_custom_custom(tuple_element_type)
                        ),
                        content_variable,
                    )
                check_str = check_str[:-4]
                check_str += "\n"
                self._change_indent(-1)
                if len(tuple_element_types) == 1:
                    check_str += (
                        self.indent
                        + ", \"Invalid type for tuple elements in content '{}'. Expected ".format(
                            content_name
                        )
                    )
                    for tuple_element_type in tuple_element_types:
                        check_str += "'{}'".format(
                            self._to_custom_custom(tuple_element_type)
                        )
                    check_str += '.")\n'
                else:
                    check_str += (
                        self.indent
                        + ", \"Invalid type for tuple elements in content '{}'. Expected either ".format(
                            content_name
                        )
                    )
                    for tuple_element_type in tuple_element_types:
                        check_str += "'{}' or ".format(
                            self._to_custom_custom(tuple_element_type)
                        )
                    check_str = check_str[:-4]
                    check_str += '.")\n'
                self._change_indent(-1)
            if "dict" in unique_standard_types_list:
                # key/value type checks for the dict alternative of the union
                check_str += self.indent + "if isinstance({}, dict):\n".format(
                    content_variable
                )
                self._change_indent(1)
                check_str += (
                    self.indent
                    + "for key_of_{}, value_of_{} in {}.items():\n".format(
                        content_name, content_name, content_variable
                    )
                )
                self._change_indent(1)
                check_str += self.indent + "enforce(\n"
                self._change_indent(1)
                dict_key_value_types = dict()
                for element_type in element_types:
                    if element_type.startswith("Dict"):
                        dict_key_value_types[
                            _get_sub_types_of_compositional_types(element_type)[0]
                        ] = _get_sub_types_of_compositional_types(element_type)[1]
                for element1_type in sorted(dict_key_value_types.keys()):
                    check_str += self.indent + "({} and {}) or\n".format(
                        _type_check(
                            "key_of_" + content_name,
                            self._to_custom_custom(element1_type),
                        ),
                        _type_check(
                            "value_of_" + content_name,
                            self._to_custom_custom(dict_key_value_types[element1_type]),
                        ),
                    )
                check_str = check_str[:-4]
                check_str += "\n"
                self._change_indent(-1)
                if len(dict_key_value_types) == 1:
                    check_str += (
                        self.indent
                        + ", \"Invalid type for dictionary key, value in content '{}'. Expected ".format(
                            content_name
                        )
                    )
                    for key in sorted(dict_key_value_types.keys()):
                        check_str += "'{}', '{}'".format(key, dict_key_value_types[key])
                    check_str += '.")\n'
                else:
                    check_str += (
                        self.indent
                        + ", \"Invalid type for dictionary key, value in content '{}'. Expected ".format(
                            content_name
                        )
                    )
                    for key in sorted(dict_key_value_types.keys()):
                        check_str += "'{}','{}' or ".format(
                            key, dict_key_value_types[key]
                        )
                    check_str = check_str[:-4]
                    check_str += '.")\n'
                self._change_indent(-2)
        elif content_type.startswith("FrozenSet["):
            # check the type
            check_str += (
                self.indent
                + "enforce(isinstance({}, frozenset), \"Invalid type for content '{}'. Expected 'frozenset'. Found '{{}}'.\".format(type({})))\n".format(
                    content_variable, content_name, content_variable
                )
            )
            element_type = _get_sub_types_of_compositional_types(content_type)[0]
            check_str += self.indent + "enforce(all(\n"
            self._change_indent(1)
            check_str += self.indent + "{} for element in {}\n".format(
                _type_check("element", self._to_custom_custom(element_type)),
                content_variable,
            )
            self._change_indent(-1)
            check_str += (
                self.indent
                + "), \"Invalid type for frozenset elements in content '{}'. Expected '{}'.\")\n".format(
                    content_name, element_type
                )
            )
        elif content_type.startswith("Tuple["):
            # check the type
            check_str += (
                self.indent
                + "enforce(isinstance({}, tuple), \"Invalid type for content '{}'. Expected 'tuple'. Found '{{}}'.\".format(type({})))\n".format(
                    content_variable, content_name, content_variable
                )
            )
            element_type = _get_sub_types_of_compositional_types(content_type)[0]
            check_str += self.indent + "enforce(all(\n"
            self._change_indent(1)
            check_str += self.indent + "{} for element in {}\n".format(
                _type_check("element", self._to_custom_custom(element_type)),
                content_variable,
            )
            self._change_indent(-1)
            check_str += (
                self.indent
                + "), \"Invalid type for tuple elements in content '{}'. Expected '{}'.\")\n".format(
                    content_name, element_type
                )
            )
        elif content_type.startswith("Dict["):
            # check the type
            check_str += (
                self.indent
                + "enforce(isinstance({}, dict), \"Invalid type for content '{}'. Expected 'dict'. Found '{{}}'.\".format(type({})))\n".format(
                    content_variable, content_name, content_variable
                )
            )
            element_type_1 = _get_sub_types_of_compositional_types(content_type)[0]
            element_type_2 = _get_sub_types_of_compositional_types(content_type)[1]
            # check the keys type then check the values type
            check_str += (
                self.indent
                + "for key_of_{}, value_of_{} in {}.items():\n".format(
                    content_name, content_name, content_variable
                )
            )
            self._change_indent(1)
            check_str += self.indent + "enforce(\n"
            self._change_indent(1)
            check_str += self.indent + "{}\n".format(
                _type_check(
                    "key_of_" + content_name, self._to_custom_custom(element_type_1)
                )
            )
            self._change_indent(-1)
            check_str += (
                self.indent
                + ", \"Invalid type for dictionary keys in content '{}'. Expected '{}'. Found '{{}}'.\".format(type(key_of_{})))\n".format(
                    content_name, element_type_1, content_name
                )
            )
            check_str += self.indent + "enforce(\n"
            self._change_indent(1)
            check_str += self.indent + "{}\n".format(
                _type_check(
                    "value_of_" + content_name, self._to_custom_custom(element_type_2)
                )
            )
            self._change_indent(-1)
            check_str += (
                self.indent
                + ", \"Invalid type for dictionary values in content '{}'. Expected '{}'. Found '{{}}'.\".format(type(value_of_{})))\n".format(
                    content_name, element_type_2, content_name
                )
            )
            self._change_indent(-1)
        else:
            # primitive or custom type: a single enforce() with an isinstance-style check
            check_str += (
                self.indent
                + "enforce({}, \"Invalid type for content '{}'. Expected '{}'. Found '{{}}'.\".format(type({})))\n".format(
                    _type_check(content_variable, self._to_custom_custom(content_type)),
                    content_name,
                    content_type,
                    content_variable,
                )
            )
        if optional:
            # close the `if self.is_set(...)` guard opened above
            self._change_indent(-1)
        return check_str

    def _message_class_str(self) -> str:
        """
        Produce the content of the Message class.
        :return: the message.py file content
        """
        self._change_indent(0, "s")

        # Header
        cls_str = _copyright_header_str(self.protocol_specification.author) + "\n"

        # Module docstring
        cls_str += (
            self.indent
            + '"""This module contains {}\'s message definition."""\n\n'.format(
                self.protocol_specification.name
            )
        )
        cls_str += f"# pylint: disable={','.join(PYLINT_DISABLE_MESSAGE_PY)}\n"

        # Imports
        cls_str += self.indent + "import logging\n"
        cls_str += self._import_from_typing_module() + "\n\n"
        cls_str += self.indent + "from aea.configurations.base import PublicId\n"
        cls_str += self.indent + "from aea.exceptions import AEAEnforceError, enforce\n"
        cls_str += MESSAGE_IMPORT + "\n"
        if self._import_from_custom_types_module() != "":
            cls_str += "\n" + self._import_from_custom_types_module() + "\n"
        else:
            cls_str += self._import_from_custom_types_module()
        cls_str += (
            self.indent
            + '\n_default_logger = logging.getLogger("aea.packages.{}.protocols.{}.message")\n'.format(
                self.protocol_specification.author, self.protocol_specification.name
            )
        )
        cls_str += self.indent + "\nDEFAULT_BODY_SIZE = 4\n"

        # Class Header
        cls_str += self.indent + "\n\nclass {}Message(Message):\n".format(
            self.protocol_specification_in_camel_case
        )
        self._change_indent(1)
        cls_str += self.indent + '"""{}"""\n\n'.format(
            self.protocol_specification.description
        )

        # Class attributes
        cls_str += self.indent + 'protocol_id = PublicId.from_str("{}/{}:{}")\n'.format(
            self.protocol_specification.author,
            self.protocol_specification.name,
            self.protocol_specification.version,
        )
        cls_str += (
            self.indent
            + 'protocol_specification_id = PublicId.from_str("{}/{}:{}")\n'.format(
                self.protocol_specification.protocol_specification_id.author,
                self.protocol_specification.protocol_specification_id.name,
                self.protocol_specification.protocol_specification_id.version,
            )
        )
        # alias every custom type as a class attribute of the generated message
        for custom_type in self.spec.all_custom_types:
            cls_str += "\n"
            cls_str += self.indent + "{} = Custom{}\n".format(custom_type, custom_type)

        # Performatives Enum
        cls_str += "\n" + self._performatives_enum_str()
        cls_str += self.indent + "_performatives = {}\n".format(
            self._performatives_str()
        )

        # slots
        cls_str += self.indent + "__slots__: Tuple[str, ...] = tuple()\n"
        cls_str += self.indent + "class _SlotsCls():\n"
        self._change_indent(1)
        cls_str += self.indent + "__slots__ = (\n"
        self._change_indent(1)
        # default fields
        default_slots = ["performative", "dialogue_reference", "message_id", "target"]
        slots = list(self.spec.all_unique_contents.keys()) + default_slots
        for field_name in sorted(slots):
            cls_str += self.indent + f'"{field_name}",'
        self._change_indent(-1)
        cls_str += self.indent + ")\n"
        self._change_indent(-1)

        # __init__
        cls_str += self.indent + "def __init__(\n"
        self._change_indent(1)
        cls_str += self.indent + "self,\n"
        cls_str += self.indent + "performative: Performative,\n"
        cls_str += self.indent + 'dialogue_reference: Tuple[str, str] = ("", ""),\n'
        cls_str += self.indent + "message_id: int = 1,\n"
        cls_str += self.indent + "target: int = 0,\n"
        cls_str += self.indent + "**kwargs: Any,\n"
        self._change_indent(-1)
        cls_str += self.indent + "):\n"
        self._change_indent(1)
        cls_str += self.indent + '"""\n'
        cls_str += self.indent + "Initialise an instance of {}Message.\n\n".format(
            self.protocol_specification_in_camel_case
        )
        cls_str += self.indent + ":param message_id: the message id.\n"
        cls_str += self.indent + ":param dialogue_reference: the dialogue reference.\n"
        cls_str += self.indent + ":param target: the message target.\n"
        cls_str += self.indent + ":param performative: the message performative.\n"
        cls_str += self.indent + ":param **kwargs: extra options.\n"
        cls_str += self.indent + '"""\n'
        cls_str += self.indent + "super().__init__(\n"
        self._change_indent(1)
        cls_str += self.indent + "dialogue_reference=dialogue_reference,\n"
        cls_str += self.indent + "message_id=message_id,\n"
        cls_str += self.indent + "target=target,\n"
        cls_str += (
            self.indent
            + "performative={}Message.Performative(performative),\n".format(
                self.protocol_specification_in_camel_case
            )
        )
        cls_str += self.indent + "**kwargs,\n"
        self._change_indent(-1)
        cls_str += self.indent + ")\n"
        self._change_indent(-1)

        # Instance properties
        cls_str += self.indent + "@property\n"
        cls_str += self.indent + "def valid_performatives(self) -> Set[str]:\n"
        self._change_indent(1)
        cls_str += self.indent + '"""Get valid performatives."""\n'
        cls_str += self.indent + "return self._performatives\n\n"
        self._change_indent(-1)
        cls_str += self.indent + "@property\n"
        cls_str += self.indent + "def dialogue_reference(self) -> Tuple[str, str]:\n"
        self._change_indent(1)
        cls_str += self.indent + '"""Get the dialogue_reference of the message."""\n'
        cls_str += (
            self.indent
            + 'enforce(self.is_set("dialogue_reference"), "dialogue_reference is not set.")\n'
        )
        cls_str += (
            self.indent
            + 'return cast(Tuple[str, str], self.get("dialogue_reference"))\n\n'
        )
        self._change_indent(-1)
        cls_str += self.indent + "@property\n"
        cls_str += self.indent + "def message_id(self) -> int:\n"
        self._change_indent(1)
        cls_str += self.indent + '"""Get the message_id of the message."""\n'
        cls_str += (
            self.indent
            + 'enforce(self.is_set("message_id"), "message_id is not set.")\n'
        )
        cls_str += self.indent + 'return cast(int, self.get("message_id"))\n\n'
        self._change_indent(-1)
        cls_str += self.indent + "@property\n"
        cls_str += (
            self.indent
            + "def performative(self) -> Performative: # type: ignore # noqa: F821\n"
        )
        self._change_indent(1)
        cls_str += self.indent + '"""Get the performative of the message."""\n'
        cls_str += (
            self.indent
            + 'enforce(self.is_set("performative"), "performative is not set.")\n'
        )
        cls_str += (
            self.indent
            + 'return cast({}Message.Performative, self.get("performative"))\n\n'.format(
                self.protocol_specification_in_camel_case
            )
        )
        self._change_indent(-1)
        cls_str += self.indent + "@property\n"
        cls_str += self.indent + "def target(self) -> int:\n"
        self._change_indent(1)
        cls_str += self.indent + '"""Get the target of the message."""\n'
        cls_str += (
            self.indent + 'enforce(self.is_set("target"), "target is not set.")\n'
        )
        cls_str += self.indent + 'return cast(int, self.get("target"))\n\n'
        self._change_indent(-1)

        # one property per unique content declared in the specification
        for content_name in sorted(self.spec.all_unique_contents.keys()):
            content_type = self.spec.all_unique_contents[content_name]
            cls_str += self.indent + "@property\n"
            cls_str += self.indent + "def {}(self) -> {}:\n".format(
                content_name, self._to_custom_custom(content_type)
            )
            self._change_indent(1)
            cls_str += (
                self.indent
                + '"""Get the \'{}\' content from the message."""\n'.format(
                    content_name
                )
            )
            if not content_type.startswith("Optional"):
                cls_str += (
                    self.indent
                    + 'enforce(self.is_set("{}"), "\'{}\' content is not set.")\n'.format(
                        content_name, content_name
                    )
                )
            cls_str += self.indent + 'return cast({}, self.get("{}"))\n\n'.format(
                self._to_custom_custom(content_type), content_name
            )
            self._change_indent(-1)

        # check_consistency method
        cls_str += self.indent + "def _is_consistent(self) -> bool:\n"
        self._change_indent(1)
        cls_str += (
            self.indent
            + '"""Check that the message follows the {} protocol."""\n'.format(
                self.protocol_specification.name
            )
        )
        cls_str += self.indent + "try:\n"
        self._change_indent(1)
        cls_str += (
            self.indent
            + "enforce(isinstance(self.dialogue_reference, tuple), \"Invalid type for 'dialogue_reference'. Expected 'tuple'. Found '{}'.\""
            ".format(type(self.dialogue_reference)))\n"
        )
        cls_str += (
            self.indent
            + "enforce(isinstance(self.dialogue_reference[0], str), \"Invalid type for 'dialogue_reference[0]'. Expected 'str'. Found '{}'.\""
            ".format(type(self.dialogue_reference[0])))\n"
        )
        cls_str += (
            self.indent
            + "enforce(isinstance(self.dialogue_reference[1], str), \"Invalid type for 'dialogue_reference[1]'. Expected 'str'. Found '{}'.\""
            ".format(type(self.dialogue_reference[1])))\n"
        )
        cls_str += (
            self.indent
            + "enforce("
            + _type_check("self.message_id", "int")
            + ", \"Invalid type for 'message_id'. Expected 'int'. Found '{}'.\""
            ".format(type(self.message_id)))\n"
        )
        cls_str += (
            self.indent
            + "enforce("
            + _type_check("self.target", "int")
            + ", \"Invalid type for 'target'. Expected 'int'. Found '{}'.\""
            ".format(type(self.target)))\n\n"
        )

        cls_str += self.indent + "# Light Protocol Rule 2\n"
        cls_str += self.indent + "# Check correct performative\n"
        cls_str += (
            self.indent
            + "enforce(isinstance(self.performative, {}Message.Performative)".format(
                self.protocol_specification_in_camel_case
            )
        )
        cls_str += (
            ", \"Invalid 'performative'. Expected either of '{}'. Found '{}'.\".format("
        )
        cls_str += "self.valid_performatives, self.performative"
        cls_str += "))\n\n"

        cls_str += self.indent + "# Check correct contents\n"
        cls_str += (
            self.indent
            + "actual_nb_of_contents = len(self._body) - DEFAULT_BODY_SIZE\n"
        )
        cls_str += self.indent + "expected_nb_of_contents = 0\n"
        # one if/elif branch per performative, with per-content type checks
        counter = 1
        for performative, contents in self.spec.speech_acts.items():
            if counter == 1:
                cls_str += self.indent + "if "
            else:
                cls_str += self.indent + "elif "
            cls_str += "self.performative == {}Message.Performative.{}:\n".format(
                self.protocol_specification_in_camel_case,
                performative.upper(),
            )
            self._change_indent(1)
            nb_of_non_optional_contents = 0
            for content_type in contents.values():
                if not content_type.startswith("Optional"):
                    nb_of_non_optional_contents += 1

            cls_str += self.indent + "expected_nb_of_contents = {}\n".format(
                nb_of_non_optional_contents
            )
            for content_name, content_type in contents.items():
                cls_str += self._check_content_type_str(content_name, content_type)
            counter += 1
            self._change_indent(-1)

        cls_str += "\n"
        cls_str += self.indent + "# Check correct content count\n"
        cls_str += (
            self.indent
            + "enforce(expected_nb_of_contents == actual_nb_of_contents, "
            '"Incorrect number of contents. Expected {}. Found {}"'
            ".format(expected_nb_of_contents, actual_nb_of_contents))\n\n"
        )

        cls_str += self.indent + "# Light Protocol Rule 3\n"
        cls_str += self.indent + "if self.message_id == 1:\n"
        self._change_indent(1)
        cls_str += (
            self.indent
            + "enforce(self.target == 0, \"Invalid 'target'. Expected 0 (because 'message_id' is 1). Found {}.\".format(self.target))\n"
        )
        self._change_indent(-2)
        cls_str += (
            self.indent + "except (AEAEnforceError, ValueError, KeyError) as e:\n"
        )
        self._change_indent(1)
        cls_str += self.indent + "_default_logger.error(str(e))\n"
        cls_str += self.indent + "return False\n\n"
        self._change_indent(-1)
        cls_str += self.indent + "return True\n"

        return cls_str

    def _valid_replies_str(self) -> str:
        """
        Generate the `valid replies` dictionary.

        :return: the `valid replies` dictionary string
        """
        valid_replies_str = (
            self.indent
            + "VALID_REPLIES: Dict[Message.Performative, FrozenSet[Message.Performative]] = {\n"
        )
        self._change_indent(1)
        for performative in sorted(self.spec.reply.keys()):
            valid_replies_str += (
                self.indent
                + "{}Message.Performative.{}: frozenset(".format(
                    self.protocol_specification_in_camel_case, performative.upper()
                )
            )
            if len(self.spec.reply[performative]) > 0:
                valid_replies_str += "\n"
                self._change_indent(1)
                valid_replies_str += self.indent + "{"
                for reply in self.spec.reply[performative]:
                    valid_replies_str += "{}Message.Performative.{}, ".format(
                        self.protocol_specification_in_camel_case, reply.upper()
                    )
                valid_replies_str = valid_replies_str[:-2]  # strip the trailing ", "
                valid_replies_str += "}\n"
                self._change_indent(-1)
            valid_replies_str += self.indent + "),\n"
        self._change_indent(-1)
        valid_replies_str += self.indent + "}"
        return valid_replies_str

    def _end_state_enum_str(self) -> str:
        """
        Generate the end state Enum class.
        :return: the end state Enum string
        """
        enum_str = self.indent + "class EndState(Dialogue.EndState):\n"
        self._change_indent(1)
        enum_str += (
            self.indent
            + '"""This class defines the end states of a {} dialogue."""\n\n'.format(
                self.protocol_specification.name
            )
        )
        # members are numbered in specification order, starting at 0
        tag = 0
        for end_state in self.spec.end_states:
            enum_str += self.indent + "{} = {}\n".format(end_state.upper(), tag)
            tag += 1
        self._change_indent(-1)
        return enum_str

    def _agent_role_enum_str(self) -> str:
        """
        Generate the agent role Enum class.

        :return: the agent role Enum string
        """
        enum_str = self.indent + "class Role(Dialogue.Role):\n"
        self._change_indent(1)
        enum_str += (
            self.indent
            + '"""This class defines the agent\'s role in a {} dialogue."""\n\n'.format(
                self.protocol_specification.name
            )
        )
        for role in self.spec.roles:
            enum_str += self.indent + '{} = "{}"\n'.format(role.upper(), role)
        self._change_indent(-1)
        return enum_str

    def _dialogue_class_str(self) -> str:
        """
        Produce the content of the Dialogue and Dialogues classes.

        :return: the dialogues.py file content
        """
        self._change_indent(0, "s")

        # Header
        cls_str = _copyright_header_str(self.protocol_specification.author) + "\n"

        # Module docstring
        cls_str += self.indent + '"""\n'
        cls_str += (
            self.indent
            + "This module contains the classes required for {} dialogue management.\n\n".format(
                self.protocol_specification.name
            )
        )
        cls_str += (
            self.indent
            + "- {}Dialogue: The dialogue class maintains state of a dialogue and manages it.\n".format(
                self.protocol_specification_in_camel_case
            )
        )
        cls_str += (
            self.indent
            + "- {}Dialogues: The dialogues class keeps track of all dialogues.\n".format(
                self.protocol_specification_in_camel_case
            )
        )
        cls_str += self.indent + '"""\n\n'

        # Imports
        cls_str += self.indent + "from abc import ABC\n"
        cls_str += (
            self.indent
            + "from typing import Callable, Dict, FrozenSet, Type, cast\n\n"
        )
        cls_str += self.indent + "from aea.common import Address\n"
        cls_str += self.indent + "from aea.protocols.base import Message\n"
        cls_str += (
            self.indent
            + "from aea.protocols.dialogue.base import Dialogue, DialogueLabel, Dialogues\n\n"
        )
        cls_str += self.indent + "from {}.message import {}Message\n".format(
            self.dotted_path_to_protocol_package,
            self.protocol_specification_in_camel_case,
        )

        # Class Header
        cls_str += "\nclass {}Dialogue(Dialogue):\n".format(
            self.protocol_specification_in_camel_case
        )
        self._change_indent(1)
        cls_str += (
            self.indent
            + '"""The {} dialogue class maintains state of a dialogue and manages it."""\n'.format(
                self.protocol_specification.name
            )
        )

        # Class Constants
        initial_performatives_str = ", ".join(
            [
                "{}Message.Performative.{}".format(
                    self.protocol_specification_in_camel_case, initial_performative
                )
                for initial_performative in self.spec.initial_performatives
            ]
        )
        terminal_performatives_str = ", ".join(
            [
                "{}Message.Performative.{}".format(
                    self.protocol_specification_in_camel_case, terminal_performative
                )
                for terminal_performative in self.spec.terminal_performatives
            ]
        )
        cls_str += (
            self.indent
            + "INITIAL_PERFORMATIVES: FrozenSet[Message.Performative] = frozenset({"
            + initial_performatives_str
            + "})\n"
            + self.indent
            + "TERMINAL_PERFORMATIVES: FrozenSet[Message.Performative] = frozenset({"
            + terminal_performatives_str
            + "})\n"
            + self._valid_replies_str()
        )

        # Enums
        cls_str += "\n" + self._agent_role_enum_str()
        cls_str += "\n" + self._end_state_enum_str()
        cls_str += "\n"

        # initializer
        cls_str += self.indent + "def __init__(\n"
        self._change_indent(1)
        cls_str += self.indent + "self,\n"
        cls_str += self.indent + "dialogue_label: DialogueLabel,\n"
        cls_str += self.indent + "self_address: Address,\n"
        cls_str += self.indent + "role: Dialogue.Role,\n"
        cls_str += self.indent + "message_class: Type[{}Message] = {}Message,\n".format(
            self.protocol_specification_in_camel_case,
            self.protocol_specification_in_camel_case,
        )
        self._change_indent(-1)
        cls_str += self.indent + ") -> None:\n"
        self._change_indent(1)
        cls_str += self.indent + '"""\n'
        cls_str += self.indent + "Initialize a dialogue.\n\n"
        cls_str += (
            self.indent + ":param dialogue_label: the identifier of the dialogue\n"
        )
        cls_str += (
            self.indent
            + ":param self_address: the address of the entity for whom this dialogue is maintained\n"
        )
        cls_str += (
            self.indent
            + ":param role: the role of the agent this dialogue is maintained for\n"
        )
        cls_str += self.indent + ":param message_class: the message class used\n"
        cls_str += self.indent + '"""\n'
        cls_str += self.indent + "Dialogue.__init__(\n"
        cls_str += self.indent + "self,\n"
        cls_str += self.indent + "dialogue_label=dialogue_label,\n"
        cls_str += self.indent + "message_class=message_class,\n"
        cls_str += self.indent + "self_address=self_address,\n"
        cls_str += self.indent + "role=role,\n"
        cls_str += self.indent + ")\n"
        self._change_indent(-2)

        # dialogues class
        cls_str += self.indent + "class {}Dialogues(Dialogues, ABC):\n".format(
            self.protocol_specification_in_camel_case
        )
        self._change_indent(1)
        cls_str += (
            self.indent
            + '"""This class keeps track of all {} dialogues."""\n\n'.format(
                self.protocol_specification.name
            )
        )
        end_states_str = ", ".join(
            [
                "{}Dialogue.EndState.{}".format(
                    self.protocol_specification_in_camel_case, end_state.upper()
                )
                for end_state in self.spec.end_states
            ]
        )
        cls_str += self.indent + "END_STATES = frozenset(\n"
        cls_str += self.indent + "{" + end_states_str + "}"
        cls_str += self.indent + ")\n\n"
        cls_str += (
            self.indent
            + f"_keep_terminal_state_dialogues = {repr(self.spec.keep_terminal_state_dialogues)}\n\n"
        )
        cls_str += self.indent + "def __init__(\n"
        self._change_indent(1)
        cls_str += self.indent + "self,\n"
        cls_str += self.indent + "self_address: Address,\n"
        cls_str += (
            self.indent
            + "role_from_first_message: Callable[[Message, Address], Dialogue.Role],\n"
        )
        cls_str += (
            self.indent
            + "dialogue_class: Type[{}Dialogue] = {}Dialogue,\n".format(
                self.protocol_specification_in_camel_case,
                self.protocol_specification_in_camel_case,
            )
        )
        self._change_indent(-1)
        cls_str += self.indent + ") -> None:\n"
        self._change_indent(1)
        cls_str += self.indent + '"""\n'
        cls_str += self.indent + "Initialize dialogues.\n\n"
        cls_str += (
            self.indent
            + ":param self_address: the address of the entity for whom dialogues are maintained\n"
        )
        cls_str += self.indent + ":param dialogue_class: the dialogue class used\n"
        cls_str += (
            self.indent
            + ":param role_from_first_message: the callable determining role from first message\n"
        )
        cls_str += self.indent + '"""\n'
        cls_str += self.indent + "Dialogues.__init__(\n"
        self._change_indent(1)
        cls_str += self.indent + "self,\n"
        cls_str += self.indent + "self_address=self_address,\n"
        cls_str += (
            self.indent
            + "end_states=cast(FrozenSet[Dialogue.EndState], self.END_STATES),\n"
        )
        cls_str += self.indent + "message_class={}Message,\n".format(
            self.protocol_specification_in_camel_case
        )
        cls_str += self.indent + "dialogue_class=dialogue_class,\n"
        cls_str += self.indent + "role_from_first_message=role_from_first_message,\n"
        self._change_indent(-1)
        cls_str += self.indent + ")\n"
        self._change_indent(-2)
        cls_str += self.indent + "\n"
        return cls_str

    def _custom_types_module_str(self) -> str:
        """
        Produce the contents of the custom_types module, containing classes corresponding to every custom type in the protocol specification.
        :return: the custom_types.py file content
        """
        self._change_indent(0, "s")

        # Header
        cls_str = _copyright_header_str(self.protocol_specification.author) + "\n"

        # Module docstring
        cls_str += '"""This module contains class representations corresponding to every custom type in the protocol specification."""\n'

        # class code per custom type
        for custom_type in self.spec.all_custom_types:
            cls_str += self.indent + "\n\nclass {}:\n".format(custom_type)
            self._change_indent(1)
            cls_str += (
                self.indent
                + '"""This class represents an instance of {}."""\n\n'.format(
                    custom_type
                )
            )
            # stub __init__ to be filled in by the protocol author
            cls_str += self.indent + "def __init__(self):\n"
            self._change_indent(1)
            cls_str += self.indent + '"""Initialise an instance of {}."""\n'.format(
                custom_type
            )
            cls_str += self.indent + "raise NotImplementedError\n\n"
            self._change_indent(-1)
            # stub encode() staticmethod
            cls_str += self.indent + "@staticmethod\n"
            cls_str += (
                self.indent
                + 'def encode({}_protobuf_object, {}_object: "{}") -> None:\n'.format(
                    _camel_case_to_snake_case(custom_type),
                    _camel_case_to_snake_case(custom_type),
                    custom_type,
                )
            )
            self._change_indent(1)
            cls_str += self.indent + '"""\n'
            cls_str += (
                self.indent
                + "Encode an instance of this class into the protocol buffer object.\n\n"
            )
            cls_str += (
                self.indent
                + "The protocol buffer object in the {}_protobuf_object argument is matched with the instance of this class in the '{}_object' argument.\n\n".format(
                    _camel_case_to_snake_case(custom_type),
                    _camel_case_to_snake_case(custom_type),
                )
            )
            cls_str += (
                self.indent
                + ":param {}_protobuf_object: the protocol buffer object whose type corresponds with this class.\n".format(
                    _camel_case_to_snake_case(custom_type)
                )
            )
            cls_str += (
                self.indent
                + ":param {}_object: an instance of this class to be encoded in the protocol buffer object.\n".format(
                    _camel_case_to_snake_case(custom_type)
                )
            )
            cls_str += self.indent + '"""\n'
            cls_str += self.indent + "raise NotImplementedError\n\n"
            self._change_indent(-1)
            # stub decode() classmethod
            cls_str += self.indent + "@classmethod\n"
            cls_str += (
                self.indent
                + 'def decode(cls, {}_protobuf_object) -> "{}":\n'.format(
                    _camel_case_to_snake_case(custom_type),
                    custom_type,
                )
            )
            self._change_indent(1)
            cls_str += self.indent + '"""\n'
            cls_str += (
                self.indent
                + "Decode a protocol buffer object that corresponds with this class into an instance of this class.\n\n"
            )
            cls_str += (
                self.indent
                + "A new instance of this class is created that matches the protocol buffer object in the '{}_protobuf_object' argument.\n\n".format(
                    _camel_case_to_snake_case(custom_type)
                )
            )
            cls_str += (
                self.indent
                + ":param {}_protobuf_object: the protocol buffer object whose type corresponds with this class.\n".format(
                    _camel_case_to_snake_case(custom_type)
                )
            )
            cls_str += (
                self.indent
                + ":return: A new instance of this class that matches the protocol buffer object in the '{}_protobuf_object' argument.\n".format(
                    _camel_case_to_snake_case(custom_type)
                )
            )
            cls_str += self.indent + '"""\n'
            cls_str += self.indent + "raise NotImplementedError\n\n"
            self._change_indent(-1)
            # stub __eq__
            cls_str += self.indent + "def __eq__(self, other):\n"
            self._change_indent(1)
            cls_str += self.indent + "raise NotImplementedError\n"
            self._change_indent(-2)
        return cls_str

    def _encoding_message_content_from_python_to_protobuf(
        self,
        content_name: str,
        content_type: str,
    ) -> str:
        """
        Produce the encoding of message contents for the serialisation class.
        :param content_name: the name of the content to be encoded
        :param content_type: the type of the content to be encoded
        :return: the encoding string
        """
        encoding_str = ""
        if content_type in PYTHON_TYPE_TO_PROTO_TYPE.keys():
            # primitive type: plain assignment onto the performative message
            encoding_str += self.indent + "{} = msg.{}\n".format(
                content_name, content_name
            )
            encoding_str += self.indent + "performative.{} = {}\n".format(
                content_name, content_name
            )
        elif content_type.startswith("FrozenSet") or content_type.startswith("Tuple"):
            # repeated field: use extend()
            encoding_str += self.indent + "{} = msg.{}\n".format(
                content_name, content_name
            )
            encoding_str += self.indent + "performative.{}.extend({})\n".format(
                content_name, content_name
            )
        elif content_type.startswith("Dict"):
            # map field: use update()
            encoding_str += self.indent + "{} = msg.{}\n".format(
                content_name, content_name
            )
            encoding_str += self.indent + "performative.{}.update({})\n".format(
                content_name, content_name
            )
        elif content_type.startswith("Union"):
            # one guarded encoding per union alternative, plus an *_is_set flag
            sub_types = _get_sub_types_of_compositional_types(content_type)
            for sub_type in sub_types:
                sub_type_name_in_protobuf = _union_sub_type_to_protobuf_variable_name(
                    content_name, sub_type
                )
                encoding_str += self.indent + 'if msg.is_set("{}"):\n'.format(
                    sub_type_name_in_protobuf
                )
                self._change_indent(1)
                encoding_str += self.indent + "performative.{}_is_set = True\n".format(
                    sub_type_name_in_protobuf
                )
                encoding_str += self._encoding_message_content_from_python_to_protobuf(
                    sub_type_name_in_protobuf, sub_type
                )
                self._change_indent(-1)
        elif content_type.startswith("Optional"):
            # guard with is_set unless the inner type is a Union (which guards itself)
            sub_type = _get_sub_types_of_compositional_types(content_type)[0]
            if not sub_type.startswith("Union"):
                encoding_str += self.indent + 'if msg.is_set("{}"):\n'.format(
                    content_name
                )
                self._change_indent(1)
                encoding_str += self.indent + "performative.{}_is_set = True\n".format(
                    content_name
                )
            encoding_str += self._encoding_message_content_from_python_to_protobuf(
                content_name, sub_type
            )
            if not sub_type.startswith("Union"):
                self._change_indent(-1)
        else:
            # custom type: delegate to the custom type's encode()
            encoding_str += self.indent + "{} = msg.{}\n".format(
                content_name, content_name
            )
            encoding_str += self.indent + "{}.encode(performative.{}, {})\n".format(
                content_type, content_name, content_name
            )
        return encoding_str

    def _decoding_message_content_from_protobuf_to_python(
        self,
        performative: str,
        content_name: str,
        content_type: str,
        variable_name_in_protobuf: Optional[str] = "",
    ) -> str:
        """
        Produce the decoding of message contents for the serialisation class.

        :param performative: the performative to which the content belongs
        :param content_name: the name of the content to be decoded
        :param content_type: the type of the content to be decoded
        :param variable_name_in_protobuf: the name of the variable in the protobuf schema
        :return: the decoding string
        """
        decoding_str = ""
        variable_name = (
            content_name
            if variable_name_in_protobuf == ""
            else variable_name_in_protobuf
        )
        if content_type in PYTHON_TYPE_TO_PROTO_TYPE.keys():
            # primitive type: read the field straight off the protobuf message
            decoding_str += self.indent + "{} = {}_pb.{}.{}\n".format(
                content_name,
                self.protocol_specification.name,
                performative,
                variable_name,
            )
            decoding_str += self.indent + 'performative_content["{}"] = {}\n'.format(
                content_name, content_name
            )
        elif content_type.startswith("FrozenSet"):
            # repeated field -> frozenset
            decoding_str += self.indent + "{} = {}_pb.{}.{}\n".format(
                content_name,
                self.protocol_specification.name,
                performative,
                content_name,
            )
            decoding_str += self.indent + "{}_frozenset = frozenset({})\n".format(
                content_name, content_name
            )
            decoding_str += (
                self.indent
                + 'performative_content["{}"] = {}_frozenset\n'.format(
                    content_name, content_name
                )
            )
        elif content_type.startswith("Tuple"):
            # repeated field -> tuple
            decoding_str += self.indent + "{} = {}_pb.{}.{}\n".format(
                content_name,
                self.protocol_specification.name,
                performative,
                content_name,
            )
            decoding_str += self.indent + "{}_tuple = tuple({})\n".format(
                content_name, content_name
            )
            decoding_str += (
                self.indent
                + 'performative_content["{}"] = {}_tuple\n'.format(
                    content_name, content_name
                )
            )
        elif content_type.startswith("Dict"):
            # map field -> dict
            decoding_str += self.indent + "{} = {}_pb.{}.{}\n".format(
                content_name,
                self.protocol_specification.name,
                performative,
                content_name,
            )
            decoding_str += self.indent + "{}_dict = dict({})\n".format(
                content_name, content_name
            )
            decoding_str += (
                self.indent
                + 'performative_content["{}"] = {}_dict\n'.format(
                    content_name, content_name
                )
            )
        elif content_type.startswith("Union"):
            # one guarded decode per union alternative, keyed on the *_is_set flag
            sub_types = _get_sub_types_of_compositional_types(content_type)
            for sub_type in sub_types:
                sub_type_name_in_protobuf = _union_sub_type_to_protobuf_variable_name(
                    content_name, sub_type
                )
                decoding_str += self.indent + "if {}_pb.{}.{}_is_set:\n".format(
                    self.protocol_specification.name,
                    performative,
                    sub_type_name_in_protobuf,
                )
                self._change_indent(1)
                decoding_str += self._decoding_message_content_from_protobuf_to_python(
                    performative=performative,
                    content_name=content_name,
                    content_type=sub_type,
                    variable_name_in_protobuf=sub_type_name_in_protobuf,
                )
                self._change_indent(-1)
        elif content_type.startswith("Optional"):
            # guard with *_is_set unless the inner type is a Union (which guards itself)
            sub_type = _get_sub_types_of_compositional_types(content_type)[0]
            if not sub_type.startswith("Union"):
                decoding_str += self.indent + "if {}_pb.{}.{}_is_set:\n".format(
                    self.protocol_specification.name, performative, content_name
                )
                self._change_indent(1)
            decoding_str += self._decoding_message_content_from_protobuf_to_python(
                performative, content_name, sub_type
            )
            if not sub_type.startswith("Union"):
                self._change_indent(-1)
        else:
            # custom type: delegate to the custom type's decode()
            decoding_str += self.indent + "pb2_{} = {}_pb.{}.{}\n".format(
                variable_name,
                self.protocol_specification.name,
                performative,
                variable_name,
            )
            decoding_str += self.indent + "{} = {}.decode(pb2_{})\n".format(
                content_name,
                content_type,
                variable_name,
            )
            decoding_str += self.indent + 'performative_content["{}"] = {}\n'.format(
                content_name, content_name
            )
        return decoding_str

    def _serialization_class_str(self) -> str:
        """
        Produce the content of the Serialization class.
        :return: the serialization.py file content
        """
        self._change_indent(0, "s")

        # Header
        cls_str = _copyright_header_str(self.protocol_specification.author) + "\n"

        # Module docstring
        cls_str += (
            self.indent
            + '"""Serialization module for {} protocol."""\n\n'.format(
                self.protocol_specification.name
            )
        )
        cls_str += f"# pylint: disable={','.join(PYLINT_DISABLE_SERIALIZATION_PY)}\n"

        # Imports
        cls_str += self.indent + "from typing import Any, Dict, cast\n\n"
        cls_str += (
            self.indent
            + "from aea.mail.base_pb2 import DialogueMessage, Message as ProtobufMessage\n"
        )
        cls_str += MESSAGE_IMPORT + "\n"
        cls_str += SERIALIZER_IMPORT + "\n\n"
        cls_str += self.indent + "from {} import (\n {}_pb2,\n)\n".format(
            self.dotted_path_to_protocol_package,
            self.protocol_specification.name,
        )
        for custom_type in self.spec.all_custom_types:
            cls_str += (
                self.indent
                + "from {}.custom_types import (\n {},\n)\n".format(
                    self.dotted_path_to_protocol_package,
                    custom_type,
                )
            )
        cls_str += self.indent + "from {}.message import (\n {}Message,\n)\n".format(
            self.dotted_path_to_protocol_package,
            self.protocol_specification_in_camel_case,
        )

        # Class Header
        cls_str += self.indent + "\n\nclass {}Serializer(Serializer):\n".format(
            self.protocol_specification_in_camel_case,
        )
        self._change_indent(1)
        cls_str += (
            self.indent
            + '"""Serialization for the \'{}\' protocol."""\n\n'.format(
                self.protocol_specification.name,
            )
        )

        # encoder
        cls_str += self.indent + "@staticmethod\n"
        cls_str += self.indent + "def encode(msg: Message) -> bytes:\n"
        self._change_indent(1)
        cls_str += self.indent + '"""\n'
        cls_str += self.indent + "Encode a '{}' message into bytes.\n\n".format(
            self.protocol_specification_in_camel_case,
        )
        cls_str += self.indent + ":param msg: the message object.\n"
        cls_str += self.indent + ":return: the bytes.\n"
        cls_str += self.indent + '"""\n'
        cls_str += self.indent + "msg = cast({}Message, msg)\n".format(
            self.protocol_specification_in_camel_case
        )
        cls_str += self.indent + "message_pb = ProtobufMessage()\n"
        cls_str += self.indent + "dialogue_message_pb = DialogueMessage()\n"
        cls_str += self.indent + "{}_msg = {}_pb2.{}Message()\n\n".format(
            self.protocol_specification.name,
            self.protocol_specification.name,
            self.protocol_specification_in_camel_case,
        )
        # Dialogue envelope fields: message id, dialogue references, target.
        cls_str += self.indent + "dialogue_message_pb.message_id = msg.message_id\n"
        cls_str += self.indent + "dialogue_reference = msg.dialogue_reference\n"
        cls_str += (
            self.indent
            + "dialogue_message_pb.dialogue_starter_reference = dialogue_reference[0]\n"
        )
        cls_str += (
            self.indent
            + "dialogue_message_pb.dialogue_responder_reference = dialogue_reference[1]\n"
        )
        cls_str += self.indent + "dialogue_message_pb.target = msg.target\n\n"
        cls_str += self.indent + "performative_id = msg.performative\n"
        # One if/elif branch per performative, encoding each of its contents.
        counter = 1
        for performative, contents in self.spec.speech_acts.items():
            if counter == 1:
                cls_str += self.indent + "if "
            else:
                cls_str += self.indent + "elif "
            cls_str += "performative_id == {}Message.Performative.{}:\n".format(
                self.protocol_specification_in_camel_case, performative.upper()
            )
            self._change_indent(1)
            cls_str += (
                self.indent
                + "performative = {}_pb2.{}Message.{}_Performative() # type: ignore\n".format(
                    self.protocol_specification.name,
                    self.protocol_specification_in_camel_case,
                    performative.title(),
                )
            )
            for content_name, content_type in contents.items():
                cls_str += self._encoding_message_content_from_python_to_protobuf(
                    content_name, content_type
                )
            cls_str += self.indent + "{}_msg.{}.CopyFrom(performative)\n".format(
                self.protocol_specification.name, performative
            )
            counter += 1
            self._change_indent(-1)
        cls_str += self.indent + "else:\n"
        self._change_indent(1)
        cls_str += (
            self.indent
            + 'raise ValueError("Performative not valid: {}".format(performative_id))\n\n'
        )
        self._change_indent(-1)

        # Pack the performative-specific message into the dialogue envelope.
        cls_str += (
            self.indent
            + "dialogue_message_pb.content = {}_msg.SerializeToString()\n\n".format(
                self.protocol_specification.name,
            )
        )
        cls_str += (
            self.indent + "message_pb.dialogue_message.CopyFrom(dialogue_message_pb)\n"
        )
        cls_str += self.indent + "message_bytes = message_pb.SerializeToString()\n"
        cls_str += self.indent + "return message_bytes\n"
        self._change_indent(-1)

        # decoder
        cls_str += self.indent + "@staticmethod\n"
        cls_str += self.indent + "def decode(obj: bytes) -> Message:\n"
        self._change_indent(1)
        cls_str += self.indent + '"""\n'
        cls_str += self.indent + "Decode bytes into a '{}' message.\n\n".format(
            self.protocol_specification_in_camel_case,
        )
        cls_str += self.indent + ":param obj: the bytes object.\n"
        cls_str += self.indent + ":return: the '{}' message.\n".format(
            self.protocol_specification_in_camel_case
        )
        cls_str += self.indent + '"""\n'
        cls_str += self.indent + "message_pb = ProtobufMessage()\n"
        cls_str += self.indent + "{}_pb = {}_pb2.{}Message()\n".format(
            self.protocol_specification.name,
            self.protocol_specification.name,
            self.protocol_specification_in_camel_case,
        )
        cls_str += self.indent + "message_pb.ParseFromString(obj)\n"
        cls_str += self.indent + "message_id = message_pb.dialogue_message.message_id\n"
        cls_str += (
            self.indent
            + "dialogue_reference = (message_pb.dialogue_message.dialogue_starter_reference, message_pb.dialogue_message.dialogue_responder_reference)\n"
        )
        cls_str += self.indent + "target = message_pb.dialogue_message.target\n\n"
        cls_str += (
            self.indent
            + "{}_pb.ParseFromString(message_pb.dialogue_message.content)\n".format(
                self.protocol_specification.name
            )
        )
        cls_str += (
            self.indent
            + 'performative = {}_pb.WhichOneof("performative")\n'.format(
                self.protocol_specification.name
            )
        )
        cls_str += (
            self.indent
            + "performative_id = {}Message.Performative(str(performative))\n".format(
                self.protocol_specification_in_camel_case
            )
        )
        cls_str += (
            self.indent + "performative_content = dict() # type: Dict[str, Any]\n"
        )
        # One if/elif branch per performative, decoding each of its contents.
        counter = 1
        for performative, contents in self.spec.speech_acts.items():
            if counter == 1:
                cls_str += self.indent + "if "
            else:
                cls_str += self.indent + "elif "
            cls_str += "performative_id == {}Message.Performative.{}:\n".format(
                self.protocol_specification_in_camel_case, performative.upper()
            )
            self._change_indent(1)
            if len(contents.keys()) == 0:
                cls_str += self.indent + "pass\n"
            else:
                for content_name, content_type in contents.items():
                    cls_str += self._decoding_message_content_from_protobuf_to_python(
                        performative, content_name, content_type
                    )
            counter += 1
            self._change_indent(-1)
        cls_str += self.indent + "else:\n"
        self._change_indent(1)
        cls_str += (
            self.indent
            + 'raise ValueError("Performative not valid: {}.".format(performative_id))\n\n'
        )
        self._change_indent(-1)

        # Reassemble the message from the decoded envelope + contents.
        cls_str += self.indent + "return {}Message(\n".format(
            self.protocol_specification_in_camel_case,
        )
        self._change_indent(1)
        cls_str += self.indent + "message_id=message_id,\n"
        cls_str += self.indent + "dialogue_reference=dialogue_reference,\n"
        cls_str += self.indent + "target=target,\n"
        cls_str += self.indent + "performative=performative,\n"
        cls_str += self.indent + "**performative_content\n"
        self._change_indent(-1)
        cls_str += self.indent + ")\n"
        self._change_indent(-2)

        return cls_str

    def _content_to_proto_field_str(
        self,
        content_name: str,
        content_type: str,
        tag_no: int,
    ) -> Tuple[str, int]:
        """
        Convert a message content to its representation in a protocol buffer schema.
        :param content_name: the name of the content
        :param content_type: the type of the content
        :param tag_no: the tag number
        :return: the content in protocol buffer schema and the next tag number to be used
        """
        entry = ""
        if content_type.startswith("FrozenSet") or content_type.startswith(
            "Tuple"
        ):  # it is a <PCT>
            # Collections become a `repeated` field of the element's proto type.
            element_type = _get_sub_types_of_compositional_types(content_type)[0]
            proto_type = _python_pt_or_ct_type_to_proto_type(element_type)
            entry = self.indent + "repeated {} {} = {};\n".format(
                proto_type, content_name, tag_no
            )
            tag_no += 1
        elif content_type.startswith("Dict"):  # it is a <PMT>
            # Dicts become a proto `map<key, value>` field.
            key_type = _get_sub_types_of_compositional_types(content_type)[0]
            value_type = _get_sub_types_of_compositional_types(content_type)[1]
            proto_key_type = _python_pt_or_ct_type_to_proto_type(key_type)
            proto_value_type = _python_pt_or_ct_type_to_proto_type(value_type)
            entry = self.indent + "map<{}, {}> {} = {};\n".format(
                proto_key_type, proto_value_type, content_name, tag_no
            )
            tag_no += 1
        elif content_type.startswith("Union"):  # it is an <MT>
            # Unions are flattened: one schema field per sub-type, recursively.
            sub_types = _get_sub_types_of_compositional_types(content_type)
            for sub_type in sub_types:
                sub_type_name = _union_sub_type_to_protobuf_variable_name(
                    content_name, sub_type
                )
                content_to_proto_field_str, tag_no = self._content_to_proto_field_str(
                    sub_type_name, sub_type, tag_no
                )
                entry += content_to_proto_field_str
        elif content_type.startswith("Optional"):  # it is an <O>
            # Optionals add a companion `bool <name>_is_set` presence flag.
            sub_type = _get_sub_types_of_compositional_types(content_type)[0]
            content_to_proto_field_str, tag_no = self._content_to_proto_field_str(
                content_name, sub_type, tag_no
            )
            entry = content_to_proto_field_str
            entry += self.indent + "bool {}_is_set = {};\n".format(content_name, tag_no)
            tag_no += 1
        else:  # it is a <CT> or <PT>
            proto_type = _python_pt_or_ct_type_to_proto_type(content_type)
            entry = self.indent + "{} {} = {};\n".format(
                proto_type, content_name, tag_no
            )
            tag_no += 1
        return entry, tag_no

    def _protocol_buffer_schema_str(self) -> str:
        """
        Produce the content of the Protocol Buffers schema.

        :return: the protocol buffers schema content
        """
        self._change_indent(0, "s")

        # heading
        proto_buff_schema_str = self.indent + 'syntax = "proto3";\n\n'
        proto_buff_schema_str += self.indent + "package {};\n\n".format(
            public_id_to_package_name(
                self.protocol_specification.protocol_specification_id
            )
        )
        proto_buff_schema_str += self.indent + "message {}Message{{\n\n".format(
            self.protocol_specification_in_camel_case
        )
        self._change_indent(1)

        # custom types
        if (
            (len(self.spec.all_custom_types) != 0)
            and (self.protocol_specification.protobuf_snippets is not None)
            and (self.protocol_specification.protobuf_snippets != "")
        ):
            proto_buff_schema_str += self.indent + "// Custom Types\n"
            for custom_type in self.spec.all_custom_types:
                proto_buff_schema_str += self.indent + "message {}{{\n".format(
                    custom_type
                )
                self._change_indent(1)

                # formatting and adding the custom type protobuf entry
                specification_custom_type = "ct:" + custom_type
                proto_part = self.protocol_specification.protobuf_snippets[
                    specification_custom_type
                ]
                number_of_new_lines = proto_part.count("\n")
                if number_of_new_lines != 0:
                    # Re-indent every line of the snippet except the last one.
                    formatted_proto_part = proto_part.replace(
                        "\n", "\n" + self.indent, number_of_new_lines - 1
                    )
                else:
                    formatted_proto_part = proto_part
                proto_buff_schema_str += self.indent + formatted_proto_part
                self._change_indent(-1)
                proto_buff_schema_str += self.indent + "}\n\n"
            proto_buff_schema_str += "\n"

        # performatives: one nested message per speech act.
        proto_buff_schema_str += self.indent + "// Performatives and contents\n"
        for performative, contents in self.spec.speech_acts.items():
            proto_buff_schema_str += self.indent + "message {}_Performative{{".format(
                performative.title()
            )
            self._change_indent(1)
            tag_no = 1
            if len(contents) == 0:
                proto_buff_schema_str += "}\n\n"
                self._change_indent(-1)
            else:
                proto_buff_schema_str += "\n"
                for content_name, content_type in contents.items():
                    (
                        content_to_proto_field_str,
                        tag_no,
                    ) = self._content_to_proto_field_str(
                        content_name, content_type, tag_no
                    )
                    proto_buff_schema_str += content_to_proto_field_str
                self._change_indent(-1)
                proto_buff_schema_str += self.indent + "}\n\n"
        proto_buff_schema_str += "\n"

        # oneof over all performatives; tags start at 5, leaving 1-4 unused
        # (presumably reserved by the framework — confirm against aea docs).
        proto_buff_schema_str += self.indent + "oneof performative{\n"
        self._change_indent(1)
        tag_no = 5
        for performative in self.spec.all_performatives:
            proto_buff_schema_str += self.indent + "{}_Performative {} = {};\n".format(
                performative.title(), performative, tag_no
            )
            tag_no += 1
        self._change_indent(-1)
        proto_buff_schema_str += self.indent + "}\n"
        self._change_indent(-1)

        proto_buff_schema_str += self.indent + "}\n"
        return proto_buff_schema_str

    def _protocol_yaml_str(self) -> str:
        """
        Produce the content of the protocol.yaml file.

        :return: the protocol.yaml content
        """
        protocol_yaml_str = "name: {}\n".format(self.protocol_specification.name)
        protocol_yaml_str += "author: {}\n".format(self.protocol_specification.author)
        protocol_yaml_str += "version: {}\n".format(self.protocol_specification.version)
        protocol_yaml_str += "protocol_specification_id: {}\n".format(
            str(self.protocol_specification.protocol_specification_id)
        )
        protocol_yaml_str += "type: {}\n".format(
            self.protocol_specification.component_type
        )
        protocol_yaml_str += "description: {}\n".format(
            self.protocol_specification.description
        )
        protocol_yaml_str += "license: {}\n".format(self.protocol_specification.license)
        protocol_yaml_str += "aea_version: '{}'\n".format(
            self.protocol_specification.aea_version
        )
        # NOTE: "{}" below is a literal empty YAML mapping — no .format() is
        # applied to these lines, so the braces are emitted verbatim.
        protocol_yaml_str += "fingerprint: {}\n"
        protocol_yaml_str += "fingerprint_ignore_patterns: []\n"
        protocol_yaml_str += "dependencies:\n"
        protocol_yaml_str += " protobuf: {}\n"

        return protocol_yaml_str

    def _init_str(self) -> str:
        """
        Produce the content of the __init__.py file.
        :return: the __init__.py content
        """
        init_str = _copyright_header_str(self.protocol_specification.author)
        init_str += "\n"
        init_str += '"""\nThis module contains the support resources for the {} protocol.\n\nIt was created with protocol buffer compiler version `{}` and aea version `{}`.\n"""\n\n'.format(
            self.protocol_specification.name, self.protoc_version, __aea_version__
        )
        init_str += "from {}.message import {}Message\n".format(
            self.dotted_path_to_protocol_package,
            self.protocol_specification_in_camel_case,
        )
        init_str += "from {}.serialization import {}Serializer\n".format(
            self.dotted_path_to_protocol_package,
            self.protocol_specification_in_camel_case,
        )
        # Wire the serializer onto the message class at package import time.
        init_str += "{}Message.serializer = {}Serializer\n".format(
            self.protocol_specification_in_camel_case,
            self.protocol_specification_in_camel_case,
        )

        return init_str

    def generate_protobuf_only_mode(
        self,
        language: str = PROTOCOL_LANGUAGE_PYTHON,
        run_protolint: bool = True,
    ) -> Optional[str]:
        """
        Run the generator in "protobuf only" mode:

        a) validate the protocol specification.
        b) create the protocol buffer schema file.
        c) create the protocol buffer implementation file via 'protoc'.

        :param language: the target language in which to generate the package.
        :param run_protolint: whether to run protolint or not.
        :return: protolint warnings, if any were produced; otherwise None.
        :raises ValueError: if the language is unsupported.
        :raises SyntaxError: if the schema fails to compile (output is removed).
        """
        if language not in SUPPORTED_PROTOCOL_LANGUAGES:
            raise ValueError(
                f"Unsupported language. Expected one of {SUPPORTED_PROTOCOL_LANGUAGES}. Found {language}."
            )

        protobuf_output = None  # type: Optional[str]

        # Create the output folder
        output_folder = Path(self.path_to_generated_protocol_package)
        if not output_folder.exists():
            os.mkdir(output_folder)

        # Generate protocol buffer schema file
        _create_protocol_file(
            self.path_to_generated_protocol_package,
            "{}.proto".format(self.protocol_specification.name),
            self._protocol_buffer_schema_str(),
        )

        # Try to compile protobuf schema file
        is_compiled, msg = compile_protobuf_using_protoc(
            self.path_to_generated_protocol_package,
            self.protocol_specification.name,
            language,
        )
        if not is_compiled:
            # Remove the generated folder and files
            shutil.rmtree(output_folder)
            raise SyntaxError(
                "Error when trying to compile the protocol buffer schema file:\n" + msg
            )

        # Run protolint
        if run_protolint:
            is_correctly_formatted, protolint_output = apply_protolint(
                self.path_to_generated_protocol_package,
                self.protocol_specification.name,
            )
            if not is_correctly_formatted and protolint_output != "":
                protobuf_output = "Protolint warnings:\n" + protolint_output

        # Run black and isort formatting for python
        if language == PROTOCOL_LANGUAGE_PYTHON:
            try_run_black_formatting(self.path_to_generated_protocol_package)
            try_run_isort_formatting(self.path_to_generated_protocol_package)

        return protobuf_output

    def generate_full_mode(self, language: str) -> Optional[str]:
        """
        Run the generator in "full" mode.

        Runs the generator in protobuf only mode:
        a) validate the protocol specification.
        b) create the protocol buffer schema file.
        c) create the protocol buffer implementation file via 'protoc'.
        Additionally:
        d) generates python modules.
        e) applies black formatting
        f) applies isort formatting

        :param language: the language for which to create protobuf files
        :return: optional warning message
        :raises ValueError: if the language is not Python.
        """
        if language != PROTOCOL_LANGUAGE_PYTHON:
            raise ValueError(
                f"Unsupported language. Expected 'python' because currently the framework supports full generation of protocols only in Python. Found {language}."
            )

        # Run protobuf only mode
        full_mode_output = self.generate_protobuf_only_mode(
            language=PROTOCOL_LANGUAGE_PYTHON
        )

        # Generate Python protocol package
        _create_protocol_file(
            self.path_to_generated_protocol_package, INIT_FILE_NAME, self._init_str()
        )
        _create_protocol_file(
            self.path_to_generated_protocol_package,
            PROTOCOL_YAML_FILE_NAME,
            self._protocol_yaml_str(),
        )
        _create_protocol_file(
            self.path_to_generated_protocol_package,
            MESSAGE_DOT_PY_FILE_NAME,
            self._message_class_str(),
        )
        # dialogue.py is only emitted when the spec declares a dialogue config.
        if (
            self.protocol_specification.dialogue_config is not None
            and self.protocol_specification.dialogue_config != {}
        ):
            _create_protocol_file(
                self.path_to_generated_protocol_package,
                DIALOGUE_DOT_PY_FILE_NAME,
                self._dialogue_class_str(),
            )
        # custom_types.py is only emitted when custom types are declared.
        if len(self.spec.all_custom_types) > 0:
            _create_protocol_file(
                self.path_to_generated_protocol_package,
                CUSTOM_TYPES_DOT_PY_FILE_NAME,
                self._custom_types_module_str(),
            )
        _create_protocol_file(
            self.path_to_generated_protocol_package,
            SERIALIZATION_DOT_PY_FILE_NAME,
            self._serialization_class_str(),
        )

        # Run black formatting
        try_run_black_formatting(self.path_to_generated_protocol_package)

        # Run isort formatting
        try_run_isort_formatting(self.path_to_generated_protocol_package)

        # Warn if specification has custom types
        if len(self.spec.all_custom_types) > 0:
            incomplete_generation_warning_msg = "The generated protocol is incomplete, because the protocol specification contains the following custom types: {}. Update the generated '{}' file with the appropriate implementations of these custom types.".format(
                self.spec.all_custom_types, CUSTOM_TYPES_DOT_PY_FILE_NAME
            )
            if full_mode_output is not None:
                full_mode_output += incomplete_generation_warning_msg
            else:
                full_mode_output = incomplete_generation_warning_msg
        return full_mode_output

    def generate(
        self, protobuf_only: bool = False, language: str = PROTOCOL_LANGUAGE_PYTHON
    ) -> Optional[str]:
        """
        Run the generator either in "full" or "protobuf only" mode.

        :param protobuf_only: mode of running the generator.
        :param language: the target language in which to generate the protocol package.
        :return: optional warning message.
        """
        if protobuf_only:
            output = self.generate_protobuf_only_mode(language)  # type: Optional[str]
            # Warn about the protobuf only mode
            protobuf_mode_warning_msg = (
                "The generated protocol is incomplete. It only includes the protocol buffer definitions. "
                + "You must implement and add other definitions (e.g. messages, serialisation, dialogue, etc) to this package."
            )
            if output is not None:
                output += protobuf_mode_warning_msg
            else:
                output = protobuf_mode_warning_msg
        else:
            output = self.generate_full_mode(language)
        return output


def public_id_to_package_name(public_id: PublicId) -> str:
    """Make package name string from public_id provided."""
    # e.g. author "fetchai", name "default", version "1.0.0"
    # -> "aea.fetchai.default.v1_0_0" (dots in the version become underscores).
    return f'aea.{public_id.author}.{public_id.name}.v{public_id.version.replace(".", "_")}'
# pylint:disable=line-too-long import logging from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64 from .. import SIM_PROCEDURES as P from . import SimLibrary _l = logging.getLogger(name=__name__) lib = SimLibrary() lib.set_default_cc('X86', SimCCStdcall) lib.set_default_cc('AMD64', SimCCMicrosoftAMD64) lib.set_library_names("iphlpapi.dll") prototypes = \ { # 'GetIfEntry2': SimTypeFunction([SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceGuid": SimTypeBottom(label="Guid"), "Alias": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 257), "Description": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 257), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PermanentPhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "Mtu": SimTypeInt(signed=False, label="UInt32"), "Type": SimTypeInt(signed=False, label="UInt32"), "TunnelType": SimTypeInt(signed=False, label="TUNNEL_TYPE"), "MediaType": SimTypeBottom(label="NDIS_MEDIUM"), "PhysicalMediumType": SimTypeBottom(label="NDIS_PHYSICAL_MEDIUM"), "AccessType": SimTypeInt(signed=False, label="NET_IF_ACCESS_TYPE"), "DirectionType": SimTypeInt(signed=False, label="NET_IF_DIRECTION_TYPE"), "InterfaceAndOperStatusFlags": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_InterfaceAndOperStatusFlags_e__Struct", pack=False, align=None), "OperStatus": SimTypeInt(signed=False, 
label="IF_OPER_STATUS"), "AdminStatus": SimTypeInt(signed=False, label="NET_IF_ADMIN_STATUS"), "MediaConnectState": SimTypeInt(signed=False, label="NET_IF_MEDIA_CONNECT_STATE"), "NetworkGuid": SimTypeBottom(label="Guid"), "ConnectionType": SimTypeInt(signed=False, label="NET_IF_CONNECTION_TYPE"), "TransmitLinkSpeed": SimTypeLongLong(signed=False, label="UInt64"), "ReceiveLinkSpeed": SimTypeLongLong(signed=False, label="UInt64"), "InOctets": SimTypeLongLong(signed=False, label="UInt64"), "InUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "InNUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "InDiscards": SimTypeLongLong(signed=False, label="UInt64"), "InErrors": SimTypeLongLong(signed=False, label="UInt64"), "InUnknownProtos": SimTypeLongLong(signed=False, label="UInt64"), "InUcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "InMulticastOctets": SimTypeLongLong(signed=False, label="UInt64"), "InBroadcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "OutNUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "OutDiscards": SimTypeLongLong(signed=False, label="UInt64"), "OutErrors": SimTypeLongLong(signed=False, label="UInt64"), "OutUcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutMulticastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutBroadcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutQLen": SimTypeLongLong(signed=False, label="UInt64")}, name="MIB_IF_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetIfEntry2Ex': SimTypeFunction([SimTypeInt(signed=False, label="MIB_IF_ENTRY_LEVEL"), SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", 
pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceGuid": SimTypeBottom(label="Guid"), "Alias": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 257), "Description": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 257), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PermanentPhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "Mtu": SimTypeInt(signed=False, label="UInt32"), "Type": SimTypeInt(signed=False, label="UInt32"), "TunnelType": SimTypeInt(signed=False, label="TUNNEL_TYPE"), "MediaType": SimTypeBottom(label="NDIS_MEDIUM"), "PhysicalMediumType": SimTypeBottom(label="NDIS_PHYSICAL_MEDIUM"), "AccessType": SimTypeInt(signed=False, label="NET_IF_ACCESS_TYPE"), "DirectionType": SimTypeInt(signed=False, label="NET_IF_DIRECTION_TYPE"), "InterfaceAndOperStatusFlags": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_InterfaceAndOperStatusFlags_e__Struct", pack=False, align=None), "OperStatus": SimTypeInt(signed=False, label="IF_OPER_STATUS"), "AdminStatus": SimTypeInt(signed=False, label="NET_IF_ADMIN_STATUS"), "MediaConnectState": SimTypeInt(signed=False, label="NET_IF_MEDIA_CONNECT_STATE"), "NetworkGuid": SimTypeBottom(label="Guid"), "ConnectionType": SimTypeInt(signed=False, label="NET_IF_CONNECTION_TYPE"), "TransmitLinkSpeed": SimTypeLongLong(signed=False, label="UInt64"), "ReceiveLinkSpeed": SimTypeLongLong(signed=False, label="UInt64"), "InOctets": SimTypeLongLong(signed=False, label="UInt64"), "InUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "InNUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "InDiscards": SimTypeLongLong(signed=False, label="UInt64"), "InErrors": SimTypeLongLong(signed=False, label="UInt64"), "InUnknownProtos": SimTypeLongLong(signed=False, label="UInt64"), "InUcastOctets": SimTypeLongLong(signed=False, label="UInt64"), 
"InMulticastOctets": SimTypeLongLong(signed=False, label="UInt64"), "InBroadcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "OutNUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "OutDiscards": SimTypeLongLong(signed=False, label="UInt64"), "OutErrors": SimTypeLongLong(signed=False, label="UInt64"), "OutUcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutMulticastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutBroadcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutQLen": SimTypeLongLong(signed=False, label="UInt64")}, name="MIB_IF_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Level", "Row"]), # 'GetIfTable2': SimTypeFunction([SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceGuid": SimTypeBottom(label="Guid"), "Alias": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 257), "Description": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 257), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PermanentPhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "Mtu": SimTypeInt(signed=False, label="UInt32"), "Type": SimTypeInt(signed=False, label="UInt32"), "TunnelType": SimTypeInt(signed=False, label="TUNNEL_TYPE"), "MediaType": SimTypeBottom(label="NDIS_MEDIUM"), "PhysicalMediumType": SimTypeBottom(label="NDIS_PHYSICAL_MEDIUM"), "AccessType": 
SimTypeInt(signed=False, label="NET_IF_ACCESS_TYPE"), "DirectionType": SimTypeInt(signed=False, label="NET_IF_DIRECTION_TYPE"), "InterfaceAndOperStatusFlags": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_InterfaceAndOperStatusFlags_e__Struct", pack=False, align=None), "OperStatus": SimTypeInt(signed=False, label="IF_OPER_STATUS"), "AdminStatus": SimTypeInt(signed=False, label="NET_IF_ADMIN_STATUS"), "MediaConnectState": SimTypeInt(signed=False, label="NET_IF_MEDIA_CONNECT_STATE"), "NetworkGuid": SimTypeBottom(label="Guid"), "ConnectionType": SimTypeInt(signed=False, label="NET_IF_CONNECTION_TYPE"), "TransmitLinkSpeed": SimTypeLongLong(signed=False, label="UInt64"), "ReceiveLinkSpeed": SimTypeLongLong(signed=False, label="UInt64"), "InOctets": SimTypeLongLong(signed=False, label="UInt64"), "InUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "InNUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "InDiscards": SimTypeLongLong(signed=False, label="UInt64"), "InErrors": SimTypeLongLong(signed=False, label="UInt64"), "InUnknownProtos": SimTypeLongLong(signed=False, label="UInt64"), "InUcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "InMulticastOctets": SimTypeLongLong(signed=False, label="UInt64"), "InBroadcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "OutNUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "OutDiscards": SimTypeLongLong(signed=False, label="UInt64"), "OutErrors": SimTypeLongLong(signed=False, label="UInt64"), "OutUcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutMulticastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutBroadcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutQLen": SimTypeLongLong(signed=False, label="UInt64")}, name="MIB_IF_ROW2", pack=False, align=None), offset=0)}, name="MIB_IF_TABLE2", pack=False, align=None), 
offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Table"]), # 'GetIfTable2Ex': SimTypeFunction([SimTypeInt(signed=False, label="MIB_IF_TABLE_LEVEL"), SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceGuid": SimTypeBottom(label="Guid"), "Alias": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 257), "Description": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 257), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PermanentPhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "Mtu": SimTypeInt(signed=False, label="UInt32"), "Type": SimTypeInt(signed=False, label="UInt32"), "TunnelType": SimTypeInt(signed=False, label="TUNNEL_TYPE"), "MediaType": SimTypeBottom(label="NDIS_MEDIUM"), "PhysicalMediumType": SimTypeBottom(label="NDIS_PHYSICAL_MEDIUM"), "AccessType": SimTypeInt(signed=False, label="NET_IF_ACCESS_TYPE"), "DirectionType": SimTypeInt(signed=False, label="NET_IF_DIRECTION_TYPE"), "InterfaceAndOperStatusFlags": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_InterfaceAndOperStatusFlags_e__Struct", pack=False, align=None), "OperStatus": SimTypeInt(signed=False, label="IF_OPER_STATUS"), "AdminStatus": SimTypeInt(signed=False, label="NET_IF_ADMIN_STATUS"), "MediaConnectState": SimTypeInt(signed=False, label="NET_IF_MEDIA_CONNECT_STATE"), "NetworkGuid": SimTypeBottom(label="Guid"), "ConnectionType": SimTypeInt(signed=False, label="NET_IF_CONNECTION_TYPE"), "TransmitLinkSpeed": SimTypeLongLong(signed=False, label="UInt64"), 
"ReceiveLinkSpeed": SimTypeLongLong(signed=False, label="UInt64"), "InOctets": SimTypeLongLong(signed=False, label="UInt64"), "InUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "InNUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "InDiscards": SimTypeLongLong(signed=False, label="UInt64"), "InErrors": SimTypeLongLong(signed=False, label="UInt64"), "InUnknownProtos": SimTypeLongLong(signed=False, label="UInt64"), "InUcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "InMulticastOctets": SimTypeLongLong(signed=False, label="UInt64"), "InBroadcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "OutNUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "OutDiscards": SimTypeLongLong(signed=False, label="UInt64"), "OutErrors": SimTypeLongLong(signed=False, label="UInt64"), "OutUcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutMulticastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutBroadcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutQLen": SimTypeLongLong(signed=False, label="UInt64")}, name="MIB_IF_ROW2", pack=False, align=None), offset=0)}, name="MIB_IF_TABLE2", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Level", "Table"]), # 'GetIfStackTable': SimTypeFunction([SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"HigherLayerInterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "LowerLayerInterfaceIndex": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IFSTACK_ROW", pack=False, align=None), offset=0)}, name="MIB_IFSTACK_TABLE", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Table"]), # 'GetInvertedIfStackTable': 
SimTypeFunction([SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"LowerLayerInterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "HigherLayerInterfaceIndex": SimTypeInt(signed=False, label="UInt32")}, name="MIB_INVERTEDIFSTACK_ROW", pack=False, align=None), offset=0)}, name="MIB_INVERTEDIFSTACK_TABLE", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Table"]), # 'GetIpInterfaceEntry': SimTypeFunction([SimTypePointer(SimStruct({"Family": SimTypeShort(signed=False, label="UInt16"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "MaxReassemblySize": SimTypeInt(signed=False, label="UInt32"), "InterfaceIdentifier": SimTypeLongLong(signed=False, label="UInt64"), "MinRouterAdvertisementInterval": SimTypeInt(signed=False, label="UInt32"), "MaxRouterAdvertisementInterval": SimTypeInt(signed=False, label="UInt32"), "AdvertisingEnabled": SimTypeChar(label="Byte"), "ForwardingEnabled": SimTypeChar(label="Byte"), "WeakHostSend": SimTypeChar(label="Byte"), "WeakHostReceive": SimTypeChar(label="Byte"), "UseAutomaticMetric": SimTypeChar(label="Byte"), "UseNeighborUnreachabilityDetection": SimTypeChar(label="Byte"), "ManagedAddressConfigurationSupported": SimTypeChar(label="Byte"), "OtherStatefulConfigurationSupported": SimTypeChar(label="Byte"), "AdvertiseDefaultRoute": SimTypeChar(label="Byte"), "RouterDiscoveryBehavior": SimTypeInt(signed=False, label="NL_ROUTER_DISCOVERY_BEHAVIOR"), "DadTransmits": SimTypeInt(signed=False, label="UInt32"), "BaseReachableTime": SimTypeInt(signed=False, label="UInt32"), "RetransmitTime": SimTypeInt(signed=False, label="UInt32"), "PathMtuDiscoveryTimeout": 
SimTypeInt(signed=False, label="UInt32"), "LinkLocalAddressBehavior": SimTypeInt(signed=False, label="NL_LINK_LOCAL_ADDRESS_BEHAVIOR"), "LinkLocalAddressTimeout": SimTypeInt(signed=False, label="UInt32"), "ZoneIndices": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 16), "SitePrefixLength": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "NlMtu": SimTypeInt(signed=False, label="UInt32"), "Connected": SimTypeChar(label="Byte"), "SupportsWakeUpPatterns": SimTypeChar(label="Byte"), "SupportsNeighborDiscovery": SimTypeChar(label="Byte"), "SupportsRouterDiscovery": SimTypeChar(label="Byte"), "ReachableTime": SimTypeInt(signed=False, label="UInt32"), "TransmitOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "ReceiveOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "DisableDefaultRoutes": SimTypeChar(label="Byte")}, name="MIB_IPINTERFACE_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetIpInterfaceTable': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"Family": SimTypeShort(signed=False, label="UInt16"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "MaxReassemblySize": SimTypeInt(signed=False, label="UInt32"), "InterfaceIdentifier": SimTypeLongLong(signed=False, label="UInt64"), "MinRouterAdvertisementInterval": SimTypeInt(signed=False, label="UInt32"), "MaxRouterAdvertisementInterval": SimTypeInt(signed=False, 
label="UInt32"), "AdvertisingEnabled": SimTypeChar(label="Byte"), "ForwardingEnabled": SimTypeChar(label="Byte"), "WeakHostSend": SimTypeChar(label="Byte"), "WeakHostReceive": SimTypeChar(label="Byte"), "UseAutomaticMetric": SimTypeChar(label="Byte"), "UseNeighborUnreachabilityDetection": SimTypeChar(label="Byte"), "ManagedAddressConfigurationSupported": SimTypeChar(label="Byte"), "OtherStatefulConfigurationSupported": SimTypeChar(label="Byte"), "AdvertiseDefaultRoute": SimTypeChar(label="Byte"), "RouterDiscoveryBehavior": SimTypeInt(signed=False, label="NL_ROUTER_DISCOVERY_BEHAVIOR"), "DadTransmits": SimTypeInt(signed=False, label="UInt32"), "BaseReachableTime": SimTypeInt(signed=False, label="UInt32"), "RetransmitTime": SimTypeInt(signed=False, label="UInt32"), "PathMtuDiscoveryTimeout": SimTypeInt(signed=False, label="UInt32"), "LinkLocalAddressBehavior": SimTypeInt(signed=False, label="NL_LINK_LOCAL_ADDRESS_BEHAVIOR"), "LinkLocalAddressTimeout": SimTypeInt(signed=False, label="UInt32"), "ZoneIndices": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 16), "SitePrefixLength": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "NlMtu": SimTypeInt(signed=False, label="UInt32"), "Connected": SimTypeChar(label="Byte"), "SupportsWakeUpPatterns": SimTypeChar(label="Byte"), "SupportsNeighborDiscovery": SimTypeChar(label="Byte"), "SupportsRouterDiscovery": SimTypeChar(label="Byte"), "ReachableTime": SimTypeInt(signed=False, label="UInt32"), "TransmitOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "ReceiveOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "DisableDefaultRoutes": SimTypeChar(label="Byte")}, name="MIB_IPINTERFACE_ROW", pack=False, align=None), offset=0)}, name="MIB_IPINTERFACE_TABLE", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, 
label="Int32"), arg_names=["Family", "Table"]), # 'InitializeIpInterfaceEntry': SimTypeFunction([SimTypePointer(SimStruct({"Family": SimTypeShort(signed=False, label="UInt16"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "MaxReassemblySize": SimTypeInt(signed=False, label="UInt32"), "InterfaceIdentifier": SimTypeLongLong(signed=False, label="UInt64"), "MinRouterAdvertisementInterval": SimTypeInt(signed=False, label="UInt32"), "MaxRouterAdvertisementInterval": SimTypeInt(signed=False, label="UInt32"), "AdvertisingEnabled": SimTypeChar(label="Byte"), "ForwardingEnabled": SimTypeChar(label="Byte"), "WeakHostSend": SimTypeChar(label="Byte"), "WeakHostReceive": SimTypeChar(label="Byte"), "UseAutomaticMetric": SimTypeChar(label="Byte"), "UseNeighborUnreachabilityDetection": SimTypeChar(label="Byte"), "ManagedAddressConfigurationSupported": SimTypeChar(label="Byte"), "OtherStatefulConfigurationSupported": SimTypeChar(label="Byte"), "AdvertiseDefaultRoute": SimTypeChar(label="Byte"), "RouterDiscoveryBehavior": SimTypeInt(signed=False, label="NL_ROUTER_DISCOVERY_BEHAVIOR"), "DadTransmits": SimTypeInt(signed=False, label="UInt32"), "BaseReachableTime": SimTypeInt(signed=False, label="UInt32"), "RetransmitTime": SimTypeInt(signed=False, label="UInt32"), "PathMtuDiscoveryTimeout": SimTypeInt(signed=False, label="UInt32"), "LinkLocalAddressBehavior": SimTypeInt(signed=False, label="NL_LINK_LOCAL_ADDRESS_BEHAVIOR"), "LinkLocalAddressTimeout": SimTypeInt(signed=False, label="UInt32"), "ZoneIndices": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 16), "SitePrefixLength": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "NlMtu": SimTypeInt(signed=False, 
label="UInt32"), "Connected": SimTypeChar(label="Byte"), "SupportsWakeUpPatterns": SimTypeChar(label="Byte"), "SupportsNeighborDiscovery": SimTypeChar(label="Byte"), "SupportsRouterDiscovery": SimTypeChar(label="Byte"), "ReachableTime": SimTypeInt(signed=False, label="UInt32"), "TransmitOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "ReceiveOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "DisableDefaultRoutes": SimTypeChar(label="Byte")}, name="MIB_IPINTERFACE_ROW", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["Row"]), # 'NotifyIpInterfaceChange': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"Family": SimTypeShort(signed=False, label="UInt16"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "MaxReassemblySize": SimTypeInt(signed=False, label="UInt32"), "InterfaceIdentifier": SimTypeLongLong(signed=False, label="UInt64"), "MinRouterAdvertisementInterval": SimTypeInt(signed=False, label="UInt32"), "MaxRouterAdvertisementInterval": SimTypeInt(signed=False, label="UInt32"), "AdvertisingEnabled": SimTypeChar(label="Byte"), "ForwardingEnabled": SimTypeChar(label="Byte"), "WeakHostSend": SimTypeChar(label="Byte"), "WeakHostReceive": SimTypeChar(label="Byte"), "UseAutomaticMetric": SimTypeChar(label="Byte"), "UseNeighborUnreachabilityDetection": SimTypeChar(label="Byte"), "ManagedAddressConfigurationSupported": SimTypeChar(label="Byte"), "OtherStatefulConfigurationSupported": SimTypeChar(label="Byte"), 
"AdvertiseDefaultRoute": SimTypeChar(label="Byte"), "RouterDiscoveryBehavior": SimTypeInt(signed=False, label="NL_ROUTER_DISCOVERY_BEHAVIOR"), "DadTransmits": SimTypeInt(signed=False, label="UInt32"), "BaseReachableTime": SimTypeInt(signed=False, label="UInt32"), "RetransmitTime": SimTypeInt(signed=False, label="UInt32"), "PathMtuDiscoveryTimeout": SimTypeInt(signed=False, label="UInt32"), "LinkLocalAddressBehavior": SimTypeInt(signed=False, label="NL_LINK_LOCAL_ADDRESS_BEHAVIOR"), "LinkLocalAddressTimeout": SimTypeInt(signed=False, label="UInt32"), "ZoneIndices": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 16), "SitePrefixLength": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "NlMtu": SimTypeInt(signed=False, label="UInt32"), "Connected": SimTypeChar(label="Byte"), "SupportsWakeUpPatterns": SimTypeChar(label="Byte"), "SupportsNeighborDiscovery": SimTypeChar(label="Byte"), "SupportsRouterDiscovery": SimTypeChar(label="Byte"), "ReachableTime": SimTypeInt(signed=False, label="UInt32"), "TransmitOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "ReceiveOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "DisableDefaultRoutes": SimTypeChar(label="Byte")}, name="MIB_IPINTERFACE_ROW", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="MIB_NOTIFICATION_TYPE")], SimTypeBottom(label="Void"), arg_names=["CallerContext", "Row", "NotificationType"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeChar(label="Byte"), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "Callback", "CallerContext", "InitialNotification", "NotificationHandle"]), # 'SetIpInterfaceEntry': SimTypeFunction([SimTypePointer(SimStruct({"Family": 
SimTypeShort(signed=False, label="UInt16"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "MaxReassemblySize": SimTypeInt(signed=False, label="UInt32"), "InterfaceIdentifier": SimTypeLongLong(signed=False, label="UInt64"), "MinRouterAdvertisementInterval": SimTypeInt(signed=False, label="UInt32"), "MaxRouterAdvertisementInterval": SimTypeInt(signed=False, label="UInt32"), "AdvertisingEnabled": SimTypeChar(label="Byte"), "ForwardingEnabled": SimTypeChar(label="Byte"), "WeakHostSend": SimTypeChar(label="Byte"), "WeakHostReceive": SimTypeChar(label="Byte"), "UseAutomaticMetric": SimTypeChar(label="Byte"), "UseNeighborUnreachabilityDetection": SimTypeChar(label="Byte"), "ManagedAddressConfigurationSupported": SimTypeChar(label="Byte"), "OtherStatefulConfigurationSupported": SimTypeChar(label="Byte"), "AdvertiseDefaultRoute": SimTypeChar(label="Byte"), "RouterDiscoveryBehavior": SimTypeInt(signed=False, label="NL_ROUTER_DISCOVERY_BEHAVIOR"), "DadTransmits": SimTypeInt(signed=False, label="UInt32"), "BaseReachableTime": SimTypeInt(signed=False, label="UInt32"), "RetransmitTime": SimTypeInt(signed=False, label="UInt32"), "PathMtuDiscoveryTimeout": SimTypeInt(signed=False, label="UInt32"), "LinkLocalAddressBehavior": SimTypeInt(signed=False, label="NL_LINK_LOCAL_ADDRESS_BEHAVIOR"), "LinkLocalAddressTimeout": SimTypeInt(signed=False, label="UInt32"), "ZoneIndices": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 16), "SitePrefixLength": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "NlMtu": SimTypeInt(signed=False, label="UInt32"), "Connected": SimTypeChar(label="Byte"), "SupportsWakeUpPatterns": SimTypeChar(label="Byte"), "SupportsNeighborDiscovery": 
SimTypeChar(label="Byte"), "SupportsRouterDiscovery": SimTypeChar(label="Byte"), "ReachableTime": SimTypeInt(signed=False, label="UInt32"), "TransmitOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "ReceiveOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "DisableDefaultRoutes": SimTypeChar(label="Byte")}, name="MIB_IPINTERFACE_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetIpNetworkConnectionBandwidthEstimates': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimStruct({"InboundBandwidthInformation": SimStruct({"Bandwidth": SimTypeLongLong(signed=False, label="UInt64"), "Instability": SimTypeLongLong(signed=False, label="UInt64"), "BandwidthPeaked": SimTypeChar(label="Byte")}, name="NL_BANDWIDTH_INFORMATION", pack=False, align=None), "OutboundBandwidthInformation": SimStruct({"Bandwidth": SimTypeLongLong(signed=False, label="UInt64"), "Instability": SimTypeLongLong(signed=False, label="UInt64"), "BandwidthPeaked": SimTypeChar(label="Byte")}, name="NL_BANDWIDTH_INFORMATION", pack=False, align=None)}, name="MIB_IP_NETWORK_CONNECTION_BANDWIDTH_ESTIMATES", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceIndex", "AddressFamily", "BandwidthEstimates"]), # 'CreateUnicastIpAddressEntry': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": 
SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "PrefixOrigin": SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "OnLinkPrefixLength": 
SimTypeChar(label="Byte"), "SkipAsSource": SimTypeChar(label="Byte"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None), "CreationTimeStamp": SimTypeBottom(label="LARGE_INTEGER")}, name="MIB_UNICASTIPADDRESS_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'DeleteUnicastIpAddressEntry': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": 
SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "PrefixOrigin": SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "OnLinkPrefixLength": SimTypeChar(label="Byte"), "SkipAsSource": SimTypeChar(label="Byte"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None), "CreationTimeStamp": SimTypeBottom(label="LARGE_INTEGER")}, name="MIB_UNICASTIPADDRESS_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetUnicastIpAddressEntry': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": 
SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "PrefixOrigin": 
SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "OnLinkPrefixLength": SimTypeChar(label="Byte"), "SkipAsSource": SimTypeChar(label="Byte"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None), "CreationTimeStamp": SimTypeBottom(label="LARGE_INTEGER")}, name="MIB_UNICASTIPADDRESS_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetUnicastIpAddressTable': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, 
label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "PrefixOrigin": SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "OnLinkPrefixLength": SimTypeChar(label="Byte"), "SkipAsSource": SimTypeChar(label="Byte"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None), "CreationTimeStamp": 
SimTypeBottom(label="LARGE_INTEGER")}, name="MIB_UNICASTIPADDRESS_ROW", pack=False, align=None), offset=0)}, name="MIB_UNICASTIPADDRESS_TABLE", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "Table"]), # 'InitializeUnicastIpAddressEntry': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, 
align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "PrefixOrigin": SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "OnLinkPrefixLength": SimTypeChar(label="Byte"), "SkipAsSource": SimTypeChar(label="Byte"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None), "CreationTimeStamp": SimTypeBottom(label="LARGE_INTEGER")}, name="MIB_UNICASTIPADDRESS_ROW", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["Row"]), # 'NotifyUnicastIpAddressChange': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), 
"S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "PrefixOrigin": SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), 
"OnLinkPrefixLength": SimTypeChar(label="Byte"), "SkipAsSource": SimTypeChar(label="Byte"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None), "CreationTimeStamp": SimTypeBottom(label="LARGE_INTEGER")}, name="MIB_UNICASTIPADDRESS_ROW", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="MIB_NOTIFICATION_TYPE")], SimTypeBottom(label="Void"), arg_names=["CallerContext", "Row", "NotificationType"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeChar(label="Byte"), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "Callback", "CallerContext", "InitialNotification", "NotificationHandle"]), # 'NotifyStableUnicastIpAddressTable': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, 
align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "PrefixOrigin": SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "OnLinkPrefixLength": SimTypeChar(label="Byte"), "SkipAsSource": SimTypeChar(label="Byte"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, 
name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None), "CreationTimeStamp": SimTypeBottom(label="LARGE_INTEGER")}, name="MIB_UNICASTIPADDRESS_ROW", pack=False, align=None), offset=0)}, name="MIB_UNICASTIPADDRESS_TABLE", pack=False, align=None), offset=0), offset=0), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": 
SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "PrefixOrigin": SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "OnLinkPrefixLength": SimTypeChar(label="Byte"), "SkipAsSource": SimTypeChar(label="Byte"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None), "CreationTimeStamp": SimTypeBottom(label="LARGE_INTEGER")}, name="MIB_UNICASTIPADDRESS_ROW", pack=False, align=None), offset=0)}, name="MIB_UNICASTIPADDRESS_TABLE", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["CallerContext", "AddressTable"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "Table", "CallerCallback", 
"CallerContext", "NotificationHandle"]), # 'SetUnicastIpAddressEntry': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": 
SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "PrefixOrigin": SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "OnLinkPrefixLength": SimTypeChar(label="Byte"), "SkipAsSource": SimTypeChar(label="Byte"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None), "CreationTimeStamp": SimTypeBottom(label="LARGE_INTEGER")}, name="MIB_UNICASTIPADDRESS_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'CreateAnycastIpAddressEntry': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": 
SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="MIB_ANYCASTIPADDRESS_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'DeleteAnycastIpAddressEntry': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": 
SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, 
name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="MIB_ANYCASTIPADDRESS_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetAnycastIpAddressEntry': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": 
SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="MIB_ANYCASTIPADDRESS_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetAnycastIpAddressTable': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": 
SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="MIB_ANYCASTIPADDRESS_ROW", pack=False, align=None), offset=0)}, name="MIB_ANYCASTIPADDRESS_TABLE", pack=False, align=None), offset=0), offset=0)], 
SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "Table"]), # 'GetMulticastIpAddressEntry': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceIndex": 
SimTypeInt(signed=False, label="UInt32"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="MIB_MULTICASTIPADDRESS_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetMulticastIpAddressTable': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": 
SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="MIB_MULTICASTIPADDRESS_ROW", pack=False, align=None), offset=0)}, name="MIB_MULTICASTIPADDRESS_TABLE", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "Table"]), # 'CreateIpForwardEntry2': SimTypeFunction([SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), 
"DestinationPrefix": SimStruct({"Prefix": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PrefixLength": SimTypeChar(label="Byte")}, name="IP_ADDRESS_PREFIX", pack=False, align=None), "NextHop": SimUnion({"Ipv4": 
SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "SitePrefixLength": SimTypeChar(label="Byte"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "Metric": 
SimTypeInt(signed=False, label="UInt32"), "Protocol": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL"), "Loopback": SimTypeChar(label="Byte"), "AutoconfigureAddress": SimTypeChar(label="Byte"), "Publish": SimTypeChar(label="Byte"), "Immortal": SimTypeChar(label="Byte"), "Age": SimTypeInt(signed=False, label="UInt32"), "Origin": SimTypeInt(signed=False, label="NL_ROUTE_ORIGIN")}, name="MIB_IPFORWARD_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'DeleteIpForwardEntry2': SimTypeFunction([SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "DestinationPrefix": SimStruct({"Prefix": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": 
SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PrefixLength": SimTypeChar(label="Byte")}, name="IP_ADDRESS_PREFIX", pack=False, align=None), "NextHop": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": 
SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "SitePrefixLength": SimTypeChar(label="Byte"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "Protocol": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL"), "Loopback": SimTypeChar(label="Byte"), "AutoconfigureAddress": SimTypeChar(label="Byte"), "Publish": SimTypeChar(label="Byte"), "Immortal": SimTypeChar(label="Byte"), "Age": SimTypeInt(signed=False, label="UInt32"), "Origin": SimTypeInt(signed=False, label="NL_ROUTE_ORIGIN")}, name="MIB_IPFORWARD_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetBestRoute2': SimTypeFunction([SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": 
SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), offset=0), SimTypePointer(SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), 
"S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "DestinationPrefix": SimStruct({"Prefix": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), 
"sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PrefixLength": SimTypeChar(label="Byte")}, name="IP_ADDRESS_PREFIX", pack=False, align=None), "NextHop": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": 
SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "SitePrefixLength": SimTypeChar(label="Byte"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "Protocol": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL"), "Loopback": SimTypeChar(label="Byte"), "AutoconfigureAddress": SimTypeChar(label="Byte"), "Publish": 
SimTypeChar(label="Byte"), "Immortal": SimTypeChar(label="Byte"), "Age": SimTypeInt(signed=False, label="UInt32"), "Origin": SimTypeInt(signed=False, label="NL_ROUTE_ORIGIN")}, name="MIB_IPFORWARD_ROW2", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), 
"si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceLuid", "InterfaceIndex", "SourceAddress", "DestinationAddress", "AddressSortOptions", "BestRoute", "BestSourceAddress"]), # 'GetIpForwardEntry2': SimTypeFunction([SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "DestinationPrefix": SimStruct({"Prefix": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": 
SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PrefixLength": SimTypeChar(label="Byte")}, name="IP_ADDRESS_PREFIX", pack=False, align=None), "NextHop": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": 
SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "SitePrefixLength": SimTypeChar(label="Byte"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "Protocol": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL"), "Loopback": SimTypeChar(label="Byte"), "AutoconfigureAddress": SimTypeChar(label="Byte"), "Publish": SimTypeChar(label="Byte"), "Immortal": SimTypeChar(label="Byte"), "Age": SimTypeInt(signed=False, label="UInt32"), "Origin": SimTypeInt(signed=False, label="NL_ROUTE_ORIGIN")}, name="MIB_IPFORWARD_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetIpForwardTable2': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "DestinationPrefix": SimStruct({"Prefix": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": 
SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PrefixLength": SimTypeChar(label="Byte")}, name="IP_ADDRESS_PREFIX", pack=False, align=None), "NextHop": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, 
name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "SitePrefixLength": SimTypeChar(label="Byte"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "Protocol": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL"), "Loopback": SimTypeChar(label="Byte"), "AutoconfigureAddress": SimTypeChar(label="Byte"), "Publish": SimTypeChar(label="Byte"), "Immortal": SimTypeChar(label="Byte"), "Age": SimTypeInt(signed=False, label="UInt32"), "Origin": 
SimTypeInt(signed=False, label="NL_ROUTE_ORIGIN")}, name="MIB_IPFORWARD_ROW2", pack=False, align=None), offset=0)}, name="MIB_IPFORWARD_TABLE2", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "Table"]), # 'InitializeIpForwardEntry': SimTypeFunction([SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "DestinationPrefix": SimStruct({"Prefix": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": 
SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PrefixLength": SimTypeChar(label="Byte")}, name="IP_ADDRESS_PREFIX", pack=False, align=None), "NextHop": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": 
SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "SitePrefixLength": SimTypeChar(label="Byte"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "Protocol": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL"), "Loopback": SimTypeChar(label="Byte"), "AutoconfigureAddress": SimTypeChar(label="Byte"), "Publish": SimTypeChar(label="Byte"), "Immortal": SimTypeChar(label="Byte"), "Age": SimTypeInt(signed=False, label="UInt32"), "Origin": SimTypeInt(signed=False, label="NL_ROUTE_ORIGIN")}, name="MIB_IPFORWARD_ROW2", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["Row"]), # 'NotifyRouteChange2': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "DestinationPrefix": SimStruct({"Prefix": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": 
SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PrefixLength": SimTypeChar(label="Byte")}, name="IP_ADDRESS_PREFIX", pack=False, align=None), "NextHop": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, 
align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "SitePrefixLength": SimTypeChar(label="Byte"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "Protocol": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL"), "Loopback": SimTypeChar(label="Byte"), "AutoconfigureAddress": SimTypeChar(label="Byte"), "Publish": SimTypeChar(label="Byte"), "Immortal": SimTypeChar(label="Byte"), "Age": SimTypeInt(signed=False, label="UInt32"), "Origin": SimTypeInt(signed=False, 
label="NL_ROUTE_ORIGIN")}, name="MIB_IPFORWARD_ROW2", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="MIB_NOTIFICATION_TYPE")], SimTypeBottom(label="Void"), arg_names=["CallerContext", "Row", "NotificationType"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeChar(label="Byte"), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["AddressFamily", "Callback", "CallerContext", "InitialNotification", "NotificationHandle"]), # 'SetIpForwardEntry2': SimTypeFunction([SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "DestinationPrefix": SimStruct({"Prefix": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), 
"sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PrefixLength": SimTypeChar(label="Byte")}, name="IP_ADDRESS_PREFIX", pack=False, align=None), "NextHop": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": 
SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "SitePrefixLength": SimTypeChar(label="Byte"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "Protocol": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL"), "Loopback": SimTypeChar(label="Byte"), "AutoconfigureAddress": SimTypeChar(label="Byte"), "Publish": SimTypeChar(label="Byte"), "Immortal": SimTypeChar(label="Byte"), "Age": SimTypeInt(signed=False, label="UInt32"), "Origin": SimTypeInt(signed=False, label="NL_ROUTE_ORIGIN")}, name="MIB_IPFORWARD_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Route"]), # 'FlushIpPathTable': SimTypeFunction([SimTypeShort(signed=False, label="UInt16")], SimTypeInt(signed=True, label="Int32"), arg_names=["Family"]), # 'GetIpPathEntry': SimTypeFunction([SimTypePointer(SimStruct({"Source": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": 
SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "Destination": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": 
SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "CurrentNextHop": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": 
SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PathMtu": SimTypeInt(signed=False, label="UInt32"), "RttMean": SimTypeInt(signed=False, label="UInt32"), "RttDeviation": SimTypeInt(signed=False, label="UInt32"), "Anonymous": SimUnion({"LastReachable": SimTypeInt(signed=False, label="UInt32"), "LastUnreachable": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None"), "IsReachable": SimTypeChar(label="Byte"), "LinkTransmitSpeed": SimTypeLongLong(signed=False, label="UInt64"), "LinkReceiveSpeed": 
SimTypeLongLong(signed=False, label="UInt64")}, name="MIB_IPPATH_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetIpPathTable': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"Source": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", 
label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "Destination": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": 
SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "CurrentNextHop": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", 
label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PathMtu": SimTypeInt(signed=False, label="UInt32"), "RttMean": SimTypeInt(signed=False, label="UInt32"), "RttDeviation": SimTypeInt(signed=False, label="UInt32"), "Anonymous": SimUnion({"LastReachable": SimTypeInt(signed=False, label="UInt32"), "LastUnreachable": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None"), "IsReachable": SimTypeChar(label="Byte"), "LinkTransmitSpeed": SimTypeLongLong(signed=False, label="UInt64"), "LinkReceiveSpeed": SimTypeLongLong(signed=False, label="UInt64")}, name="MIB_IPPATH_ROW", pack=False, align=None), offset=0)}, name="MIB_IPPATH_TABLE", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "Table"]), # 'CreateIpNetEntry2': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": 
SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="NL_NEIGHBOR_STATE"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_Anonymous_e__Struct", pack=False, align=None), "Flags": SimTypeChar(label="Byte")}, name="<anon>", label="None"), "ReachabilityTime": SimUnion({"LastReachable": SimTypeInt(signed=False, label="UInt32"), "LastUnreachable": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="MIB_IPNET_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'DeleteIpNetEntry2': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": 
SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, 
label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="NL_NEIGHBOR_STATE"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_Anonymous_e__Struct", pack=False, align=None), "Flags": SimTypeChar(label="Byte")}, name="<anon>", label="None"), "ReachabilityTime": SimUnion({"LastReachable": SimTypeInt(signed=False, label="UInt32"), "LastUnreachable": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="MIB_IPNET_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'FlushIpNetTable2': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "InterfaceIndex"]), # 'GetIpNetEntry2': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": 
SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="NL_NEIGHBOR_STATE"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_Anonymous_e__Struct", pack=False, align=None), "Flags": SimTypeChar(label="Byte")}, name="<anon>", label="None"), "ReachabilityTime": SimUnion({"LastReachable": SimTypeInt(signed=False, label="UInt32"), "LastUnreachable": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="MIB_IPNET_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetIpNetTable2': SimTypeFunction([SimTypeShort(signed=False, 
label="UInt16"), SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceIndex": 
SimTypeInt(signed=False, label="UInt32"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="NL_NEIGHBOR_STATE"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_Anonymous_e__Struct", pack=False, align=None), "Flags": SimTypeChar(label="Byte")}, name="<anon>", label="None"), "ReachabilityTime": SimUnion({"LastReachable": SimTypeInt(signed=False, label="UInt32"), "LastUnreachable": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="MIB_IPNET_ROW2", pack=False, align=None), offset=0)}, name="MIB_IPNET_TABLE2", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "Table"]), # 'ResolveIpNetEntry2': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": 
SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="NL_NEIGHBOR_STATE"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_Anonymous_e__Struct", pack=False, align=None), "Flags": SimTypeChar(label="Byte")}, name="<anon>", label="None"), "ReachabilityTime": SimUnion({"LastReachable": SimTypeInt(signed=False, label="UInt32"), "LastUnreachable": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="MIB_IPNET_ROW2", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"Ipv4": 
SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row", "SourceAddress"]), # 'SetIpNetEntry2': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": 
SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": 
SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="NL_NEIGHBOR_STATE"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_Anonymous_e__Struct", pack=False, align=None), "Flags": SimTypeChar(label="Byte")}, name="<anon>", label="None"), "ReachabilityTime": SimUnion({"LastReachable": SimTypeInt(signed=False, label="UInt32"), "LastUnreachable": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="MIB_IPNET_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'NotifyTeredoPortChange': SimTypeFunction([SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeShort(signed=False, label="UInt16"), SimTypeInt(signed=False, label="MIB_NOTIFICATION_TYPE")], SimTypeBottom(label="Void"), arg_names=["CallerContext", "Port", "NotificationType"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeChar(label="Byte"), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Callback", "CallerContext", "InitialNotification", "NotificationHandle"]), # 'GetTeredoPort': SimTypeFunction([SimTypePointer(SimTypeShort(signed=False, label="UInt16"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Port"]), # 'CancelMibChangeNotify2': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["NotificationHandle"]), # 'FreeMibTable': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeBottom(label="Void"), 
arg_names=["Memory"]), # 'CreateSortedAddressPairs': SimTypeFunction([SimTypePointer(SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), offset=0), 
SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypePointer(SimStruct({"SourceAddress": SimTypePointer(SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), offset=0), "DestinationAddress": SimTypePointer(SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, 
name="SOCKADDR_IN6", pack=False, align=None), offset=0)}, name="SOCKADDR_IN6_PAIR", pack=False, align=None), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["SourceAddressList", "SourceAddressCount", "DestinationAddressList", "DestinationAddressCount", "AddressSortOptions", "SortedAddressPairList", "SortedAddressPairCount"]), # 'ConvertCompartmentGuidToId': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Guid"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["CompartmentGuid", "CompartmentId"]), # 'ConvertCompartmentIdToGuid': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Guid"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["CompartmentId", "CompartmentGuid"]), # 'ConvertInterfaceNameToLuidA': SimTypeFunction([SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceName", "InterfaceLuid"]), # 'ConvertInterfaceNameToLuidW': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceName", "InterfaceLuid"]), # 'ConvertInterfaceLuidToNameA': SimTypeFunction([SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, 
label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0), SimTypePointer(SimTypeChar(label="Byte"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceLuid", "InterfaceName", "Length"]), # 'ConvertInterfaceLuidToNameW': SimTypeFunction([SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceLuid", "InterfaceName", "Length"]), # 'ConvertInterfaceLuidToIndex': SimTypeFunction([SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceLuid", "InterfaceIndex"]), # 'ConvertInterfaceIndexToLuid': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceIndex", "InterfaceLuid"]), # 'ConvertInterfaceLuidToAlias': SimTypeFunction([SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, 
label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceLuid", "InterfaceAlias", "Length"]), # 'ConvertInterfaceAliasToLuid': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceAlias", "InterfaceLuid"]), # 'ConvertInterfaceLuidToGuid': SimTypeFunction([SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0), SimTypePointer(SimTypeBottom(label="Guid"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceLuid", "InterfaceGuid"]), # 'ConvertInterfaceGuidToLuid': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Guid"), offset=0), SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceGuid", "InterfaceLuid"]), # 'if_nametoindex': SimTypeFunction([SimTypePointer(SimTypeChar(label="Byte"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["InterfaceName"]), # 'if_indextoname': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), label="LPArray", 
offset=0)], SimTypePointer(SimTypeChar(label="Byte"), offset=0), arg_names=["InterfaceIndex", "InterfaceName"]), # 'GetCurrentThreadCompartmentId': SimTypeFunction([], SimTypeInt(signed=False, label="UInt32")), # 'SetCurrentThreadCompartmentId': SimTypeFunction([SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["CompartmentId"]), # 'GetCurrentThreadCompartmentScope': SimTypeFunction([SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeBottom(label="Void"), arg_names=["CompartmentScope", "CompartmentId"]), # 'SetCurrentThreadCompartmentScope': SimTypeFunction([SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["CompartmentScope"]), # 'GetJobCompartmentId': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["JobHandle"]), # 'SetJobCompartmentId': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["JobHandle", "CompartmentId"]), # 'GetSessionCompartmentId': SimTypeFunction([SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["SessionId"]), # 'SetSessionCompartmentId': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["SessionId", "CompartmentId"]), # 'GetDefaultCompartmentId': SimTypeFunction([], SimTypeInt(signed=False, label="UInt32")), # 'GetNetworkInformation': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Guid"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Char"), 
label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["NetworkGuid", "CompartmentId", "SiteId", "NetworkName", "Length"]), # 'SetNetworkInformation': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Guid"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["NetworkGuid", "CompartmentId", "NetworkName"]), # 'ConvertLengthToIpv4Mask': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["MaskLength", "Mask"]), # 'ConvertIpv4MaskToLength': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Mask", "MaskLength"]), # 'GetDnsSettings': SimTypeFunction([SimTypePointer(SimStruct({"Version": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeLongLong(signed=False, label="UInt64"), "Hostname": SimTypePointer(SimTypeChar(label="Char"), offset=0), "Domain": SimTypePointer(SimTypeChar(label="Char"), offset=0), "SearchList": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="DNS_SETTINGS", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Settings"]), # 'FreeDnsSettings': SimTypeFunction([SimTypePointer(SimStruct({"Version": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeLongLong(signed=False, label="UInt64"), "Hostname": SimTypePointer(SimTypeChar(label="Char"), offset=0), "Domain": SimTypePointer(SimTypeChar(label="Char"), offset=0), "SearchList": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="DNS_SETTINGS", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["Settings"]), # 'SetDnsSettings': SimTypeFunction([SimTypePointer(SimStruct({"Version": 
SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeLongLong(signed=False, label="UInt64"), "Hostname": SimTypePointer(SimTypeChar(label="Char"), offset=0), "Domain": SimTypePointer(SimTypeChar(label="Char"), offset=0), "SearchList": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="DNS_SETTINGS", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Settings"]), # 'GetInterfaceDnsSettings': SimTypeFunction([SimTypeBottom(label="Guid"), SimTypePointer(SimStruct({"Version": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeLongLong(signed=False, label="UInt64"), "Domain": SimTypePointer(SimTypeChar(label="Char"), offset=0), "NameServer": SimTypePointer(SimTypeChar(label="Char"), offset=0), "SearchList": SimTypePointer(SimTypeChar(label="Char"), offset=0), "RegistrationEnabled": SimTypeInt(signed=False, label="UInt32"), "RegisterAdapterName": SimTypeInt(signed=False, label="UInt32"), "EnableLLMNR": SimTypeInt(signed=False, label="UInt32"), "QueryAdapterName": SimTypeInt(signed=False, label="UInt32"), "ProfileNameServer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="DNS_INTERFACE_SETTINGS", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Interface", "Settings"]), # 'FreeInterfaceDnsSettings': SimTypeFunction([SimTypePointer(SimStruct({"Version": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeLongLong(signed=False, label="UInt64"), "Domain": SimTypePointer(SimTypeChar(label="Char"), offset=0), "NameServer": SimTypePointer(SimTypeChar(label="Char"), offset=0), "SearchList": SimTypePointer(SimTypeChar(label="Char"), offset=0), "RegistrationEnabled": SimTypeInt(signed=False, label="UInt32"), "RegisterAdapterName": SimTypeInt(signed=False, label="UInt32"), "EnableLLMNR": SimTypeInt(signed=False, label="UInt32"), "QueryAdapterName": SimTypeInt(signed=False, label="UInt32"), "ProfileNameServer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, 
name="DNS_INTERFACE_SETTINGS", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["Settings"]), # 'SetInterfaceDnsSettings': SimTypeFunction([SimTypeBottom(label="Guid"), SimTypePointer(SimStruct({"Version": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeLongLong(signed=False, label="UInt64"), "Domain": SimTypePointer(SimTypeChar(label="Char"), offset=0), "NameServer": SimTypePointer(SimTypeChar(label="Char"), offset=0), "SearchList": SimTypePointer(SimTypeChar(label="Char"), offset=0), "RegistrationEnabled": SimTypeInt(signed=False, label="UInt32"), "RegisterAdapterName": SimTypeInt(signed=False, label="UInt32"), "EnableLLMNR": SimTypeInt(signed=False, label="UInt32"), "QueryAdapterName": SimTypeInt(signed=False, label="UInt32"), "ProfileNameServer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="DNS_INTERFACE_SETTINGS", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Interface", "Settings"]), # 'GetNetworkConnectivityHint': SimTypeFunction([SimTypePointer(SimStruct({"ConnectivityLevel": SimTypeInt(signed=False, label="NL_NETWORK_CONNECTIVITY_LEVEL_HINT"), "ConnectivityCost": SimTypeInt(signed=False, label="NL_NETWORK_CONNECTIVITY_COST_HINT"), "ApproachingDataLimit": SimTypeChar(label="Byte"), "OverDataLimit": SimTypeChar(label="Byte"), "Roaming": SimTypeChar(label="Byte")}, name="NL_NETWORK_CONNECTIVITY_HINT", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["ConnectivityHint"]), # 'GetNetworkConnectivityHintForInterface': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"ConnectivityLevel": SimTypeInt(signed=False, label="NL_NETWORK_CONNECTIVITY_LEVEL_HINT"), "ConnectivityCost": SimTypeInt(signed=False, label="NL_NETWORK_CONNECTIVITY_COST_HINT"), "ApproachingDataLimit": SimTypeChar(label="Byte"), "OverDataLimit": SimTypeChar(label="Byte"), "Roaming": SimTypeChar(label="Byte")}, 
name="NL_NETWORK_CONNECTIVITY_HINT", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceIndex", "ConnectivityHint"]), # 'NotifyNetworkConnectivityHintChange': SimTypeFunction([SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimStruct({"ConnectivityLevel": SimTypeInt(signed=False, label="NL_NETWORK_CONNECTIVITY_LEVEL_HINT"), "ConnectivityCost": SimTypeInt(signed=False, label="NL_NETWORK_CONNECTIVITY_COST_HINT"), "ApproachingDataLimit": SimTypeChar(label="Byte"), "OverDataLimit": SimTypeChar(label="Byte"), "Roaming": SimTypeChar(label="Byte")}, name="NL_NETWORK_CONNECTIVITY_HINT", pack=False, align=None)], SimTypeBottom(label="Void"), arg_names=["CallerContext", "ConnectivityHint"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeChar(label="Byte"), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Callback", "CallerContext", "InitialNotification", "NotificationHandle"]), # 'IcmpCreateFile': SimTypeFunction([], SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)), # 'Icmp6CreateFile': SimTypeFunction([], SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)), # 'IcmpCloseHandle': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["IcmpHandle"]), # 'IcmpSendEcho': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimStruct({"Ttl": SimTypeChar(label="Byte"), "Tos": SimTypeChar(label="Byte"), "Flags": SimTypeChar(label="Byte"), "OptionsSize": SimTypeChar(label="Byte"), "OptionsData": 
SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="ip_option_information", pack=False, align=None), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["IcmpHandle", "DestinationAddress", "RequestData", "RequestSize", "RequestOptions", "ReplyBuffer", "ReplySize", "Timeout"]), # 'IcmpSendEcho2': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"Anonymous": SimUnion({"Status": SimTypeInt(signed=True, label="Int32"), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "Information": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)}, name="IO_STATUS_BLOCK", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeBottom(label="Void"), arg_names=["ApcContext", "IoStatusBlock", "Reserved"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimStruct({"Ttl": SimTypeChar(label="Byte"), "Tos": SimTypeChar(label="Byte"), "Flags": SimTypeChar(label="Byte"), "OptionsSize": SimTypeChar(label="Byte"), "OptionsData": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="ip_option_information", pack=False, align=None), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["IcmpHandle", "Event", "ApcRoutine", "ApcContext", "DestinationAddress", "RequestData", 
"RequestSize", "RequestOptions", "ReplyBuffer", "ReplySize", "Timeout"]), # 'IcmpSendEcho2Ex': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"Anonymous": SimUnion({"Status": SimTypeInt(signed=True, label="Int32"), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "Information": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)}, name="IO_STATUS_BLOCK", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeBottom(label="Void"), arg_names=["ApcContext", "IoStatusBlock", "Reserved"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimStruct({"Ttl": SimTypeChar(label="Byte"), "Tos": SimTypeChar(label="Byte"), "Flags": SimTypeChar(label="Byte"), "OptionsSize": SimTypeChar(label="Byte"), "OptionsData": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="ip_option_information", pack=False, align=None), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["IcmpHandle", "Event", "ApcRoutine", "ApcContext", "SourceAddress", "DestinationAddress", "RequestData", "RequestSize", "RequestOptions", "ReplyBuffer", "ReplySize", "Timeout"]), # 'Icmp6SendEcho2': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), 
SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"Anonymous": SimUnion({"Status": SimTypeInt(signed=True, label="Int32"), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "Information": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)}, name="IO_STATUS_BLOCK", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeBottom(label="Void"), arg_names=["ApcContext", "IoStatusBlock", "Reserved"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, 
name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimStruct({"Ttl": SimTypeChar(label="Byte"), "Tos": SimTypeChar(label="Byte"), "Flags": SimTypeChar(label="Byte"), "OptionsSize": SimTypeChar(label="Byte"), "OptionsData": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="ip_option_information", pack=False, align=None), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["IcmpHandle", "Event", "ApcRoutine", "ApcContext", "SourceAddress", "DestinationAddress", "RequestData", "RequestSize", "RequestOptions", "ReplyBuffer", "ReplySize", "Timeout"]), # 'IcmpParseReplies': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["ReplyBuffer", "ReplySize"]), # 'Icmp6ParseReplies': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["ReplyBuffer", "ReplySize"]), # 'GetNumberOfInterfaces': SimTypeFunction([SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pdwNumIf"]), # 'GetIfEntry': SimTypeFunction([SimTypePointer(SimStruct({"wszName": 
SimTypeFixedSizeArray(SimTypeChar(label="Char"), 256), "dwIndex": SimTypeInt(signed=False, label="UInt32"), "dwType": SimTypeInt(signed=False, label="UInt32"), "dwMtu": SimTypeInt(signed=False, label="UInt32"), "dwSpeed": SimTypeInt(signed=False, label="UInt32"), "dwPhysAddrLen": SimTypeInt(signed=False, label="UInt32"), "bPhysAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 8), "dwAdminStatus": SimTypeInt(signed=False, label="UInt32"), "dwOperStatus": SimTypeInt(signed=False, label="INTERNAL_IF_OPER_STATUS"), "dwLastChange": SimTypeInt(signed=False, label="UInt32"), "dwInOctets": SimTypeInt(signed=False, label="UInt32"), "dwInUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwInNUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwInDiscards": SimTypeInt(signed=False, label="UInt32"), "dwInErrors": SimTypeInt(signed=False, label="UInt32"), "dwInUnknownProtos": SimTypeInt(signed=False, label="UInt32"), "dwOutOctets": SimTypeInt(signed=False, label="UInt32"), "dwOutUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwOutNUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwOutDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutErrors": SimTypeInt(signed=False, label="UInt32"), "dwOutQLen": SimTypeInt(signed=False, label="UInt32"), "dwDescrLen": SimTypeInt(signed=False, label="UInt32"), "bDescr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 256)}, name="MIB_IFROW", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pIfRow"]), # 'GetIfTable': SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"wszName": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 256), "dwIndex": SimTypeInt(signed=False, label="UInt32"), "dwType": SimTypeInt(signed=False, label="UInt32"), "dwMtu": SimTypeInt(signed=False, label="UInt32"), "dwSpeed": SimTypeInt(signed=False, label="UInt32"), "dwPhysAddrLen": SimTypeInt(signed=False, 
label="UInt32"), "bPhysAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 8), "dwAdminStatus": SimTypeInt(signed=False, label="UInt32"), "dwOperStatus": SimTypeInt(signed=False, label="INTERNAL_IF_OPER_STATUS"), "dwLastChange": SimTypeInt(signed=False, label="UInt32"), "dwInOctets": SimTypeInt(signed=False, label="UInt32"), "dwInUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwInNUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwInDiscards": SimTypeInt(signed=False, label="UInt32"), "dwInErrors": SimTypeInt(signed=False, label="UInt32"), "dwInUnknownProtos": SimTypeInt(signed=False, label="UInt32"), "dwOutOctets": SimTypeInt(signed=False, label="UInt32"), "dwOutUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwOutNUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwOutDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutErrors": SimTypeInt(signed=False, label="UInt32"), "dwOutQLen": SimTypeInt(signed=False, label="UInt32"), "dwDescrLen": SimTypeInt(signed=False, label="UInt32"), "bDescr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 256)}, name="MIB_IFROW", pack=False, align=None), offset=0)}, name="MIB_IFTABLE", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pIfTable", "pdwSize", "bOrder"]), # 'GetIpAddrTable': SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"dwAddr": SimTypeInt(signed=False, label="UInt32"), "dwIndex": SimTypeInt(signed=False, label="UInt32"), "dwMask": SimTypeInt(signed=False, label="UInt32"), "dwBCastAddr": SimTypeInt(signed=False, label="UInt32"), "dwReasmSize": SimTypeInt(signed=False, label="UInt32"), "unused1": SimTypeShort(signed=False, label="UInt16"), "wType": SimTypeShort(signed=False, label="UInt16")}, name="MIB_IPADDRROW_XP", pack=False, align=None), 
offset=0)}, name="MIB_IPADDRTABLE", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pIpAddrTable", "pdwSize", "bOrder"]), # 'GetIpNetTable': SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"dwIndex": SimTypeInt(signed=False, label="UInt32"), "dwPhysAddrLen": SimTypeInt(signed=False, label="UInt32"), "bPhysAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 8), "dwAddr": SimTypeInt(signed=False, label="UInt32"), "Anonymous": SimUnion({"dwType": SimTypeInt(signed=False, label="UInt32"), "Type": SimTypeInt(signed=False, label="MIB_IPNET_TYPE")}, name="<anon>", label="None")}, name="MIB_IPNETROW_LH", pack=False, align=None), offset=0)}, name="MIB_IPNETTABLE", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["IpNetTable", "SizePointer", "Order"]), # 'GetIpForwardTable': SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"dwForwardDest": SimTypeInt(signed=False, label="UInt32"), "dwForwardMask": SimTypeInt(signed=False, label="UInt32"), "dwForwardPolicy": SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHop": SimTypeInt(signed=False, label="UInt32"), "dwForwardIfIndex": SimTypeInt(signed=False, label="UInt32"), "Anonymous1": SimUnion({"dwForwardType": SimTypeInt(signed=False, label="UInt32"), "ForwardType": SimTypeInt(signed=False, label="MIB_IPFORWARD_TYPE")}, name="<anon>", label="None"), "Anonymous2": SimUnion({"dwForwardProto": SimTypeInt(signed=False, label="UInt32"), "ForwardProto": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL")}, name="<anon>", label="None"), "dwForwardAge": 
SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHopAS": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric1": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric2": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric3": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric4": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric5": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IPFORWARDROW", pack=False, align=None), offset=0)}, name="MIB_IPFORWARDTABLE", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pIpForwardTable", "pdwSize", "bOrder"]), # 'GetTcpTable': SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwState": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="MIB_TCP_STATE")}, name="<anon>", label="None"), "dwLocalAddr": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "dwRemoteAddr": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCPROW_LH", pack=False, align=None), offset=0)}, name="MIB_TCPTABLE", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["TcpTable", "SizePointer", "Order"]), # 'GetExtendedTcpTable': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="TCP_TABLE_CLASS"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), 
arg_names=["pTcpTable", "pdwSize", "bOrder", "ulAf", "TableClass", "Reserved"]), # 'GetOwnerModuleFromTcpEntry': SimTypeFunction([SimTypePointer(SimStruct({"dwState": SimTypeInt(signed=False, label="UInt32"), "dwLocalAddr": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "dwRemoteAddr": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32"), "dwOwningPid": SimTypeInt(signed=False, label="UInt32"), "liCreateTimestamp": SimTypeBottom(label="LARGE_INTEGER"), "OwningModuleInfo": SimTypeFixedSizeArray(SimTypeLongLong(signed=False, label="UInt64"), 16)}, name="MIB_TCPROW_OWNER_MODULE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="TCPIP_OWNER_MODULE_INFO_CLASS"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pTcpEntry", "Class", "pBuffer", "pdwSize"]), # 'GetUdpTable': SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"dwLocalAddr": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32")}, name="MIB_UDPROW", pack=False, align=None), offset=0)}, name="MIB_UDPTABLE", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["UdpTable", "SizePointer", "Order"]), # 'GetExtendedUdpTable': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UDP_TABLE_CLASS"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pUdpTable", 
"pdwSize", "bOrder", "ulAf", "TableClass", "Reserved"]), # 'GetOwnerModuleFromUdpEntry': SimTypeFunction([SimTypePointer(SimStruct({"dwLocalAddr": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "dwOwningPid": SimTypeInt(signed=False, label="UInt32"), "liCreateTimestamp": SimTypeBottom(label="LARGE_INTEGER"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=True, label="Int32")}, name="_Anonymous_e__Struct", pack=False, align=None), "dwFlags": SimTypeInt(signed=True, label="Int32")}, name="<anon>", label="None"), "OwningModuleInfo": SimTypeFixedSizeArray(SimTypeLongLong(signed=False, label="UInt64"), 16)}, name="MIB_UDPROW_OWNER_MODULE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="TCPIP_OWNER_MODULE_INFO_CLASS"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pUdpEntry", "Class", "pBuffer", "pdwSize"]), # 'GetTcpTable2': SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"dwState": SimTypeInt(signed=False, label="UInt32"), "dwLocalAddr": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "dwRemoteAddr": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32"), "dwOwningPid": SimTypeInt(signed=False, label="UInt32"), "dwOffloadState": SimTypeInt(signed=False, label="TCP_CONNECTION_OFFLOAD_STATE")}, name="MIB_TCPROW2", pack=False, align=None), offset=0)}, name="MIB_TCPTABLE2", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["TcpTable", "SizePointer", "Order"]), # 'GetTcp6Table': 
SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"State": SimTypeInt(signed=False, label="MIB_TCP_STATE"), "LocalAddr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "dwLocalScopeId": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "RemoteAddr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "dwRemoteScopeId": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCP6ROW", pack=False, align=None), offset=0)}, name="MIB_TCP6TABLE", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["TcpTable", "SizePointer", "Order"]), # 'GetTcp6Table2': SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"LocalAddr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "dwLocalScopeId": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "RemoteAddr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, 
name="IN6_ADDR", pack=False, align=None), "dwRemoteScopeId": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="MIB_TCP_STATE"), "dwOwningPid": SimTypeInt(signed=False, label="UInt32"), "dwOffloadState": SimTypeInt(signed=False, label="TCP_CONNECTION_OFFLOAD_STATE")}, name="MIB_TCP6ROW2", pack=False, align=None), offset=0)}, name="MIB_TCP6TABLE2", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["TcpTable", "SizePointer", "Order"]), # 'GetPerTcpConnectionEStats': SimTypeFunction([SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwState": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="MIB_TCP_STATE")}, name="<anon>", label="None"), "dwLocalAddr": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "dwRemoteAddr": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCPROW_LH", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="TCP_ESTATS_TYPE"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Row", "EstatsType", "Rw", "RwVersion", "RwSize", "Ros", "RosVersion", "RosSize", "Rod", "RodVersion", "RodSize"]), # 'SetPerTcpConnectionEStats': SimTypeFunction([SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwState": SimTypeInt(signed=False, label="UInt32"), "State": 
SimTypeInt(signed=False, label="MIB_TCP_STATE")}, name="<anon>", label="None"), "dwLocalAddr": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "dwRemoteAddr": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCPROW_LH", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="TCP_ESTATS_TYPE"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Row", "EstatsType", "Rw", "RwVersion", "RwSize", "Offset"]), # 'GetPerTcp6ConnectionEStats': SimTypeFunction([SimTypePointer(SimStruct({"State": SimTypeInt(signed=False, label="MIB_TCP_STATE"), "LocalAddr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "dwLocalScopeId": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "RemoteAddr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "dwRemoteScopeId": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCP6ROW", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="TCP_ESTATS_TYPE"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), 
SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Row", "EstatsType", "Rw", "RwVersion", "RwSize", "Ros", "RosVersion", "RosSize", "Rod", "RodVersion", "RodSize"]), # 'SetPerTcp6ConnectionEStats': SimTypeFunction([SimTypePointer(SimStruct({"State": SimTypeInt(signed=False, label="MIB_TCP_STATE"), "LocalAddr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "dwLocalScopeId": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "RemoteAddr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "dwRemoteScopeId": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCP6ROW", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="TCP_ESTATS_TYPE"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Row", "EstatsType", "Rw", "RwVersion", "RwSize", "Offset"]), # 'GetOwnerModuleFromTcp6Entry': SimTypeFunction([SimTypePointer(SimStruct({"ucLocalAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "dwLocalScopeId": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "ucRemoteAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "dwRemoteScopeId": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": 
SimTypeInt(signed=False, label="UInt32"), "dwState": SimTypeInt(signed=False, label="UInt32"), "dwOwningPid": SimTypeInt(signed=False, label="UInt32"), "liCreateTimestamp": SimTypeBottom(label="LARGE_INTEGER"), "OwningModuleInfo": SimTypeFixedSizeArray(SimTypeLongLong(signed=False, label="UInt64"), 16)}, name="MIB_TCP6ROW_OWNER_MODULE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="TCPIP_OWNER_MODULE_INFO_CLASS"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pTcpEntry", "Class", "pBuffer", "pdwSize"]), # 'GetUdp6Table': SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"dwLocalAddr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "dwLocalScopeId": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32")}, name="MIB_UDP6ROW", pack=False, align=None), offset=0)}, name="MIB_UDP6TABLE", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Udp6Table", "SizePointer", "Order"]), # 'GetOwnerModuleFromUdp6Entry': SimTypeFunction([SimTypePointer(SimStruct({"ucLocalAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "dwLocalScopeId": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "dwOwningPid": SimTypeInt(signed=False, label="UInt32"), "liCreateTimestamp": SimTypeBottom(label="LARGE_INTEGER"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=True, label="Int32")}, 
name="_Anonymous_e__Struct", pack=False, align=None), "dwFlags": SimTypeInt(signed=True, label="Int32")}, name="<anon>", label="None"), "OwningModuleInfo": SimTypeFixedSizeArray(SimTypeLongLong(signed=False, label="UInt64"), 16)}, name="MIB_UDP6ROW_OWNER_MODULE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="TCPIP_OWNER_MODULE_INFO_CLASS"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pUdpEntry", "Class", "pBuffer", "pdwSize"]), # 'GetOwnerModuleFromPidAndInfo': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeLongLong(signed=False, label="UInt64"), offset=0), SimTypeInt(signed=False, label="TCPIP_OWNER_MODULE_INFO_CLASS"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["ulPid", "pInfo", "Class", "pBuffer", "pdwSize"]), # 'GetIpStatistics': SimTypeFunction([SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwForwarding": SimTypeInt(signed=False, label="UInt32"), "Forwarding": SimTypeInt(signed=False, label="MIB_IPSTATS_FORWARDING")}, name="<anon>", label="None"), "dwDefaultTTL": SimTypeInt(signed=False, label="UInt32"), "dwInReceives": SimTypeInt(signed=False, label="UInt32"), "dwInHdrErrors": SimTypeInt(signed=False, label="UInt32"), "dwInAddrErrors": SimTypeInt(signed=False, label="UInt32"), "dwForwDatagrams": SimTypeInt(signed=False, label="UInt32"), "dwInUnknownProtos": SimTypeInt(signed=False, label="UInt32"), "dwInDiscards": SimTypeInt(signed=False, label="UInt32"), "dwInDelivers": SimTypeInt(signed=False, label="UInt32"), "dwOutRequests": SimTypeInt(signed=False, label="UInt32"), "dwRoutingDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutNoRoutes": SimTypeInt(signed=False, 
label="UInt32"), "dwReasmTimeout": SimTypeInt(signed=False, label="UInt32"), "dwReasmReqds": SimTypeInt(signed=False, label="UInt32"), "dwReasmOks": SimTypeInt(signed=False, label="UInt32"), "dwReasmFails": SimTypeInt(signed=False, label="UInt32"), "dwFragOks": SimTypeInt(signed=False, label="UInt32"), "dwFragFails": SimTypeInt(signed=False, label="UInt32"), "dwFragCreates": SimTypeInt(signed=False, label="UInt32"), "dwNumIf": SimTypeInt(signed=False, label="UInt32"), "dwNumAddr": SimTypeInt(signed=False, label="UInt32"), "dwNumRoutes": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IPSTATS_LH", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics"]), # 'GetIcmpStatistics': SimTypeFunction([SimTypePointer(SimStruct({"stats": SimStruct({"icmpInStats": SimStruct({"dwMsgs": SimTypeInt(signed=False, label="UInt32"), "dwErrors": SimTypeInt(signed=False, label="UInt32"), "dwDestUnreachs": SimTypeInt(signed=False, label="UInt32"), "dwTimeExcds": SimTypeInt(signed=False, label="UInt32"), "dwParmProbs": SimTypeInt(signed=False, label="UInt32"), "dwSrcQuenchs": SimTypeInt(signed=False, label="UInt32"), "dwRedirects": SimTypeInt(signed=False, label="UInt32"), "dwEchos": SimTypeInt(signed=False, label="UInt32"), "dwEchoReps": SimTypeInt(signed=False, label="UInt32"), "dwTimestamps": SimTypeInt(signed=False, label="UInt32"), "dwTimestampReps": SimTypeInt(signed=False, label="UInt32"), "dwAddrMasks": SimTypeInt(signed=False, label="UInt32"), "dwAddrMaskReps": SimTypeInt(signed=False, label="UInt32")}, name="MIBICMPSTATS", pack=False, align=None), "icmpOutStats": SimStruct({"dwMsgs": SimTypeInt(signed=False, label="UInt32"), "dwErrors": SimTypeInt(signed=False, label="UInt32"), "dwDestUnreachs": SimTypeInt(signed=False, label="UInt32"), "dwTimeExcds": SimTypeInt(signed=False, label="UInt32"), "dwParmProbs": SimTypeInt(signed=False, label="UInt32"), "dwSrcQuenchs": SimTypeInt(signed=False, label="UInt32"), "dwRedirects": 
SimTypeInt(signed=False, label="UInt32"), "dwEchos": SimTypeInt(signed=False, label="UInt32"), "dwEchoReps": SimTypeInt(signed=False, label="UInt32"), "dwTimestamps": SimTypeInt(signed=False, label="UInt32"), "dwTimestampReps": SimTypeInt(signed=False, label="UInt32"), "dwAddrMasks": SimTypeInt(signed=False, label="UInt32"), "dwAddrMaskReps": SimTypeInt(signed=False, label="UInt32")}, name="MIBICMPSTATS", pack=False, align=None)}, name="MIBICMPINFO", pack=False, align=None)}, name="MIB_ICMP", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics"]), # 'GetTcpStatistics': SimTypeFunction([SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwRtoAlgorithm": SimTypeInt(signed=False, label="UInt32"), "RtoAlgorithm": SimTypeInt(signed=False, label="TCP_RTO_ALGORITHM")}, name="<anon>", label="None"), "dwRtoMin": SimTypeInt(signed=False, label="UInt32"), "dwRtoMax": SimTypeInt(signed=False, label="UInt32"), "dwMaxConn": SimTypeInt(signed=False, label="UInt32"), "dwActiveOpens": SimTypeInt(signed=False, label="UInt32"), "dwPassiveOpens": SimTypeInt(signed=False, label="UInt32"), "dwAttemptFails": SimTypeInt(signed=False, label="UInt32"), "dwEstabResets": SimTypeInt(signed=False, label="UInt32"), "dwCurrEstab": SimTypeInt(signed=False, label="UInt32"), "dwInSegs": SimTypeInt(signed=False, label="UInt32"), "dwOutSegs": SimTypeInt(signed=False, label="UInt32"), "dwRetransSegs": SimTypeInt(signed=False, label="UInt32"), "dwInErrs": SimTypeInt(signed=False, label="UInt32"), "dwOutRsts": SimTypeInt(signed=False, label="UInt32"), "dwNumConns": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCPSTATS_LH", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics"]), # 'GetUdpStatistics': SimTypeFunction([SimTypePointer(SimStruct({"dwInDatagrams": SimTypeInt(signed=False, label="UInt32"), "dwNoPorts": SimTypeInt(signed=False, label="UInt32"), "dwInErrors": SimTypeInt(signed=False, 
label="UInt32"), "dwOutDatagrams": SimTypeInt(signed=False, label="UInt32"), "dwNumAddrs": SimTypeInt(signed=False, label="UInt32")}, name="MIB_UDPSTATS", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["Stats"]), # 'SetIpStatisticsEx': SimTypeFunction([SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwForwarding": SimTypeInt(signed=False, label="UInt32"), "Forwarding": SimTypeInt(signed=False, label="MIB_IPSTATS_FORWARDING")}, name="<anon>", label="None"), "dwDefaultTTL": SimTypeInt(signed=False, label="UInt32"), "dwInReceives": SimTypeInt(signed=False, label="UInt32"), "dwInHdrErrors": SimTypeInt(signed=False, label="UInt32"), "dwInAddrErrors": SimTypeInt(signed=False, label="UInt32"), "dwForwDatagrams": SimTypeInt(signed=False, label="UInt32"), "dwInUnknownProtos": SimTypeInt(signed=False, label="UInt32"), "dwInDiscards": SimTypeInt(signed=False, label="UInt32"), "dwInDelivers": SimTypeInt(signed=False, label="UInt32"), "dwOutRequests": SimTypeInt(signed=False, label="UInt32"), "dwRoutingDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutNoRoutes": SimTypeInt(signed=False, label="UInt32"), "dwReasmTimeout": SimTypeInt(signed=False, label="UInt32"), "dwReasmReqds": SimTypeInt(signed=False, label="UInt32"), "dwReasmOks": SimTypeInt(signed=False, label="UInt32"), "dwReasmFails": SimTypeInt(signed=False, label="UInt32"), "dwFragOks": SimTypeInt(signed=False, label="UInt32"), "dwFragFails": SimTypeInt(signed=False, label="UInt32"), "dwFragCreates": SimTypeInt(signed=False, label="UInt32"), "dwNumIf": SimTypeInt(signed=False, label="UInt32"), "dwNumAddr": SimTypeInt(signed=False, label="UInt32"), "dwNumRoutes": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IPSTATS_LH", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics", "Family"]), # 'GetIpStatisticsEx': 
SimTypeFunction([SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwForwarding": SimTypeInt(signed=False, label="UInt32"), "Forwarding": SimTypeInt(signed=False, label="MIB_IPSTATS_FORWARDING")}, name="<anon>", label="None"), "dwDefaultTTL": SimTypeInt(signed=False, label="UInt32"), "dwInReceives": SimTypeInt(signed=False, label="UInt32"), "dwInHdrErrors": SimTypeInt(signed=False, label="UInt32"), "dwInAddrErrors": SimTypeInt(signed=False, label="UInt32"), "dwForwDatagrams": SimTypeInt(signed=False, label="UInt32"), "dwInUnknownProtos": SimTypeInt(signed=False, label="UInt32"), "dwInDiscards": SimTypeInt(signed=False, label="UInt32"), "dwInDelivers": SimTypeInt(signed=False, label="UInt32"), "dwOutRequests": SimTypeInt(signed=False, label="UInt32"), "dwRoutingDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutNoRoutes": SimTypeInt(signed=False, label="UInt32"), "dwReasmTimeout": SimTypeInt(signed=False, label="UInt32"), "dwReasmReqds": SimTypeInt(signed=False, label="UInt32"), "dwReasmOks": SimTypeInt(signed=False, label="UInt32"), "dwReasmFails": SimTypeInt(signed=False, label="UInt32"), "dwFragOks": SimTypeInt(signed=False, label="UInt32"), "dwFragFails": SimTypeInt(signed=False, label="UInt32"), "dwFragCreates": SimTypeInt(signed=False, label="UInt32"), "dwNumIf": SimTypeInt(signed=False, label="UInt32"), "dwNumAddr": SimTypeInt(signed=False, label="UInt32"), "dwNumRoutes": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IPSTATS_LH", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="ADDRESS_FAMILY")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics", "Family"]), # 'GetIcmpStatisticsEx': SimTypeFunction([SimTypePointer(SimStruct({"icmpInStats": SimStruct({"dwMsgs": SimTypeInt(signed=False, label="UInt32"), "dwErrors": SimTypeInt(signed=False, label="UInt32"), "rgdwTypeCount": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 256)}, 
name="MIBICMPSTATS_EX_XPSP1", pack=False, align=None), "icmpOutStats": SimStruct({"dwMsgs": SimTypeInt(signed=False, label="UInt32"), "dwErrors": SimTypeInt(signed=False, label="UInt32"), "rgdwTypeCount": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 256)}, name="MIBICMPSTATS_EX_XPSP1", pack=False, align=None)}, name="MIB_ICMP_EX_XPSP1", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics", "Family"]), # 'GetTcpStatisticsEx': SimTypeFunction([SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwRtoAlgorithm": SimTypeInt(signed=False, label="UInt32"), "RtoAlgorithm": SimTypeInt(signed=False, label="TCP_RTO_ALGORITHM")}, name="<anon>", label="None"), "dwRtoMin": SimTypeInt(signed=False, label="UInt32"), "dwRtoMax": SimTypeInt(signed=False, label="UInt32"), "dwMaxConn": SimTypeInt(signed=False, label="UInt32"), "dwActiveOpens": SimTypeInt(signed=False, label="UInt32"), "dwPassiveOpens": SimTypeInt(signed=False, label="UInt32"), "dwAttemptFails": SimTypeInt(signed=False, label="UInt32"), "dwEstabResets": SimTypeInt(signed=False, label="UInt32"), "dwCurrEstab": SimTypeInt(signed=False, label="UInt32"), "dwInSegs": SimTypeInt(signed=False, label="UInt32"), "dwOutSegs": SimTypeInt(signed=False, label="UInt32"), "dwRetransSegs": SimTypeInt(signed=False, label="UInt32"), "dwInErrs": SimTypeInt(signed=False, label="UInt32"), "dwOutRsts": SimTypeInt(signed=False, label="UInt32"), "dwNumConns": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCPSTATS_LH", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="ADDRESS_FAMILY")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics", "Family"]), # 'GetUdpStatisticsEx': SimTypeFunction([SimTypePointer(SimStruct({"dwInDatagrams": SimTypeInt(signed=False, label="UInt32"), "dwNoPorts": SimTypeInt(signed=False, label="UInt32"), "dwInErrors": SimTypeInt(signed=False, label="UInt32"), 
"dwOutDatagrams": SimTypeInt(signed=False, label="UInt32"), "dwNumAddrs": SimTypeInt(signed=False, label="UInt32")}, name="MIB_UDPSTATS", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="ADDRESS_FAMILY")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics", "Family"]), # 'GetTcpStatisticsEx2': SimTypeFunction([SimTypePointer(SimStruct({"RtoAlgorithm": SimTypeInt(signed=False, label="TCP_RTO_ALGORITHM"), "dwRtoMin": SimTypeInt(signed=False, label="UInt32"), "dwRtoMax": SimTypeInt(signed=False, label="UInt32"), "dwMaxConn": SimTypeInt(signed=False, label="UInt32"), "dwActiveOpens": SimTypeInt(signed=False, label="UInt32"), "dwPassiveOpens": SimTypeInt(signed=False, label="UInt32"), "dwAttemptFails": SimTypeInt(signed=False, label="UInt32"), "dwEstabResets": SimTypeInt(signed=False, label="UInt32"), "dwCurrEstab": SimTypeInt(signed=False, label="UInt32"), "dw64InSegs": SimTypeLongLong(signed=False, label="UInt64"), "dw64OutSegs": SimTypeLongLong(signed=False, label="UInt64"), "dwRetransSegs": SimTypeInt(signed=False, label="UInt32"), "dwInErrs": SimTypeInt(signed=False, label="UInt32"), "dwOutRsts": SimTypeInt(signed=False, label="UInt32"), "dwNumConns": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCPSTATS2", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="ADDRESS_FAMILY")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics", "Family"]), # 'GetUdpStatisticsEx2': SimTypeFunction([SimTypePointer(SimStruct({"dw64InDatagrams": SimTypeLongLong(signed=False, label="UInt64"), "dwNoPorts": SimTypeInt(signed=False, label="UInt32"), "dwInErrors": SimTypeInt(signed=False, label="UInt32"), "dw64OutDatagrams": SimTypeLongLong(signed=False, label="UInt64"), "dwNumAddrs": SimTypeInt(signed=False, label="UInt32")}, name="MIB_UDPSTATS2", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="ADDRESS_FAMILY")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics", "Family"]), # 
'SetIfEntry': SimTypeFunction([SimTypePointer(SimStruct({"wszName": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 256), "dwIndex": SimTypeInt(signed=False, label="UInt32"), "dwType": SimTypeInt(signed=False, label="UInt32"), "dwMtu": SimTypeInt(signed=False, label="UInt32"), "dwSpeed": SimTypeInt(signed=False, label="UInt32"), "dwPhysAddrLen": SimTypeInt(signed=False, label="UInt32"), "bPhysAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 8), "dwAdminStatus": SimTypeInt(signed=False, label="UInt32"), "dwOperStatus": SimTypeInt(signed=False, label="INTERNAL_IF_OPER_STATUS"), "dwLastChange": SimTypeInt(signed=False, label="UInt32"), "dwInOctets": SimTypeInt(signed=False, label="UInt32"), "dwInUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwInNUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwInDiscards": SimTypeInt(signed=False, label="UInt32"), "dwInErrors": SimTypeInt(signed=False, label="UInt32"), "dwInUnknownProtos": SimTypeInt(signed=False, label="UInt32"), "dwOutOctets": SimTypeInt(signed=False, label="UInt32"), "dwOutUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwOutNUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwOutDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutErrors": SimTypeInt(signed=False, label="UInt32"), "dwOutQLen": SimTypeInt(signed=False, label="UInt32"), "dwDescrLen": SimTypeInt(signed=False, label="UInt32"), "bDescr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 256)}, name="MIB_IFROW", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pIfRow"]), # 'CreateIpForwardEntry': SimTypeFunction([SimTypePointer(SimStruct({"dwForwardDest": SimTypeInt(signed=False, label="UInt32"), "dwForwardMask": SimTypeInt(signed=False, label="UInt32"), "dwForwardPolicy": SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHop": SimTypeInt(signed=False, label="UInt32"), "dwForwardIfIndex": SimTypeInt(signed=False, label="UInt32"), "Anonymous1": 
SimUnion({"dwForwardType": SimTypeInt(signed=False, label="UInt32"), "ForwardType": SimTypeInt(signed=False, label="MIB_IPFORWARD_TYPE")}, name="<anon>", label="None"), "Anonymous2": SimUnion({"dwForwardProto": SimTypeInt(signed=False, label="UInt32"), "ForwardProto": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL")}, name="<anon>", label="None"), "dwForwardAge": SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHopAS": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric1": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric2": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric3": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric4": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric5": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IPFORWARDROW", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pRoute"]), # 'SetIpForwardEntry': SimTypeFunction([SimTypePointer(SimStruct({"dwForwardDest": SimTypeInt(signed=False, label="UInt32"), "dwForwardMask": SimTypeInt(signed=False, label="UInt32"), "dwForwardPolicy": SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHop": SimTypeInt(signed=False, label="UInt32"), "dwForwardIfIndex": SimTypeInt(signed=False, label="UInt32"), "Anonymous1": SimUnion({"dwForwardType": SimTypeInt(signed=False, label="UInt32"), "ForwardType": SimTypeInt(signed=False, label="MIB_IPFORWARD_TYPE")}, name="<anon>", label="None"), "Anonymous2": SimUnion({"dwForwardProto": SimTypeInt(signed=False, label="UInt32"), "ForwardProto": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL")}, name="<anon>", label="None"), "dwForwardAge": SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHopAS": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric1": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric2": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric3": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric4": 
SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric5": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IPFORWARDROW", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pRoute"]), # 'DeleteIpForwardEntry': SimTypeFunction([SimTypePointer(SimStruct({"dwForwardDest": SimTypeInt(signed=False, label="UInt32"), "dwForwardMask": SimTypeInt(signed=False, label="UInt32"), "dwForwardPolicy": SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHop": SimTypeInt(signed=False, label="UInt32"), "dwForwardIfIndex": SimTypeInt(signed=False, label="UInt32"), "Anonymous1": SimUnion({"dwForwardType": SimTypeInt(signed=False, label="UInt32"), "ForwardType": SimTypeInt(signed=False, label="MIB_IPFORWARD_TYPE")}, name="<anon>", label="None"), "Anonymous2": SimUnion({"dwForwardProto": SimTypeInt(signed=False, label="UInt32"), "ForwardProto": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL")}, name="<anon>", label="None"), "dwForwardAge": SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHopAS": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric1": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric2": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric3": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric4": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric5": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IPFORWARDROW", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pRoute"]), # 'SetIpStatistics': SimTypeFunction([SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwForwarding": SimTypeInt(signed=False, label="UInt32"), "Forwarding": SimTypeInt(signed=False, label="MIB_IPSTATS_FORWARDING")}, name="<anon>", label="None"), "dwDefaultTTL": SimTypeInt(signed=False, label="UInt32"), "dwInReceives": SimTypeInt(signed=False, label="UInt32"), "dwInHdrErrors": SimTypeInt(signed=False, label="UInt32"), "dwInAddrErrors": SimTypeInt(signed=False, 
label="UInt32"), "dwForwDatagrams": SimTypeInt(signed=False, label="UInt32"), "dwInUnknownProtos": SimTypeInt(signed=False, label="UInt32"), "dwInDiscards": SimTypeInt(signed=False, label="UInt32"), "dwInDelivers": SimTypeInt(signed=False, label="UInt32"), "dwOutRequests": SimTypeInt(signed=False, label="UInt32"), "dwRoutingDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutNoRoutes": SimTypeInt(signed=False, label="UInt32"), "dwReasmTimeout": SimTypeInt(signed=False, label="UInt32"), "dwReasmReqds": SimTypeInt(signed=False, label="UInt32"), "dwReasmOks": SimTypeInt(signed=False, label="UInt32"), "dwReasmFails": SimTypeInt(signed=False, label="UInt32"), "dwFragOks": SimTypeInt(signed=False, label="UInt32"), "dwFragFails": SimTypeInt(signed=False, label="UInt32"), "dwFragCreates": SimTypeInt(signed=False, label="UInt32"), "dwNumIf": SimTypeInt(signed=False, label="UInt32"), "dwNumAddr": SimTypeInt(signed=False, label="UInt32"), "dwNumRoutes": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IPSTATS_LH", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pIpStats"]), # 'SetIpTTL': SimTypeFunction([SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["nTTL"]), # 'CreateIpNetEntry': SimTypeFunction([SimTypePointer(SimStruct({"dwIndex": SimTypeInt(signed=False, label="UInt32"), "dwPhysAddrLen": SimTypeInt(signed=False, label="UInt32"), "bPhysAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 8), "dwAddr": SimTypeInt(signed=False, label="UInt32"), "Anonymous": SimUnion({"dwType": SimTypeInt(signed=False, label="UInt32"), "Type": SimTypeInt(signed=False, label="MIB_IPNET_TYPE")}, name="<anon>", label="None")}, name="MIB_IPNETROW_LH", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pArpEntry"]), # 'SetIpNetEntry': SimTypeFunction([SimTypePointer(SimStruct({"dwIndex": 
SimTypeInt(signed=False, label="UInt32"), "dwPhysAddrLen": SimTypeInt(signed=False, label="UInt32"), "bPhysAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 8), "dwAddr": SimTypeInt(signed=False, label="UInt32"), "Anonymous": SimUnion({"dwType": SimTypeInt(signed=False, label="UInt32"), "Type": SimTypeInt(signed=False, label="MIB_IPNET_TYPE")}, name="<anon>", label="None")}, name="MIB_IPNETROW_LH", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pArpEntry"]), # 'DeleteIpNetEntry': SimTypeFunction([SimTypePointer(SimStruct({"dwIndex": SimTypeInt(signed=False, label="UInt32"), "dwPhysAddrLen": SimTypeInt(signed=False, label="UInt32"), "bPhysAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 8), "dwAddr": SimTypeInt(signed=False, label="UInt32"), "Anonymous": SimUnion({"dwType": SimTypeInt(signed=False, label="UInt32"), "Type": SimTypeInt(signed=False, label="MIB_IPNET_TYPE")}, name="<anon>", label="None")}, name="MIB_IPNETROW_LH", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pArpEntry"]), # 'FlushIpNetTable': SimTypeFunction([SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["dwIfIndex"]), # 'CreateProxyArpEntry': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["dwAddress", "dwMask", "dwIfIndex"]), # 'DeleteProxyArpEntry': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["dwAddress", "dwMask", "dwIfIndex"]), # 'SetTcpEntry': SimTypeFunction([SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwState": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="MIB_TCP_STATE")}, name="<anon>", label="None"), 
"dwLocalAddr": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "dwRemoteAddr": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCPROW_LH", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pTcpRow"]), # 'GetInterfaceInfo': SimTypeFunction([SimTypePointer(SimStruct({"NumAdapters": SimTypeInt(signed=True, label="Int32"), "Adapter": SimTypePointer(SimStruct({"Index": SimTypeInt(signed=False, label="UInt32"), "Name": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 128)}, name="IP_ADAPTER_INDEX_MAP", pack=False, align=None), offset=0)}, name="IP_INTERFACE_INFO", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pIfTable", "dwOutBufLen"]), # 'GetUniDirectionalAdapterInfo': SimTypeFunction([SimTypePointer(SimStruct({"NumAdapters": SimTypeInt(signed=False, label="UInt32"), "Address": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="IP_UNIDIRECTIONAL_ADAPTER_ADDRESS", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pIPIfInfo", "dwOutBufLen"]), # 'NhpAllocateAndGetInterfaceInfoFromStack': SimTypeFunction([SimTypePointer(SimTypePointer(SimStruct({"Index": SimTypeInt(signed=False, label="UInt32"), "MediaType": SimTypeInt(signed=False, label="UInt32"), "ConnectionType": SimTypeChar(label="Byte"), "AccessType": SimTypeChar(label="Byte"), "DeviceGuid": SimTypeBottom(label="Guid"), "InterfaceGuid": SimTypeBottom(label="Guid")}, name="ip_interface_name_info_w2ksp1", pack=False, align=None), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32"), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", 
offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["ppTable", "pdwCount", "bOrder", "hHeap", "dwFlags"]), # 'GetBestInterface': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["dwDestAddr", "pdwBestIfIndex"]), # 'GetBestInterfaceEx': SimTypeFunction([SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pDestAddr", "pdwBestIfIndex"]), # 'GetBestRoute': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"dwForwardDest": SimTypeInt(signed=False, label="UInt32"), "dwForwardMask": SimTypeInt(signed=False, label="UInt32"), "dwForwardPolicy": SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHop": SimTypeInt(signed=False, label="UInt32"), "dwForwardIfIndex": SimTypeInt(signed=False, label="UInt32"), "Anonymous1": SimUnion({"dwForwardType": SimTypeInt(signed=False, label="UInt32"), "ForwardType": SimTypeInt(signed=False, label="MIB_IPFORWARD_TYPE")}, name="<anon>", label="None"), "Anonymous2": SimUnion({"dwForwardProto": SimTypeInt(signed=False, label="UInt32"), "ForwardProto": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL")}, name="<anon>", label="None"), "dwForwardAge": SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHopAS": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric1": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric2": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric3": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric4": SimTypeInt(signed=False, label="UInt32"), 
"dwForwardMetric5": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IPFORWARDROW", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["dwDestAddr", "dwSourceAddr", "pBestRoute"]), # 'NotifyAddrChange': SimTypeFunction([SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["Handle", "overlapped"]), # 'NotifyRouteChange': SimTypeFunction([SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), 
arg_names=["Handle", "overlapped"]), # 'CancelIPChangeNotify': SimTypeFunction([SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["notifyOverlapped"]), # 'GetAdapterIndex': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["AdapterName", "IfIndex"]), # 'AddIPAddress': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["Address", "IpMask", "IfIndex", "NTEContext", "NTEInstance"]), # 'DeleteIPAddress': SimTypeFunction([SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["NTEContext"]), # 'GetNetworkParams': SimTypeFunction([SimTypePointer(SimStruct({"HostName": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 132), "DomainName": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 132), "CurrentDnsServer": SimTypePointer(SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": 
SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None), offset=0), "DnsServerList": SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None), "NodeType": SimTypeInt(signed=False, label="UInt32"), "ScopeId": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 260), "EnableRouting": SimTypeInt(signed=False, label="UInt32"), "EnableProxy": SimTypeInt(signed=False, label="UInt32"), "EnableDns": SimTypeInt(signed=False, label="UInt32")}, name="FIXED_INFO_W2KSP1", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="WIN32_ERROR"), arg_names=["pFixedInfo", "pOutBufLen"]), # 'GetAdaptersInfo': SimTypeFunction([SimTypePointer(SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_INFO"), offset=0), "ComboIndex": SimTypeInt(signed=False, label="UInt32"), "AdapterName": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 260), "Description": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 132), "AddressLength": SimTypeInt(signed=False, label="UInt32"), "Address": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 8), "Index": SimTypeInt(signed=False, label="UInt32"), "Type": SimTypeInt(signed=False, label="UInt32"), "DhcpEnabled": SimTypeInt(signed=False, label="UInt32"), "CurrentIpAddress": 
SimTypePointer(SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None), offset=0), "IpAddressList": SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None), "GatewayList": SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None), "DhcpServer": SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None), "HaveWins": SimTypeInt(signed=True, label="Int32"), 
"PrimaryWinsServer": SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None), "SecondaryWinsServer": SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None), "LeaseObtained": SimTypeLongLong(signed=True, label="Int64"), "LeaseExpires": SimTypeLongLong(signed=True, label="Int64")}, name="IP_ADAPTER_INFO", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["AdapterInfo", "SizePointer"]), # 'GetAdapterOrderMap': SimTypeFunction([], SimTypePointer(SimStruct({"NumAdapters": SimTypeInt(signed=False, label="UInt32"), "AdapterOrder": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="IP_ADAPTER_ORDER_MAP", pack=False, align=None), offset=0)), # 'GetAdaptersAddresses': SimTypeFunction([SimTypeInt(signed=False, label="ADDRESS_FAMILY"), SimTypeInt(signed=False, label="GET_ADAPTERS_ADDRESSES_FLAGS"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"Anonymous1": SimUnion({"Alignment": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"Length": SimTypeInt(signed=False, label="UInt32"), 
"IfIndex": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_ADDRESSES_LH"), offset=0), "AdapterName": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "FirstUnicastAddress": SimTypePointer(SimStruct({"Anonymous": SimUnion({"Alignment": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"Length": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_UNICAST_ADDRESS_LH"), offset=0), "Address": SimStruct({"lpSockaddr": SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), "iSockaddrLength": SimTypeInt(signed=True, label="Int32")}, name="SOCKET_ADDRESS", pack=False, align=None), "PrefixOrigin": SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "LeaseLifetime": SimTypeInt(signed=False, label="UInt32"), "OnLinkPrefixLength": SimTypeChar(label="Byte")}, name="IP_ADAPTER_UNICAST_ADDRESS_LH", pack=False, align=None), offset=0), "FirstAnycastAddress": SimTypePointer(SimStruct({"Anonymous": SimUnion({"Alignment": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"Length": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_ANYCAST_ADDRESS_XP"), 
offset=0), "Address": SimStruct({"lpSockaddr": SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), "iSockaddrLength": SimTypeInt(signed=True, label="Int32")}, name="SOCKET_ADDRESS", pack=False, align=None)}, name="IP_ADAPTER_ANYCAST_ADDRESS_XP", pack=False, align=None), offset=0), "FirstMulticastAddress": SimTypePointer(SimStruct({"Anonymous": SimUnion({"Alignment": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"Length": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_MULTICAST_ADDRESS_XP"), offset=0), "Address": SimStruct({"lpSockaddr": SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), "iSockaddrLength": SimTypeInt(signed=True, label="Int32")}, name="SOCKET_ADDRESS", pack=False, align=None)}, name="IP_ADAPTER_MULTICAST_ADDRESS_XP", pack=False, align=None), offset=0), "FirstDnsServerAddress": SimTypePointer(SimStruct({"Anonymous": SimUnion({"Alignment": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"Length": SimTypeInt(signed=False, label="UInt32"), "Reserved": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_DNS_SERVER_ADDRESS_XP"), offset=0), "Address": SimStruct({"lpSockaddr": SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), "iSockaddrLength": 
SimTypeInt(signed=True, label="Int32")}, name="SOCKET_ADDRESS", pack=False, align=None)}, name="IP_ADAPTER_DNS_SERVER_ADDRESS_XP", pack=False, align=None), offset=0), "DnsSuffix": SimTypePointer(SimTypeChar(label="Char"), offset=0), "Description": SimTypePointer(SimTypeChar(label="Char"), offset=0), "FriendlyName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 8), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "Anonymous2": SimUnion({"Flags": SimTypeInt(signed=False, label="UInt32"), "Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Mtu": SimTypeInt(signed=False, label="UInt32"), "IfType": SimTypeInt(signed=False, label="UInt32"), "OperStatus": SimTypeInt(signed=False, label="IF_OPER_STATUS"), "Ipv6IfIndex": SimTypeInt(signed=False, label="UInt32"), "ZoneIndices": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 16), "FirstPrefix": SimTypePointer(SimStruct({"Anonymous": SimUnion({"Alignment": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"Length": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_PREFIX_XP"), offset=0), "Address": SimStruct({"lpSockaddr": SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), "iSockaddrLength": SimTypeInt(signed=True, label="Int32")}, name="SOCKET_ADDRESS", pack=False, align=None), "PrefixLength": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADAPTER_PREFIX_XP", pack=False, align=None), offset=0), "TransmitLinkSpeed": SimTypeLongLong(signed=False, 
label="UInt64"), "ReceiveLinkSpeed": SimTypeLongLong(signed=False, label="UInt64"), "FirstWinsServerAddress": SimTypePointer(SimStruct({"Anonymous": SimUnion({"Alignment": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"Length": SimTypeInt(signed=False, label="UInt32"), "Reserved": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_WINS_SERVER_ADDRESS_LH"), offset=0), "Address": SimStruct({"lpSockaddr": SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), "iSockaddrLength": SimTypeInt(signed=True, label="Int32")}, name="SOCKET_ADDRESS", pack=False, align=None)}, name="IP_ADAPTER_WINS_SERVER_ADDRESS_LH", pack=False, align=None), offset=0), "FirstGatewayAddress": SimTypePointer(SimStruct({"Anonymous": SimUnion({"Alignment": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"Length": SimTypeInt(signed=False, label="UInt32"), "Reserved": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_GATEWAY_ADDRESS_LH"), offset=0), "Address": SimStruct({"lpSockaddr": SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), "iSockaddrLength": SimTypeInt(signed=True, label="Int32")}, name="SOCKET_ADDRESS", pack=False, align=None)}, name="IP_ADAPTER_GATEWAY_ADDRESS_LH", pack=False, align=None), offset=0), "Ipv4Metric": SimTypeInt(signed=False, label="UInt32"), "Ipv6Metric": SimTypeInt(signed=False, label="UInt32"), "Luid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), 
"Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Dhcpv4Server": SimStruct({"lpSockaddr": SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), "iSockaddrLength": SimTypeInt(signed=True, label="Int32")}, name="SOCKET_ADDRESS", pack=False, align=None), "CompartmentId": SimTypeInt(signed=False, label="UInt32"), "NetworkGuid": SimTypeBottom(label="Guid"), "ConnectionType": SimTypeInt(signed=False, label="NET_IF_CONNECTION_TYPE"), "TunnelType": SimTypeInt(signed=False, label="TUNNEL_TYPE"), "Dhcpv6Server": SimStruct({"lpSockaddr": SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), "iSockaddrLength": SimTypeInt(signed=True, label="Int32")}, name="SOCKET_ADDRESS", pack=False, align=None), "Dhcpv6ClientDuid": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 130), "Dhcpv6ClientDuidLength": SimTypeInt(signed=False, label="UInt32"), "Dhcpv6Iaid": SimTypeInt(signed=False, label="UInt32"), "FirstDnsSuffix": SimTypePointer(SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_DNS_SUFFIX"), offset=0), "String": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 256)}, name="IP_ADAPTER_DNS_SUFFIX", pack=False, align=None), offset=0)}, name="IP_ADAPTER_ADDRESSES_LH", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["Family", "Flags", "Reserved", "AdapterAddresses", "SizePointer"]), # 'GetPerAdapterInfo': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"AutoconfigEnabled": SimTypeInt(signed=False, label="UInt32"), 
"AutoconfigActive": SimTypeInt(signed=False, label="UInt32"), "CurrentDnsServer": SimTypePointer(SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None), offset=0), "DnsServerList": SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None)}, name="IP_PER_ADAPTER_INFO_W2KSP1", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["IfIndex", "pPerAdapterInfo", "pOutBufLen"]), # 'GetInterfaceCurrentTimestampCapabilities': SimTypeFunction([SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0), SimTypePointer(SimStruct({"Version": SimTypeInt(signed=False, label="UInt32"), "HardwareClockFrequencyHz": SimTypeLongLong(signed=False, label="UInt64"), "CrossTimestamp": SimTypeChar(label="Byte"), "Reserved1": SimTypeLongLong(signed=False, label="UInt64"), "Reserved2": SimTypeLongLong(signed=False, label="UInt64"), "TimestampFlags": SimStruct({"PtpV2OverUdpIPv4EventMsgReceiveHw": 
SimTypeChar(label="Byte"), "PtpV2OverUdpIPv4AllMsgReceiveHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv4EventMsgTransmitHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv4AllMsgTransmitHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv6EventMsgReceiveHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv6AllMsgReceiveHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv6EventMsgTransmitHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv6AllMsgTransmitHw": SimTypeChar(label="Byte"), "AllReceiveHw": SimTypeChar(label="Byte"), "AllTransmitHw": SimTypeChar(label="Byte"), "TaggedTransmitHw": SimTypeChar(label="Byte"), "AllReceiveSw": SimTypeChar(label="Byte"), "AllTransmitSw": SimTypeChar(label="Byte"), "TaggedTransmitSw": SimTypeChar(label="Byte")}, name="INTERFACE_TIMESTAMP_CAPABILITY_FLAGS", pack=False, align=None)}, name="INTERFACE_TIMESTAMP_CAPABILITIES", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["InterfaceLuid", "TimestampCapabilites"]), # 'GetInterfaceHardwareTimestampCapabilities': SimTypeFunction([SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0), SimTypePointer(SimStruct({"Version": SimTypeInt(signed=False, label="UInt32"), "HardwareClockFrequencyHz": SimTypeLongLong(signed=False, label="UInt64"), "CrossTimestamp": SimTypeChar(label="Byte"), "Reserved1": SimTypeLongLong(signed=False, label="UInt64"), "Reserved2": SimTypeLongLong(signed=False, label="UInt64"), "TimestampFlags": SimStruct({"PtpV2OverUdpIPv4EventMsgReceiveHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv4AllMsgReceiveHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv4EventMsgTransmitHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv4AllMsgTransmitHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv6EventMsgReceiveHw": SimTypeChar(label="Byte"), 
"PtpV2OverUdpIPv6AllMsgReceiveHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv6EventMsgTransmitHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv6AllMsgTransmitHw": SimTypeChar(label="Byte"), "AllReceiveHw": SimTypeChar(label="Byte"), "AllTransmitHw": SimTypeChar(label="Byte"), "TaggedTransmitHw": SimTypeChar(label="Byte"), "AllReceiveSw": SimTypeChar(label="Byte"), "AllTransmitSw": SimTypeChar(label="Byte"), "TaggedTransmitSw": SimTypeChar(label="Byte")}, name="INTERFACE_TIMESTAMP_CAPABILITY_FLAGS", pack=False, align=None)}, name="INTERFACE_TIMESTAMP_CAPABILITIES", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["InterfaceLuid", "TimestampCapabilites"]), # 'CaptureInterfaceHardwareCrossTimestamp': SimTypeFunction([SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0), SimTypePointer(SimStruct({"Version": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeInt(signed=False, label="UInt32"), "SystemTimestamp1": SimTypeLongLong(signed=False, label="UInt64"), "HardwareClockTimestamp": SimTypeLongLong(signed=False, label="UInt64"), "SystemTimestamp2": SimTypeLongLong(signed=False, label="UInt64")}, name="INTERFACE_HARDWARE_CROSSTIMESTAMP", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["InterfaceLuid", "CrossTimestamp"]), # 'NotifyIfTimestampConfigChange': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeBottom(label="Void"), arg_names=["CallerContext"]), offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["CallerContext", "Callback", 
"NotificationHandle"]), # 'CancelIfTimestampConfigChange': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeBottom(label="Void"), arg_names=["NotificationHandle"]), # 'IpReleaseAddress': SimTypeFunction([SimTypePointer(SimStruct({"Index": SimTypeInt(signed=False, label="UInt32"), "Name": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 128)}, name="IP_ADAPTER_INDEX_MAP", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["AdapterInfo"]), # 'IpRenewAddress': SimTypeFunction([SimTypePointer(SimStruct({"Index": SimTypeInt(signed=False, label="UInt32"), "Name": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 128)}, name="IP_ADAPTER_INDEX_MAP", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["AdapterInfo"]), # 'SendARP': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["DestIP", "SrcIP", "pMacAddr", "PhyAddrLen"]), # 'GetRTTAndHopCount': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["DestIpAddress", "HopCount", "MaxHops", "RTT"]), # 'GetFriendlyIfIndex': SimTypeFunction([SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["IfIndex"]), # 'EnableRouter': SimTypeFunction([SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": 
SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pHandle", "pOverlapped"]), # 'UnenableRouter': SimTypeFunction([SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pOverlapped", "lpdwEnableCount"]), # 'DisableMediaSense': SimTypeFunction([SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, 
label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pHandle", "pOverLapped"]), # 'RestoreMediaSense': SimTypeFunction([SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pOverlapped", "lpdwEnableCount"]), # 'GetIpErrorString': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["ErrorCode", "Buffer", "Size"]), # 'ResolveNeighbor': SimTypeFunction([SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["NetworkAddress", 
"PhysicalAddress", "PhysicalAddressLength"]), # 'CreatePersistentTcpPortReservation': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypeLongLong(signed=False, label="UInt64"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["StartPort", "NumberOfPorts", "Token"]), # 'CreatePersistentUdpPortReservation': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypeLongLong(signed=False, label="UInt64"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["StartPort", "NumberOfPorts", "Token"]), # 'DeletePersistentTcpPortReservation': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypeShort(signed=False, label="UInt16")], SimTypeInt(signed=False, label="UInt32"), arg_names=["StartPort", "NumberOfPorts"]), # 'DeletePersistentUdpPortReservation': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypeShort(signed=False, label="UInt16")], SimTypeInt(signed=False, label="UInt32"), arg_names=["StartPort", "NumberOfPorts"]), # 'LookupPersistentTcpPortReservation': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypeLongLong(signed=False, label="UInt64"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["StartPort", "NumberOfPorts", "Token"]), # 'LookupPersistentUdpPortReservation': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypeLongLong(signed=False, label="UInt64"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["StartPort", "NumberOfPorts", "Token"]), # 'PfCreateInterface': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="PFFORWARD_ACTION"), SimTypeInt(signed=False, label="PFFORWARD_ACTION"), SimTypeInt(signed=True, label="Int32"), 
SimTypeInt(signed=True, label="Int32"), SimTypePointer(SimTypePointer(SimTypeBottom(label="Void"), offset=0), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["dwName", "inAction", "outAction", "bUseLog", "bMustBeUnique", "ppInterface"]), # 'PfDeleteInterface': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInterface"]), # 'PfAddFiltersToInterface': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"dwFilterFlags": SimTypeInt(signed=False, label="UInt32"), "dwRule": SimTypeInt(signed=False, label="UInt32"), "pfatType": SimTypeInt(signed=False, label="PFADDRESSTYPE"), "SrcAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "SrcMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "dwProtocol": SimTypeInt(signed=False, label="UInt32"), "fLateBound": SimTypeInt(signed=False, label="UInt32"), "wSrcPort": SimTypeShort(signed=False, label="UInt16"), "wDstPort": SimTypeShort(signed=False, label="UInt16"), "wSrcPortHighRange": SimTypeShort(signed=False, label="UInt16"), "wDstPortHighRange": SimTypeShort(signed=False, label="UInt16")}, name="PF_FILTER_DESCRIPTOR", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"dwFilterFlags": SimTypeInt(signed=False, label="UInt32"), "dwRule": SimTypeInt(signed=False, label="UInt32"), "pfatType": SimTypeInt(signed=False, label="PFADDRESSTYPE"), "SrcAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "SrcMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "dwProtocol": SimTypeInt(signed=False, label="UInt32"), 
"fLateBound": SimTypeInt(signed=False, label="UInt32"), "wSrcPort": SimTypeShort(signed=False, label="UInt16"), "wDstPort": SimTypeShort(signed=False, label="UInt16"), "wSrcPortHighRange": SimTypeShort(signed=False, label="UInt16"), "wDstPortHighRange": SimTypeShort(signed=False, label="UInt16")}, name="PF_FILTER_DESCRIPTOR", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimTypeBottom(label="Void"), offset=0), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["ih", "cInFilters", "pfiltIn", "cOutFilters", "pfiltOut", "pfHandle"]), # 'PfRemoveFiltersFromInterface': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"dwFilterFlags": SimTypeInt(signed=False, label="UInt32"), "dwRule": SimTypeInt(signed=False, label="UInt32"), "pfatType": SimTypeInt(signed=False, label="PFADDRESSTYPE"), "SrcAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "SrcMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "dwProtocol": SimTypeInt(signed=False, label="UInt32"), "fLateBound": SimTypeInt(signed=False, label="UInt32"), "wSrcPort": SimTypeShort(signed=False, label="UInt16"), "wDstPort": SimTypeShort(signed=False, label="UInt16"), "wSrcPortHighRange": SimTypeShort(signed=False, label="UInt16"), "wDstPortHighRange": SimTypeShort(signed=False, label="UInt16")}, name="PF_FILTER_DESCRIPTOR", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"dwFilterFlags": SimTypeInt(signed=False, label="UInt32"), "dwRule": SimTypeInt(signed=False, label="UInt32"), "pfatType": SimTypeInt(signed=False, label="PFADDRESSTYPE"), "SrcAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "SrcMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstAddr": 
SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "dwProtocol": SimTypeInt(signed=False, label="UInt32"), "fLateBound": SimTypeInt(signed=False, label="UInt32"), "wSrcPort": SimTypeShort(signed=False, label="UInt16"), "wDstPort": SimTypeShort(signed=False, label="UInt16"), "wSrcPortHighRange": SimTypeShort(signed=False, label="UInt16"), "wDstPortHighRange": SimTypeShort(signed=False, label="UInt16")}, name="PF_FILTER_DESCRIPTOR", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["ih", "cInFilters", "pfiltIn", "cOutFilters", "pfiltOut"]), # 'PfRemoveFilterHandles': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypePointer(SimTypeBottom(label="Void"), offset=0), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInterface", "cFilters", "pvHandles"]), # 'PfUnBindInterface': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInterface"]), # 'PfBindInterfaceToIndex': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="PFADDRESSTYPE"), SimTypePointer(SimTypeChar(label="Byte"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInterface", "dwIndex", "pfatLinkType", "LinkIPAddress"]), # 'PfBindInterfaceToIPAddress': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="PFADDRESSTYPE"), SimTypePointer(SimTypeChar(label="Byte"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInterface", "pfatType", "IPAddress"]), # 'PfRebindFilters': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"SrcAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstAddr": 
SimTypePointer(SimTypeChar(label="Byte"), offset=0), "Mask": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="PF_LATEBIND_INFO", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInterface", "pLateBindInfo"]), # 'PfAddGlobalFilterToInterface': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="GLOBAL_FILTER")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInterface", "gfFilter"]), # 'PfRemoveGlobalFilterFromInterface': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="GLOBAL_FILTER")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInterface", "gfFilter"]), # 'PfMakeLog': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hEvent"]), # 'PfSetLogBuffer': SimTypeFunction([SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pbBuffer", "dwSize", "dwThreshold", "dwEntries", "pdwLoggedEntries", "pdwLostEntries", "pdwSizeUsed"]), # 'PfDeleteLog': SimTypeFunction([], SimTypeInt(signed=False, label="UInt32")), # 'PfGetInterfaceStatistics': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"pvDriverContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "dwFlags": SimTypeInt(signed=False, label="UInt32"), "dwInDrops": SimTypeInt(signed=False, label="UInt32"), "dwOutDrops": SimTypeInt(signed=False, label="UInt32"), "eaInAction": SimTypeInt(signed=False, 
label="PFFORWARD_ACTION"), "eaOutAction": SimTypeInt(signed=False, label="PFFORWARD_ACTION"), "dwNumInFilters": SimTypeInt(signed=False, label="UInt32"), "dwNumOutFilters": SimTypeInt(signed=False, label="UInt32"), "dwFrag": SimTypeInt(signed=False, label="UInt32"), "dwSpoof": SimTypeInt(signed=False, label="UInt32"), "dwReserved1": SimTypeInt(signed=False, label="UInt32"), "dwReserved2": SimTypeInt(signed=False, label="UInt32"), "liSYN": SimTypeBottom(label="LARGE_INTEGER"), "liTotalLogged": SimTypeBottom(label="LARGE_INTEGER"), "dwLostLogEntries": SimTypeInt(signed=False, label="UInt32"), "FilterInfo": SimTypePointer(SimStruct({"dwNumPacketsFiltered": SimTypeInt(signed=False, label="UInt32"), "info": SimStruct({"dwFilterFlags": SimTypeInt(signed=False, label="UInt32"), "dwRule": SimTypeInt(signed=False, label="UInt32"), "pfatType": SimTypeInt(signed=False, label="PFADDRESSTYPE"), "SrcAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "SrcMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "dwProtocol": SimTypeInt(signed=False, label="UInt32"), "fLateBound": SimTypeInt(signed=False, label="UInt32"), "wSrcPort": SimTypeShort(signed=False, label="UInt16"), "wDstPort": SimTypeShort(signed=False, label="UInt16"), "wSrcPortHighRange": SimTypeShort(signed=False, label="UInt16"), "wDstPortHighRange": SimTypeShort(signed=False, label="UInt16")}, name="PF_FILTER_DESCRIPTOR", pack=False, align=None)}, name="PF_FILTER_STATS", pack=False, align=None), offset=0)}, name="PF_INTERFACE_STATS", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInterface", "ppfStats", "pdwBufferSize", "fResetCounters"]), # 'PfTestPacket': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), 
SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="PFFORWARD_ACTION"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInInterface", "pOutInterface", "cBytes", "pbPacket", "ppAction"]), } lib.set_prototypes(prototypes)
# pylint:disable=line-too-long import logging from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64 from .. import SIM_PROCEDURES as P from . import SimLibrary _l = logging.getLogger(name=__name__) lib = SimLibrary() lib.set_default_cc('X86', SimCCStdcall) lib.set_default_cc('AMD64', SimCCMicrosoftAMD64) lib.set_library_names("iphlpapi.dll") prototypes = \ { # 'GetIfEntry2': SimTypeFunction([SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceGuid": SimTypeBottom(label="Guid"), "Alias": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 257), "Description": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 257), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PermanentPhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "Mtu": SimTypeInt(signed=False, label="UInt32"), "Type": SimTypeInt(signed=False, label="UInt32"), "TunnelType": SimTypeInt(signed=False, label="TUNNEL_TYPE"), "MediaType": SimTypeBottom(label="NDIS_MEDIUM"), "PhysicalMediumType": SimTypeBottom(label="NDIS_PHYSICAL_MEDIUM"), "AccessType": SimTypeInt(signed=False, label="NET_IF_ACCESS_TYPE"), "DirectionType": SimTypeInt(signed=False, label="NET_IF_DIRECTION_TYPE"), "InterfaceAndOperStatusFlags": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_InterfaceAndOperStatusFlags_e__Struct", pack=False, align=None), "OperStatus": SimTypeInt(signed=False, 
label="IF_OPER_STATUS"), "AdminStatus": SimTypeInt(signed=False, label="NET_IF_ADMIN_STATUS"), "MediaConnectState": SimTypeInt(signed=False, label="NET_IF_MEDIA_CONNECT_STATE"), "NetworkGuid": SimTypeBottom(label="Guid"), "ConnectionType": SimTypeInt(signed=False, label="NET_IF_CONNECTION_TYPE"), "TransmitLinkSpeed": SimTypeLongLong(signed=False, label="UInt64"), "ReceiveLinkSpeed": SimTypeLongLong(signed=False, label="UInt64"), "InOctets": SimTypeLongLong(signed=False, label="UInt64"), "InUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "InNUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "InDiscards": SimTypeLongLong(signed=False, label="UInt64"), "InErrors": SimTypeLongLong(signed=False, label="UInt64"), "InUnknownProtos": SimTypeLongLong(signed=False, label="UInt64"), "InUcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "InMulticastOctets": SimTypeLongLong(signed=False, label="UInt64"), "InBroadcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "OutNUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "OutDiscards": SimTypeLongLong(signed=False, label="UInt64"), "OutErrors": SimTypeLongLong(signed=False, label="UInt64"), "OutUcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutMulticastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutBroadcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutQLen": SimTypeLongLong(signed=False, label="UInt64")}, name="MIB_IF_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetIfEntry2Ex': SimTypeFunction([SimTypeInt(signed=False, label="MIB_IF_ENTRY_LEVEL"), SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", 
pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceGuid": SimTypeBottom(label="Guid"), "Alias": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 257), "Description": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 257), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PermanentPhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "Mtu": SimTypeInt(signed=False, label="UInt32"), "Type": SimTypeInt(signed=False, label="UInt32"), "TunnelType": SimTypeInt(signed=False, label="TUNNEL_TYPE"), "MediaType": SimTypeBottom(label="NDIS_MEDIUM"), "PhysicalMediumType": SimTypeBottom(label="NDIS_PHYSICAL_MEDIUM"), "AccessType": SimTypeInt(signed=False, label="NET_IF_ACCESS_TYPE"), "DirectionType": SimTypeInt(signed=False, label="NET_IF_DIRECTION_TYPE"), "InterfaceAndOperStatusFlags": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_InterfaceAndOperStatusFlags_e__Struct", pack=False, align=None), "OperStatus": SimTypeInt(signed=False, label="IF_OPER_STATUS"), "AdminStatus": SimTypeInt(signed=False, label="NET_IF_ADMIN_STATUS"), "MediaConnectState": SimTypeInt(signed=False, label="NET_IF_MEDIA_CONNECT_STATE"), "NetworkGuid": SimTypeBottom(label="Guid"), "ConnectionType": SimTypeInt(signed=False, label="NET_IF_CONNECTION_TYPE"), "TransmitLinkSpeed": SimTypeLongLong(signed=False, label="UInt64"), "ReceiveLinkSpeed": SimTypeLongLong(signed=False, label="UInt64"), "InOctets": SimTypeLongLong(signed=False, label="UInt64"), "InUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "InNUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "InDiscards": SimTypeLongLong(signed=False, label="UInt64"), "InErrors": SimTypeLongLong(signed=False, label="UInt64"), "InUnknownProtos": SimTypeLongLong(signed=False, label="UInt64"), "InUcastOctets": SimTypeLongLong(signed=False, label="UInt64"), 
"InMulticastOctets": SimTypeLongLong(signed=False, label="UInt64"), "InBroadcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "OutNUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "OutDiscards": SimTypeLongLong(signed=False, label="UInt64"), "OutErrors": SimTypeLongLong(signed=False, label="UInt64"), "OutUcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutMulticastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutBroadcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutQLen": SimTypeLongLong(signed=False, label="UInt64")}, name="MIB_IF_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Level", "Row"]), # 'GetIfTable2': SimTypeFunction([SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceGuid": SimTypeBottom(label="Guid"), "Alias": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 257), "Description": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 257), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PermanentPhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "Mtu": SimTypeInt(signed=False, label="UInt32"), "Type": SimTypeInt(signed=False, label="UInt32"), "TunnelType": SimTypeInt(signed=False, label="TUNNEL_TYPE"), "MediaType": SimTypeBottom(label="NDIS_MEDIUM"), "PhysicalMediumType": SimTypeBottom(label="NDIS_PHYSICAL_MEDIUM"), "AccessType": 
SimTypeInt(signed=False, label="NET_IF_ACCESS_TYPE"), "DirectionType": SimTypeInt(signed=False, label="NET_IF_DIRECTION_TYPE"), "InterfaceAndOperStatusFlags": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_InterfaceAndOperStatusFlags_e__Struct", pack=False, align=None), "OperStatus": SimTypeInt(signed=False, label="IF_OPER_STATUS"), "AdminStatus": SimTypeInt(signed=False, label="NET_IF_ADMIN_STATUS"), "MediaConnectState": SimTypeInt(signed=False, label="NET_IF_MEDIA_CONNECT_STATE"), "NetworkGuid": SimTypeBottom(label="Guid"), "ConnectionType": SimTypeInt(signed=False, label="NET_IF_CONNECTION_TYPE"), "TransmitLinkSpeed": SimTypeLongLong(signed=False, label="UInt64"), "ReceiveLinkSpeed": SimTypeLongLong(signed=False, label="UInt64"), "InOctets": SimTypeLongLong(signed=False, label="UInt64"), "InUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "InNUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "InDiscards": SimTypeLongLong(signed=False, label="UInt64"), "InErrors": SimTypeLongLong(signed=False, label="UInt64"), "InUnknownProtos": SimTypeLongLong(signed=False, label="UInt64"), "InUcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "InMulticastOctets": SimTypeLongLong(signed=False, label="UInt64"), "InBroadcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "OutNUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "OutDiscards": SimTypeLongLong(signed=False, label="UInt64"), "OutErrors": SimTypeLongLong(signed=False, label="UInt64"), "OutUcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutMulticastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutBroadcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutQLen": SimTypeLongLong(signed=False, label="UInt64")}, name="MIB_IF_ROW2", pack=False, align=None), offset=0)}, name="MIB_IF_TABLE2", pack=False, align=None), 
offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Table"]), # 'GetIfTable2Ex': SimTypeFunction([SimTypeInt(signed=False, label="MIB_IF_TABLE_LEVEL"), SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceGuid": SimTypeBottom(label="Guid"), "Alias": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 257), "Description": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 257), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PermanentPhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "Mtu": SimTypeInt(signed=False, label="UInt32"), "Type": SimTypeInt(signed=False, label="UInt32"), "TunnelType": SimTypeInt(signed=False, label="TUNNEL_TYPE"), "MediaType": SimTypeBottom(label="NDIS_MEDIUM"), "PhysicalMediumType": SimTypeBottom(label="NDIS_PHYSICAL_MEDIUM"), "AccessType": SimTypeInt(signed=False, label="NET_IF_ACCESS_TYPE"), "DirectionType": SimTypeInt(signed=False, label="NET_IF_DIRECTION_TYPE"), "InterfaceAndOperStatusFlags": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_InterfaceAndOperStatusFlags_e__Struct", pack=False, align=None), "OperStatus": SimTypeInt(signed=False, label="IF_OPER_STATUS"), "AdminStatus": SimTypeInt(signed=False, label="NET_IF_ADMIN_STATUS"), "MediaConnectState": SimTypeInt(signed=False, label="NET_IF_MEDIA_CONNECT_STATE"), "NetworkGuid": SimTypeBottom(label="Guid"), "ConnectionType": SimTypeInt(signed=False, label="NET_IF_CONNECTION_TYPE"), "TransmitLinkSpeed": SimTypeLongLong(signed=False, label="UInt64"), 
"ReceiveLinkSpeed": SimTypeLongLong(signed=False, label="UInt64"), "InOctets": SimTypeLongLong(signed=False, label="UInt64"), "InUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "InNUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "InDiscards": SimTypeLongLong(signed=False, label="UInt64"), "InErrors": SimTypeLongLong(signed=False, label="UInt64"), "InUnknownProtos": SimTypeLongLong(signed=False, label="UInt64"), "InUcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "InMulticastOctets": SimTypeLongLong(signed=False, label="UInt64"), "InBroadcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "OutNUcastPkts": SimTypeLongLong(signed=False, label="UInt64"), "OutDiscards": SimTypeLongLong(signed=False, label="UInt64"), "OutErrors": SimTypeLongLong(signed=False, label="UInt64"), "OutUcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutMulticastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutBroadcastOctets": SimTypeLongLong(signed=False, label="UInt64"), "OutQLen": SimTypeLongLong(signed=False, label="UInt64")}, name="MIB_IF_ROW2", pack=False, align=None), offset=0)}, name="MIB_IF_TABLE2", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Level", "Table"]), # 'GetIfStackTable': SimTypeFunction([SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"HigherLayerInterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "LowerLayerInterfaceIndex": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IFSTACK_ROW", pack=False, align=None), offset=0)}, name="MIB_IFSTACK_TABLE", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Table"]), # 'GetInvertedIfStackTable': 
SimTypeFunction([SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"LowerLayerInterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "HigherLayerInterfaceIndex": SimTypeInt(signed=False, label="UInt32")}, name="MIB_INVERTEDIFSTACK_ROW", pack=False, align=None), offset=0)}, name="MIB_INVERTEDIFSTACK_TABLE", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Table"]), # 'GetIpInterfaceEntry': SimTypeFunction([SimTypePointer(SimStruct({"Family": SimTypeShort(signed=False, label="UInt16"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "MaxReassemblySize": SimTypeInt(signed=False, label="UInt32"), "InterfaceIdentifier": SimTypeLongLong(signed=False, label="UInt64"), "MinRouterAdvertisementInterval": SimTypeInt(signed=False, label="UInt32"), "MaxRouterAdvertisementInterval": SimTypeInt(signed=False, label="UInt32"), "AdvertisingEnabled": SimTypeChar(label="Byte"), "ForwardingEnabled": SimTypeChar(label="Byte"), "WeakHostSend": SimTypeChar(label="Byte"), "WeakHostReceive": SimTypeChar(label="Byte"), "UseAutomaticMetric": SimTypeChar(label="Byte"), "UseNeighborUnreachabilityDetection": SimTypeChar(label="Byte"), "ManagedAddressConfigurationSupported": SimTypeChar(label="Byte"), "OtherStatefulConfigurationSupported": SimTypeChar(label="Byte"), "AdvertiseDefaultRoute": SimTypeChar(label="Byte"), "RouterDiscoveryBehavior": SimTypeInt(signed=False, label="NL_ROUTER_DISCOVERY_BEHAVIOR"), "DadTransmits": SimTypeInt(signed=False, label="UInt32"), "BaseReachableTime": SimTypeInt(signed=False, label="UInt32"), "RetransmitTime": SimTypeInt(signed=False, label="UInt32"), "PathMtuDiscoveryTimeout": 
SimTypeInt(signed=False, label="UInt32"), "LinkLocalAddressBehavior": SimTypeInt(signed=False, label="NL_LINK_LOCAL_ADDRESS_BEHAVIOR"), "LinkLocalAddressTimeout": SimTypeInt(signed=False, label="UInt32"), "ZoneIndices": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 16), "SitePrefixLength": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "NlMtu": SimTypeInt(signed=False, label="UInt32"), "Connected": SimTypeChar(label="Byte"), "SupportsWakeUpPatterns": SimTypeChar(label="Byte"), "SupportsNeighborDiscovery": SimTypeChar(label="Byte"), "SupportsRouterDiscovery": SimTypeChar(label="Byte"), "ReachableTime": SimTypeInt(signed=False, label="UInt32"), "TransmitOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "ReceiveOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "DisableDefaultRoutes": SimTypeChar(label="Byte")}, name="MIB_IPINTERFACE_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetIpInterfaceTable': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"Family": SimTypeShort(signed=False, label="UInt16"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "MaxReassemblySize": SimTypeInt(signed=False, label="UInt32"), "InterfaceIdentifier": SimTypeLongLong(signed=False, label="UInt64"), "MinRouterAdvertisementInterval": SimTypeInt(signed=False, label="UInt32"), "MaxRouterAdvertisementInterval": SimTypeInt(signed=False, 
label="UInt32"), "AdvertisingEnabled": SimTypeChar(label="Byte"), "ForwardingEnabled": SimTypeChar(label="Byte"), "WeakHostSend": SimTypeChar(label="Byte"), "WeakHostReceive": SimTypeChar(label="Byte"), "UseAutomaticMetric": SimTypeChar(label="Byte"), "UseNeighborUnreachabilityDetection": SimTypeChar(label="Byte"), "ManagedAddressConfigurationSupported": SimTypeChar(label="Byte"), "OtherStatefulConfigurationSupported": SimTypeChar(label="Byte"), "AdvertiseDefaultRoute": SimTypeChar(label="Byte"), "RouterDiscoveryBehavior": SimTypeInt(signed=False, label="NL_ROUTER_DISCOVERY_BEHAVIOR"), "DadTransmits": SimTypeInt(signed=False, label="UInt32"), "BaseReachableTime": SimTypeInt(signed=False, label="UInt32"), "RetransmitTime": SimTypeInt(signed=False, label="UInt32"), "PathMtuDiscoveryTimeout": SimTypeInt(signed=False, label="UInt32"), "LinkLocalAddressBehavior": SimTypeInt(signed=False, label="NL_LINK_LOCAL_ADDRESS_BEHAVIOR"), "LinkLocalAddressTimeout": SimTypeInt(signed=False, label="UInt32"), "ZoneIndices": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 16), "SitePrefixLength": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "NlMtu": SimTypeInt(signed=False, label="UInt32"), "Connected": SimTypeChar(label="Byte"), "SupportsWakeUpPatterns": SimTypeChar(label="Byte"), "SupportsNeighborDiscovery": SimTypeChar(label="Byte"), "SupportsRouterDiscovery": SimTypeChar(label="Byte"), "ReachableTime": SimTypeInt(signed=False, label="UInt32"), "TransmitOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "ReceiveOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "DisableDefaultRoutes": SimTypeChar(label="Byte")}, name="MIB_IPINTERFACE_ROW", pack=False, align=None), offset=0)}, name="MIB_IPINTERFACE_TABLE", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, 
label="Int32"), arg_names=["Family", "Table"]), # 'InitializeIpInterfaceEntry': SimTypeFunction([SimTypePointer(SimStruct({"Family": SimTypeShort(signed=False, label="UInt16"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "MaxReassemblySize": SimTypeInt(signed=False, label="UInt32"), "InterfaceIdentifier": SimTypeLongLong(signed=False, label="UInt64"), "MinRouterAdvertisementInterval": SimTypeInt(signed=False, label="UInt32"), "MaxRouterAdvertisementInterval": SimTypeInt(signed=False, label="UInt32"), "AdvertisingEnabled": SimTypeChar(label="Byte"), "ForwardingEnabled": SimTypeChar(label="Byte"), "WeakHostSend": SimTypeChar(label="Byte"), "WeakHostReceive": SimTypeChar(label="Byte"), "UseAutomaticMetric": SimTypeChar(label="Byte"), "UseNeighborUnreachabilityDetection": SimTypeChar(label="Byte"), "ManagedAddressConfigurationSupported": SimTypeChar(label="Byte"), "OtherStatefulConfigurationSupported": SimTypeChar(label="Byte"), "AdvertiseDefaultRoute": SimTypeChar(label="Byte"), "RouterDiscoveryBehavior": SimTypeInt(signed=False, label="NL_ROUTER_DISCOVERY_BEHAVIOR"), "DadTransmits": SimTypeInt(signed=False, label="UInt32"), "BaseReachableTime": SimTypeInt(signed=False, label="UInt32"), "RetransmitTime": SimTypeInt(signed=False, label="UInt32"), "PathMtuDiscoveryTimeout": SimTypeInt(signed=False, label="UInt32"), "LinkLocalAddressBehavior": SimTypeInt(signed=False, label="NL_LINK_LOCAL_ADDRESS_BEHAVIOR"), "LinkLocalAddressTimeout": SimTypeInt(signed=False, label="UInt32"), "ZoneIndices": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 16), "SitePrefixLength": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "NlMtu": SimTypeInt(signed=False, 
label="UInt32"), "Connected": SimTypeChar(label="Byte"), "SupportsWakeUpPatterns": SimTypeChar(label="Byte"), "SupportsNeighborDiscovery": SimTypeChar(label="Byte"), "SupportsRouterDiscovery": SimTypeChar(label="Byte"), "ReachableTime": SimTypeInt(signed=False, label="UInt32"), "TransmitOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "ReceiveOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "DisableDefaultRoutes": SimTypeChar(label="Byte")}, name="MIB_IPINTERFACE_ROW", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["Row"]), # 'NotifyIpInterfaceChange': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"Family": SimTypeShort(signed=False, label="UInt16"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "MaxReassemblySize": SimTypeInt(signed=False, label="UInt32"), "InterfaceIdentifier": SimTypeLongLong(signed=False, label="UInt64"), "MinRouterAdvertisementInterval": SimTypeInt(signed=False, label="UInt32"), "MaxRouterAdvertisementInterval": SimTypeInt(signed=False, label="UInt32"), "AdvertisingEnabled": SimTypeChar(label="Byte"), "ForwardingEnabled": SimTypeChar(label="Byte"), "WeakHostSend": SimTypeChar(label="Byte"), "WeakHostReceive": SimTypeChar(label="Byte"), "UseAutomaticMetric": SimTypeChar(label="Byte"), "UseNeighborUnreachabilityDetection": SimTypeChar(label="Byte"), "ManagedAddressConfigurationSupported": SimTypeChar(label="Byte"), "OtherStatefulConfigurationSupported": SimTypeChar(label="Byte"), 
"AdvertiseDefaultRoute": SimTypeChar(label="Byte"), "RouterDiscoveryBehavior": SimTypeInt(signed=False, label="NL_ROUTER_DISCOVERY_BEHAVIOR"), "DadTransmits": SimTypeInt(signed=False, label="UInt32"), "BaseReachableTime": SimTypeInt(signed=False, label="UInt32"), "RetransmitTime": SimTypeInt(signed=False, label="UInt32"), "PathMtuDiscoveryTimeout": SimTypeInt(signed=False, label="UInt32"), "LinkLocalAddressBehavior": SimTypeInt(signed=False, label="NL_LINK_LOCAL_ADDRESS_BEHAVIOR"), "LinkLocalAddressTimeout": SimTypeInt(signed=False, label="UInt32"), "ZoneIndices": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 16), "SitePrefixLength": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "NlMtu": SimTypeInt(signed=False, label="UInt32"), "Connected": SimTypeChar(label="Byte"), "SupportsWakeUpPatterns": SimTypeChar(label="Byte"), "SupportsNeighborDiscovery": SimTypeChar(label="Byte"), "SupportsRouterDiscovery": SimTypeChar(label="Byte"), "ReachableTime": SimTypeInt(signed=False, label="UInt32"), "TransmitOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "ReceiveOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "DisableDefaultRoutes": SimTypeChar(label="Byte")}, name="MIB_IPINTERFACE_ROW", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="MIB_NOTIFICATION_TYPE")], SimTypeBottom(label="Void"), arg_names=["CallerContext", "Row", "NotificationType"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeChar(label="Byte"), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "Callback", "CallerContext", "InitialNotification", "NotificationHandle"]), # 'SetIpInterfaceEntry': SimTypeFunction([SimTypePointer(SimStruct({"Family": 
SimTypeShort(signed=False, label="UInt16"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "MaxReassemblySize": SimTypeInt(signed=False, label="UInt32"), "InterfaceIdentifier": SimTypeLongLong(signed=False, label="UInt64"), "MinRouterAdvertisementInterval": SimTypeInt(signed=False, label="UInt32"), "MaxRouterAdvertisementInterval": SimTypeInt(signed=False, label="UInt32"), "AdvertisingEnabled": SimTypeChar(label="Byte"), "ForwardingEnabled": SimTypeChar(label="Byte"), "WeakHostSend": SimTypeChar(label="Byte"), "WeakHostReceive": SimTypeChar(label="Byte"), "UseAutomaticMetric": SimTypeChar(label="Byte"), "UseNeighborUnreachabilityDetection": SimTypeChar(label="Byte"), "ManagedAddressConfigurationSupported": SimTypeChar(label="Byte"), "OtherStatefulConfigurationSupported": SimTypeChar(label="Byte"), "AdvertiseDefaultRoute": SimTypeChar(label="Byte"), "RouterDiscoveryBehavior": SimTypeInt(signed=False, label="NL_ROUTER_DISCOVERY_BEHAVIOR"), "DadTransmits": SimTypeInt(signed=False, label="UInt32"), "BaseReachableTime": SimTypeInt(signed=False, label="UInt32"), "RetransmitTime": SimTypeInt(signed=False, label="UInt32"), "PathMtuDiscoveryTimeout": SimTypeInt(signed=False, label="UInt32"), "LinkLocalAddressBehavior": SimTypeInt(signed=False, label="NL_LINK_LOCAL_ADDRESS_BEHAVIOR"), "LinkLocalAddressTimeout": SimTypeInt(signed=False, label="UInt32"), "ZoneIndices": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 16), "SitePrefixLength": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "NlMtu": SimTypeInt(signed=False, label="UInt32"), "Connected": SimTypeChar(label="Byte"), "SupportsWakeUpPatterns": SimTypeChar(label="Byte"), "SupportsNeighborDiscovery": 
SimTypeChar(label="Byte"), "SupportsRouterDiscovery": SimTypeChar(label="Byte"), "ReachableTime": SimTypeInt(signed=False, label="UInt32"), "TransmitOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "ReceiveOffload": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="NL_INTERFACE_OFFLOAD_ROD", pack=False, align=None), "DisableDefaultRoutes": SimTypeChar(label="Byte")}, name="MIB_IPINTERFACE_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetIpNetworkConnectionBandwidthEstimates': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimStruct({"InboundBandwidthInformation": SimStruct({"Bandwidth": SimTypeLongLong(signed=False, label="UInt64"), "Instability": SimTypeLongLong(signed=False, label="UInt64"), "BandwidthPeaked": SimTypeChar(label="Byte")}, name="NL_BANDWIDTH_INFORMATION", pack=False, align=None), "OutboundBandwidthInformation": SimStruct({"Bandwidth": SimTypeLongLong(signed=False, label="UInt64"), "Instability": SimTypeLongLong(signed=False, label="UInt64"), "BandwidthPeaked": SimTypeChar(label="Byte")}, name="NL_BANDWIDTH_INFORMATION", pack=False, align=None)}, name="MIB_IP_NETWORK_CONNECTION_BANDWIDTH_ESTIMATES", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceIndex", "AddressFamily", "BandwidthEstimates"]), # 'CreateUnicastIpAddressEntry': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": 
SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "PrefixOrigin": SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "OnLinkPrefixLength": 
SimTypeChar(label="Byte"), "SkipAsSource": SimTypeChar(label="Byte"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None), "CreationTimeStamp": SimTypeBottom(label="LARGE_INTEGER")}, name="MIB_UNICASTIPADDRESS_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'DeleteUnicastIpAddressEntry': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": 
SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "PrefixOrigin": SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "OnLinkPrefixLength": SimTypeChar(label="Byte"), "SkipAsSource": SimTypeChar(label="Byte"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None), "CreationTimeStamp": SimTypeBottom(label="LARGE_INTEGER")}, name="MIB_UNICASTIPADDRESS_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetUnicastIpAddressEntry': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": 
SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "PrefixOrigin": 
SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "OnLinkPrefixLength": SimTypeChar(label="Byte"), "SkipAsSource": SimTypeChar(label="Byte"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None), "CreationTimeStamp": SimTypeBottom(label="LARGE_INTEGER")}, name="MIB_UNICASTIPADDRESS_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetUnicastIpAddressTable': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, 
label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "PrefixOrigin": SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "OnLinkPrefixLength": SimTypeChar(label="Byte"), "SkipAsSource": SimTypeChar(label="Byte"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None), "CreationTimeStamp": 
SimTypeBottom(label="LARGE_INTEGER")}, name="MIB_UNICASTIPADDRESS_ROW", pack=False, align=None), offset=0)}, name="MIB_UNICASTIPADDRESS_TABLE", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "Table"]), # 'InitializeUnicastIpAddressEntry': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, 
align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "PrefixOrigin": SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "OnLinkPrefixLength": SimTypeChar(label="Byte"), "SkipAsSource": SimTypeChar(label="Byte"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None), "CreationTimeStamp": SimTypeBottom(label="LARGE_INTEGER")}, name="MIB_UNICASTIPADDRESS_ROW", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["Row"]), # 'NotifyUnicastIpAddressChange': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), 
"S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "PrefixOrigin": SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), 
"OnLinkPrefixLength": SimTypeChar(label="Byte"), "SkipAsSource": SimTypeChar(label="Byte"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None), "CreationTimeStamp": SimTypeBottom(label="LARGE_INTEGER")}, name="MIB_UNICASTIPADDRESS_ROW", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="MIB_NOTIFICATION_TYPE")], SimTypeBottom(label="Void"), arg_names=["CallerContext", "Row", "NotificationType"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeChar(label="Byte"), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "Callback", "CallerContext", "InitialNotification", "NotificationHandle"]), # 'NotifyStableUnicastIpAddressTable': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, 
align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "PrefixOrigin": SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "OnLinkPrefixLength": SimTypeChar(label="Byte"), "SkipAsSource": SimTypeChar(label="Byte"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, 
name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None), "CreationTimeStamp": SimTypeBottom(label="LARGE_INTEGER")}, name="MIB_UNICASTIPADDRESS_ROW", pack=False, align=None), offset=0)}, name="MIB_UNICASTIPADDRESS_TABLE", pack=False, align=None), offset=0), offset=0), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": 
SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "PrefixOrigin": SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "OnLinkPrefixLength": SimTypeChar(label="Byte"), "SkipAsSource": SimTypeChar(label="Byte"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None), "CreationTimeStamp": SimTypeBottom(label="LARGE_INTEGER")}, name="MIB_UNICASTIPADDRESS_ROW", pack=False, align=None), offset=0)}, name="MIB_UNICASTIPADDRESS_TABLE", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["CallerContext", "AddressTable"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "Table", "CallerCallback", 
"CallerContext", "NotificationHandle"]), # 'SetUnicastIpAddressEntry': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": 
SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "PrefixOrigin": SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "OnLinkPrefixLength": SimTypeChar(label="Byte"), "SkipAsSource": SimTypeChar(label="Byte"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None), "CreationTimeStamp": SimTypeBottom(label="LARGE_INTEGER")}, name="MIB_UNICASTIPADDRESS_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'CreateAnycastIpAddressEntry': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": 
SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="MIB_ANYCASTIPADDRESS_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'DeleteAnycastIpAddressEntry': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": 
SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, 
name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="MIB_ANYCASTIPADDRESS_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetAnycastIpAddressEntry': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": 
SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="MIB_ANYCASTIPADDRESS_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetAnycastIpAddressTable': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": 
SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="MIB_ANYCASTIPADDRESS_ROW", pack=False, align=None), offset=0)}, name="MIB_ANYCASTIPADDRESS_TABLE", pack=False, align=None), offset=0), offset=0)], 
SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "Table"]), # 'GetMulticastIpAddressEntry': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceIndex": 
SimTypeInt(signed=False, label="UInt32"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="MIB_MULTICASTIPADDRESS_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetMulticastIpAddressTable': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": 
SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "ScopeId": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="MIB_MULTICASTIPADDRESS_ROW", pack=False, align=None), offset=0)}, name="MIB_MULTICASTIPADDRESS_TABLE", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "Table"]), # 'CreateIpForwardEntry2': SimTypeFunction([SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), 
"DestinationPrefix": SimStruct({"Prefix": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PrefixLength": SimTypeChar(label="Byte")}, name="IP_ADDRESS_PREFIX", pack=False, align=None), "NextHop": SimUnion({"Ipv4": 
SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "SitePrefixLength": SimTypeChar(label="Byte"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "Metric": 
SimTypeInt(signed=False, label="UInt32"), "Protocol": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL"), "Loopback": SimTypeChar(label="Byte"), "AutoconfigureAddress": SimTypeChar(label="Byte"), "Publish": SimTypeChar(label="Byte"), "Immortal": SimTypeChar(label="Byte"), "Age": SimTypeInt(signed=False, label="UInt32"), "Origin": SimTypeInt(signed=False, label="NL_ROUTE_ORIGIN")}, name="MIB_IPFORWARD_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'DeleteIpForwardEntry2': SimTypeFunction([SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "DestinationPrefix": SimStruct({"Prefix": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": 
SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PrefixLength": SimTypeChar(label="Byte")}, name="IP_ADDRESS_PREFIX", pack=False, align=None), "NextHop": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": 
SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "SitePrefixLength": SimTypeChar(label="Byte"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "Protocol": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL"), "Loopback": SimTypeChar(label="Byte"), "AutoconfigureAddress": SimTypeChar(label="Byte"), "Publish": SimTypeChar(label="Byte"), "Immortal": SimTypeChar(label="Byte"), "Age": SimTypeInt(signed=False, label="UInt32"), "Origin": SimTypeInt(signed=False, label="NL_ROUTE_ORIGIN")}, name="MIB_IPFORWARD_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetBestRoute2': SimTypeFunction([SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": 
SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), offset=0), SimTypePointer(SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), 
"S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "DestinationPrefix": SimStruct({"Prefix": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), 
"sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PrefixLength": SimTypeChar(label="Byte")}, name="IP_ADDRESS_PREFIX", pack=False, align=None), "NextHop": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": 
SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "SitePrefixLength": SimTypeChar(label="Byte"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "Protocol": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL"), "Loopback": SimTypeChar(label="Byte"), "AutoconfigureAddress": SimTypeChar(label="Byte"), "Publish": 
SimTypeChar(label="Byte"), "Immortal": SimTypeChar(label="Byte"), "Age": SimTypeInt(signed=False, label="UInt32"), "Origin": SimTypeInt(signed=False, label="NL_ROUTE_ORIGIN")}, name="MIB_IPFORWARD_ROW2", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), 
"si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceLuid", "InterfaceIndex", "SourceAddress", "DestinationAddress", "AddressSortOptions", "BestRoute", "BestSourceAddress"]), # 'GetIpForwardEntry2': SimTypeFunction([SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "DestinationPrefix": SimStruct({"Prefix": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": 
SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PrefixLength": SimTypeChar(label="Byte")}, name="IP_ADDRESS_PREFIX", pack=False, align=None), "NextHop": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": 
SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "SitePrefixLength": SimTypeChar(label="Byte"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "Protocol": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL"), "Loopback": SimTypeChar(label="Byte"), "AutoconfigureAddress": SimTypeChar(label="Byte"), "Publish": SimTypeChar(label="Byte"), "Immortal": SimTypeChar(label="Byte"), "Age": SimTypeInt(signed=False, label="UInt32"), "Origin": SimTypeInt(signed=False, label="NL_ROUTE_ORIGIN")}, name="MIB_IPFORWARD_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetIpForwardTable2': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "DestinationPrefix": SimStruct({"Prefix": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": 
SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PrefixLength": SimTypeChar(label="Byte")}, name="IP_ADDRESS_PREFIX", pack=False, align=None), "NextHop": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, 
name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "SitePrefixLength": SimTypeChar(label="Byte"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "Protocol": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL"), "Loopback": SimTypeChar(label="Byte"), "AutoconfigureAddress": SimTypeChar(label="Byte"), "Publish": SimTypeChar(label="Byte"), "Immortal": SimTypeChar(label="Byte"), "Age": SimTypeInt(signed=False, label="UInt32"), "Origin": 
SimTypeInt(signed=False, label="NL_ROUTE_ORIGIN")}, name="MIB_IPFORWARD_ROW2", pack=False, align=None), offset=0)}, name="MIB_IPFORWARD_TABLE2", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "Table"]), # 'InitializeIpForwardEntry': SimTypeFunction([SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "DestinationPrefix": SimStruct({"Prefix": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": 
SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PrefixLength": SimTypeChar(label="Byte")}, name="IP_ADDRESS_PREFIX", pack=False, align=None), "NextHop": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": 
SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "SitePrefixLength": SimTypeChar(label="Byte"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "Protocol": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL"), "Loopback": SimTypeChar(label="Byte"), "AutoconfigureAddress": SimTypeChar(label="Byte"), "Publish": SimTypeChar(label="Byte"), "Immortal": SimTypeChar(label="Byte"), "Age": SimTypeInt(signed=False, label="UInt32"), "Origin": SimTypeInt(signed=False, label="NL_ROUTE_ORIGIN")}, name="MIB_IPFORWARD_ROW2", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["Row"]), # 'NotifyRouteChange2': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "DestinationPrefix": SimStruct({"Prefix": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": 
SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PrefixLength": SimTypeChar(label="Byte")}, name="IP_ADDRESS_PREFIX", pack=False, align=None), "NextHop": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, 
align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "SitePrefixLength": SimTypeChar(label="Byte"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "Protocol": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL"), "Loopback": SimTypeChar(label="Byte"), "AutoconfigureAddress": SimTypeChar(label="Byte"), "Publish": SimTypeChar(label="Byte"), "Immortal": SimTypeChar(label="Byte"), "Age": SimTypeInt(signed=False, label="UInt32"), "Origin": SimTypeInt(signed=False, 
label="NL_ROUTE_ORIGIN")}, name="MIB_IPFORWARD_ROW2", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="MIB_NOTIFICATION_TYPE")], SimTypeBottom(label="Void"), arg_names=["CallerContext", "Row", "NotificationType"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeChar(label="Byte"), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["AddressFamily", "Callback", "CallerContext", "InitialNotification", "NotificationHandle"]), # 'SetIpForwardEntry2': SimTypeFunction([SimTypePointer(SimStruct({"InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "DestinationPrefix": SimStruct({"Prefix": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), 
"sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PrefixLength": SimTypeChar(label="Byte")}, name="IP_ADDRESS_PREFIX", pack=False, align=None), "NextHop": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": 
SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "SitePrefixLength": SimTypeChar(label="Byte"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "Metric": SimTypeInt(signed=False, label="UInt32"), "Protocol": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL"), "Loopback": SimTypeChar(label="Byte"), "AutoconfigureAddress": SimTypeChar(label="Byte"), "Publish": SimTypeChar(label="Byte"), "Immortal": SimTypeChar(label="Byte"), "Age": SimTypeInt(signed=False, label="UInt32"), "Origin": SimTypeInt(signed=False, label="NL_ROUTE_ORIGIN")}, name="MIB_IPFORWARD_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Route"]), # 'FlushIpPathTable': SimTypeFunction([SimTypeShort(signed=False, label="UInt16")], SimTypeInt(signed=True, label="Int32"), arg_names=["Family"]), # 'GetIpPathEntry': SimTypeFunction([SimTypePointer(SimStruct({"Source": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": 
SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "Destination": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": 
SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "CurrentNextHop": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": 
SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PathMtu": SimTypeInt(signed=False, label="UInt32"), "RttMean": SimTypeInt(signed=False, label="UInt32"), "RttDeviation": SimTypeInt(signed=False, label="UInt32"), "Anonymous": SimUnion({"LastReachable": SimTypeInt(signed=False, label="UInt32"), "LastUnreachable": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None"), "IsReachable": SimTypeChar(label="Byte"), "LinkTransmitSpeed": SimTypeLongLong(signed=False, label="UInt64"), "LinkReceiveSpeed": 
SimTypeLongLong(signed=False, label="UInt64")}, name="MIB_IPPATH_ROW", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetIpPathTable': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"Source": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", 
label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "Destination": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": 
SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "CurrentNextHop": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", 
label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "PathMtu": SimTypeInt(signed=False, label="UInt32"), "RttMean": SimTypeInt(signed=False, label="UInt32"), "RttDeviation": SimTypeInt(signed=False, label="UInt32"), "Anonymous": SimUnion({"LastReachable": SimTypeInt(signed=False, label="UInt32"), "LastUnreachable": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None"), "IsReachable": SimTypeChar(label="Byte"), "LinkTransmitSpeed": SimTypeLongLong(signed=False, label="UInt64"), "LinkReceiveSpeed": SimTypeLongLong(signed=False, label="UInt64")}, name="MIB_IPPATH_ROW", pack=False, align=None), offset=0)}, name="MIB_IPPATH_TABLE", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "Table"]), # 'CreateIpNetEntry2': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": 
SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="NL_NEIGHBOR_STATE"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_Anonymous_e__Struct", pack=False, align=None), "Flags": SimTypeChar(label="Byte")}, name="<anon>", label="None"), "ReachabilityTime": SimUnion({"LastReachable": SimTypeInt(signed=False, label="UInt32"), "LastUnreachable": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="MIB_IPNET_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'DeleteIpNetEntry2': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": 
SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, 
label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="NL_NEIGHBOR_STATE"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_Anonymous_e__Struct", pack=False, align=None), "Flags": SimTypeChar(label="Byte")}, name="<anon>", label="None"), "ReachabilityTime": SimUnion({"LastReachable": SimTypeInt(signed=False, label="UInt32"), "LastUnreachable": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="MIB_IPNET_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'FlushIpNetTable2': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "InterfaceIndex"]), # 'GetIpNetEntry2': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": 
SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="NL_NEIGHBOR_STATE"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_Anonymous_e__Struct", pack=False, align=None), "Flags": SimTypeChar(label="Byte")}, name="<anon>", label="None"), "ReachabilityTime": SimUnion({"LastReachable": SimTypeInt(signed=False, label="UInt32"), "LastUnreachable": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="MIB_IPNET_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'GetIpNetTable2': SimTypeFunction([SimTypeShort(signed=False, 
label="UInt16"), SimTypePointer(SimTypePointer(SimStruct({"NumEntries": SimTypeInt(signed=False, label="UInt32"), "Table": SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceIndex": 
SimTypeInt(signed=False, label="UInt32"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="NL_NEIGHBOR_STATE"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_Anonymous_e__Struct", pack=False, align=None), "Flags": SimTypeChar(label="Byte")}, name="<anon>", label="None"), "ReachabilityTime": SimUnion({"LastReachable": SimTypeInt(signed=False, label="UInt32"), "LastUnreachable": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="MIB_IPNET_ROW2", pack=False, align=None), offset=0)}, name="MIB_IPNET_TABLE2", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Family", "Table"]), # 'ResolveIpNetEntry2': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": 
SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="NL_NEIGHBOR_STATE"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_Anonymous_e__Struct", pack=False, align=None), "Flags": SimTypeChar(label="Byte")}, name="<anon>", label="None"), "ReachabilityTime": SimUnion({"LastReachable": SimTypeInt(signed=False, label="UInt32"), "LastUnreachable": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="MIB_IPNET_ROW2", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"Ipv4": 
SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row", "SourceAddress"]), # 'SetIpNetEntry2': SimTypeFunction([SimTypePointer(SimStruct({"Address": SimUnion({"Ipv4": 
SimStruct({"sin_family": SimTypeShort(signed=False, label="UInt16"), "sin_port": SimTypeShort(signed=False, label="UInt16"), "sin_addr": SimStruct({"S_un": SimUnion({"S_un_b": SimStruct({"s_b1": SimTypeChar(label="Byte"), "s_b2": SimTypeChar(label="Byte"), "s_b3": SimTypeChar(label="Byte"), "s_b4": SimTypeChar(label="Byte")}, name="_S_un_b_e__Struct", pack=False, align=None), "S_un_w": SimStruct({"s_w1": SimTypeShort(signed=False, label="UInt16"), "s_w2": SimTypeShort(signed=False, label="UInt16")}, name="_S_un_w_e__Struct", pack=False, align=None), "S_addr": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="IN_ADDR", pack=False, align=None), "sin_zero": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 8)}, name="SOCKADDR_IN", pack=False, align=None), "Ipv6": SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), "si_family": SimTypeShort(signed=False, label="UInt16")}, name="<anon>", label="None"), "InterfaceIndex": SimTypeInt(signed=False, label="UInt32"), "InterfaceLuid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": 
SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 32), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="NL_NEIGHBOR_STATE"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeChar(label="Byte")}, name="_Anonymous_e__Struct", pack=False, align=None), "Flags": SimTypeChar(label="Byte")}, name="<anon>", label="None"), "ReachabilityTime": SimUnion({"LastReachable": SimTypeInt(signed=False, label="UInt32"), "LastUnreachable": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="MIB_IPNET_ROW2", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Row"]), # 'NotifyTeredoPortChange': SimTypeFunction([SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeShort(signed=False, label="UInt16"), SimTypeInt(signed=False, label="MIB_NOTIFICATION_TYPE")], SimTypeBottom(label="Void"), arg_names=["CallerContext", "Port", "NotificationType"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeChar(label="Byte"), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Callback", "CallerContext", "InitialNotification", "NotificationHandle"]), # 'GetTeredoPort': SimTypeFunction([SimTypePointer(SimTypeShort(signed=False, label="UInt16"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Port"]), # 'CancelMibChangeNotify2': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["NotificationHandle"]), # 'FreeMibTable': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeBottom(label="Void"), 
arg_names=["Memory"]), # 'CreateSortedAddressPairs': SimTypeFunction([SimTypePointer(SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), offset=0), 
SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypePointer(SimStruct({"SourceAddress": SimTypePointer(SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), offset=0), "DestinationAddress": SimTypePointer(SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, 
name="SOCKADDR_IN6", pack=False, align=None), offset=0)}, name="SOCKADDR_IN6_PAIR", pack=False, align=None), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["SourceAddressList", "SourceAddressCount", "DestinationAddressList", "DestinationAddressCount", "AddressSortOptions", "SortedAddressPairList", "SortedAddressPairCount"]), # 'ConvertCompartmentGuidToId': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Guid"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["CompartmentGuid", "CompartmentId"]), # 'ConvertCompartmentIdToGuid': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Guid"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["CompartmentId", "CompartmentGuid"]), # 'ConvertInterfaceNameToLuidA': SimTypeFunction([SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceName", "InterfaceLuid"]), # 'ConvertInterfaceNameToLuidW': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceName", "InterfaceLuid"]), # 'ConvertInterfaceLuidToNameA': SimTypeFunction([SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, 
label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0), SimTypePointer(SimTypeChar(label="Byte"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceLuid", "InterfaceName", "Length"]), # 'ConvertInterfaceLuidToNameW': SimTypeFunction([SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceLuid", "InterfaceName", "Length"]), # 'ConvertInterfaceLuidToIndex': SimTypeFunction([SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceLuid", "InterfaceIndex"]), # 'ConvertInterfaceIndexToLuid': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceIndex", "InterfaceLuid"]), # 'ConvertInterfaceLuidToAlias': SimTypeFunction([SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, 
label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceLuid", "InterfaceAlias", "Length"]), # 'ConvertInterfaceAliasToLuid': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceAlias", "InterfaceLuid"]), # 'ConvertInterfaceLuidToGuid': SimTypeFunction([SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0), SimTypePointer(SimTypeBottom(label="Guid"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceLuid", "InterfaceGuid"]), # 'ConvertInterfaceGuidToLuid': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Guid"), offset=0), SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceGuid", "InterfaceLuid"]), # 'if_nametoindex': SimTypeFunction([SimTypePointer(SimTypeChar(label="Byte"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["InterfaceName"]), # 'if_indextoname': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), label="LPArray", 
offset=0)], SimTypePointer(SimTypeChar(label="Byte"), offset=0), arg_names=["InterfaceIndex", "InterfaceName"]), # 'GetCurrentThreadCompartmentId': SimTypeFunction([], SimTypeInt(signed=False, label="UInt32")), # 'SetCurrentThreadCompartmentId': SimTypeFunction([SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["CompartmentId"]), # 'GetCurrentThreadCompartmentScope': SimTypeFunction([SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeBottom(label="Void"), arg_names=["CompartmentScope", "CompartmentId"]), # 'SetCurrentThreadCompartmentScope': SimTypeFunction([SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["CompartmentScope"]), # 'GetJobCompartmentId': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["JobHandle"]), # 'SetJobCompartmentId': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["JobHandle", "CompartmentId"]), # 'GetSessionCompartmentId': SimTypeFunction([SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["SessionId"]), # 'SetSessionCompartmentId': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["SessionId", "CompartmentId"]), # 'GetDefaultCompartmentId': SimTypeFunction([], SimTypeInt(signed=False, label="UInt32")), # 'GetNetworkInformation': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Guid"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Char"), 
label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["NetworkGuid", "CompartmentId", "SiteId", "NetworkName", "Length"]), # 'SetNetworkInformation': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Guid"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["NetworkGuid", "CompartmentId", "NetworkName"]), # 'ConvertLengthToIpv4Mask': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["MaskLength", "Mask"]), # 'ConvertIpv4MaskToLength': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Mask", "MaskLength"]), # 'GetDnsSettings': SimTypeFunction([SimTypePointer(SimStruct({"Version": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeLongLong(signed=False, label="UInt64"), "Hostname": SimTypePointer(SimTypeChar(label="Char"), offset=0), "Domain": SimTypePointer(SimTypeChar(label="Char"), offset=0), "SearchList": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="DNS_SETTINGS", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Settings"]), # 'FreeDnsSettings': SimTypeFunction([SimTypePointer(SimStruct({"Version": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeLongLong(signed=False, label="UInt64"), "Hostname": SimTypePointer(SimTypeChar(label="Char"), offset=0), "Domain": SimTypePointer(SimTypeChar(label="Char"), offset=0), "SearchList": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="DNS_SETTINGS", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["Settings"]), # 'SetDnsSettings': SimTypeFunction([SimTypePointer(SimStruct({"Version": 
SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeLongLong(signed=False, label="UInt64"), "Hostname": SimTypePointer(SimTypeChar(label="Char"), offset=0), "Domain": SimTypePointer(SimTypeChar(label="Char"), offset=0), "SearchList": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="DNS_SETTINGS", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Settings"]), # 'GetInterfaceDnsSettings': SimTypeFunction([SimTypeBottom(label="Guid"), SimTypePointer(SimStruct({"Version": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeLongLong(signed=False, label="UInt64"), "Domain": SimTypePointer(SimTypeChar(label="Char"), offset=0), "NameServer": SimTypePointer(SimTypeChar(label="Char"), offset=0), "SearchList": SimTypePointer(SimTypeChar(label="Char"), offset=0), "RegistrationEnabled": SimTypeInt(signed=False, label="UInt32"), "RegisterAdapterName": SimTypeInt(signed=False, label="UInt32"), "EnableLLMNR": SimTypeInt(signed=False, label="UInt32"), "QueryAdapterName": SimTypeInt(signed=False, label="UInt32"), "ProfileNameServer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="DNS_INTERFACE_SETTINGS", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Interface", "Settings"]), # 'FreeInterfaceDnsSettings': SimTypeFunction([SimTypePointer(SimStruct({"Version": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeLongLong(signed=False, label="UInt64"), "Domain": SimTypePointer(SimTypeChar(label="Char"), offset=0), "NameServer": SimTypePointer(SimTypeChar(label="Char"), offset=0), "SearchList": SimTypePointer(SimTypeChar(label="Char"), offset=0), "RegistrationEnabled": SimTypeInt(signed=False, label="UInt32"), "RegisterAdapterName": SimTypeInt(signed=False, label="UInt32"), "EnableLLMNR": SimTypeInt(signed=False, label="UInt32"), "QueryAdapterName": SimTypeInt(signed=False, label="UInt32"), "ProfileNameServer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, 
name="DNS_INTERFACE_SETTINGS", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["Settings"]), # 'SetInterfaceDnsSettings': SimTypeFunction([SimTypeBottom(label="Guid"), SimTypePointer(SimStruct({"Version": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeLongLong(signed=False, label="UInt64"), "Domain": SimTypePointer(SimTypeChar(label="Char"), offset=0), "NameServer": SimTypePointer(SimTypeChar(label="Char"), offset=0), "SearchList": SimTypePointer(SimTypeChar(label="Char"), offset=0), "RegistrationEnabled": SimTypeInt(signed=False, label="UInt32"), "RegisterAdapterName": SimTypeInt(signed=False, label="UInt32"), "EnableLLMNR": SimTypeInt(signed=False, label="UInt32"), "QueryAdapterName": SimTypeInt(signed=False, label="UInt32"), "ProfileNameServer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="DNS_INTERFACE_SETTINGS", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Interface", "Settings"]), # 'GetNetworkConnectivityHint': SimTypeFunction([SimTypePointer(SimStruct({"ConnectivityLevel": SimTypeInt(signed=False, label="NL_NETWORK_CONNECTIVITY_LEVEL_HINT"), "ConnectivityCost": SimTypeInt(signed=False, label="NL_NETWORK_CONNECTIVITY_COST_HINT"), "ApproachingDataLimit": SimTypeChar(label="Byte"), "OverDataLimit": SimTypeChar(label="Byte"), "Roaming": SimTypeChar(label="Byte")}, name="NL_NETWORK_CONNECTIVITY_HINT", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["ConnectivityHint"]), # 'GetNetworkConnectivityHintForInterface': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"ConnectivityLevel": SimTypeInt(signed=False, label="NL_NETWORK_CONNECTIVITY_LEVEL_HINT"), "ConnectivityCost": SimTypeInt(signed=False, label="NL_NETWORK_CONNECTIVITY_COST_HINT"), "ApproachingDataLimit": SimTypeChar(label="Byte"), "OverDataLimit": SimTypeChar(label="Byte"), "Roaming": SimTypeChar(label="Byte")}, 
name="NL_NETWORK_CONNECTIVITY_HINT", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["InterfaceIndex", "ConnectivityHint"]), # 'NotifyNetworkConnectivityHintChange': SimTypeFunction([SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimStruct({"ConnectivityLevel": SimTypeInt(signed=False, label="NL_NETWORK_CONNECTIVITY_LEVEL_HINT"), "ConnectivityCost": SimTypeInt(signed=False, label="NL_NETWORK_CONNECTIVITY_COST_HINT"), "ApproachingDataLimit": SimTypeChar(label="Byte"), "OverDataLimit": SimTypeChar(label="Byte"), "Roaming": SimTypeChar(label="Byte")}, name="NL_NETWORK_CONNECTIVITY_HINT", pack=False, align=None)], SimTypeBottom(label="Void"), arg_names=["CallerContext", "ConnectivityHint"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeChar(label="Byte"), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Callback", "CallerContext", "InitialNotification", "NotificationHandle"]), # 'IcmpCreateFile': SimTypeFunction([], SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)), # 'Icmp6CreateFile': SimTypeFunction([], SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)), # 'IcmpCloseHandle': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["IcmpHandle"]), # 'IcmpSendEcho': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimStruct({"Ttl": SimTypeChar(label="Byte"), "Tos": SimTypeChar(label="Byte"), "Flags": SimTypeChar(label="Byte"), "OptionsSize": SimTypeChar(label="Byte"), "OptionsData": 
SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="ip_option_information", pack=False, align=None), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["IcmpHandle", "DestinationAddress", "RequestData", "RequestSize", "RequestOptions", "ReplyBuffer", "ReplySize", "Timeout"]), # 'IcmpSendEcho2': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"Anonymous": SimUnion({"Status": SimTypeInt(signed=True, label="Int32"), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "Information": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)}, name="IO_STATUS_BLOCK", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeBottom(label="Void"), arg_names=["ApcContext", "IoStatusBlock", "Reserved"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimStruct({"Ttl": SimTypeChar(label="Byte"), "Tos": SimTypeChar(label="Byte"), "Flags": SimTypeChar(label="Byte"), "OptionsSize": SimTypeChar(label="Byte"), "OptionsData": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="ip_option_information", pack=False, align=None), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["IcmpHandle", "Event", "ApcRoutine", "ApcContext", "DestinationAddress", "RequestData", 
"RequestSize", "RequestOptions", "ReplyBuffer", "ReplySize", "Timeout"]), # 'IcmpSendEcho2Ex': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"Anonymous": SimUnion({"Status": SimTypeInt(signed=True, label="Int32"), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "Information": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)}, name="IO_STATUS_BLOCK", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeBottom(label="Void"), arg_names=["ApcContext", "IoStatusBlock", "Reserved"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimStruct({"Ttl": SimTypeChar(label="Byte"), "Tos": SimTypeChar(label="Byte"), "Flags": SimTypeChar(label="Byte"), "OptionsSize": SimTypeChar(label="Byte"), "OptionsData": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="ip_option_information", pack=False, align=None), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["IcmpHandle", "Event", "ApcRoutine", "ApcContext", "SourceAddress", "DestinationAddress", "RequestData", "RequestSize", "RequestOptions", "ReplyBuffer", "ReplySize", "Timeout"]), # 'Icmp6SendEcho2': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), 
SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"Anonymous": SimUnion({"Status": SimTypeInt(signed=True, label="Int32"), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "Information": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)}, name="IO_STATUS_BLOCK", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeBottom(label="Void"), arg_names=["ApcContext", "IoStatusBlock", "Reserved"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"sin6_family": SimTypeShort(signed=False, label="UInt16"), "sin6_port": SimTypeShort(signed=False, label="UInt16"), "sin6_flowinfo": SimTypeInt(signed=False, label="UInt32"), "sin6_addr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, 
name="IN6_ADDR", pack=False, align=None), "Anonymous": SimUnion({"sin6_scope_id": SimTypeInt(signed=False, label="UInt32"), "sin6_scope_struct": SimStruct({"Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Value": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="SCOPE_ID", pack=False, align=None)}, name="<anon>", label="None")}, name="SOCKADDR_IN6", pack=False, align=None), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimStruct({"Ttl": SimTypeChar(label="Byte"), "Tos": SimTypeChar(label="Byte"), "Flags": SimTypeChar(label="Byte"), "OptionsSize": SimTypeChar(label="Byte"), "OptionsData": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="ip_option_information", pack=False, align=None), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["IcmpHandle", "Event", "ApcRoutine", "ApcContext", "SourceAddress", "DestinationAddress", "RequestData", "RequestSize", "RequestOptions", "ReplyBuffer", "ReplySize", "Timeout"]), # 'IcmpParseReplies': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["ReplyBuffer", "ReplySize"]), # 'Icmp6ParseReplies': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["ReplyBuffer", "ReplySize"]), # 'GetNumberOfInterfaces': SimTypeFunction([SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pdwNumIf"]), # 'GetIfEntry': SimTypeFunction([SimTypePointer(SimStruct({"wszName": 
SimTypeFixedSizeArray(SimTypeChar(label="Char"), 256), "dwIndex": SimTypeInt(signed=False, label="UInt32"), "dwType": SimTypeInt(signed=False, label="UInt32"), "dwMtu": SimTypeInt(signed=False, label="UInt32"), "dwSpeed": SimTypeInt(signed=False, label="UInt32"), "dwPhysAddrLen": SimTypeInt(signed=False, label="UInt32"), "bPhysAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 8), "dwAdminStatus": SimTypeInt(signed=False, label="UInt32"), "dwOperStatus": SimTypeInt(signed=False, label="INTERNAL_IF_OPER_STATUS"), "dwLastChange": SimTypeInt(signed=False, label="UInt32"), "dwInOctets": SimTypeInt(signed=False, label="UInt32"), "dwInUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwInNUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwInDiscards": SimTypeInt(signed=False, label="UInt32"), "dwInErrors": SimTypeInt(signed=False, label="UInt32"), "dwInUnknownProtos": SimTypeInt(signed=False, label="UInt32"), "dwOutOctets": SimTypeInt(signed=False, label="UInt32"), "dwOutUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwOutNUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwOutDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutErrors": SimTypeInt(signed=False, label="UInt32"), "dwOutQLen": SimTypeInt(signed=False, label="UInt32"), "dwDescrLen": SimTypeInt(signed=False, label="UInt32"), "bDescr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 256)}, name="MIB_IFROW", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pIfRow"]), # 'GetIfTable': SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"wszName": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 256), "dwIndex": SimTypeInt(signed=False, label="UInt32"), "dwType": SimTypeInt(signed=False, label="UInt32"), "dwMtu": SimTypeInt(signed=False, label="UInt32"), "dwSpeed": SimTypeInt(signed=False, label="UInt32"), "dwPhysAddrLen": SimTypeInt(signed=False, 
label="UInt32"), "bPhysAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 8), "dwAdminStatus": SimTypeInt(signed=False, label="UInt32"), "dwOperStatus": SimTypeInt(signed=False, label="INTERNAL_IF_OPER_STATUS"), "dwLastChange": SimTypeInt(signed=False, label="UInt32"), "dwInOctets": SimTypeInt(signed=False, label="UInt32"), "dwInUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwInNUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwInDiscards": SimTypeInt(signed=False, label="UInt32"), "dwInErrors": SimTypeInt(signed=False, label="UInt32"), "dwInUnknownProtos": SimTypeInt(signed=False, label="UInt32"), "dwOutOctets": SimTypeInt(signed=False, label="UInt32"), "dwOutUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwOutNUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwOutDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutErrors": SimTypeInt(signed=False, label="UInt32"), "dwOutQLen": SimTypeInt(signed=False, label="UInt32"), "dwDescrLen": SimTypeInt(signed=False, label="UInt32"), "bDescr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 256)}, name="MIB_IFROW", pack=False, align=None), offset=0)}, name="MIB_IFTABLE", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pIfTable", "pdwSize", "bOrder"]), # 'GetIpAddrTable': SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"dwAddr": SimTypeInt(signed=False, label="UInt32"), "dwIndex": SimTypeInt(signed=False, label="UInt32"), "dwMask": SimTypeInt(signed=False, label="UInt32"), "dwBCastAddr": SimTypeInt(signed=False, label="UInt32"), "dwReasmSize": SimTypeInt(signed=False, label="UInt32"), "unused1": SimTypeShort(signed=False, label="UInt16"), "wType": SimTypeShort(signed=False, label="UInt16")}, name="MIB_IPADDRROW_XP", pack=False, align=None), 
offset=0)}, name="MIB_IPADDRTABLE", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pIpAddrTable", "pdwSize", "bOrder"]), # 'GetIpNetTable': SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"dwIndex": SimTypeInt(signed=False, label="UInt32"), "dwPhysAddrLen": SimTypeInt(signed=False, label="UInt32"), "bPhysAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 8), "dwAddr": SimTypeInt(signed=False, label="UInt32"), "Anonymous": SimUnion({"dwType": SimTypeInt(signed=False, label="UInt32"), "Type": SimTypeInt(signed=False, label="MIB_IPNET_TYPE")}, name="<anon>", label="None")}, name="MIB_IPNETROW_LH", pack=False, align=None), offset=0)}, name="MIB_IPNETTABLE", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["IpNetTable", "SizePointer", "Order"]), # 'GetIpForwardTable': SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"dwForwardDest": SimTypeInt(signed=False, label="UInt32"), "dwForwardMask": SimTypeInt(signed=False, label="UInt32"), "dwForwardPolicy": SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHop": SimTypeInt(signed=False, label="UInt32"), "dwForwardIfIndex": SimTypeInt(signed=False, label="UInt32"), "Anonymous1": SimUnion({"dwForwardType": SimTypeInt(signed=False, label="UInt32"), "ForwardType": SimTypeInt(signed=False, label="MIB_IPFORWARD_TYPE")}, name="<anon>", label="None"), "Anonymous2": SimUnion({"dwForwardProto": SimTypeInt(signed=False, label="UInt32"), "ForwardProto": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL")}, name="<anon>", label="None"), "dwForwardAge": 
SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHopAS": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric1": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric2": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric3": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric4": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric5": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IPFORWARDROW", pack=False, align=None), offset=0)}, name="MIB_IPFORWARDTABLE", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pIpForwardTable", "pdwSize", "bOrder"]), # 'GetTcpTable': SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwState": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="MIB_TCP_STATE")}, name="<anon>", label="None"), "dwLocalAddr": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "dwRemoteAddr": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCPROW_LH", pack=False, align=None), offset=0)}, name="MIB_TCPTABLE", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["TcpTable", "SizePointer", "Order"]), # 'GetExtendedTcpTable': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="TCP_TABLE_CLASS"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), 
arg_names=["pTcpTable", "pdwSize", "bOrder", "ulAf", "TableClass", "Reserved"]), # 'GetOwnerModuleFromTcpEntry': SimTypeFunction([SimTypePointer(SimStruct({"dwState": SimTypeInt(signed=False, label="UInt32"), "dwLocalAddr": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "dwRemoteAddr": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32"), "dwOwningPid": SimTypeInt(signed=False, label="UInt32"), "liCreateTimestamp": SimTypeBottom(label="LARGE_INTEGER"), "OwningModuleInfo": SimTypeFixedSizeArray(SimTypeLongLong(signed=False, label="UInt64"), 16)}, name="MIB_TCPROW_OWNER_MODULE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="TCPIP_OWNER_MODULE_INFO_CLASS"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pTcpEntry", "Class", "pBuffer", "pdwSize"]), # 'GetUdpTable': SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"dwLocalAddr": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32")}, name="MIB_UDPROW", pack=False, align=None), offset=0)}, name="MIB_UDPTABLE", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["UdpTable", "SizePointer", "Order"]), # 'GetExtendedUdpTable': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UDP_TABLE_CLASS"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pUdpTable", 
"pdwSize", "bOrder", "ulAf", "TableClass", "Reserved"]), # 'GetOwnerModuleFromUdpEntry': SimTypeFunction([SimTypePointer(SimStruct({"dwLocalAddr": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "dwOwningPid": SimTypeInt(signed=False, label="UInt32"), "liCreateTimestamp": SimTypeBottom(label="LARGE_INTEGER"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=True, label="Int32")}, name="_Anonymous_e__Struct", pack=False, align=None), "dwFlags": SimTypeInt(signed=True, label="Int32")}, name="<anon>", label="None"), "OwningModuleInfo": SimTypeFixedSizeArray(SimTypeLongLong(signed=False, label="UInt64"), 16)}, name="MIB_UDPROW_OWNER_MODULE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="TCPIP_OWNER_MODULE_INFO_CLASS"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pUdpEntry", "Class", "pBuffer", "pdwSize"]), # 'GetTcpTable2': SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"dwState": SimTypeInt(signed=False, label="UInt32"), "dwLocalAddr": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "dwRemoteAddr": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32"), "dwOwningPid": SimTypeInt(signed=False, label="UInt32"), "dwOffloadState": SimTypeInt(signed=False, label="TCP_CONNECTION_OFFLOAD_STATE")}, name="MIB_TCPROW2", pack=False, align=None), offset=0)}, name="MIB_TCPTABLE2", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["TcpTable", "SizePointer", "Order"]), # 'GetTcp6Table': 
SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"State": SimTypeInt(signed=False, label="MIB_TCP_STATE"), "LocalAddr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "dwLocalScopeId": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "RemoteAddr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "dwRemoteScopeId": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCP6ROW", pack=False, align=None), offset=0)}, name="MIB_TCP6TABLE", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["TcpTable", "SizePointer", "Order"]), # 'GetTcp6Table2': SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"LocalAddr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "dwLocalScopeId": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "RemoteAddr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, 
name="IN6_ADDR", pack=False, align=None), "dwRemoteScopeId": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="MIB_TCP_STATE"), "dwOwningPid": SimTypeInt(signed=False, label="UInt32"), "dwOffloadState": SimTypeInt(signed=False, label="TCP_CONNECTION_OFFLOAD_STATE")}, name="MIB_TCP6ROW2", pack=False, align=None), offset=0)}, name="MIB_TCP6TABLE2", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["TcpTable", "SizePointer", "Order"]), # 'GetPerTcpConnectionEStats': SimTypeFunction([SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwState": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="MIB_TCP_STATE")}, name="<anon>", label="None"), "dwLocalAddr": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "dwRemoteAddr": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCPROW_LH", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="TCP_ESTATS_TYPE"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Row", "EstatsType", "Rw", "RwVersion", "RwSize", "Ros", "RosVersion", "RosSize", "Rod", "RodVersion", "RodSize"]), # 'SetPerTcpConnectionEStats': SimTypeFunction([SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwState": SimTypeInt(signed=False, label="UInt32"), "State": 
SimTypeInt(signed=False, label="MIB_TCP_STATE")}, name="<anon>", label="None"), "dwLocalAddr": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "dwRemoteAddr": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCPROW_LH", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="TCP_ESTATS_TYPE"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Row", "EstatsType", "Rw", "RwVersion", "RwSize", "Offset"]), # 'GetPerTcp6ConnectionEStats': SimTypeFunction([SimTypePointer(SimStruct({"State": SimTypeInt(signed=False, label="MIB_TCP_STATE"), "LocalAddr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "dwLocalScopeId": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "RemoteAddr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "dwRemoteScopeId": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCP6ROW", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="TCP_ESTATS_TYPE"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), 
SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Row", "EstatsType", "Rw", "RwVersion", "RwSize", "Ros", "RosVersion", "RosSize", "Rod", "RodVersion", "RodSize"]), # 'SetPerTcp6ConnectionEStats': SimTypeFunction([SimTypePointer(SimStruct({"State": SimTypeInt(signed=False, label="MIB_TCP_STATE"), "LocalAddr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "dwLocalScopeId": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "RemoteAddr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "dwRemoteScopeId": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCP6ROW", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="TCP_ESTATS_TYPE"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Row", "EstatsType", "Rw", "RwVersion", "RwSize", "Offset"]), # 'GetOwnerModuleFromTcp6Entry': SimTypeFunction([SimTypePointer(SimStruct({"ucLocalAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "dwLocalScopeId": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "ucRemoteAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "dwRemoteScopeId": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": 
SimTypeInt(signed=False, label="UInt32"), "dwState": SimTypeInt(signed=False, label="UInt32"), "dwOwningPid": SimTypeInt(signed=False, label="UInt32"), "liCreateTimestamp": SimTypeBottom(label="LARGE_INTEGER"), "OwningModuleInfo": SimTypeFixedSizeArray(SimTypeLongLong(signed=False, label="UInt64"), 16)}, name="MIB_TCP6ROW_OWNER_MODULE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="TCPIP_OWNER_MODULE_INFO_CLASS"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pTcpEntry", "Class", "pBuffer", "pdwSize"]), # 'GetUdp6Table': SimTypeFunction([SimTypePointer(SimStruct({"dwNumEntries": SimTypeInt(signed=False, label="UInt32"), "table": SimTypePointer(SimStruct({"dwLocalAddr": SimStruct({"u": SimUnion({"Byte": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "Word": SimTypeFixedSizeArray(SimTypeShort(signed=False, label="UInt16"), 8)}, name="<anon>", label="None")}, name="IN6_ADDR", pack=False, align=None), "dwLocalScopeId": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32")}, name="MIB_UDP6ROW", pack=False, align=None), offset=0)}, name="MIB_UDP6TABLE", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Udp6Table", "SizePointer", "Order"]), # 'GetOwnerModuleFromUdp6Entry': SimTypeFunction([SimTypePointer(SimStruct({"ucLocalAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 16), "dwLocalScopeId": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "dwOwningPid": SimTypeInt(signed=False, label="UInt32"), "liCreateTimestamp": SimTypeBottom(label="LARGE_INTEGER"), "Anonymous": SimUnion({"Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=True, label="Int32")}, 
name="_Anonymous_e__Struct", pack=False, align=None), "dwFlags": SimTypeInt(signed=True, label="Int32")}, name="<anon>", label="None"), "OwningModuleInfo": SimTypeFixedSizeArray(SimTypeLongLong(signed=False, label="UInt64"), 16)}, name="MIB_UDP6ROW_OWNER_MODULE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="TCPIP_OWNER_MODULE_INFO_CLASS"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pUdpEntry", "Class", "pBuffer", "pdwSize"]), # 'GetOwnerModuleFromPidAndInfo': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeLongLong(signed=False, label="UInt64"), offset=0), SimTypeInt(signed=False, label="TCPIP_OWNER_MODULE_INFO_CLASS"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["ulPid", "pInfo", "Class", "pBuffer", "pdwSize"]), # 'GetIpStatistics': SimTypeFunction([SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwForwarding": SimTypeInt(signed=False, label="UInt32"), "Forwarding": SimTypeInt(signed=False, label="MIB_IPSTATS_FORWARDING")}, name="<anon>", label="None"), "dwDefaultTTL": SimTypeInt(signed=False, label="UInt32"), "dwInReceives": SimTypeInt(signed=False, label="UInt32"), "dwInHdrErrors": SimTypeInt(signed=False, label="UInt32"), "dwInAddrErrors": SimTypeInt(signed=False, label="UInt32"), "dwForwDatagrams": SimTypeInt(signed=False, label="UInt32"), "dwInUnknownProtos": SimTypeInt(signed=False, label="UInt32"), "dwInDiscards": SimTypeInt(signed=False, label="UInt32"), "dwInDelivers": SimTypeInt(signed=False, label="UInt32"), "dwOutRequests": SimTypeInt(signed=False, label="UInt32"), "dwRoutingDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutNoRoutes": SimTypeInt(signed=False, 
label="UInt32"), "dwReasmTimeout": SimTypeInt(signed=False, label="UInt32"), "dwReasmReqds": SimTypeInt(signed=False, label="UInt32"), "dwReasmOks": SimTypeInt(signed=False, label="UInt32"), "dwReasmFails": SimTypeInt(signed=False, label="UInt32"), "dwFragOks": SimTypeInt(signed=False, label="UInt32"), "dwFragFails": SimTypeInt(signed=False, label="UInt32"), "dwFragCreates": SimTypeInt(signed=False, label="UInt32"), "dwNumIf": SimTypeInt(signed=False, label="UInt32"), "dwNumAddr": SimTypeInt(signed=False, label="UInt32"), "dwNumRoutes": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IPSTATS_LH", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics"]), # 'GetIcmpStatistics': SimTypeFunction([SimTypePointer(SimStruct({"stats": SimStruct({"icmpInStats": SimStruct({"dwMsgs": SimTypeInt(signed=False, label="UInt32"), "dwErrors": SimTypeInt(signed=False, label="UInt32"), "dwDestUnreachs": SimTypeInt(signed=False, label="UInt32"), "dwTimeExcds": SimTypeInt(signed=False, label="UInt32"), "dwParmProbs": SimTypeInt(signed=False, label="UInt32"), "dwSrcQuenchs": SimTypeInt(signed=False, label="UInt32"), "dwRedirects": SimTypeInt(signed=False, label="UInt32"), "dwEchos": SimTypeInt(signed=False, label="UInt32"), "dwEchoReps": SimTypeInt(signed=False, label="UInt32"), "dwTimestamps": SimTypeInt(signed=False, label="UInt32"), "dwTimestampReps": SimTypeInt(signed=False, label="UInt32"), "dwAddrMasks": SimTypeInt(signed=False, label="UInt32"), "dwAddrMaskReps": SimTypeInt(signed=False, label="UInt32")}, name="MIBICMPSTATS", pack=False, align=None), "icmpOutStats": SimStruct({"dwMsgs": SimTypeInt(signed=False, label="UInt32"), "dwErrors": SimTypeInt(signed=False, label="UInt32"), "dwDestUnreachs": SimTypeInt(signed=False, label="UInt32"), "dwTimeExcds": SimTypeInt(signed=False, label="UInt32"), "dwParmProbs": SimTypeInt(signed=False, label="UInt32"), "dwSrcQuenchs": SimTypeInt(signed=False, label="UInt32"), "dwRedirects": 
SimTypeInt(signed=False, label="UInt32"), "dwEchos": SimTypeInt(signed=False, label="UInt32"), "dwEchoReps": SimTypeInt(signed=False, label="UInt32"), "dwTimestamps": SimTypeInt(signed=False, label="UInt32"), "dwTimestampReps": SimTypeInt(signed=False, label="UInt32"), "dwAddrMasks": SimTypeInt(signed=False, label="UInt32"), "dwAddrMaskReps": SimTypeInt(signed=False, label="UInt32")}, name="MIBICMPSTATS", pack=False, align=None)}, name="MIBICMPINFO", pack=False, align=None)}, name="MIB_ICMP", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics"]), # 'GetTcpStatistics': SimTypeFunction([SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwRtoAlgorithm": SimTypeInt(signed=False, label="UInt32"), "RtoAlgorithm": SimTypeInt(signed=False, label="TCP_RTO_ALGORITHM")}, name="<anon>", label="None"), "dwRtoMin": SimTypeInt(signed=False, label="UInt32"), "dwRtoMax": SimTypeInt(signed=False, label="UInt32"), "dwMaxConn": SimTypeInt(signed=False, label="UInt32"), "dwActiveOpens": SimTypeInt(signed=False, label="UInt32"), "dwPassiveOpens": SimTypeInt(signed=False, label="UInt32"), "dwAttemptFails": SimTypeInt(signed=False, label="UInt32"), "dwEstabResets": SimTypeInt(signed=False, label="UInt32"), "dwCurrEstab": SimTypeInt(signed=False, label="UInt32"), "dwInSegs": SimTypeInt(signed=False, label="UInt32"), "dwOutSegs": SimTypeInt(signed=False, label="UInt32"), "dwRetransSegs": SimTypeInt(signed=False, label="UInt32"), "dwInErrs": SimTypeInt(signed=False, label="UInt32"), "dwOutRsts": SimTypeInt(signed=False, label="UInt32"), "dwNumConns": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCPSTATS_LH", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics"]), # 'GetUdpStatistics': SimTypeFunction([SimTypePointer(SimStruct({"dwInDatagrams": SimTypeInt(signed=False, label="UInt32"), "dwNoPorts": SimTypeInt(signed=False, label="UInt32"), "dwInErrors": SimTypeInt(signed=False, 
label="UInt32"), "dwOutDatagrams": SimTypeInt(signed=False, label="UInt32"), "dwNumAddrs": SimTypeInt(signed=False, label="UInt32")}, name="MIB_UDPSTATS", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["Stats"]), # 'SetIpStatisticsEx': SimTypeFunction([SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwForwarding": SimTypeInt(signed=False, label="UInt32"), "Forwarding": SimTypeInt(signed=False, label="MIB_IPSTATS_FORWARDING")}, name="<anon>", label="None"), "dwDefaultTTL": SimTypeInt(signed=False, label="UInt32"), "dwInReceives": SimTypeInt(signed=False, label="UInt32"), "dwInHdrErrors": SimTypeInt(signed=False, label="UInt32"), "dwInAddrErrors": SimTypeInt(signed=False, label="UInt32"), "dwForwDatagrams": SimTypeInt(signed=False, label="UInt32"), "dwInUnknownProtos": SimTypeInt(signed=False, label="UInt32"), "dwInDiscards": SimTypeInt(signed=False, label="UInt32"), "dwInDelivers": SimTypeInt(signed=False, label="UInt32"), "dwOutRequests": SimTypeInt(signed=False, label="UInt32"), "dwRoutingDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutNoRoutes": SimTypeInt(signed=False, label="UInt32"), "dwReasmTimeout": SimTypeInt(signed=False, label="UInt32"), "dwReasmReqds": SimTypeInt(signed=False, label="UInt32"), "dwReasmOks": SimTypeInt(signed=False, label="UInt32"), "dwReasmFails": SimTypeInt(signed=False, label="UInt32"), "dwFragOks": SimTypeInt(signed=False, label="UInt32"), "dwFragFails": SimTypeInt(signed=False, label="UInt32"), "dwFragCreates": SimTypeInt(signed=False, label="UInt32"), "dwNumIf": SimTypeInt(signed=False, label="UInt32"), "dwNumAddr": SimTypeInt(signed=False, label="UInt32"), "dwNumRoutes": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IPSTATS_LH", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics", "Family"]), # 'GetIpStatisticsEx': 
SimTypeFunction([SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwForwarding": SimTypeInt(signed=False, label="UInt32"), "Forwarding": SimTypeInt(signed=False, label="MIB_IPSTATS_FORWARDING")}, name="<anon>", label="None"), "dwDefaultTTL": SimTypeInt(signed=False, label="UInt32"), "dwInReceives": SimTypeInt(signed=False, label="UInt32"), "dwInHdrErrors": SimTypeInt(signed=False, label="UInt32"), "dwInAddrErrors": SimTypeInt(signed=False, label="UInt32"), "dwForwDatagrams": SimTypeInt(signed=False, label="UInt32"), "dwInUnknownProtos": SimTypeInt(signed=False, label="UInt32"), "dwInDiscards": SimTypeInt(signed=False, label="UInt32"), "dwInDelivers": SimTypeInt(signed=False, label="UInt32"), "dwOutRequests": SimTypeInt(signed=False, label="UInt32"), "dwRoutingDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutNoRoutes": SimTypeInt(signed=False, label="UInt32"), "dwReasmTimeout": SimTypeInt(signed=False, label="UInt32"), "dwReasmReqds": SimTypeInt(signed=False, label="UInt32"), "dwReasmOks": SimTypeInt(signed=False, label="UInt32"), "dwReasmFails": SimTypeInt(signed=False, label="UInt32"), "dwFragOks": SimTypeInt(signed=False, label="UInt32"), "dwFragFails": SimTypeInt(signed=False, label="UInt32"), "dwFragCreates": SimTypeInt(signed=False, label="UInt32"), "dwNumIf": SimTypeInt(signed=False, label="UInt32"), "dwNumAddr": SimTypeInt(signed=False, label="UInt32"), "dwNumRoutes": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IPSTATS_LH", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="ADDRESS_FAMILY")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics", "Family"]), # 'GetIcmpStatisticsEx': SimTypeFunction([SimTypePointer(SimStruct({"icmpInStats": SimStruct({"dwMsgs": SimTypeInt(signed=False, label="UInt32"), "dwErrors": SimTypeInt(signed=False, label="UInt32"), "rgdwTypeCount": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 256)}, 
name="MIBICMPSTATS_EX_XPSP1", pack=False, align=None), "icmpOutStats": SimStruct({"dwMsgs": SimTypeInt(signed=False, label="UInt32"), "dwErrors": SimTypeInt(signed=False, label="UInt32"), "rgdwTypeCount": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 256)}, name="MIBICMPSTATS_EX_XPSP1", pack=False, align=None)}, name="MIB_ICMP_EX_XPSP1", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics", "Family"]), # 'GetTcpStatisticsEx': SimTypeFunction([SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwRtoAlgorithm": SimTypeInt(signed=False, label="UInt32"), "RtoAlgorithm": SimTypeInt(signed=False, label="TCP_RTO_ALGORITHM")}, name="<anon>", label="None"), "dwRtoMin": SimTypeInt(signed=False, label="UInt32"), "dwRtoMax": SimTypeInt(signed=False, label="UInt32"), "dwMaxConn": SimTypeInt(signed=False, label="UInt32"), "dwActiveOpens": SimTypeInt(signed=False, label="UInt32"), "dwPassiveOpens": SimTypeInt(signed=False, label="UInt32"), "dwAttemptFails": SimTypeInt(signed=False, label="UInt32"), "dwEstabResets": SimTypeInt(signed=False, label="UInt32"), "dwCurrEstab": SimTypeInt(signed=False, label="UInt32"), "dwInSegs": SimTypeInt(signed=False, label="UInt32"), "dwOutSegs": SimTypeInt(signed=False, label="UInt32"), "dwRetransSegs": SimTypeInt(signed=False, label="UInt32"), "dwInErrs": SimTypeInt(signed=False, label="UInt32"), "dwOutRsts": SimTypeInt(signed=False, label="UInt32"), "dwNumConns": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCPSTATS_LH", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="ADDRESS_FAMILY")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics", "Family"]), # 'GetUdpStatisticsEx': SimTypeFunction([SimTypePointer(SimStruct({"dwInDatagrams": SimTypeInt(signed=False, label="UInt32"), "dwNoPorts": SimTypeInt(signed=False, label="UInt32"), "dwInErrors": SimTypeInt(signed=False, label="UInt32"), 
"dwOutDatagrams": SimTypeInt(signed=False, label="UInt32"), "dwNumAddrs": SimTypeInt(signed=False, label="UInt32")}, name="MIB_UDPSTATS", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="ADDRESS_FAMILY")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics", "Family"]), # 'GetTcpStatisticsEx2': SimTypeFunction([SimTypePointer(SimStruct({"RtoAlgorithm": SimTypeInt(signed=False, label="TCP_RTO_ALGORITHM"), "dwRtoMin": SimTypeInt(signed=False, label="UInt32"), "dwRtoMax": SimTypeInt(signed=False, label="UInt32"), "dwMaxConn": SimTypeInt(signed=False, label="UInt32"), "dwActiveOpens": SimTypeInt(signed=False, label="UInt32"), "dwPassiveOpens": SimTypeInt(signed=False, label="UInt32"), "dwAttemptFails": SimTypeInt(signed=False, label="UInt32"), "dwEstabResets": SimTypeInt(signed=False, label="UInt32"), "dwCurrEstab": SimTypeInt(signed=False, label="UInt32"), "dw64InSegs": SimTypeLongLong(signed=False, label="UInt64"), "dw64OutSegs": SimTypeLongLong(signed=False, label="UInt64"), "dwRetransSegs": SimTypeInt(signed=False, label="UInt32"), "dwInErrs": SimTypeInt(signed=False, label="UInt32"), "dwOutRsts": SimTypeInt(signed=False, label="UInt32"), "dwNumConns": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCPSTATS2", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="ADDRESS_FAMILY")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics", "Family"]), # 'GetUdpStatisticsEx2': SimTypeFunction([SimTypePointer(SimStruct({"dw64InDatagrams": SimTypeLongLong(signed=False, label="UInt64"), "dwNoPorts": SimTypeInt(signed=False, label="UInt32"), "dwInErrors": SimTypeInt(signed=False, label="UInt32"), "dw64OutDatagrams": SimTypeLongLong(signed=False, label="UInt64"), "dwNumAddrs": SimTypeInt(signed=False, label="UInt32")}, name="MIB_UDPSTATS2", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="ADDRESS_FAMILY")], SimTypeInt(signed=False, label="UInt32"), arg_names=["Statistics", "Family"]), # 
'SetIfEntry': SimTypeFunction([SimTypePointer(SimStruct({"wszName": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 256), "dwIndex": SimTypeInt(signed=False, label="UInt32"), "dwType": SimTypeInt(signed=False, label="UInt32"), "dwMtu": SimTypeInt(signed=False, label="UInt32"), "dwSpeed": SimTypeInt(signed=False, label="UInt32"), "dwPhysAddrLen": SimTypeInt(signed=False, label="UInt32"), "bPhysAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 8), "dwAdminStatus": SimTypeInt(signed=False, label="UInt32"), "dwOperStatus": SimTypeInt(signed=False, label="INTERNAL_IF_OPER_STATUS"), "dwLastChange": SimTypeInt(signed=False, label="UInt32"), "dwInOctets": SimTypeInt(signed=False, label="UInt32"), "dwInUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwInNUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwInDiscards": SimTypeInt(signed=False, label="UInt32"), "dwInErrors": SimTypeInt(signed=False, label="UInt32"), "dwInUnknownProtos": SimTypeInt(signed=False, label="UInt32"), "dwOutOctets": SimTypeInt(signed=False, label="UInt32"), "dwOutUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwOutNUcastPkts": SimTypeInt(signed=False, label="UInt32"), "dwOutDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutErrors": SimTypeInt(signed=False, label="UInt32"), "dwOutQLen": SimTypeInt(signed=False, label="UInt32"), "dwDescrLen": SimTypeInt(signed=False, label="UInt32"), "bDescr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 256)}, name="MIB_IFROW", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pIfRow"]), # 'CreateIpForwardEntry': SimTypeFunction([SimTypePointer(SimStruct({"dwForwardDest": SimTypeInt(signed=False, label="UInt32"), "dwForwardMask": SimTypeInt(signed=False, label="UInt32"), "dwForwardPolicy": SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHop": SimTypeInt(signed=False, label="UInt32"), "dwForwardIfIndex": SimTypeInt(signed=False, label="UInt32"), "Anonymous1": 
SimUnion({"dwForwardType": SimTypeInt(signed=False, label="UInt32"), "ForwardType": SimTypeInt(signed=False, label="MIB_IPFORWARD_TYPE")}, name="<anon>", label="None"), "Anonymous2": SimUnion({"dwForwardProto": SimTypeInt(signed=False, label="UInt32"), "ForwardProto": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL")}, name="<anon>", label="None"), "dwForwardAge": SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHopAS": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric1": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric2": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric3": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric4": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric5": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IPFORWARDROW", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pRoute"]), # 'SetIpForwardEntry': SimTypeFunction([SimTypePointer(SimStruct({"dwForwardDest": SimTypeInt(signed=False, label="UInt32"), "dwForwardMask": SimTypeInt(signed=False, label="UInt32"), "dwForwardPolicy": SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHop": SimTypeInt(signed=False, label="UInt32"), "dwForwardIfIndex": SimTypeInt(signed=False, label="UInt32"), "Anonymous1": SimUnion({"dwForwardType": SimTypeInt(signed=False, label="UInt32"), "ForwardType": SimTypeInt(signed=False, label="MIB_IPFORWARD_TYPE")}, name="<anon>", label="None"), "Anonymous2": SimUnion({"dwForwardProto": SimTypeInt(signed=False, label="UInt32"), "ForwardProto": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL")}, name="<anon>", label="None"), "dwForwardAge": SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHopAS": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric1": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric2": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric3": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric4": 
SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric5": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IPFORWARDROW", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pRoute"]), # 'DeleteIpForwardEntry': SimTypeFunction([SimTypePointer(SimStruct({"dwForwardDest": SimTypeInt(signed=False, label="UInt32"), "dwForwardMask": SimTypeInt(signed=False, label="UInt32"), "dwForwardPolicy": SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHop": SimTypeInt(signed=False, label="UInt32"), "dwForwardIfIndex": SimTypeInt(signed=False, label="UInt32"), "Anonymous1": SimUnion({"dwForwardType": SimTypeInt(signed=False, label="UInt32"), "ForwardType": SimTypeInt(signed=False, label="MIB_IPFORWARD_TYPE")}, name="<anon>", label="None"), "Anonymous2": SimUnion({"dwForwardProto": SimTypeInt(signed=False, label="UInt32"), "ForwardProto": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL")}, name="<anon>", label="None"), "dwForwardAge": SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHopAS": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric1": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric2": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric3": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric4": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric5": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IPFORWARDROW", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pRoute"]), # 'SetIpStatistics': SimTypeFunction([SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwForwarding": SimTypeInt(signed=False, label="UInt32"), "Forwarding": SimTypeInt(signed=False, label="MIB_IPSTATS_FORWARDING")}, name="<anon>", label="None"), "dwDefaultTTL": SimTypeInt(signed=False, label="UInt32"), "dwInReceives": SimTypeInt(signed=False, label="UInt32"), "dwInHdrErrors": SimTypeInt(signed=False, label="UInt32"), "dwInAddrErrors": SimTypeInt(signed=False, 
label="UInt32"), "dwForwDatagrams": SimTypeInt(signed=False, label="UInt32"), "dwInUnknownProtos": SimTypeInt(signed=False, label="UInt32"), "dwInDiscards": SimTypeInt(signed=False, label="UInt32"), "dwInDelivers": SimTypeInt(signed=False, label="UInt32"), "dwOutRequests": SimTypeInt(signed=False, label="UInt32"), "dwRoutingDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutDiscards": SimTypeInt(signed=False, label="UInt32"), "dwOutNoRoutes": SimTypeInt(signed=False, label="UInt32"), "dwReasmTimeout": SimTypeInt(signed=False, label="UInt32"), "dwReasmReqds": SimTypeInt(signed=False, label="UInt32"), "dwReasmOks": SimTypeInt(signed=False, label="UInt32"), "dwReasmFails": SimTypeInt(signed=False, label="UInt32"), "dwFragOks": SimTypeInt(signed=False, label="UInt32"), "dwFragFails": SimTypeInt(signed=False, label="UInt32"), "dwFragCreates": SimTypeInt(signed=False, label="UInt32"), "dwNumIf": SimTypeInt(signed=False, label="UInt32"), "dwNumAddr": SimTypeInt(signed=False, label="UInt32"), "dwNumRoutes": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IPSTATS_LH", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pIpStats"]), # 'SetIpTTL': SimTypeFunction([SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["nTTL"]), # 'CreateIpNetEntry': SimTypeFunction([SimTypePointer(SimStruct({"dwIndex": SimTypeInt(signed=False, label="UInt32"), "dwPhysAddrLen": SimTypeInt(signed=False, label="UInt32"), "bPhysAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 8), "dwAddr": SimTypeInt(signed=False, label="UInt32"), "Anonymous": SimUnion({"dwType": SimTypeInt(signed=False, label="UInt32"), "Type": SimTypeInt(signed=False, label="MIB_IPNET_TYPE")}, name="<anon>", label="None")}, name="MIB_IPNETROW_LH", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pArpEntry"]), # 'SetIpNetEntry': SimTypeFunction([SimTypePointer(SimStruct({"dwIndex": 
SimTypeInt(signed=False, label="UInt32"), "dwPhysAddrLen": SimTypeInt(signed=False, label="UInt32"), "bPhysAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 8), "dwAddr": SimTypeInt(signed=False, label="UInt32"), "Anonymous": SimUnion({"dwType": SimTypeInt(signed=False, label="UInt32"), "Type": SimTypeInt(signed=False, label="MIB_IPNET_TYPE")}, name="<anon>", label="None")}, name="MIB_IPNETROW_LH", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pArpEntry"]), # 'DeleteIpNetEntry': SimTypeFunction([SimTypePointer(SimStruct({"dwIndex": SimTypeInt(signed=False, label="UInt32"), "dwPhysAddrLen": SimTypeInt(signed=False, label="UInt32"), "bPhysAddr": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 8), "dwAddr": SimTypeInt(signed=False, label="UInt32"), "Anonymous": SimUnion({"dwType": SimTypeInt(signed=False, label="UInt32"), "Type": SimTypeInt(signed=False, label="MIB_IPNET_TYPE")}, name="<anon>", label="None")}, name="MIB_IPNETROW_LH", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pArpEntry"]), # 'FlushIpNetTable': SimTypeFunction([SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["dwIfIndex"]), # 'CreateProxyArpEntry': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["dwAddress", "dwMask", "dwIfIndex"]), # 'DeleteProxyArpEntry': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["dwAddress", "dwMask", "dwIfIndex"]), # 'SetTcpEntry': SimTypeFunction([SimTypePointer(SimStruct({"Anonymous": SimUnion({"dwState": SimTypeInt(signed=False, label="UInt32"), "State": SimTypeInt(signed=False, label="MIB_TCP_STATE")}, name="<anon>", label="None"), 
"dwLocalAddr": SimTypeInt(signed=False, label="UInt32"), "dwLocalPort": SimTypeInt(signed=False, label="UInt32"), "dwRemoteAddr": SimTypeInt(signed=False, label="UInt32"), "dwRemotePort": SimTypeInt(signed=False, label="UInt32")}, name="MIB_TCPROW_LH", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pTcpRow"]), # 'GetInterfaceInfo': SimTypeFunction([SimTypePointer(SimStruct({"NumAdapters": SimTypeInt(signed=True, label="Int32"), "Adapter": SimTypePointer(SimStruct({"Index": SimTypeInt(signed=False, label="UInt32"), "Name": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 128)}, name="IP_ADAPTER_INDEX_MAP", pack=False, align=None), offset=0)}, name="IP_INTERFACE_INFO", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pIfTable", "dwOutBufLen"]), # 'GetUniDirectionalAdapterInfo': SimTypeFunction([SimTypePointer(SimStruct({"NumAdapters": SimTypeInt(signed=False, label="UInt32"), "Address": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="IP_UNIDIRECTIONAL_ADAPTER_ADDRESS", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pIPIfInfo", "dwOutBufLen"]), # 'NhpAllocateAndGetInterfaceInfoFromStack': SimTypeFunction([SimTypePointer(SimTypePointer(SimStruct({"Index": SimTypeInt(signed=False, label="UInt32"), "MediaType": SimTypeInt(signed=False, label="UInt32"), "ConnectionType": SimTypeChar(label="Byte"), "AccessType": SimTypeChar(label="Byte"), "DeviceGuid": SimTypeBottom(label="Guid"), "InterfaceGuid": SimTypeBottom(label="Guid")}, name="ip_interface_name_info_w2ksp1", pack=False, align=None), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32"), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", 
offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["ppTable", "pdwCount", "bOrder", "hHeap", "dwFlags"]), # 'GetBestInterface': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["dwDestAddr", "pdwBestIfIndex"]), # 'GetBestInterfaceEx': SimTypeFunction([SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pDestAddr", "pdwBestIfIndex"]), # 'GetBestRoute': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"dwForwardDest": SimTypeInt(signed=False, label="UInt32"), "dwForwardMask": SimTypeInt(signed=False, label="UInt32"), "dwForwardPolicy": SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHop": SimTypeInt(signed=False, label="UInt32"), "dwForwardIfIndex": SimTypeInt(signed=False, label="UInt32"), "Anonymous1": SimUnion({"dwForwardType": SimTypeInt(signed=False, label="UInt32"), "ForwardType": SimTypeInt(signed=False, label="MIB_IPFORWARD_TYPE")}, name="<anon>", label="None"), "Anonymous2": SimUnion({"dwForwardProto": SimTypeInt(signed=False, label="UInt32"), "ForwardProto": SimTypeInt(signed=False, label="NL_ROUTE_PROTOCOL")}, name="<anon>", label="None"), "dwForwardAge": SimTypeInt(signed=False, label="UInt32"), "dwForwardNextHopAS": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric1": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric2": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric3": SimTypeInt(signed=False, label="UInt32"), "dwForwardMetric4": SimTypeInt(signed=False, label="UInt32"), 
"dwForwardMetric5": SimTypeInt(signed=False, label="UInt32")}, name="MIB_IPFORWARDROW", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["dwDestAddr", "dwSourceAddr", "pBestRoute"]), # 'NotifyAddrChange': SimTypeFunction([SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["Handle", "overlapped"]), # 'NotifyRouteChange': SimTypeFunction([SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), 
arg_names=["Handle", "overlapped"]), # 'CancelIPChangeNotify': SimTypeFunction([SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["notifyOverlapped"]), # 'GetAdapterIndex': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["AdapterName", "IfIndex"]), # 'AddIPAddress': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["Address", "IpMask", "IfIndex", "NTEContext", "NTEInstance"]), # 'DeleteIPAddress': SimTypeFunction([SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["NTEContext"]), # 'GetNetworkParams': SimTypeFunction([SimTypePointer(SimStruct({"HostName": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 132), "DomainName": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 132), "CurrentDnsServer": SimTypePointer(SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": 
SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None), offset=0), "DnsServerList": SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None), "NodeType": SimTypeInt(signed=False, label="UInt32"), "ScopeId": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 260), "EnableRouting": SimTypeInt(signed=False, label="UInt32"), "EnableProxy": SimTypeInt(signed=False, label="UInt32"), "EnableDns": SimTypeInt(signed=False, label="UInt32")}, name="FIXED_INFO_W2KSP1", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="WIN32_ERROR"), arg_names=["pFixedInfo", "pOutBufLen"]), # 'GetAdaptersInfo': SimTypeFunction([SimTypePointer(SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_INFO"), offset=0), "ComboIndex": SimTypeInt(signed=False, label="UInt32"), "AdapterName": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 260), "Description": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 132), "AddressLength": SimTypeInt(signed=False, label="UInt32"), "Address": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 8), "Index": SimTypeInt(signed=False, label="UInt32"), "Type": SimTypeInt(signed=False, label="UInt32"), "DhcpEnabled": SimTypeInt(signed=False, label="UInt32"), "CurrentIpAddress": 
SimTypePointer(SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None), offset=0), "IpAddressList": SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None), "GatewayList": SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None), "DhcpServer": SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None), "HaveWins": SimTypeInt(signed=True, label="Int32"), 
"PrimaryWinsServer": SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None), "SecondaryWinsServer": SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None), "LeaseObtained": SimTypeLongLong(signed=True, label="Int64"), "LeaseExpires": SimTypeLongLong(signed=True, label="Int64")}, name="IP_ADAPTER_INFO", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["AdapterInfo", "SizePointer"]), # 'GetAdapterOrderMap': SimTypeFunction([], SimTypePointer(SimStruct({"NumAdapters": SimTypeInt(signed=False, label="UInt32"), "AdapterOrder": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="IP_ADAPTER_ORDER_MAP", pack=False, align=None), offset=0)), # 'GetAdaptersAddresses': SimTypeFunction([SimTypeInt(signed=False, label="ADDRESS_FAMILY"), SimTypeInt(signed=False, label="GET_ADAPTERS_ADDRESSES_FLAGS"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"Anonymous1": SimUnion({"Alignment": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"Length": SimTypeInt(signed=False, label="UInt32"), 
"IfIndex": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_ADDRESSES_LH"), offset=0), "AdapterName": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "FirstUnicastAddress": SimTypePointer(SimStruct({"Anonymous": SimUnion({"Alignment": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"Length": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_UNICAST_ADDRESS_LH"), offset=0), "Address": SimStruct({"lpSockaddr": SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), "iSockaddrLength": SimTypeInt(signed=True, label="Int32")}, name="SOCKET_ADDRESS", pack=False, align=None), "PrefixOrigin": SimTypeInt(signed=False, label="NL_PREFIX_ORIGIN"), "SuffixOrigin": SimTypeInt(signed=False, label="NL_SUFFIX_ORIGIN"), "DadState": SimTypeInt(signed=False, label="NL_DAD_STATE"), "ValidLifetime": SimTypeInt(signed=False, label="UInt32"), "PreferredLifetime": SimTypeInt(signed=False, label="UInt32"), "LeaseLifetime": SimTypeInt(signed=False, label="UInt32"), "OnLinkPrefixLength": SimTypeChar(label="Byte")}, name="IP_ADAPTER_UNICAST_ADDRESS_LH", pack=False, align=None), offset=0), "FirstAnycastAddress": SimTypePointer(SimStruct({"Anonymous": SimUnion({"Alignment": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"Length": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_ANYCAST_ADDRESS_XP"), 
offset=0), "Address": SimStruct({"lpSockaddr": SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), "iSockaddrLength": SimTypeInt(signed=True, label="Int32")}, name="SOCKET_ADDRESS", pack=False, align=None)}, name="IP_ADAPTER_ANYCAST_ADDRESS_XP", pack=False, align=None), offset=0), "FirstMulticastAddress": SimTypePointer(SimStruct({"Anonymous": SimUnion({"Alignment": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"Length": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_MULTICAST_ADDRESS_XP"), offset=0), "Address": SimStruct({"lpSockaddr": SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), "iSockaddrLength": SimTypeInt(signed=True, label="Int32")}, name="SOCKET_ADDRESS", pack=False, align=None)}, name="IP_ADAPTER_MULTICAST_ADDRESS_XP", pack=False, align=None), offset=0), "FirstDnsServerAddress": SimTypePointer(SimStruct({"Anonymous": SimUnion({"Alignment": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"Length": SimTypeInt(signed=False, label="UInt32"), "Reserved": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_DNS_SERVER_ADDRESS_XP"), offset=0), "Address": SimStruct({"lpSockaddr": SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), "iSockaddrLength": 
SimTypeInt(signed=True, label="Int32")}, name="SOCKET_ADDRESS", pack=False, align=None)}, name="IP_ADAPTER_DNS_SERVER_ADDRESS_XP", pack=False, align=None), offset=0), "DnsSuffix": SimTypePointer(SimTypeChar(label="Char"), offset=0), "Description": SimTypePointer(SimTypeChar(label="Char"), offset=0), "FriendlyName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "PhysicalAddress": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 8), "PhysicalAddressLength": SimTypeInt(signed=False, label="UInt32"), "Anonymous2": SimUnion({"Flags": SimTypeInt(signed=False, label="UInt32"), "Anonymous": SimStruct({"_bitfield": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Mtu": SimTypeInt(signed=False, label="UInt32"), "IfType": SimTypeInt(signed=False, label="UInt32"), "OperStatus": SimTypeInt(signed=False, label="IF_OPER_STATUS"), "Ipv6IfIndex": SimTypeInt(signed=False, label="UInt32"), "ZoneIndices": SimTypeFixedSizeArray(SimTypeInt(signed=False, label="UInt32"), 16), "FirstPrefix": SimTypePointer(SimStruct({"Anonymous": SimUnion({"Alignment": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"Length": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_PREFIX_XP"), offset=0), "Address": SimStruct({"lpSockaddr": SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), "iSockaddrLength": SimTypeInt(signed=True, label="Int32")}, name="SOCKET_ADDRESS", pack=False, align=None), "PrefixLength": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADAPTER_PREFIX_XP", pack=False, align=None), offset=0), "TransmitLinkSpeed": SimTypeLongLong(signed=False, 
label="UInt64"), "ReceiveLinkSpeed": SimTypeLongLong(signed=False, label="UInt64"), "FirstWinsServerAddress": SimTypePointer(SimStruct({"Anonymous": SimUnion({"Alignment": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"Length": SimTypeInt(signed=False, label="UInt32"), "Reserved": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_WINS_SERVER_ADDRESS_LH"), offset=0), "Address": SimStruct({"lpSockaddr": SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), "iSockaddrLength": SimTypeInt(signed=True, label="Int32")}, name="SOCKET_ADDRESS", pack=False, align=None)}, name="IP_ADAPTER_WINS_SERVER_ADDRESS_LH", pack=False, align=None), offset=0), "FirstGatewayAddress": SimTypePointer(SimStruct({"Anonymous": SimUnion({"Alignment": SimTypeLongLong(signed=False, label="UInt64"), "Anonymous": SimStruct({"Length": SimTypeInt(signed=False, label="UInt32"), "Reserved": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_GATEWAY_ADDRESS_LH"), offset=0), "Address": SimStruct({"lpSockaddr": SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), "iSockaddrLength": SimTypeInt(signed=True, label="Int32")}, name="SOCKET_ADDRESS", pack=False, align=None)}, name="IP_ADAPTER_GATEWAY_ADDRESS_LH", pack=False, align=None), offset=0), "Ipv4Metric": SimTypeInt(signed=False, label="UInt32"), "Ipv6Metric": SimTypeInt(signed=False, label="UInt32"), "Luid": SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), 
"Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), "Dhcpv4Server": SimStruct({"lpSockaddr": SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), "iSockaddrLength": SimTypeInt(signed=True, label="Int32")}, name="SOCKET_ADDRESS", pack=False, align=None), "CompartmentId": SimTypeInt(signed=False, label="UInt32"), "NetworkGuid": SimTypeBottom(label="Guid"), "ConnectionType": SimTypeInt(signed=False, label="NET_IF_CONNECTION_TYPE"), "TunnelType": SimTypeInt(signed=False, label="TUNNEL_TYPE"), "Dhcpv6Server": SimStruct({"lpSockaddr": SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), "iSockaddrLength": SimTypeInt(signed=True, label="Int32")}, name="SOCKET_ADDRESS", pack=False, align=None), "Dhcpv6ClientDuid": SimTypeFixedSizeArray(SimTypeChar(label="Byte"), 130), "Dhcpv6ClientDuidLength": SimTypeInt(signed=False, label="UInt32"), "Dhcpv6Iaid": SimTypeInt(signed=False, label="UInt32"), "FirstDnsSuffix": SimTypePointer(SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADAPTER_DNS_SUFFIX"), offset=0), "String": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 256)}, name="IP_ADAPTER_DNS_SUFFIX", pack=False, align=None), offset=0)}, name="IP_ADAPTER_ADDRESSES_LH", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["Family", "Flags", "Reserved", "AdapterAddresses", "SizePointer"]), # 'GetPerAdapterInfo': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"AutoconfigEnabled": SimTypeInt(signed=False, label="UInt32"), 
"AutoconfigActive": SimTypeInt(signed=False, label="UInt32"), "CurrentDnsServer": SimTypePointer(SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None), offset=0), "DnsServerList": SimStruct({"Next": SimTypePointer(SimTypeBottom(label="IP_ADDR_STRING"), offset=0), "IpAddress": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "IpMask": SimStruct({"String": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 16)}, name="IP_ADDRESS_STRING", pack=False, align=None), "Context": SimTypeInt(signed=False, label="UInt32")}, name="IP_ADDR_STRING", pack=False, align=None)}, name="IP_PER_ADAPTER_INFO_W2KSP1", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["IfIndex", "pPerAdapterInfo", "pOutBufLen"]), # 'GetInterfaceCurrentTimestampCapabilities': SimTypeFunction([SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0), SimTypePointer(SimStruct({"Version": SimTypeInt(signed=False, label="UInt32"), "HardwareClockFrequencyHz": SimTypeLongLong(signed=False, label="UInt64"), "CrossTimestamp": SimTypeChar(label="Byte"), "Reserved1": SimTypeLongLong(signed=False, label="UInt64"), "Reserved2": SimTypeLongLong(signed=False, label="UInt64"), "TimestampFlags": SimStruct({"PtpV2OverUdpIPv4EventMsgReceiveHw": 
SimTypeChar(label="Byte"), "PtpV2OverUdpIPv4AllMsgReceiveHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv4EventMsgTransmitHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv4AllMsgTransmitHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv6EventMsgReceiveHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv6AllMsgReceiveHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv6EventMsgTransmitHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv6AllMsgTransmitHw": SimTypeChar(label="Byte"), "AllReceiveHw": SimTypeChar(label="Byte"), "AllTransmitHw": SimTypeChar(label="Byte"), "TaggedTransmitHw": SimTypeChar(label="Byte"), "AllReceiveSw": SimTypeChar(label="Byte"), "AllTransmitSw": SimTypeChar(label="Byte"), "TaggedTransmitSw": SimTypeChar(label="Byte")}, name="INTERFACE_TIMESTAMP_CAPABILITY_FLAGS", pack=False, align=None)}, name="INTERFACE_TIMESTAMP_CAPABILITIES", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["InterfaceLuid", "TimestampCapabilites"]), # 'GetInterfaceHardwareTimestampCapabilities': SimTypeFunction([SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0), SimTypePointer(SimStruct({"Version": SimTypeInt(signed=False, label="UInt32"), "HardwareClockFrequencyHz": SimTypeLongLong(signed=False, label="UInt64"), "CrossTimestamp": SimTypeChar(label="Byte"), "Reserved1": SimTypeLongLong(signed=False, label="UInt64"), "Reserved2": SimTypeLongLong(signed=False, label="UInt64"), "TimestampFlags": SimStruct({"PtpV2OverUdpIPv4EventMsgReceiveHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv4AllMsgReceiveHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv4EventMsgTransmitHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv4AllMsgTransmitHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv6EventMsgReceiveHw": SimTypeChar(label="Byte"), 
"PtpV2OverUdpIPv6AllMsgReceiveHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv6EventMsgTransmitHw": SimTypeChar(label="Byte"), "PtpV2OverUdpIPv6AllMsgTransmitHw": SimTypeChar(label="Byte"), "AllReceiveHw": SimTypeChar(label="Byte"), "AllTransmitHw": SimTypeChar(label="Byte"), "TaggedTransmitHw": SimTypeChar(label="Byte"), "AllReceiveSw": SimTypeChar(label="Byte"), "AllTransmitSw": SimTypeChar(label="Byte"), "TaggedTransmitSw": SimTypeChar(label="Byte")}, name="INTERFACE_TIMESTAMP_CAPABILITY_FLAGS", pack=False, align=None)}, name="INTERFACE_TIMESTAMP_CAPABILITIES", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["InterfaceLuid", "TimestampCapabilites"]), # 'CaptureInterfaceHardwareCrossTimestamp': SimTypeFunction([SimTypePointer(SimUnion({"Value": SimTypeLongLong(signed=False, label="UInt64"), "Info": SimStruct({"_bitfield": SimTypeLongLong(signed=False, label="UInt64")}, name="_Info_e__Struct", pack=False, align=None)}, name="<anon>", label="None"), offset=0), SimTypePointer(SimStruct({"Version": SimTypeInt(signed=False, label="UInt32"), "Flags": SimTypeInt(signed=False, label="UInt32"), "SystemTimestamp1": SimTypeLongLong(signed=False, label="UInt64"), "HardwareClockTimestamp": SimTypeLongLong(signed=False, label="UInt64"), "SystemTimestamp2": SimTypeLongLong(signed=False, label="UInt64")}, name="INTERFACE_HARDWARE_CROSSTIMESTAMP", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["InterfaceLuid", "CrossTimestamp"]), # 'NotifyIfTimestampConfigChange': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeBottom(label="Void"), arg_names=["CallerContext"]), offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["CallerContext", "Callback", 
"NotificationHandle"]), # 'CancelIfTimestampConfigChange': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeBottom(label="Void"), arg_names=["NotificationHandle"]), # 'IpReleaseAddress': SimTypeFunction([SimTypePointer(SimStruct({"Index": SimTypeInt(signed=False, label="UInt32"), "Name": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 128)}, name="IP_ADAPTER_INDEX_MAP", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["AdapterInfo"]), # 'IpRenewAddress': SimTypeFunction([SimTypePointer(SimStruct({"Index": SimTypeInt(signed=False, label="UInt32"), "Name": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 128)}, name="IP_ADAPTER_INDEX_MAP", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["AdapterInfo"]), # 'SendARP': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["DestIP", "SrcIP", "pMacAddr", "PhyAddrLen"]), # 'GetRTTAndHopCount': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["DestIpAddress", "HopCount", "MaxHops", "RTT"]), # 'GetFriendlyIfIndex': SimTypeFunction([SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["IfIndex"]), # 'EnableRouter': SimTypeFunction([SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": 
SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pHandle", "pOverlapped"]), # 'UnenableRouter': SimTypeFunction([SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pOverlapped", "lpdwEnableCount"]), # 'DisableMediaSense': SimTypeFunction([SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0), SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, 
label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pHandle", "pOverLapped"]), # 'RestoreMediaSense': SimTypeFunction([SimTypePointer(SimStruct({"Internal": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "InternalHigh": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Anonymous": SimUnion({"Anonymous": SimStruct({"Offset": SimTypeInt(signed=False, label="UInt32"), "OffsetHigh": SimTypeInt(signed=False, label="UInt32")}, name="_Anonymous_e__Struct", pack=False, align=None), "Pointer": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="<anon>", label="None"), "hEvent": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)}, name="OVERLAPPED", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pOverlapped", "lpdwEnableCount"]), # 'GetIpErrorString': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["ErrorCode", "Buffer", "Size"]), # 'ResolveNeighbor': SimTypeFunction([SimTypePointer(SimStruct({"sa_family": SimTypeShort(signed=False, label="UInt16"), "sa_data": SimTypeFixedSizeArray(SimTypeBottom(label="CHAR"), 14)}, name="SOCKADDR", pack=False, align=None), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["NetworkAddress", 
"PhysicalAddress", "PhysicalAddressLength"]), # 'CreatePersistentTcpPortReservation': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypeLongLong(signed=False, label="UInt64"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["StartPort", "NumberOfPorts", "Token"]), # 'CreatePersistentUdpPortReservation': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypeLongLong(signed=False, label="UInt64"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["StartPort", "NumberOfPorts", "Token"]), # 'DeletePersistentTcpPortReservation': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypeShort(signed=False, label="UInt16")], SimTypeInt(signed=False, label="UInt32"), arg_names=["StartPort", "NumberOfPorts"]), # 'DeletePersistentUdpPortReservation': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypeShort(signed=False, label="UInt16")], SimTypeInt(signed=False, label="UInt32"), arg_names=["StartPort", "NumberOfPorts"]), # 'LookupPersistentTcpPortReservation': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypeLongLong(signed=False, label="UInt64"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["StartPort", "NumberOfPorts", "Token"]), # 'LookupPersistentUdpPortReservation': SimTypeFunction([SimTypeShort(signed=False, label="UInt16"), SimTypeShort(signed=False, label="UInt16"), SimTypePointer(SimTypeLongLong(signed=False, label="UInt64"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["StartPort", "NumberOfPorts", "Token"]), # 'PfCreateInterface': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="PFFORWARD_ACTION"), SimTypeInt(signed=False, label="PFFORWARD_ACTION"), SimTypeInt(signed=True, label="Int32"), 
SimTypeInt(signed=True, label="Int32"), SimTypePointer(SimTypePointer(SimTypeBottom(label="Void"), offset=0), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["dwName", "inAction", "outAction", "bUseLog", "bMustBeUnique", "ppInterface"]), # 'PfDeleteInterface': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInterface"]), # 'PfAddFiltersToInterface': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"dwFilterFlags": SimTypeInt(signed=False, label="UInt32"), "dwRule": SimTypeInt(signed=False, label="UInt32"), "pfatType": SimTypeInt(signed=False, label="PFADDRESSTYPE"), "SrcAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "SrcMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "dwProtocol": SimTypeInt(signed=False, label="UInt32"), "fLateBound": SimTypeInt(signed=False, label="UInt32"), "wSrcPort": SimTypeShort(signed=False, label="UInt16"), "wDstPort": SimTypeShort(signed=False, label="UInt16"), "wSrcPortHighRange": SimTypeShort(signed=False, label="UInt16"), "wDstPortHighRange": SimTypeShort(signed=False, label="UInt16")}, name="PF_FILTER_DESCRIPTOR", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"dwFilterFlags": SimTypeInt(signed=False, label="UInt32"), "dwRule": SimTypeInt(signed=False, label="UInt32"), "pfatType": SimTypeInt(signed=False, label="PFADDRESSTYPE"), "SrcAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "SrcMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "dwProtocol": SimTypeInt(signed=False, label="UInt32"), 
"fLateBound": SimTypeInt(signed=False, label="UInt32"), "wSrcPort": SimTypeShort(signed=False, label="UInt16"), "wDstPort": SimTypeShort(signed=False, label="UInt16"), "wSrcPortHighRange": SimTypeShort(signed=False, label="UInt16"), "wDstPortHighRange": SimTypeShort(signed=False, label="UInt16")}, name="PF_FILTER_DESCRIPTOR", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimTypeBottom(label="Void"), offset=0), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["ih", "cInFilters", "pfiltIn", "cOutFilters", "pfiltOut", "pfHandle"]), # 'PfRemoveFiltersFromInterface': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"dwFilterFlags": SimTypeInt(signed=False, label="UInt32"), "dwRule": SimTypeInt(signed=False, label="UInt32"), "pfatType": SimTypeInt(signed=False, label="PFADDRESSTYPE"), "SrcAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "SrcMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "dwProtocol": SimTypeInt(signed=False, label="UInt32"), "fLateBound": SimTypeInt(signed=False, label="UInt32"), "wSrcPort": SimTypeShort(signed=False, label="UInt16"), "wDstPort": SimTypeShort(signed=False, label="UInt16"), "wSrcPortHighRange": SimTypeShort(signed=False, label="UInt16"), "wDstPortHighRange": SimTypeShort(signed=False, label="UInt16")}, name="PF_FILTER_DESCRIPTOR", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"dwFilterFlags": SimTypeInt(signed=False, label="UInt32"), "dwRule": SimTypeInt(signed=False, label="UInt32"), "pfatType": SimTypeInt(signed=False, label="PFADDRESSTYPE"), "SrcAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "SrcMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstAddr": 
SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "dwProtocol": SimTypeInt(signed=False, label="UInt32"), "fLateBound": SimTypeInt(signed=False, label="UInt32"), "wSrcPort": SimTypeShort(signed=False, label="UInt16"), "wDstPort": SimTypeShort(signed=False, label="UInt16"), "wSrcPortHighRange": SimTypeShort(signed=False, label="UInt16"), "wDstPortHighRange": SimTypeShort(signed=False, label="UInt16")}, name="PF_FILTER_DESCRIPTOR", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["ih", "cInFilters", "pfiltIn", "cOutFilters", "pfiltOut"]), # 'PfRemoveFilterHandles': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypePointer(SimTypeBottom(label="Void"), offset=0), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInterface", "cFilters", "pvHandles"]), # 'PfUnBindInterface': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInterface"]), # 'PfBindInterfaceToIndex': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="PFADDRESSTYPE"), SimTypePointer(SimTypeChar(label="Byte"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInterface", "dwIndex", "pfatLinkType", "LinkIPAddress"]), # 'PfBindInterfaceToIPAddress': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="PFADDRESSTYPE"), SimTypePointer(SimTypeChar(label="Byte"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInterface", "pfatType", "IPAddress"]), # 'PfRebindFilters': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"SrcAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstAddr": 
SimTypePointer(SimTypeChar(label="Byte"), offset=0), "Mask": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="PF_LATEBIND_INFO", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInterface", "pLateBindInfo"]), # 'PfAddGlobalFilterToInterface': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="GLOBAL_FILTER")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInterface", "gfFilter"]), # 'PfRemoveGlobalFilterFromInterface': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="GLOBAL_FILTER")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInterface", "gfFilter"]), # 'PfMakeLog': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hEvent"]), # 'PfSetLogBuffer': SimTypeFunction([SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pbBuffer", "dwSize", "dwThreshold", "dwEntries", "pdwLoggedEntries", "pdwLostEntries", "pdwSizeUsed"]), # 'PfDeleteLog': SimTypeFunction([], SimTypeInt(signed=False, label="UInt32")), # 'PfGetInterfaceStatistics': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimStruct({"pvDriverContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "dwFlags": SimTypeInt(signed=False, label="UInt32"), "dwInDrops": SimTypeInt(signed=False, label="UInt32"), "dwOutDrops": SimTypeInt(signed=False, label="UInt32"), "eaInAction": SimTypeInt(signed=False, 
label="PFFORWARD_ACTION"), "eaOutAction": SimTypeInt(signed=False, label="PFFORWARD_ACTION"), "dwNumInFilters": SimTypeInt(signed=False, label="UInt32"), "dwNumOutFilters": SimTypeInt(signed=False, label="UInt32"), "dwFrag": SimTypeInt(signed=False, label="UInt32"), "dwSpoof": SimTypeInt(signed=False, label="UInt32"), "dwReserved1": SimTypeInt(signed=False, label="UInt32"), "dwReserved2": SimTypeInt(signed=False, label="UInt32"), "liSYN": SimTypeBottom(label="LARGE_INTEGER"), "liTotalLogged": SimTypeBottom(label="LARGE_INTEGER"), "dwLostLogEntries": SimTypeInt(signed=False, label="UInt32"), "FilterInfo": SimTypePointer(SimStruct({"dwNumPacketsFiltered": SimTypeInt(signed=False, label="UInt32"), "info": SimStruct({"dwFilterFlags": SimTypeInt(signed=False, label="UInt32"), "dwRule": SimTypeInt(signed=False, label="UInt32"), "pfatType": SimTypeInt(signed=False, label="PFADDRESSTYPE"), "SrcAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "SrcMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstAddr": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "DstMask": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "dwProtocol": SimTypeInt(signed=False, label="UInt32"), "fLateBound": SimTypeInt(signed=False, label="UInt32"), "wSrcPort": SimTypeShort(signed=False, label="UInt16"), "wDstPort": SimTypeShort(signed=False, label="UInt16"), "wSrcPortHighRange": SimTypeShort(signed=False, label="UInt16"), "wDstPortHighRange": SimTypeShort(signed=False, label="UInt16")}, name="PF_FILTER_DESCRIPTOR", pack=False, align=None)}, name="PF_FILTER_STATS", pack=False, align=None), offset=0)}, name="PF_INTERFACE_STATS", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInterface", "ppfStats", "pdwBufferSize", "fResetCounters"]), # 'PfTestPacket': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), 
SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="PFFORWARD_ACTION"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pInInterface", "pOutInterface", "cBytes", "pbPacket", "ppAction"]), } lib.set_prototypes(prototypes)
"""Celery application that registers periodic tasks from an INI config file.

Every non-DEFAULT section of the file describes one task:

* a section with a truthy ``crontab`` key is scheduled with
  :class:`celery.schedules.crontab`, built from whichever of the five
  crontab fields the section defines;
* otherwise, a section with a ``seconds`` key runs on a fixed interval.

The config path is taken from the ``CFG_PATH`` environment variable.
"""
import configparser
import os

from celery import Celery
from celery.schedules import crontab

from parse import custom, default

__version__ = "2.0.0"

config = configparser.RawConfigParser()
# NOTE(review): if CFG_PATH is unset, config.read(None) raises TypeError —
# presumably the deployment always sets it; confirm before hardening.
config.read(os.environ.get("CFG_PATH"))
default_config = dict(config.items("DEFAULT"))

app = Celery("tasks")
app.conf.update(default_config)

# Crontab fields copied verbatim from a section when present.
_CRONTAB_FIELDS = ("minute", "hour", "day_of_week", "day_of_month", "month_of_year")


@app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    """Register one periodic task per config section (crontab or interval)."""
    print("===== Start add tasks =====")
    for section in config.sections():
        section_config = dict(config.items(section))
        if "crontab" in section_config and section_config["crontab"]:
            print(f"Add {section} task(crontab).")
            crontab_info = {
                field: section_config[field]
                for field in _CRONTAB_FIELDS
                if field in section_config
            }
            sender.add_periodic_task(
                crontab(**crontab_info),
                switch.s(section, section_config),
                name=f'RUN {section}',
            )
        elif "seconds" in section_config:
            print(f"Add {section} task.")
            seconds = section_config.get("seconds")
            # BUG FIX: the original nested single quotes inside a
            # single-quoted f-string, a SyntaxError before Python 3.12.
            sender.add_periodic_task(
                float(seconds),
                switch.s(section, section_config),
                name=f'RUN {section} every {seconds} seconds',
            )
    print("===== End add tasks =====")


@app.task
def switch(name, config):
    """Dispatch the task to parse.custom or parse.default.

    A truthy ``custom`` key in the section config selects the ``custom``
    module; the handler is the function named after the section (lowercased).
    """
    if "custom" in config and config["custom"]:
        return getattr(custom, name.lower())(config)
    return getattr(default, name.lower())(config)
"""Celery app that wires up periodic tasks described in an INI config file."""
import os, configparser

from celery import Celery
from celery.schedules import crontab

from parse import default
from parse import custom

__version__ = "2.0.0"

config = configparser.RawConfigParser()
config.read(os.environ.get("CFG_PATH"))
default_config = dict(config.items("DEFAULT"))

app = Celery("tasks")
app.conf.update(default_config)


@app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    """Add one periodic task per config section: crontab-based when the
    section carries a truthy ``crontab`` flag, interval-based otherwise."""
    print("===== Start add tasks =====")
    for section in config.sections():
        section_config = dict(config.items(section))
        if section_config.get("crontab"):
            print(f"Add {section} task(crontab).")
            # Collect only the crontab fields the section actually defines.
            crontab_info = {
                field: section_config[field]
                for field in ("minute", "hour", "day_of_week",
                              "day_of_month", "month_of_year")
                if field in section_config
            }
            sender.add_periodic_task(
                crontab(**crontab_info),
                switch.s(section, section_config),
                name=f'RUN {section}',
            )
        elif "seconds" in section_config:
            print(f"Add {section} task.")
            sender.add_periodic_task(
                float(section_config.get("seconds")),
                switch.s(section, section_config),
                name=f'RUN {section} every {section_config.get("seconds")} seconds',
            )
    print("===== End add tasks =====")


@app.task
def switch(name, config):
    """Run the custom parser when the section asks for it, else the default."""
    wants_custom = "custom" in config and config["custom"]
    module = custom if wants_custom else default
    return getattr(module, name.lower())(config)
from typing import Iterable, Optional, Dict, Collection, Tuple, Sequence, Iterator, Any
import operator
from collections import namedtuple
from enum import Enum
from re import finditer

from pyimath.annotations import BaseField, BaseNumber, Operand
from pyimath.functions import gcd, reduce_to_gcd, power

__all__ = ['Polynomial', 'symbolic_polynomial']


class Polynomial:
    """Represents a polynomial over a finite field or over the integers

    More generally, definition of polynomials over a ring is possible but not
    really recommended.

    The internal representation of the coefficients uses a `dict` that indexes
    the coefficients by their degree. The usual definition of a polynomial as a
    sequence of numbers that are all zeroes from a certain index is used to
    initialize the polynomial on instantiation and can be retrieved through
    the `coefficients` property.

    Moreover this class implements all the operations over a ring of
    polynomials.

    `base_field` must represent an integral domain, that is:

    - a set which is an abelian group for `+` and a semi-group for `*` and
      where GCDs are computable;
    - a ring of polynomials is at least defined over an integral domain.

    Examples: Z, Z(i)
    """

    def __init__(self, coeffs: Sequence[BaseNumber], base_field: BaseField, indeterminate: Optional[str] = 'X'):
        """`coeffs` is an iterable of elements from the base field,
        `base_field` an instance of what should generally be a finite field
        and `indeterminate` is a single digit string used to format the
        polynomial.
        """
        self.base_field = base_field
        assert hasattr(base_field, 'element') and hasattr(base_field, '__call__')
        self._coefficients = self._safe_convert_coefficients(self._remove_trailing_zeros(coeffs))
        self.indeterminate = indeterminate

    def add(self, poly: 'Polynomial') -> 'Polynomial':
        """Returns the sum of two polynomials"""
        a = self._coefficients
        s = poly.copy
        for deg_a, c_a in a.items():
            if deg_a in s.internal.keys():
                s._set_term(deg_a, s[deg_a] + c_a)
            else:
                s._set_term(deg_a, c_a)
        return s

    def add_constant(self, k: BaseNumber) -> 'Polynomial':
        """External addition of a polynomial and a constant"""
        a = self._coefficients
        s = self.null
        if not self.is_null:
            for deg, c in a.items():
                if deg == 0:
                    s._set_term(deg, c + k)
                else:
                    s._set_term(deg, c)
        else:
            s._set_term(0, k)
        return s

    def check_irreducibility(self) -> bool:
        """Returns True if the polynomial is irreducible.

        Inspired by https://jeremykun.com/2014/03/13/programming-with-finite-fields/
        """
        p = self.copy
        q = p.base_field.characteristic
        if q == 0:
            raise NotImplementedError(f'Cannot check polynomial irreducibility in {self.base_field}')
        x = p.monic(1)
        term = x.copy
        for _ in range(p.degree // 2):
            term = term ** q % p
            if not (term - x).is_null:
                # A non-trivial common factor with x^(q^i) - x means p is reducible.
                if gcd(p, term - x).degree > 0:
                    return False
            else:
                # term == x: p divides x^(q^i) - x, hence p has small-degree factors.
                return False
        return True

    @property
    def coefficients(self) -> Collection:
        """Returns the coefficients as a list, from degree 0 up to `degree`"""
        return [self[deg] for deg in range(self.degree + 1)]

    @property
    def constant(self) -> BaseNumber:
        """Returns the value of the coefficient of the term of degree 0"""
        if not self.is_null:
            return self[0]
        else:
            return self.base_field.zero

    @property
    def copy(self) -> 'Polynomial':
        """Returns a copy of itself"""
        res = self.null
        for deg, c in self.internal.items():
            res._set_term(deg, c)
        return res

    @property
    def degree(self) -> int:
        """Returns the degree of the polynomial"""
        if self.is_null:
            return 0  # rigorously, it should be -infinity
        else:
            return max(self._coefficients.keys())

    def evaluate(self, value: BaseNumber) -> BaseNumber:
        """Evaluate the polynomial for some value"""
        value = self.base_field.element(value)
        # f(k) = f(x) % (x-k)
        # f(x) = q(x)(x - k) + r  with deg(r) < deg(x-k) = 1 => deg(r) = 0
        # hence f(k) = r
        r = self % self.base_field.linear_polynomial(value)
        return r.trailing

    def formal_derivative(self) -> 'Polynomial':
        """Computes and returns the formal derivative of a polynomial"""
        res = self.null
        for deg, c in self.internal.items():
            if deg > 0:
                res._set_term(deg - 1, self.base_field.ext_mul(deg, c))
        return res

    def gcd(self, p: 'Polynomial') -> 'Polynomial':
        """Returns the GCD of two polynomials"""
        return gcd(self, p)

    def frobenius_reciprocal(self) -> 'Polynomial':
        """Returns a polynomial `R` if and only if this polynomial can be
        written as `R^(p*m)` where `R` is a polynomial, `p` the field
        characteristic and `m` an integer.

        Equivalent of taking the p-th root of a polynomial over a finite
        field. Do not use this method if the base field/ring has a
        characteristic of zero.
        """
        assert self.base_field.characteristic > 0
        if self.base_field.characteristic > 0:
            # If p is a power of a multiple of the field characteristic the
            # function returns the base polynomial, that is: if p == x^q where
            # q % char == 0, it returns x^(q/char).
            assert hasattr(self.base_field, 'frobenius_reciprocal')
            p_th_root_func = self.base_field.frobenius_reciprocal
            if not self.formal_derivative().is_null:
                raise ValueError(f'The polynomial p is not a {self.base_field.characteristic} power')
            else:
                res = self.null
                for deg in range(0, self.degree + 1):
                    if deg == 0:
                        res += p_th_root_func(self.constant)
                    else:
                        if deg % self.base_field.characteristic == 0:
                            term = self.monic(deg // self.base_field.characteristic)
                            term *= p_th_root_func(self[deg])
                            res += term
                        else:
                            assert self[deg] == 0
                return res
        else:
            # Only reachable when asserts are stripped (python -O).
            raise RuntimeError(f'{self.base_field} does not support taking the p-th root of a polynomial')

    @property
    def internal(self) -> Dict[int, BaseNumber]:
        """Returns the coefficients as a `dict` indexed by their degree.
        Eschews null terms"""
        return dict(self._coefficients)

    @property
    def is_abs_unit(self) -> bool:
        """Returns `True` if the polynomial is a constant of constant term 1 or -1"""
        return self in (self.monic(0), -self.monic(0))

    @property
    def is_constant(self) -> bool:
        """Returns `True` if the polynomial is of degree zero and is not null"""
        return self.degree == 0 and not self.is_null

    @property
    def is_irreducible(self) -> bool:
        """Returns `True` if the polynomial is irreducible"""
        return self.check_irreducibility()

    @property
    def is_monic(self) -> bool:
        """Returns `True` if the leading coefficient is one"""
        return self._coefficients[self.degree] == self.base_field.one

    @property
    def is_null(self) -> bool:
        """Returns `True` if all coefficients are zero"""
        return len(self._coefficients.keys()) == 0

    @property
    def is_unit(self) -> bool:
        """Returns `True` if the polynomial is a constant of constant term 1"""
        return self == self.unit

    @property
    def leading(self) -> BaseNumber:
        """Returns the value of the coefficient of the term of highest degree"""
        if not self.is_null:
            return self._coefficients[self.degree]
        else:
            return self.base_field.zero

    def long_division(self, divisor: 'Polynomial') -> Tuple['Polynomial', 'Polynomial']:
        """Defines the long division according to decreasing degrees.
        Be careful if the coefficients are from a ring"""
        quotient = self.null
        remainder = self.copy
        while remainder != self.null and remainder.degree >= divisor.degree:
            deg = remainder.degree - divisor.degree
            if remainder.leading % divisor.leading == self.base_field.zero:
                c = remainder.leading // divisor.leading
            else:
                raise ValueError(f'{remainder.leading} is not divisible by {divisor.leading}')
            if deg == 0:
                poly = self.unit.mul_constant(c)
            else:
                poly = self.monic(deg).mul_constant(c)
            quotient += poly
            remainder -= (divisor * poly)
        return quotient, remainder

    def long_division_reversed(self, divisor: 'Polynomial') -> Tuple['Polynomial', 'Polynomial']:
        """Defines the long division according to increasing degrees"""
        quotient = self.null
        remainder = self
        # NOTE(review): the loop bound compares remainder.valuation against
        # divisor.degree (not divisor.valuation) — looks intentional but
        # worth confirming against the algorithm's reference.
        while remainder != self.null and remainder.valuation <= divisor.degree:
            deg = remainder.valuation - divisor.valuation
            if remainder.trailing % divisor.trailing == 0:
                c = self.base_field(remainder.trailing // divisor.trailing)
            else:
                raise ValueError(f'{remainder.trailing} is not divisible by {divisor.trailing}')
            if deg == 0:
                poly = self.unit.mul_constant(c)
            else:
                poly = self.monic(deg).mul_constant(c)
            quotient += poly
            remainder -= (divisor * poly)
        return quotient, remainder

    def make_monic(self) -> 'Polynomial':
        """Attempts to divide the polynomial by its leading coefficient (for
        a field) or by the gcd of its coefficients (for a ring).

        Returns a monic polynomial or raises an error if the polynomial
        cannot be made monic.
        """
        if not self.is_monic:
            if self.base_field.characteristic != 0:
                return self / self.leading
            else:
                g = reduce_to_gcd(iter(self.coefficients))
                if self.leading // g == self.base_field.one:
                    return self // self.leading
                else:
                    raise ValueError(f'Polynomial {self} over {self.base_field} cannot be made monic')
        else:
            return self.copy

    def monic(self, degree: int = 1) -> 'Polynomial':
        """Returns a monic polynomial with a single term of a given degree"""
        res = self.null
        res._set_term(degree, self.base_field.one)
        return res

    def mul(self, poly: 'Polynomial') -> 'Polynomial':
        """Multiplication in a ring of polynomials"""
        if poly.is_null or self.is_null:
            return self.null
        res = self.null
        for deg_p, c_p in poly.internal.items():
            a = self._coefficients
            for deg_a, c_a in a.items():
                deg = deg_a + deg_p
                if deg in res._coefficients.keys():
                    res._set_term(deg, res[deg] + c_p * c_a)
                else:
                    res._set_term(deg, c_p * c_a)
        return res

    def mul_constant(self, k: BaseNumber) -> 'Polynomial':
        """External multiplication (vector space external product) of a
        polynomial and a constant"""
        s = self.null
        if k != self.base_field.zero:
            for deg, c in self._coefficients.items():
                s._set_term(deg, k * c)
        return s

    @property
    def null(self) -> 'Polynomial':
        """Returns the null polynomial"""
        return Polynomial([], base_field=self.base_field, indeterminate=self.indeterminate)

    @staticmethod
    def parse(expr: str, base_field: BaseField, indeterminate: Optional[str] = 'X') -> 'Polynomial':
        """Returns a polynomial from its algebraic expression"""
        return symbolic_polynomial(expr, base_field, indeterminate=indeterminate)

    def pow(self, n: int) -> 'Polynomial':
        """Exponentiation of a polynomial"""
        assert n >= 0
        if self.is_unit:
            return self.unit
        if n == 0:
            return self.unit
        if n == 1:
            return self.copy
        if self.is_null:
            return self.null
        return power(self.copy, n)

    def sub(self, poly: 'Polynomial') -> 'Polynomial':
        """Returns the difference between two polynomials"""
        assert isinstance(poly, Polynomial)
        # Subtraction is indeed just an addition of an inverse
        return self.add(-poly)

    @property
    def trailing(self) -> BaseNumber:
        """Returns the value of the coefficient of the term of lowest degree"""
        if self.is_null:
            return self.base_field.zero
        else:
            return self._coefficients[self.valuation]

    @property
    def unit(self) -> 'Polynomial':
        """Returns 1 as a polynomial of degree 0"""
        return self.monic(0)

    @property
    def valuation(self) -> int:
        """Returns the degree of the term of lowest degree"""
        if self.is_null:
            raise ValueError('The valuation of the null polynomial is undefined')
        return min(self._coefficients.keys())

    def __eq__(self, other: Operand) -> bool:
        """Term-wise comparison of two polynomials"""
        if isinstance(other, Polynomial):
            if self.degree == other.degree:
                if self.is_null or other.is_null:
                    return self.is_null and other.is_null
                a = self.coefficients
                b = other.coefficients
                assert len(a) == len(b)
                return a == b
            else:
                return False
        else:
            # Promote the operand to a constant polynomial, then compare.
            other = self(other)
            return self == other

    def __getitem__(self, degree: int) -> BaseNumber:
        """Returns the coefficient of the term of a given degree"""
        if degree in self._coefficients.keys():
            return self._coefficients[degree]
        else:
            return self.base_field.zero

    def __len__(self) -> int:
        """Returns the number of non zero terms"""
        return len([c for c in self._coefficients.values() if c != 0])

    def __add__(self, other: Operand) -> 'Polynomial':
        if isinstance(other, Polynomial):
            return self.add(other)
        elif isinstance(other, list):
            return self.add(Polynomial(other, base_field=self.base_field))
        else:
            return self.add_constant(other)

    def __radd__(self, other: Operand) -> 'Polynomial':
        return self.__add__(other)

    def __neg__(self) -> 'Polynomial':
        """Returns the inverse of a polynomial with respect to addition"""
        a = self._coefficients
        s = self.null
        for deg, c in a.items():
            s._set_term(deg, -c)
        return s

    def __mul__(self, other: Operand) -> 'Polynomial':
        if isinstance(other, Polynomial):
            return self.mul(other)
        elif isinstance(other, list):
            return self.mul(Polynomial(other, base_field=self.base_field))
        else:
            return self.mul_constant(other)

    def __rmul__(self, other: Operand) -> 'Polynomial':
        return self.__mul__(other)

    def __sub__(self, other: Operand) -> 'Polynomial':
        if isinstance(other, (int, float,)):
            return self.add_constant(-other)
        elif isinstance(other, list):
            return self.sub(Polynomial(other, base_field=self.base_field))
        else:
            return self.sub(other)

    def __pow__(self, n: int, modulo: 'Polynomial' = None) -> 'Polynomial':
        return self.pow(n)

    def __hash__(self) -> int:
        """Allows a polynomial to become a dictionary key"""
        return hash(tuple(self._coefficients.items()))

    def __truediv__(self, other: Operand) -> 'Polynomial':
        return self.__floordiv__(other)

    def __floordiv__(self, other: Operand) -> 'Polynomial':
        if isinstance(other, self.__class__):
            return self.long_division(other)[0]
        else:
            other = self.base_field.element(other)
            return self.mul_constant(self.base_field.one / other)

    def __divmod__(self, other: Operand) -> Tuple['Polynomial', 'Polynomial']:
        return self.long_division(other)

    def __mod__(self, other: Operand) -> 'Polynomial':
        return self.long_division(other)[1]

    def __repr__(self) -> str:
        # BUG FIX: the original embedded ', ' with single quotes inside a
        # single-quoted f-string, a SyntaxError before Python 3.12 (PEP 701).
        s = f'{repr(self.base_field)}.polynomial('
        s += ', '.join([repr(c) for c in self.coefficients])
        s += f', indeterminate="{self.indeterminate}")'
        return s

    def __str__(self) -> str:
        s = ''
        if self == self.null:
            return '0'
        for deg in sorted(self._coefficients.keys()):
            c = self[deg]
            if c != self.base_field.zero:
                if deg == 0:
                    s += self._format_coefficient(c, raw=True)
                else:
                    s += self._format_coefficient(c, len(s) > 0)
                    s += f'{self.indeterminate}'
                    if deg > 1:
                        s += f'^{deg}'
        return s

    def __call__(self, *args) -> 'Polynomial':
        """Syntactic sugar to create a polynomial from another one.

        Example: p = poly(1, 2, 1) -> p == 1 + 2X + X^2
        """
        return Polynomial(list(args), base_field=self.base_field, indeterminate=self.indeterminate)

    def __invert__(self) -> 'Polynomial':
        """Return the Frobenius reciprocal with the operator ~"""
        return self.frobenius_reciprocal()

    # Gory details (as usual)

    def _format_coefficient(self, c: BaseNumber, display_plus_sign: bool = False, raw: bool = False) -> str:
        """Format one coefficient, handling sign placement and the implicit
        1/-1 of monic terms; `raw` bypasses sign/one elision entirely."""
        sf = ''
        if isinstance(c, int):
            if raw:
                sf = str(c)
            else:
                if display_plus_sign:
                    if c < 0:
                        sf += ' - '
                    else:
                        sf += ' + '
                    if abs(c) != 1:
                        sf += f'{abs(c)}'
                else:
                    if c < 0:
                        sf += '-'
                    if abs(c) != 1:
                        sf += f'{abs(c)}'
        else:
            # Rely on the override of __format__
            sc = format(c, 'short')
            if raw:
                sf = sc
            else:
                if display_plus_sign:
                    if sc[0] == '-':
                        sf += ' - '
                    else:
                        sf += ' + '
                    # abs may not be defined but neg must be
                    if c != self.base_field.one and c != self.base_field.one.__neg__():
                        if sc[0] == '-':
                            sf += sc[1:]
                        else:
                            sf += sc
                else:
                    if sc[0] == '-':
                        sf += '-'
                    if c != self.base_field.one and c != self.base_field.one.__neg__():
                        if sc[0] == '-':
                            sf += sc[1:]
                        else:
                            sf += sc
        return sf

    def _remove_trailing_zeros(self, seq: Sequence) -> Collection:
        """Strip high-degree zero coefficients from a coefficient sequence."""
        if len(seq) == 0:
            return []
        revseq = list(reversed(seq))
        while revseq[0] == self.base_field.zero:
            revseq = revseq[1:]
            if len(revseq) == 0:
                return []
        return list(reversed(revseq))

    def _safe_convert_coefficients(self, seq: Iterable) -> Dict[int, BaseNumber]:
        """Coerce each non-zero coefficient into the base field, keyed by degree."""
        bf = self.base_field
        return {deg: bf.element(c) for deg, c in enumerate(seq) if c != bf.zero}

    def _set_term(self, deg: int, c: BaseNumber):
        """Set (or, for a zero value, delete) the coefficient of degree `deg`."""
        if c == self.base_field.zero:
            if deg in self._coefficients.keys():
                del self._coefficients[deg]
        else:
            self._coefficients[deg] = c


def symbolic_polynomial(expression: str, base_field: BaseField, indeterminate: Optional[str] = 'X'):
    """Returns a polynomial from its algebraic expression where:

    * `expression` is an algebraic expression in the `indeterminate`,
    * `base_field` is the field (or the ring) that coefficients are to be
      drawn from,
    * and `indeterminate` is a single digit string in the range [a-zA-Z],
      usually `'X'`.

    Returns an instance of `Polynomial`
    """
    return PolynomialParser.parse(expression, base_field, indeterminate=indeterminate)


# Lexer and parser code follows; should not be exported.


class Lexer:
    """Tokenizer for algebraic polynomial expressions."""

    INDETERMINATE = 'INDETERMINATE'
    INTEGER = 'INTEGER'
    OPERATOR = 'OPERATOR'
    EXPONENT = 'EXPONENT'
    SUBEXPR = 'SUBEXPR'
    EOF = 'EOF'
    IGNORE = 'IGNORE'
    MISMATCH = 'MISMATCH'

    class Token(namedtuple('Token', 'type, value, position')):
        __slots__ = ()

    def __init__(self, indeterminate: Optional[str] = 'X', root_symbol: Optional[str] = 'j'):
        self.indeterminate = indeterminate
        self.root_symbol = root_symbol
        self.symbols = [
            (Lexer.INDETERMINATE, r'[%s]' % self.indeterminate),
            (Lexer.INTEGER, r'[1-9][0-9]*'),
            (Lexer.OPERATOR, r'[+-]'),
            (Lexer.EXPONENT, r'[\^]'),
            (Lexer.SUBEXPR, r'\([^)]+\)'),
            (Lexer.EOF, r'$'),
            (Lexer.IGNORE, r'\s'),    # must stay before the last item
            (Lexer.MISMATCH, r'.'),   # must stay the last item
        ]
        self.tokens_re = '|'.join([f'(?P<{tok}>{re})' for tok, re in self.symbols])

    def lex(self, expression: str) -> Iterator['Lexer.Token']:
        """Yield tokens for `expression`; sub-expressions are re-lexed lazily."""
        for tok in finditer(self.tokens_re, expression):
            token = Lexer.Token(tok.lastgroup, tok.group(), tok.start())
            if token.type == Lexer.IGNORE:
                continue
            elif token.type == Lexer.SUBEXPR:
                # Remove left and right parentheses, then lex the interior
                # with the root symbol as the indeterminate.
                lexer = Lexer(indeterminate=self.root_symbol, root_symbol='')
                yield Lexer.Token(token.type, lexer.lex(token.value[1:-1]), token.position)
            elif token.type == Lexer.INTEGER:
                yield Lexer.Token(token.type, int(token.value), token.position)
            elif token.type == Lexer.OPERATOR:
                if token.value == '+':
                    yield Lexer.Token(token.type, operator.add, token.position)
                elif token.value == '-':
                    yield Lexer.Token(token.type, operator.sub, token.position)
            else:
                yield token


class ParsingContext:
    """Accumulator stack driven by the parser's transition actions."""

    def __init__(self, base_field: BaseField, indeterminate: str):
        self.base_field = base_field
        self.indeterminate = indeterminate
        self._stack = []
        # Seed with the zero polynomial so reductions always have a target.
        self._stack.append(self.base_field.polynomial(self.base_field.zero, indeterminate=self.indeterminate))

    def accumulate_neutral(self, *_):
        self._stack.append(self.base_field.neutral)

    def accumulate_token(self, tok: Lexer.Token, *_):
        self._stack.append(tok.value)

    def accumulate_element(self, tok: Lexer.Token, *_):
        self._stack.append(self.base_field(tok.value))

    def accumulate_literal(self, _, v: Any):
        self._stack.append(v)

    def accumulate_subexpression(self, tok: Lexer.Token, *_):
        def convert_subexpr(subexpr):
            # Parse the sub-expression over the prime field, then lift the
            # result back into an element of the extension field.
            ctx = ParsingContext(self.base_field.prime_field, indeterminate=self.base_field.root_symbol)
            return self.base_field.element_from_polynomial(PolynomialParser.start(subexpr, ctx))
        self._stack.append(convert_subexpr(tok.value))

    def reduce(self, *_):
        """Fold the top four stack entries (result, op, coefficient, degree)
        into a new partial result."""
        try:
            (result, op, coefficient, degree), self._stack = self._stack[-4:], self._stack[:-4]
            self._stack.append(op(result, result.monic(degree).mul_constant(coefficient)))
        except BaseException as e:
            # Chain the cause so parse failures remain diagnosable.
            raise RuntimeError(e) from e

    def get_result(self) -> 'Polynomial':
        return self._stack.pop()


class PolynomialParser:
    """Table-driven finite-state parser for polynomial expressions."""

    @staticmethod
    def parse(expression: str, base_field: BaseField, indeterminate: Optional[str] = 'X'):
        """Main parsing utility"""
        if hasattr(base_field, 'root_symbol'):
            lexer = Lexer(indeterminate=indeterminate, root_symbol=base_field.root_symbol)
        else:
            lexer = Lexer(indeterminate=indeterminate, root_symbol='')
        ctx = ParsingContext(base_field, indeterminate)
        return PolynomialParser.start(lexer.lex(expression), ctx)

    @staticmethod
    def start(lexer: Iterator, context: ParsingContext) -> Polynomial:
        """Run the state machine over the token stream and return the result."""
        def format_syntax_error(s, t):
            return f'Syntax error at {t.position}: unexpected {t} in state {s}'

        state = PolynomialParser.States.starting
        for token in lexer:
            if (state, token.type) in PolynomialParser.transitions:
                transition = PolynomialParser.transitions[(state, token.type)]
                for op, args in transition.actions:
                    op(context, token, *args)
                state = transition.next_state
            else:
                raise SyntaxError(format_syntax_error(state, token))
        assert state == PolynomialParser.States.complete
        return context.get_result()

    # Internals

    Transition = namedtuple('Transition', 'actions next_state')

    class States(Enum):
        starting = 0
        coefficient = 1
        indeterminate = 2
        exponent = 3
        sub_expression = 4
        operator = 5
        complete = 6
        init = 7

    transitions = {
        (States.starting, Lexer.INTEGER): Transition([
            (ParsingContext.accumulate_literal, (operator.add,)),
            (ParsingContext.accumulate_element, (),)
        ], States.coefficient),

        (States.starting, Lexer.SUBEXPR): Transition([
            (ParsingContext.accumulate_literal, (operator.add,)),
            (ParsingContext.accumulate_subexpression, ())
        ], States.coefficient),

        (States.starting, Lexer.OPERATOR): Transition([
            (ParsingContext.accumulate_token, ())
        ], States.operator),

        (States.starting, Lexer.INDETERMINATE): Transition([
            (ParsingContext.accumulate_literal, (operator.add,)),
            (ParsingContext.accumulate_neutral, (),)
        ], States.indeterminate),

        (States.starting, Lexer.EOF): Transition([], States.complete),

        (States.coefficient, Lexer.INDETERMINATE): Transition([], States.indeterminate),

        (States.coefficient, Lexer.OPERATOR): Transition([
            (ParsingContext.accumulate_literal, (0,)),
            (ParsingContext.reduce, ()),
            (ParsingContext.accumulate_token, ()),
        ], States.operator),

        (States.coefficient, Lexer.EOF): Transition([
            (ParsingContext.accumulate_literal, (0,)),
            (ParsingContext.reduce, ())
        ], States.complete),

        (States.operator, Lexer.INTEGER): Transition([
            (ParsingContext.accumulate_element, ())
        ], States.coefficient),

        (States.operator, Lexer.SUBEXPR): Transition([
            (ParsingContext.accumulate_subexpression, ())
        ], States.coefficient),

        (States.operator, Lexer.INDETERMINATE): Transition([
            (ParsingContext.accumulate_neutral, ())
        ], States.indeterminate),

        (States.indeterminate, Lexer.EXPONENT): Transition([], States.exponent),

        (States.indeterminate, Lexer.OPERATOR): Transition([
            (ParsingContext.accumulate_literal, (1,)),
            (ParsingContext.reduce, ()),
            (ParsingContext.accumulate_token, ())
        ], States.operator),

        (States.indeterminate, Lexer.EOF): Transition([
            (ParsingContext.accumulate_literal, (1,)),
            (ParsingContext.reduce, ())
        ], States.complete),

        (States.exponent, Lexer.INTEGER): Transition([
            (ParsingContext.accumulate_token, ()),
            (ParsingContext.reduce, ())
        ], States.init),

        (States.init, Lexer.OPERATOR): Transition([
            (ParsingContext.accumulate_token, ())
        ], States.operator),

        (States.init, Lexer.EOF): Transition([], States.complete)
    }
from typing import Iterable, Optional, Dict, Collection, Tuple, Sequence, Iterator, Any
import operator
from collections import namedtuple
from enum import Enum
from re import finditer

from pyimath.annotations import BaseField, BaseNumber, Operand
from pyimath.functions import gcd, reduce_to_gcd, power

__all__ = ['Polynomial', 'symbolic_polynomial']


class Polynomial:
    """Represents a polynomial over a finite field or over the integers.

    More generally, definition of polynomials over a ring is possible but not
    really recommended.

    The internal representation of the coefficients uses a `dict` that indexes
    the coefficients by their degree. The usual definition of a polynomial as a
    sequence of numbers that are all zeroes from a certain index is used to
    initialize the polynomial on instantiation and can be retrieved through the
    `coefficients` property.

    Moreover this class implements all the operations over a ring of
    polynomials.

    `base_field` must represent an integral domain, that is:

    - a set which is an abelian group for `+` and a semi-group for `*` and
      where `GCD` are computable.
    - a ring of polynomials is at least defined over an integral domain

    examples: Z, Z(i)
    """

    def __init__(self, coeffs: Sequence[BaseNumber], base_field: BaseField, indeterminate: Optional[str] = 'X'):
        """`coeffs` is an iterable of elements from the base field,
        `base_field` an instance of what should generally be a finite field
        and `indeterminate` is a single-character string used to format the
        polynomial.
        """
        self.base_field = base_field
        # Minimal duck-typing contract for a base field: an `element`
        # conversion method and callability (field(value) -> element).
        assert hasattr(base_field, 'element') and hasattr(base_field, '__call__')
        # Null coefficients are stripped; storage is {degree: coefficient}.
        self._coefficients = self._safe_convert_coefficients(self._remove_trailing_zeros(coeffs))
        self.indeterminate = indeterminate

    def add(self, poly: 'Polynomial') -> 'Polynomial':
        """Returns the sum of two polynomials"""
        a = self._coefficients
        s = poly.copy
        for deg_a, c_a in a.items():
            if deg_a in s.internal.keys():
                s._set_term(deg_a, s[deg_a] + c_a)
            else:
                s._set_term(deg_a, c_a)
        return s

    def add_constant(self, k: BaseNumber) -> 'Polynomial':
        """External addition of a polynomial (adds `k` to the degree-0 term)"""
        a = self._coefficients
        s = self.null
        if not self.is_null:
            for deg, c in a.items():
                if deg == 0:
                    s._set_term(deg, c + k)
                else:
                    s._set_term(deg, c)
        else:
            s._set_term(0, k)
        return s

    def check_irreducibility(self) -> bool:
        """Returns True if the polynomial is irreducible.

        Inspired by https://jeremykun.com/2014/03/13/programming-with-finite-fields/
        (Rabin-style test: only defined over fields of non-zero characteristic).
        """
        p = self.copy
        q = p.base_field.characteristic
        if q == 0:
            raise NotImplementedError(f'Cannot check polynomial irreducibility in {self.base_field}')
        x = p.monic(1)
        term = x.copy
        for _ in range(p.degree // 2):
            # Iterate the Frobenius map: term <- term^q mod p
            term = term ** q % p
            if not (term - x).is_null:
                # A non-trivial common factor with x^(q^i) - x proves reducibility
                if gcd(p, term - x).degree > 0:
                    return False
            else:
                return False
        return True

    @property
    def coefficients(self) -> Collection:
        """Returns the coefficients as a list (index == degree, zeros included)"""
        deg = 0
        res = []
        while deg <= self.degree:
            res.append(self[deg])
            deg += 1
        return res

    @property
    def constant(self) -> BaseNumber:
        """Returns the value of the coefficient of the term of degree 0"""
        if not self.is_null:
            return self[0]
        else:
            return self.base_field.zero

    @property
    def copy(self) -> 'Polynomial':
        """Returns a copy of itself"""
        res = self.null
        for deg, c in self.internal.items():
            res._set_term(deg, c)
        return res

    @property
    def degree(self) -> int:
        """Returns the degree of the polynomial"""
        if self.is_null:
            return 0  # rigorously, it should be -infinity
        else:
            return max(self._coefficients.keys())

    def evaluate(self, value: BaseNumber) -> BaseNumber:
        """Evaluate the polynomial for some value"""
        value = self.base_field.element(value)
        # f(k) = f(x) % (x-k)
        # f(x) = q(x)(x - k) + r (deg(r) < d(x-k) = 1 => deg(r)=0
        # f(k) = r
        r = self % self.base_field.linear_polynomial(value)
        return r.trailing

    def formal_derivative(self) -> 'Polynomial':
        """Computes and returns the formal derivative of a polynomial"""
        res = self.null
        for deg, c in self.internal.items():
            if deg > 0:
                # d/dX (c * X^deg) = (deg * c) * X^(deg-1), using the field's
                # external multiplication by an integer
                res._set_term(deg - 1, self.base_field.ext_mul(deg, c))
        return res

    def gcd(self, p: 'Polynomial') -> 'Polynomial':
        """Returns the GCD of two polynomials"""
        return gcd(self, p)

    def frobenius_reciprocal(self) -> 'Polynomial':
        """Returns a polynomial `R` if and only if this polynomial can be
        written as `R^(p*m)` where `R` is a polynomial, `p` the field
        characteristic and `m` an integer.

        Equivalent of taking the p-th root of a polynomial over a finite field.
        Do not use this method if the base field/ring has a characteristic of
        zero.
        """
        assert self.base_field.characteristic > 0
        if self.base_field.characteristic > 0:
            """if p is a power of a multiple of the field characteristic
            the function returns the base polynomial that is:
            if p == x^q where q % self.c == 0 it returns x^(q/self.c)"""
            assert hasattr(self.base_field, 'frobenius_reciprocal')
            p_th_root_func = self.base_field.frobenius_reciprocal
            # Over characteristic p, f is a p-th power iff f' == 0
            if not self.formal_derivative().is_null:
                raise ValueError(f'The polynomial p is not a {self.base_field.characteristic} power')
            else:
                res = self.null
                for deg in range(0, self.degree + 1):
                    if deg == 0:
                        res += p_th_root_func(self.constant)
                    else:
                        if deg % self.base_field.characteristic == 0:
                            # X^(k*p) maps to X^k; coefficients take their own p-th root
                            term = self.monic(deg // self.base_field.characteristic)
                            term *= p_th_root_func(self[deg])
                            res += term
                        else:
                            assert self[deg] == 0
                return res
        else:
            raise RuntimeError(f'{self.base_field} does not support taking the p-th root of a polynomial')

    @property
    def internal(self) -> Dict[int, BaseNumber]:
        """Returns the coefficients as a `dict` indexed by their degree.
        Eschews null terms"""
        return dict({deg: c for deg, c in self._coefficients.items()})

    @property
    def is_abs_unit(self) -> bool:
        """Returns `True` if the polynomial is a constant of constant term 1 or -1"""
        return self in (self.monic(0), -self.monic(0))

    @property
    def is_constant(self) -> bool:
        """Returns `True` if the polynomial is of degree zero and is not null"""
        return self.degree == 0 and not self.is_null

    @property
    def is_irreducible(self) -> bool:
        """Returns `True` if the polynomial is irreducible"""
        return self.check_irreducibility()

    @property
    def is_monic(self) -> bool:
        """Returns `True` if the leading coefficient is one"""
        return self._coefficients[self.degree] == self.base_field.one

    @property
    def is_null(self) -> bool:
        """Returns `True` if all coefficients are zero"""
        return len(self._coefficients.keys()) == 0

    @property
    def is_unit(self) -> bool:
        """Returns `True` if the polynomial is a constant of constant term 1"""
        return self == self.unit

    @property
    def leading(self) -> BaseNumber:
        """Returns the value of the coefficient of the term of highest degree"""
        if not self.is_null:
            return self._coefficients[self.degree]
        else:
            return self.base_field.zero

    def long_division(self, divisor: 'Polynomial') -> Tuple['Polynomial', 'Polynomial']:
        """Defines the long division according to decreasing degrees.
        Be careful if the coefficients are from a ring (coefficient division
        may not be exact, in which case a ValueError is raised).
        Returns a (quotient, remainder) tuple."""
        quotient = self.null
        remainder = self.copy
        while remainder != self.null and remainder.degree >= divisor.degree:
            deg = remainder.degree - divisor.degree
            if remainder.leading % divisor.leading == self.base_field.zero:
                c = remainder.leading // divisor.leading
            else:
                raise ValueError(f'{remainder.leading} is not divisible by {divisor.leading}')
            if deg == 0:
                poly = self.unit.mul_constant(c)
            else:
                poly = self.monic(deg).mul_constant(c)
            quotient += poly
            remainder -= (divisor * poly)
        return quotient, remainder

    def long_division_reversed(self, divisor: 'Polynomial') -> Tuple['Polynomial', 'Polynomial']:
        """Defines the long division according to increasing degrees.
        Returns a (quotient, remainder) tuple."""
        quotient = self.null
        remainder = self
        while remainder != self.null and remainder.valuation <= divisor.degree:
            deg = remainder.valuation - divisor.valuation
            if remainder.trailing % divisor.trailing == 0:
                c = self.base_field(remainder.trailing // divisor.trailing)
            else:
                raise ValueError(f'{remainder.trailing} is not divisible by {divisor.trailing}')
            if deg == 0:
                poly = self.unit.mul_constant(c)
            else:
                poly = self.monic(deg).mul_constant(c)
            quotient += poly
            remainder -= (divisor * poly)
        return quotient, remainder

    def make_monic(self) -> 'Polynomial':
        """
        Attempts to divide the polynomial by its leading coefficient (for a field)
        or by the gcd of its coefficients (for a ring)

        Returns a monic polynomial or raises an error if the polynomial cannot be made monic
        """
        if not self.is_monic:
            if self.base_field.characteristic != 0:
                return self / self.leading
            else:
                g = reduce_to_gcd(iter(self.coefficients))
                if self.leading // g == self.base_field.one:
                    return self // self.leading
                else:
                    raise ValueError(f'Polynomial {self} over {self.base_field} cannot be made monic')
        else:
            return self.copy

    def monic(self, degree: int = 1) -> 'Polynomial':
        """Returns a monic polynomial with a single term of a given degree"""
        res = self.null
        res._set_term(degree, self.base_field.one)
        return res

    def mul(self, poly: 'Polynomial') -> 'Polynomial':
        """Multiplication in a ring of polynomials"""
        if poly.is_null or self.is_null:
            return self.null
        res = self.null
        # Schoolbook multiplication: accumulate cross-products by degree
        for deg_p, c_p in poly.internal.items():
            a = self._coefficients
            for deg_a, c_a in a.items():
                deg = deg_a + deg_p
                if deg in res._coefficients.keys():
                    res._set_term(deg, res[deg] + c_p * c_a)
                else:
                    res._set_term(deg, c_p * c_a)
        return res

    def mul_constant(self, k: BaseNumber) -> 'Polynomial':
        """External multiplication (vector space external product) of a polynomial and a constant"""
        s = self.null
        if k != self.base_field.zero:
            for deg, c in self._coefficients.items():
                s._set_term(deg, k * c)
        return s

    @property
    def null(self) -> 'Polynomial':
        """Returns the null polynomial"""
        return Polynomial([], base_field=self.base_field, indeterminate=self.indeterminate)

    @staticmethod
    def parse(expr: str, base_field: BaseField, indeterminate: Optional[str] = 'X') -> 'Polynomial':
        """Returns a polynomial from its algebraic expression
        """
        return symbolic_polynomial(expr, base_field, indeterminate=indeterminate)

    def pow(self, n: int) -> 'Polynomial':
        """Exponentiation of a polynomial"""
        assert n >= 0
        if self.is_unit:
            return self.unit
        if n == 0:
            return self.unit
        if n == 1:
            return self.copy
        if self.is_null:
            return self.null
        return power(self.copy, n)

    def sub(self, poly: 'Polynomial') -> 'Polynomial':
        """Returns the difference between two polynomials"""
        assert isinstance(poly, Polynomial)
        # Subtraction is indeed just an addition of an inverse
        return self.add(-poly)

    @property
    def trailing(self) -> BaseNumber:
        """Returns the value of the coefficient of the term of lowest degree"""
        if self.is_null:
            return self.base_field.zero
        else:
            return self._coefficients[self.valuation]

    @property
    def unit(self) -> 'Polynomial':
        """Returns 1 as a polynomial of degree 0"""
        return self.monic(0)

    @property
    def valuation(self) -> int:
        """Returns the degree of the term of lowest degree"""
        if self.is_null:
            raise ValueError('The valuation of the null polynomial is undefined')
        return min(self._coefficients.keys())

    def __eq__(self, other: Operand) -> bool:
        """Term-wise comparison of two polynomials"""
        if isinstance(other, Polynomial):
            if self.degree == other.degree:
                if self.is_null or other.is_null:
                    return self.is_null and other.is_null
                a = self.coefficients
                b = other.coefficients
                assert len(a) == len(b)
                return a == b
            else:
                return False
        else:
            # Coerce non-polynomial operands through __call__ before comparing
            other = self(other)
            return self == other

    def __getitem__(self, degree: int) -> BaseNumber:
        """Returns the coefficient of the term of a given degree"""
        if degree in self._coefficients.keys():
            return self._coefficients[degree]
        else:
            return self.base_field.zero

    def __len__(self) -> int:
        """Returns the number of non zero terms"""
        return len([c for c in self._coefficients.values() if c != 0])

    def __add__(self, other: Operand) -> 'Polynomial':
        if isinstance(other, Polynomial):
            return self.add(other)
        elif isinstance(other, list):
            return self.add(Polynomial(other, base_field=self.base_field))
        else:
            return self.add_constant(other)

    def __radd__(self, other: Operand) -> 'Polynomial':
        return self.__add__(other)

    def __neg__(self) -> 'Polynomial':
        """Returns the inverse of a polynomial with respect to addition"""
        a = self._coefficients
        s = self.null
        for deg, c in a.items():
            s._set_term(deg, -c)
        return s

    def __mul__(self, other: Operand) -> 'Polynomial':
        if isinstance(other, Polynomial):
            return self.mul(other)
        elif isinstance(other, list):
            return self.mul(Polynomial(other, base_field=self.base_field))
        else:
            return self.mul_constant(other)

    def __rmul__(self, other: Operand) -> 'Polynomial':
        return self.__mul__(other)

    def __sub__(self, other: Operand) -> 'Polynomial':
        if isinstance(other, (int, float,)):
            return self.add_constant(-other)
        elif isinstance(other, list):
            return self.sub(Polynomial(other, base_field=self.base_field))
        else:
            return self.sub(other)

    def __pow__(self, n: int, modulo: 'Polynomial' = None) -> 'Polynomial':
        # NOTE(review): the `modulo` argument of the three-arg pow() protocol
        # is accepted but ignored here
        return self.pow(n)

    def __hash__(self) -> int:
        """Allows a polynomial to become a dictionary key"""
        return hash(tuple(self._coefficients.items()))

    def __truediv__(self, other: Operand) -> 'Polynomial':
        return self.__floordiv__(other)

    def __floordiv__(self, other: Operand) -> 'Polynomial':
        if isinstance(other, self.__class__):
            return self.long_division(other)[0]
        else:
            other = self.base_field.element(other)
            return self.mul_constant(self.base_field.one / other)

    def __divmod__(self, other: Operand) -> Tuple['Polynomial', 'Polynomial']:
        return self.long_division(other)

    def __mod__(self, other: Operand) -> 'Polynomial':
        return self.long_division(other)[1]

    def __repr__(self) -> str:
        s = f'{repr(self.base_field)}.polynomial('
        s += f'{", ".join([repr(c) for c in self.coefficients])}, '
        s += f'indeterminate="{self.indeterminate}")'
        return s

    def __str__(self) -> str:
        s = ''
        if self == self.null:
            return '0'
        for deg in sorted(self._coefficients.keys()):
            c = self[deg]
            if c != self.base_field.zero:
                if deg == 0:
                    s += self._format_coefficient(c, raw=True)
                else:
                    s += self._format_coefficient(c, len(s) > 0)
                    s += f'{self.indeterminate}'
                    if deg > 1:
                        s += f'^{deg}'
        return s

    def __call__(self, *args) -> 'Polynomial':
        """Syntactic sugar to create a polynomial from another one
        example: p = poly(1, 2, 1) -> p == 1 + 2X + X^2"""
        return Polynomial(list(args), base_field=self.base_field, indeterminate=self.indeterminate)

    def __invert__(self) -> 'Polynomial':
        """Return the Frobenius reciprocal with the operator ~"""
        return self.frobenius_reciprocal()

    # Gory Details (as usual)

    def _format_coefficient(self, c: BaseNumber, display_plus_sign: bool = False, raw: bool = False) -> str:
        # Renders one coefficient; `raw=True` skips the sign/spacing conventions
        # used between terms, `display_plus_sign` controls the leading ' + '/' - '.
        sf = ''
        if isinstance(c, int):
            if raw:
                sf = str(c)
            else:
                if display_plus_sign:
                    if c < 0:
                        sf += ' - '
                    else:
                        sf += ' + '
                    if abs(c) != 1:
                        sf += f'{abs(c)}'
                else:
                    if c < 0:
                        sf += '-'
                    if abs(c) != 1:
                        sf += f'{abs(c)}'
        else:
            # rely on the override of __format__
            sc = format(c, 'short')
            if raw:
                sf = sc
            else:
                if display_plus_sign:
                    if sc[0] == '-':
                        sf += ' - '
                    else:
                        sf += ' + '
                    # abs may not be defined but neg must be
                    if c != self.base_field.one and c != self.base_field.one.__neg__():
                        if sc[0] == '-':
                            sf += sc[1:]
                        else:
                            sf += sc
                else:
                    if sc[0] == '-':
                        sf += '-'
                    if c != self.base_field.one and c != self.base_field.one.__neg__():
                        if sc[0] == '-':
                            sf += sc[1:]
                        else:
                            sf += sc
        return sf

    def _remove_trailing_zeros(self, seq: Sequence) -> Collection:
        # Strips zero coefficients of highest degrees so the internal dict
        # never stores them
        if len(seq) == 0:
            return []
        revseq = list(reversed(seq))
        while revseq[0] == self.base_field.zero:
            revseq = revseq[1:]
            if len(revseq) == 0:
                return []
        return list(reversed(revseq))

    def _safe_convert_coefficients(self, seq: Iterable) -> Dict[int, BaseNumber]:
        # Converts each raw coefficient into a base-field element, dropping zeros
        bf = self.base_field
        return dict({deg: bf.element(c) for deg, c in enumerate(seq) if c != bf.zero})

    def _set_term(self, deg: int, c: BaseNumber):
        # Single mutation point of the internal dict; keeps the "no stored
        # zeros" invariant
        if c == self.base_field.zero:
            if deg in self._coefficients.keys():
                del self._coefficients[deg]
        else:
            self._coefficients[deg] = c


def symbolic_polynomial(expression: str, base_field: BaseField, indeterminate: Optional[str] = 'X') -> 'Polynomial':
    """Returns a polynomial from its algebraic expression where:

    * `expression` is an algebraic expression in the `indeterminate`,
    * `base_field` is the field (or the ring) that coefficients are to be drawn from,
    * and `indeterminate` is a single character in the range [a-zA-Z], usually `'X'`.

    Returns an instance of `Polynomial`"""
    return PolynomialParser.parse(expression, base_field, indeterminate=indeterminate)


"""Lexer et Parser code follows, should not be exported"""


class Lexer:
    # Token type tags
    INDETERMINATE = 'INDETERMINATE'
    INTEGER = 'INTEGER'
    OPERATOR = 'OPERATOR'
    EXPONENT = 'EXPONENT'
    SUBEXPR = 'SUBEXPR'
    EOF = 'EOF'
    IGNORE = 'IGNORE'
    MISMATCH = 'MISMATCH'

    class Token(namedtuple('Token', 'type, value, position')):
        __slots__ = ()

    def __init__(self, indeterminate: Optional[str] = 'X', root_symbol: Optional[str] = 'j'):
        self.indeterminate = indeterminate
        self.root_symbol = root_symbol
        # Ordered token definitions compiled into one alternation of named groups
        self.symbols = [
            (Lexer.INDETERMINATE, r'[%s]' % self.indeterminate),
            (Lexer.INTEGER, r'[1-9][0-9]*'),
            (Lexer.OPERATOR, r'[+-]'),
            (Lexer.EXPONENT, r'[\^]'),
            (Lexer.SUBEXPR, r'\([^)]+\)'),
            (Lexer.EOF, r'$'),
            (Lexer.IGNORE, r'\s'),  # must stay before the last item
            (Lexer.MISMATCH, r'.')  # must stay the last item
        ]
        self.tokens_re = '|'.join([f'(?P<{tok}>{re})' for tok, re in self.symbols])

    def lex(self, expression: str) -> Iterator['Lexer.Token']:
        """Yields tokens from `expression`; operators/integers are converted to
        callables/ints, parenthesised sub-expressions to nested token streams."""
        for tok in finditer(self.tokens_re, expression):
            token = Lexer.Token(tok.lastgroup, tok.group(), tok.start())
            if token.type == Lexer.IGNORE:
                continue
            elif token.type == Lexer.SUBEXPR:
                # remove left and right parentheses
                lexer = Lexer(indeterminate=self.root_symbol, root_symbol='')
                yield Lexer.Token(token.type, lexer.lex(token.value[1:-1], ), token.position)
            elif token.type == Lexer.INTEGER:
                yield Lexer.Token(token.type, int(token.value), token.position)
            elif token.type == Lexer.OPERATOR:
                if token.value == '+':
                    yield Lexer.Token(token.type, operator.add, token.position)
                elif token.value == '-':
                    yield Lexer.Token(token.type, operator.sub, token.position)
            else:
                yield token


class ParsingContext:
    # Holds the evaluation stack driven by the parser's transition actions.
    # A term is reduced from four stacked items: (partial result, +/- operator,
    # coefficient, degree).

    def __init__(self, base_field: BaseField, indeterminate: str):
        self.base_field = base_field
        self.indeterminate = indeterminate
        self._stack = []
        self._stack.append(self.base_field.polynomial(self.base_field.zero, indeterminate=self.indeterminate))

    def accumulate_neutral(self, *_):
        self._stack.append(self.base_field.neutral)

    def accumulate_token(self, tok: Lexer.Token, *_):
        self._stack.append(tok.value)

    def accumulate_element(self, tok: Lexer.Token, *_):
        self._stack.append(self.base_field(tok.value))

    def accumulate_literal(self, _, v: Any):
        self._stack.append(v)

    def accumulate_subexpression(self, tok: Lexer.Token, *_):
        # A sub-expression is parsed recursively over the prime field and
        # converted back into a base-field element
        def convert_subexpr(subexpr):
            ctx = ParsingContext(self.base_field.prime_field, indeterminate=self.base_field.root_symbol)
            return self.base_field.element_from_polynomial(PolynomialParser.start(subexpr, ctx))
        self._stack.append(convert_subexpr(tok.value))

    def reduce(self, *_):
        try:
            (result, op, coefficient, degree), self._stack = self._stack[-4:], self._stack[:-4]
            self._stack.append(op(result, result.monic(degree).mul_constant(coefficient)))
        except BaseException as e:
            raise RuntimeError(e)

    def get_result(self) -> 'Polynomial':
        return self._stack.pop()


class PolynomialParser:
    # Table-driven finite state machine: `transitions` maps
    # (state, token type) -> (actions to run, next state).

    @staticmethod
    def parse(expression: str, base_field: BaseField, indeterminate: Optional[str] = 'X') -> 'Polynomial':
        """Main parsing utility"""
        if hasattr(base_field, 'root_symbol'):
            lexer = Lexer(indeterminate=indeterminate, root_symbol=base_field.root_symbol)
        else:
            lexer = Lexer(indeterminate=indeterminate, root_symbol='')
        ctx = ParsingContext(base_field, indeterminate)
        return PolynomialParser.start(lexer.lex(expression), ctx)

    @staticmethod
    def start(lexer: Iterator, context: ParsingContext) -> Polynomial:
        """Runs the FSM over the token stream; raises SyntaxError on any
        (state, token) pair with no defined transition."""
        def format_syntax_error(s, t):
            return f'Syntax error at {t.position}: unexpected {t} in state {s}'

        state = PolynomialParser.States.starting
        for token in lexer:
            if (state, token.type) in PolynomialParser.transitions:
                transition = PolynomialParser.transitions[(state, token.type)]
                for op, args in transition.actions:
                    op(context, token, *args)
                state = transition.next_state
            else:
                raise SyntaxError(format_syntax_error(state, token))
        assert state == PolynomialParser.States.complete
        return context.get_result()

    """Internals"""

    Transition = namedtuple('Transition', 'actions next_state')

    class States(Enum):
        starting = 0
        coefficient = 1
        indeterminate = 2
        exponent = 3
        sub_expression = 4
        operator = 5
        complete = 6
        init = 7

    transitions = {
        (States.starting, Lexer.INTEGER): Transition([
            (ParsingContext.accumulate_literal, (operator.add,)),
            (ParsingContext.accumulate_element, (),)
        ], States.coefficient),

        (States.starting, Lexer.SUBEXPR): Transition([
            (ParsingContext.accumulate_literal, (operator.add,)),
            (ParsingContext.accumulate_subexpression, ())
        ], States.coefficient),

        (States.starting, Lexer.OPERATOR): Transition([
            (ParsingContext.accumulate_token, ())
        ], States.operator),

        (States.starting, Lexer.INDETERMINATE): Transition([
            (ParsingContext.accumulate_literal, (operator.add,)),
            (ParsingContext.accumulate_neutral, (),)
        ], States.indeterminate),

        (States.starting, Lexer.EOF): Transition([], States.complete),

        (States.coefficient, Lexer.INDETERMINATE): Transition([], States.indeterminate),

        (States.coefficient, Lexer.OPERATOR): Transition([
            (ParsingContext.accumulate_literal, (0,)),
            (ParsingContext.reduce, ()),
            (ParsingContext.accumulate_token, ()),
        ], States.operator),

        (States.coefficient, Lexer.EOF): Transition([
            (ParsingContext.accumulate_literal, (0,)),
            (ParsingContext.reduce, ())
        ], States.complete),

        (States.operator, Lexer.INTEGER): Transition([
            (ParsingContext.accumulate_element, ())
        ], States.coefficient),

        (States.operator, Lexer.SUBEXPR): Transition([
            (ParsingContext.accumulate_subexpression, ())
        ], States.coefficient),

        (States.operator, Lexer.INDETERMINATE): Transition([
            (ParsingContext.accumulate_neutral, ())
        ], States.indeterminate),

        (States.indeterminate, Lexer.EXPONENT): Transition([], States.exponent),

        (States.indeterminate, Lexer.OPERATOR): Transition([
            (ParsingContext.accumulate_literal, (1,)),
            (ParsingContext.reduce, ()),
            (ParsingContext.accumulate_token, ())
        ], States.operator),

        (States.indeterminate, Lexer.EOF): Transition([
            (ParsingContext.accumulate_literal, (1,)),
            (ParsingContext.reduce, ())
        ], States.complete),

        (States.exponent, Lexer.INTEGER): Transition([
            (ParsingContext.accumulate_token, ()),
            (ParsingContext.reduce, ())
        ], States.init),

        (States.init, Lexer.OPERATOR): Transition([
            (ParsingContext.accumulate_token, ())
        ], States.operator),

        (States.init, Lexer.EOF): Transition([], States.complete)
    }
import datetime from enum import Enum from itertools import islice from logging import getLogger from typing import Any, Dict, List, Optional, Tuple, Type, TypedDict from django.contrib.postgres.fields import ArrayField from django.contrib.postgres.indexes import GinIndex from django.db import IntegrityError, models from django.db.models import Case, JSONField, Q, QuerySet, Sum from django.db.models.expressions import (F, OuterRef, RawSQL, Subquery, Value, When) from django.db.models.signals import post_save from hexbytes import HexBytes from model_utils.models import TimeStampedModel from gnosis.eth.constants import ERC20_721_TRANSFER_TOPIC from gnosis.eth.django.models import (EthereumAddressField, HexField, Sha3HashField, Uint256Field) from gnosis.safe import SafeOperation from gnosis.safe.safe_signature import SafeSignatureType from .utils import clean_receipt_log logger = getLogger(__name__) class ConfirmationType(Enum): CONFIRMATION = 0 EXECUTION = 1 class EthereumTxCallType(Enum): # https://ethereum.stackexchange.com/questions/63743/whats-the-difference-between-type-and-calltype-in-parity-trace CALL = 0 DELEGATE_CALL = 1 CALL_CODE = 2 STATIC_CALL = 3 @staticmethod def parse_call_type(call_type: Optional[str]): if not call_type: return None call_type = call_type.lower() if call_type == 'call': return EthereumTxCallType.CALL elif call_type == 'delegatecall': return EthereumTxCallType.DELEGATE_CALL elif call_type == 'callcode': return EthereumTxCallType.CALL_CODE elif call_type == 'staticcall': return EthereumTxCallType.STATIC_CALL else: return None class EthereumTxType(Enum): CALL = 0 CREATE = 1 SELF_DESTRUCT = 2 REWARD = 3 @staticmethod def parse(tx_type: str): tx_type = tx_type.upper() if tx_type == 'CALL': return EthereumTxType.CALL elif tx_type == 'CREATE': return EthereumTxType.CREATE elif tx_type == 'SUICIDE': return EthereumTxType.SELF_DESTRUCT elif tx_type == 'REWARD': return EthereumTxType.REWARD else: raise ValueError(f'{tx_type} is not a valid 
EthereumTxType') class TransferDict(TypedDict): block_number: int transaction_hash: HexBytes to: str _from: str value: int execution_date: datetime.datetime token_id: int token_address: str class BulkCreateSignalMixin: def bulk_create(self, objs, batch_size: Optional[int] = None, ignore_conflicts: bool = False): objs = list(objs) # If not it won't be iterate later result = super().bulk_create(objs, batch_size=batch_size, ignore_conflicts=ignore_conflicts) for obj in objs: post_save.send(obj.__class__, instance=obj, created=True) return result def bulk_create_from_generator(self, objs, batch_size: int = 10000, ignore_conflicts: bool = False) -> int: """ Implementation in Django is not ok, as it will do `objs = list(objs)`. If objects come from a generator they will be brought to RAM. This approach is more friendly :return: Count of inserted elements """ assert batch_size is not None and batch_size > 0 total = 0 while True: if inserted := len(self.bulk_create(islice(objs, batch_size), ignore_conflicts=ignore_conflicts)): total += inserted else: return total # class EnsLabel(models.Model): # label_hash = Sha3HashField(unique=True) # Keccak of the label # label = models.CharField(max_length=20, blank=True) # def __str__(self): # return f'Label={self.label} with hash={self.label_hash}' class EthereumBlockManager(models.Manager): def get_or_create_from_block(self, block: Dict[str, Any], confirmed: bool = False): try: return self.get(number=block['number']) except self.model.DoesNotExist: return self.create_from_block(block, confirmed=confirmed) def create_from_block(self, block: Dict[str, Any], confirmed: bool = False) -> 'EthereumBlock': """ :param block: Block Dict returned by Web3 :param confirmed: If True we will not check for reorgs in the future :return: EthereumBlock model """ try: return super().create( number=block['number'], gas_limit=block['gasLimit'], gas_used=block['gasUsed'], timestamp=datetime.datetime.fromtimestamp(block['timestamp'], 
datetime.timezone.utc), block_hash=block['hash'], parent_hash=block['parentHash'], confirmed=confirmed, ) except IntegrityError: # The block could be created in the meantime by other task while the block was fetched from blockchain return self.get(number=block['number']) class EthereumBlockQuerySet(models.QuerySet): def not_confirmed(self, to_block_number: Optional[int] = None): queryset = self.filter(confirmed=False) if to_block_number is not None: queryset = queryset.filter(number__lte=to_block_number) return queryset.order_by('number') class EthereumBlock(models.Model): objects = EthereumBlockManager.from_queryset(EthereumBlockQuerySet)() number = models.PositiveIntegerField(primary_key=True) gas_limit = models.PositiveIntegerField() gas_used = models.PositiveIntegerField() timestamp = models.DateTimeField() block_hash = Sha3HashField(unique=True) parent_hash = Sha3HashField(unique=True) # For reorgs, True if `current_block_number` - `number` >= MIN_CONFIRMATIONS confirmed = models.BooleanField(default=False, db_index=True) def __str__(self): return f'Block number={self.number} on {self.timestamp}' def set_confirmed(self): self.confirmed = True self.save(update_fields=['confirmed']) class EthereumTxManager(models.Manager): def create_from_tx_dict(self, tx: Dict[str, Any], tx_receipt: Optional[Dict[str, Any]] = None, ethereum_block: Optional[EthereumBlock] = None) -> 'EthereumTx': data = HexBytes(tx.get('data') or tx.get('input')) return super().create( block=ethereum_block, tx_hash=HexBytes(tx['hash']).hex(), _from=tx['from'], gas=tx['gas'], gas_price=tx['gasPrice'], gas_used=tx_receipt and tx_receipt['gasUsed'], logs=tx_receipt and [clean_receipt_log(log) for log in tx_receipt.get('logs', list())], status=tx_receipt and tx_receipt.get('status'), transaction_index=tx_receipt and tx_receipt['transactionIndex'], data=data if data else None, nonce=tx['nonce'], to=tx.get('to'), value=tx['value'], ) class EthereumTx(TimeStampedModel): objects = EthereumTxManager() 
block = models.ForeignKey(EthereumBlock, on_delete=models.CASCADE, null=True, default=None, related_name='txs') # If mined tx_hash = Sha3HashField(primary_key=True) gas_used = Uint256Field(null=True, default=None) # If mined status = models.IntegerField(null=True, default=None, db_index=True) # If mined. Old txs don't have `status` logs = ArrayField(JSONField(), null=True, default=None) # If mined transaction_index = models.PositiveIntegerField(null=True, default=None) # If mined _from = EthereumAddressField(null=True, db_index=True) gas = Uint256Field() gas_price = Uint256Field() data = models.BinaryField(null=True) nonce = Uint256Field() to = EthereumAddressField(null=True, db_index=True) value = Uint256Field() def __str__(self): return '{} status={} from={} to={}'.format(self.tx_hash, self.status, self._from, self.to) @property def execution_date(self) -> Optional[datetime.datetime]: if self.block_id is not None: return self.block.timestamp return None @property def success(self) -> Optional[bool]: if self.status is not None: return self.status == 1 def update_with_block_and_receipt(self, ethereum_block: 'EthereumBlock', tx_receipt: Dict[str, Any]): if self.block is None: self.block = ethereum_block self.gas_used = tx_receipt['gasUsed'] self.logs = [clean_receipt_log(log) for log in tx_receipt.get('logs', list())] self.status = tx_receipt.get('status') self.transaction_index = tx_receipt['transactionIndex'] return self.save(update_fields=['block', 'gas_used', 'logs', 'status', 'transaction_index']) class EthereumEventQuerySet(models.QuerySet): def not_erc_20_721_events(self): return self.exclude(topic=ERC20_721_TRANSFER_TOPIC) def erc20_and_721_events(self, token_address: Optional[str] = None, address: Optional[str] = None): queryset = self.filter(topic=ERC20_721_TRANSFER_TOPIC) if token_address: queryset = queryset.filter(address=token_address) if address: queryset = queryset.filter(Q(arguments__to=address) | Q(arguments__from=address)) return queryset def 
erc20_events(self, token_address: Optional[str] = None, address: Optional[str] = None): return self.erc20_and_721_events(token_address=token_address, address=address).filter(arguments__has_key='value') def erc721_events(self, token_address: Optional[str] = None, address: Optional[str] = None): return self.erc20_and_721_events(token_address=token_address, address=address).filter(arguments__has_key='tokenId') def erc721_owned_by(self, address: str) -> List[Tuple[str, int]]: """ Returns erc721 owned by address, removing the ones sent :return: List of tuples(token_address: str, token_id: int) """ # Get all the token history erc721_events = self.erc721_events(address=address) # Get tokens received and remove tokens transferred tokens_in: Tuple[str, int] = [] tokens_out: Tuple[str, int] = [] for erc721_event in erc721_events: token_address = erc721_event.address token_id = erc721_event.arguments.get('tokenId') if token_id is None: logger.error('TokenId for ERC721 info token=%s with owner=%s can never be None', token_address, address) continue if erc721_event.arguments.get('to') == address: list_to_append = tokens_in else: list_to_append = tokens_out list_to_append.append((token_address, token_id)) for token_out in tokens_out: # Remove tokens sent from list if token_out in tokens_in: tokens_in.remove(token_out) return tokens_in class EthereumEventManager(BulkCreateSignalMixin, models.Manager): def from_decoded_event(self, decoded_event: Dict[str, Any]) -> 'EthereumEvent': """ Does not create the model. 
Requires that `ethereum_tx` exists :param decoded_event: :return: `EthereumEvent` instance (not stored in database) """ return EthereumEvent(ethereum_tx_id=decoded_event['transactionHash'], log_index=decoded_event['logIndex'], address=decoded_event['address'], topic=decoded_event['topics'][0], topics=decoded_event['topics'], arguments=decoded_event['args']) def erc20_tokens_used_by_address(self, address: str) -> List[str]: """ :param address: :return: List of token addresses used by an address """ # return self.erc20_events(address=address).values_list('address', flat=True).distinct() address_as_postgres_text = f'"{address}"' events = self.raw("""SELECT DISTINCT "id", "address" FROM "history_ethereumevent" WHERE ("topic" = %s AND (("arguments" -> 'to')::text = %s OR ("arguments" -> 'from')::text = %s) AND "arguments" ? 'value') """, [ERC20_721_TRANSFER_TOPIC[2:], address_as_postgres_text, address_as_postgres_text]) return [event.address for event in events] def erc721_tokens_used_by_address(self, address: str) -> List[str]: """ :param address: :return: List of token addresses used by an address """ # return self.erc721_events(address=address).values_list('address', flat=True).distinct() address_as_postgres_text = f'"{address}"' events = self.raw("""SELECT DISTINCT "id", "address" FROM "history_ethereumevent" WHERE ("topic" = '%s' AND (("arguments" -> 'to')::text = '"%s"' OR ("arguments" -> 'from')::text = '"%s"') AND "arguments" ? 
'tokenId') """, [ERC20_721_TRANSFER_TOPIC[2:], address_as_postgres_text, address_as_postgres_text]) return [event.address for event in events] def erc20_tokens_with_balance(self, address: str) -> List[Dict[str, Any]]: """ :return: List of dictionaries {'token_address': str, 'balance': int} """ arguments_value_field = RawSQL("(arguments->>'value')::numeric", ()) return self.erc20_events( address=address ).values('address').annotate( balance=Sum(Case( When(arguments__from=address, then=-arguments_value_field), default=arguments_value_field, )) ).order_by('-balance').values('address', 'balance') def get_or_create_erc20_or_721_event(self, decoded_event: Dict[str, Any]): if 'value' not in decoded_event['args'] or 'tokenId' not in decoded_event['args']: raise ValueError('Invalid ERC20 or ERC721 event %s' % decoded_event) else: return self.get_or_create(ethereum_tx_id=decoded_event['transactionHash'], log_index=decoded_event['logIndex'], defaults={'address': decoded_event['address'], 'topic': decoded_event['topics'][0], 'topics': decoded_event['topics'], 'arguments': decoded_event['args'], }) class EthereumEvent(models.Model): objects = EthereumEventManager.from_queryset(EthereumEventQuerySet)() ethereum_tx = models.ForeignKey(EthereumTx, on_delete=models.CASCADE, related_name='events') log_index = models.PositiveIntegerField() address = EthereumAddressField(db_index=True) topic = Sha3HashField(db_index=True) topics = ArrayField(Sha3HashField()) arguments = JSONField() class Meta: unique_together = (('ethereum_tx', 'log_index'),) indexes = [GinIndex(fields=['arguments'])] # There are also 2 indexes created manually by 0026 migration, both Btree for arguments->to and arguments->from # To use that indexes json queries must be rewritten to use `::text` fields def __str__(self): return f'Tx-hash={self.ethereum_tx_id} Log-index={self.log_index} Topic={self.topic} Arguments={self.arguments}' @property def created(self): return self.ethereum_tx.block.timestamp def is_erc20(self) 
-> bool:
        # Transfer event carrying a `value` argument -> ERC20 semantics
        return self.topic == ERC20_721_TRANSFER_TOPIC and 'value' in self.arguments and 'to' in self.arguments

    def is_erc721(self) -> bool:
        # Transfer event carrying a `tokenId` argument -> ERC721 semantics
        return self.topic == ERC20_721_TRANSFER_TOPIC and 'tokenId' in self.arguments and 'to' in self.arguments


class InternalTxManager(BulkCreateSignalMixin, models.Manager):
    def _trace_address_to_str(self, trace_address) -> str:
        # Serialize a trace address (iterable of ints) into a comma-separated string
        return ','.join([str(address) for address in trace_address])

    def build_from_trace(self, trace: Dict[str, Any], ethereum_tx: EthereumTx) -> 'InternalTx':
        """
        Build a InternalTx object from trace, but it doesn't insert it on database
        :param trace: parity-style trace dict (keys: `action`, `result`, `type`, `traceAddress`, `error`)
        :param ethereum_tx:
        :return: InternalTx not inserted
        """
        data = trace['action'].get('input') or trace['action'].get('init')
        tx_type = EthereumTxType.parse(trace['type'])
        call_type = EthereumTxCallType.parse_call_type(trace['action'].get('callType'))
        trace_address_str = self._trace_address_to_str(trace['traceAddress'])
        return InternalTx(
            ethereum_tx=ethereum_tx,
            trace_address=trace_address_str,
            _from=trace['action'].get('from'),
            gas=trace['action'].get('gas', 0),
            data=data if data else None,
            # `to` for calls, `address` for self-destructs
            to=trace['action'].get('to') or trace['action'].get('address'),
            value=trace['action'].get('value') or trace['action'].get('balance', 0),
            # `result` may be missing or None for errored traces
            gas_used=(trace.get('result') or {}).get('gasUsed', 0),
            contract_address=(trace.get('result') or {}).get('address'),
            code=(trace.get('result') or {}).get('code'),
            output=(trace.get('result') or {}).get('output'),
            refund_address=trace['action'].get('refundAddress'),
            tx_type=tx_type.value,
            call_type=call_type.value if call_type else None,
            error=trace.get('error')
        )

    def get_or_create_from_trace(self, trace: Dict[str, Any], ethereum_tx: EthereumTx) -> Tuple['InternalTx', bool]:
        # Same field mapping as `build_from_trace`, but persisted via get_or_create
        tx_type = EthereumTxType.parse(trace['type'])
        call_type = EthereumTxCallType.parse_call_type(trace['action'].get('callType'))
        trace_address_str = self._trace_address_to_str(trace['traceAddress'])
        return self.get_or_create(
            ethereum_tx=ethereum_tx,
            trace_address=trace_address_str,
            defaults={
                '_from': trace['action'].get('from'),
                'gas': trace['action'].get('gas', 0),
                'data': trace['action'].get('input') or trace['action'].get('init'),
                'to': trace['action'].get('to') or trace['action'].get('address'),
                'value': trace['action'].get('value') or trace['action'].get('balance', 0),
                'gas_used': (trace.get('result') or {}).get('gasUsed', 0),
                'contract_address': (trace.get('result') or {}).get('address'),
                'code': (trace.get('result') or {}).get('code'),
                'output': (trace.get('result') or {}).get('output'),
                'refund_address': trace['action'].get('refundAddress'),
                'tx_type': tx_type.value,
                'call_type': call_type.value if call_type else None,
                'error': trace.get('error'),
            }
        )


class InternalTxQuerySet(models.QuerySet):
    def ether_txs(self):
        # CALL internal txs moving ether, annotated to match the column set of `token_txs`
        # so both querysets can be UNIONed in `union_ether_and_token_txs`
        return self.filter(
            call_type=EthereumTxCallType.CALL.value,
            value__gt=0
        ).annotate(
            transaction_hash=F('ethereum_tx_id'),
            block_number=F('ethereum_tx__block_id'),
            execution_date=F('ethereum_tx__block__timestamp'),
            token_id=Value(None, output_field=Uint256Field()),
            token_address=Value(None, output_field=EthereumAddressField()),
        ).order_by('-ethereum_tx__block_id')

    def ether_txs_for_address(self, address: str):
        return self.ether_txs().filter(Q(to=address) | Q(_from=address))

    def ether_incoming_txs_for_address(self, address: str):
        return self.ether_txs().filter(to=address)

    def token_txs(self):
        # ERC20/721 Transfer events annotated to match the column set of `ether_txs`
        return EthereumEvent.objects.erc20_and_721_events().annotate(
            to=RawSQL("arguments->>%s", ('to',)),  # Order is really important!
            _from=RawSQL("arguments->>%s", ('from',)),
            value=RawSQL("(arguments->>%s)::numeric", ('value',)),
            transaction_hash=F('ethereum_tx_id'),
            block_number=F('ethereum_tx__block_id'),
            execution_date=F('ethereum_tx__block__timestamp'),
            token_id=RawSQL("(arguments->>%s)::numeric", ('tokenId',)),
            token_address=F('address')
        ).order_by('-ethereum_tx__block_id')

    def token_txs_for_address(self, address: str):
        return self.token_txs().filter(Q(arguments__to=address) | Q(arguments__from=address))

    def token_incoming_txs_for_address(self, address: str):
        return self.token_txs().filter(arguments__to=address)

    def ether_and_token_txs(self, address: str):
        tokens_queryset = self.token_txs_for_address(address)
        ether_queryset = self.ether_txs_for_address(address)
        return self.union_ether_and_token_txs(tokens_queryset, ether_queryset)

    def ether_and_token_incoming_txs(self, address: str):
        tokens_queryset = self.token_incoming_txs_for_address(address)
        ether_queryset = self.ether_incoming_txs_for_address(address)
        return self.union_ether_and_token_txs(tokens_queryset, ether_queryset)

    def union_ether_and_token_txs(self, tokens_queryset: QuerySet, ether_queryset: QuerySet) -> TransferDict:
        values = ('block_number', 'transaction_hash', 'to', '_from', 'value', 'execution_date',
                  'token_id', 'token_address')
        return ether_queryset.values(*values).union(tokens_queryset.values(*values)).order_by('-block_number')

    def can_be_decoded(self):
        """
        Every InternalTx can be decoded if:
        - Has data
        - InternalTx is not errored
        - EthereumTx is successful (not reverted or out of gas)
        - CallType is a DELEGATE_CALL (to the master copy contract)
        - Not already decoded
        :return: Txs that can be decoded
        """
        return self.exclude(
            data=None
        ).filter(
            call_type=EthereumTxCallType.DELEGATE_CALL.value,
            error=None,
            ethereum_tx__status=1,
            decoded_tx=None,
        )


class InternalTx(models.Model):
    objects = InternalTxManager.from_queryset(InternalTxQuerySet)()
    ethereum_tx = models.ForeignKey(EthereumTx, on_delete=models.CASCADE, related_name='internal_txs')
    _from = EthereumAddressField(null=True, db_index=True)  # For SELF-DESTRUCT it can be null
    gas = Uint256Field()
    data = models.BinaryField(null=True)  # `input` for Call, `init` for Create
    to = EthereumAddressField(null=True, db_index=True)
    value = Uint256Field()
    gas_used = Uint256Field()
    contract_address = EthereumAddressField(null=True, db_index=True)  # Create
    code = models.BinaryField(null=True)  # Create
    output = models.BinaryField(null=True)  # Call
    refund_address = EthereumAddressField(null=True, db_index=True)  # For SELF-DESTRUCT
    tx_type = models.PositiveSmallIntegerField(choices=[(tag.value, tag.name) for tag in EthereumTxType],
                                               db_index=True)
    call_type = models.PositiveSmallIntegerField(null=True,
                                                 choices=[(tag.value, tag.name) for tag in EthereumTxCallType],
                                                 db_index=True)  # Call
    trace_address = models.CharField(max_length=600)  # Stringified traceAddress
    error = models.CharField(max_length=200, null=True)

    class Meta:
        unique_together = (('ethereum_tx', 'trace_address'),)

    def __str__(self):
        if self.to:
            return 'Internal tx hash={} from={} to={}'.format(self.ethereum_tx_id, self._from, self.to)
        else:
            return 'Internal tx hash={} from={}'.format(self.ethereum_tx_id, self._from)

    @property
    def block_number(self):
        return self.ethereum_tx.block_id

    @property
    def created(self):
        return self.ethereum_tx.block.timestamp

    @property
    def can_be_decoded(self) -> bool:
        # Mirror of InternalTxQuerySet.can_be_decoded for a single instance
        return bool(self.is_delegate_call
                    and not self.error
                    and self.data
                    and self.ethereum_tx.success)

    @property
    def is_call(self):
        return EthereumTxType(self.tx_type) == EthereumTxType.CALL

    @property
    def is_create(self):
        return EthereumTxType(self.tx_type) == EthereumTxType.CREATE

    @property
    def is_decoded(self):
        try:
            return bool(self.decoded_tx)
        except InternalTxDecoded.DoesNotExist:
            return False

    @property
    def is_delegate_call(self) -> bool:
        if self.call_type is None:
            return False
        else:
            return EthereumTxCallType(self.call_type) == EthereumTxCallType.DELEGATE_CALL

    @property
    def is_ether_transfer(self) -> bool:
        return self.call_type == EthereumTxCallType.CALL.value and self.value > 0

    @property
    def is_relevant(self):
        # Decodable, moves ether, or created a contract
        return self.can_be_decoded or self.is_ether_transfer or self.contract_address

    @property
    def trace_address_as_list(self) -> List[int]:
        return [int(x) for x in self.trace_address.split(',')]


class InternalTxDecodedManager(BulkCreateSignalMixin, models.Manager):
    pass


class InternalTxDecodedQuerySet(models.QuerySet):
    def for_indexed_safes(self):
        return self.filter(
            Q(internal_tx___from__in=SafeContract.objects.values('address'))  # Just Safes indexed
            | Q(function_name='setup')  # This way we can index new Safes without events
        )

    def not_processed(self):
        return self.filter(processed=False)

    def order_by_processing_queue(self):
        """
        :return: Transactions ordered to be processed. First older transactions
        """
        return self.order_by(
            'internal_tx__ethereum_tx__block_id',
            'internal_tx__ethereum_tx__transaction_index',
            'internal_tx__trace_address',
        )

    def pending_for_safes(self):
        """
        :return: Pending `InternalTxDecoded` sorted by block number and then transaction index inside the block
        """
        return self.not_processed(
        ).for_indexed_safes(
        ).select_related(
            'internal_tx__ethereum_tx__block',
        ).order_by_processing_queue()

    def pending_for_safe(self, safe_address: str):
        """
        :return: Pending `InternalTxDecoded` sorted by block number and then transaction index inside the block
        """
        return self.pending_for_safes().filter(internal_tx___from=safe_address)

    def safes_pending_to_be_processed(self) -> QuerySet:
        """
        :return: List of Safe addresses that have transactions pending to be processed
        """
        return self.not_processed().for_indexed_safes().values_list(
            'internal_tx___from', flat=True
        ).distinct('internal_tx___from')


class InternalTxDecoded(models.Model):
    objects = InternalTxDecodedManager.from_queryset(InternalTxDecodedQuerySet)()
    internal_tx = models.OneToOneField(InternalTx, on_delete=models.CASCADE,
                                       related_name='decoded_tx',
                                       primary_key=True)
function_name = models.CharField(max_length=256, db_index=True) arguments = JSONField() processed = models.BooleanField(default=False) class Meta: indexes = [ models.Index(name='history_decoded_processed_idx', fields=['processed'], condition=Q(processed=False)) ] verbose_name_plural = 'Internal txs decoded' def __str__(self): return f'{'Processed' if self.processed else 'Not Processed'} ' \ f'fn-name={self.function_name} with arguments={self.arguments}' @property def address(self) -> str: return self.internal_tx._from @property def block_number(self) -> Type[int]: return self.internal_tx.ethereum_tx.block_id @property def tx_hash(self) -> Type[int]: return self.internal_tx.ethereum_tx_id def set_processed(self): self.processed = True return self.save(update_fields=['processed']) class MultisigTransactionManager(models.Manager): def last_nonce(self, safe: str) -> Optional[int]: """ :param safe: :return: nonce of the last executed and mined transaction. It will be None if there's no transactions or none of them is mined """ nonce_query = self.filter(safe=safe).exclude(ethereum_tx=None).order_by('-nonce').values('nonce').first() if nonce_query: return nonce_query['nonce'] def last_valid_transaction(self, safe: str) -> Optional['MultisigTransaction']: """ Find last transaction where signers match the owners registered for that Safe. Transactions out of sync have an invalid `safeNonce`, so `safeTxHash` is not valid and owners recovered from the signatures wouldn't be valid. We exclude `Approved hashes` and `Contract signatures` as that owners are not retrieved using the signature, so they will show the right owner even if `safeNonce` is not valid :param safe: :return: Last valid indexed transaction mined """ # Build list of every owner known for that Safe (even if it was deleted/replaced). 
Changes of collision for # invalid recovered owners from signatures are almost impossible owners_set = set() for owners_list in SafeStatus.objects.filter(address=safe).values_list('owners', flat=True).distinct(): owners_set.update(owners_list) return MultisigTransaction.objects.filter( safe=safe, confirmations__owner__in=owners_set, confirmations__signature_type__in=[SafeSignatureType.EOA.value, SafeSignatureType.ETH_SIGN.value] ).exclude( ethereum_tx=None ).order_by('-nonce').first() class MultisigTransactionQuerySet(models.QuerySet): def executed(self): return self.exclude( ethereum_tx__block=None ) def not_executed(self): return self.filter( ethereum_tx__block=None ) def with_confirmations(self): return self.exclude( confirmations__isnull=True ) def without_confirmations(self): return self.filter( confirmations__isnull=True ) def with_confirmations_required(self): """ Add confirmations required for execution when the tx was mined (threshold of the Safe at that point) :return: queryset with `confirmations_required: int` field """ threshold_query = SafeStatus.objects.filter( internal_tx__ethereum_tx=OuterRef('ethereum_tx') ).sorted_reverse_by_internal_tx().values('threshold') return self.annotate(confirmations_required=Subquery(threshold_query[:1])) class MultisigTransaction(TimeStampedModel): objects = MultisigTransactionManager.from_queryset(MultisigTransactionQuerySet)() safe_tx_hash = Sha3HashField(primary_key=True) safe = EthereumAddressField(db_index=True) ethereum_tx = models.ForeignKey(EthereumTx, null=True, default=None, blank=True, on_delete=models.SET_NULL, related_name='multisig_txs') to = EthereumAddressField(null=True, db_index=True) value = Uint256Field() data = models.BinaryField(null=True) operation = models.PositiveSmallIntegerField(choices=[(tag.value, tag.name) for tag in SafeOperation]) safe_tx_gas = Uint256Field() base_gas = Uint256Field() gas_price = Uint256Field() gas_token = EthereumAddressField(null=True) refund_receiver = 
EthereumAddressField(null=True) signatures = models.BinaryField(null=True) # When tx is executed nonce = Uint256Field(db_index=True) failed = models.BooleanField(null=True, default=None, db_index=True) origin = models.CharField(null=True, default=None, max_length=100) # To store arbitrary data on the tx trusted = models.BooleanField(default=False, db_index=True) # Txs proposed by a delegate or with one confirmation def __str__(self): return f'{self.safe} - {self.nonce} - {self.safe_tx_hash}' @property def execution_date(self) -> Optional[datetime.datetime]: if self.ethereum_tx_id and self.ethereum_tx.block_id is not None: return self.ethereum_tx.block.timestamp return None @property def executed(self) -> bool: return bool(self.ethereum_tx_id and (self.ethereum_tx.block_id is not None)) def owners(self) -> Optional[List[str]]: if not self.signatures: return None else: # TODO Get owners from signatures. Not very trivial return [] class ModuleTransaction(TimeStampedModel): internal_tx = models.OneToOneField(InternalTx, on_delete=models.CASCADE, related_name='module_tx', primary_key=True) safe = EthereumAddressField(db_index=True) # Just for convenience, it could be retrieved from `internal_tx` module = EthereumAddressField(db_index=True) # Just for convenience, it could be retrieved from `internal_tx` to = EthereumAddressField(db_index=True) value = Uint256Field() data = models.BinaryField(null=True) operation = models.PositiveSmallIntegerField(choices=[(tag.value, tag.name) for tag in SafeOperation]) failed = models.BooleanField(default=False) def __str__(self): if self.value: return f'{self.safe} - {self.to} - {self.value}' else: return f'{self.safe} - {self.to} - {HexBytes(self.data.tobytes()).hex()[:8]}' @property def execution_date(self) -> Optional[datetime.datetime]: if self.internal_tx.ethereum_tx_id and self.internal_tx.ethereum_tx.block_id is not None: return self.internal_tx.ethereum_tx.block.timestamp return None class 
MultisigConfirmationManager(models.Manager):
    def remove_unused_confirmations(self, safe: str, current_safe_none: int, owner: str) -> int:
        """
        :return: Remove confirmations for not executed transactions with nonce higher or equal than the
        current Safe nonce for a Safe and an owner (as an owner can be an owner of multiple Safes).
        Used when an owner is removed from the Safe.
        """
        # NOTE(review): `current_safe_none` looks like a typo for `current_safe_nonce`;
        # kept as-is because callers may pass it by keyword
        return self.filter(
            multisig_transaction__ethereum_tx=None,  # Not executed
            multisig_transaction__safe=safe,
            multisig_transaction__nonce__gte=current_safe_none,
            owner=owner,
        ).delete()[0]


class MultisigConfirmationQuerySet(models.QuerySet):
    def without_transaction(self):
        # Confirmations stored before their MultisigTransaction was indexed
        return self.filter(multisig_transaction=None)

    def with_transaction(self):
        return self.exclude(multisig_transaction=None)


class MultisigConfirmation(TimeStampedModel):
    objects = MultisigConfirmationManager.from_queryset(MultisigConfirmationQuerySet)()
    ethereum_tx = models.ForeignKey(EthereumTx, on_delete=models.CASCADE, related_name='multisig_confirmations',
                                    null=True)  # `null=True` for signature confirmations
    multisig_transaction = models.ForeignKey(MultisigTransaction,
                                             on_delete=models.CASCADE,
                                             null=True,
                                             related_name='confirmations')
    multisig_transaction_hash = Sha3HashField(null=True,
                                              db_index=True)  # Use this while we don't have a `multisig_transaction`
    owner = EthereumAddressField()
    signature = HexField(null=True, default=None, max_length=2000)
    signature_type = models.PositiveSmallIntegerField(choices=[(tag.value, tag.name) for tag in SafeSignatureType],
                                                      db_index=True)

    class Meta:
        unique_together = (('multisig_transaction_hash', 'owner'),)
        ordering = ['created']

    def __str__(self):
        if self.multisig_transaction_id:
            return f'Confirmation of owner={self.owner} for transaction-hash={self.multisig_transaction_hash}'
        else:
            return f'Confirmation of owner={self.owner} for existing transaction={self.multisig_transaction_hash}'


class MonitoredAddressManager(models.Manager):
    def update_addresses(self, addresses: List[str], from_block_number: int, block_number: int,
                         database_field: str) -> int:
        """
        :param addresses: Addresses to have the block number updated
        :param from_block_number: Make sure that no reorg has happened checking that block number was not rollbacked
        :param block_number: Block number to be updated
        :param database_field: Database field to store the block number
        :return: Number of entities updated
        """
        return self.filter(
            **{'address__in': addresses,
               database_field + '__gte': from_block_number - 1,  # Protect in case of reorg
               }
        ).update(**{database_field: block_number})


class MonitoredAddressQuerySet(models.QuerySet):
    def almost_updated(self, database_field: str, current_block_number: int, updated_blocks_behind: int,
                       confirmations: int):
        # Addresses close to the chain head (between `confirmations` and `updated_blocks_behind` behind)
        return self.filter(
            **{database_field + '__lt': current_block_number - confirmations,
               database_field + '__gt': current_block_number - updated_blocks_behind})

    def not_updated(self, database_field: str, current_block_number: int, confirmations: int):
        return self.filter(
            **{database_field + '__lt': current_block_number - confirmations}
        )


class MonitoredAddress(models.Model):
    objects = MonitoredAddressManager.from_queryset(MonitoredAddressQuerySet)()
    address = EthereumAddressField(primary_key=True)
    initial_block_number = models.IntegerField(default=0)  # Block number when address received first tx
    tx_block_number = models.IntegerField(null=True, default=None,
                                          db_index=True)  # Block number when last internal tx scan ended

    class Meta:
        abstract = True
        verbose_name_plural = 'Monitored addresses'

    def __str__(self):
        return f'Address={self.address} - Initial-block-number={self.initial_block_number}' \
               f' - Tx-block-number={self.tx_block_number}'


class ProxyFactory(MonitoredAddress):
    class Meta:
        verbose_name_plural = 'Proxy factories'
        ordering = ['tx_block_number']


class SafeMasterCopy(MonitoredAddress):
    version = models.CharField(max_length=20)

    class Meta:
        verbose_name_plural = 'Safe master copies'
        ordering = ['tx_block_number']


class SafeContractManager(MonitoredAddressManager):
    pass


class SafeContract(models.Model):
    objects = SafeContractManager.from_queryset(MonitoredAddressQuerySet)()
    address = EthereumAddressField(primary_key=True)
    ethereum_tx = models.ForeignKey(EthereumTx, on_delete=models.CASCADE, related_name='safe_contracts')
    erc20_block_number = models.IntegerField(default=0, db_index=True)  # Block number of last scan of erc20

    def __str__(self):
        return f'Safe address={self.address} - ethereum-tx={self.ethereum_tx_id}'

    @property
    def created_block_number(self) -> Optional[int]:  # annotation improved: returns a block id, not a type
        if self.ethereum_tx:
            return self.ethereum_tx.block_id


class SafeContractDelegateManager(models.Manager):
    def get_delegates_for_safe(self, address: str) -> List[str]:
        return list(self.filter(safe_contract_id=address).values_list('delegate', flat=True))


class SafeContractDelegate(models.Model):
    """
    The owners of the Safe can add users so they can propose/retrieve txs as if they were the owners of the Safe
    """
    objects = SafeContractDelegateManager()
    safe_contract = models.ForeignKey(SafeContract, on_delete=models.CASCADE, related_name='safe_contract_delegates')
    delegate = EthereumAddressField()
    delegator = EthereumAddressField()  # Owner who created the delegate
    label = models.CharField(max_length=50)
    read = models.BooleanField(default=True)  # For permissions in the future
    write = models.BooleanField(default=True)

    class Meta:
        unique_together = (('safe_contract', 'delegate'),)

    def __str__(self):
        return f'Delegate={self.delegate} for Safe={self.safe_contract_id} - Label={self.label}'


class SafeStatusManager(models.Manager):
    pass


class SafeStatusQuerySet(models.QuerySet):
    def sorted_by_internal_tx(self):
        # Newest SafeStatus first (block, tx index, trace address descending)
        return self.order_by(
            'address',
            '-internal_tx__ethereum_tx__block_id',
            '-internal_tx__ethereum_tx__transaction_index',
            '-internal_tx__trace_address',
        )

    def sorted_reverse_by_internal_tx(self):
        return self.order_by(
            'address',
            'internal_tx__ethereum_tx__block_id',
            'internal_tx__ethereum_tx__transaction_index',
            'internal_tx__trace_address',
        )

    def addresses_for_owner(self, owner_address: str) -> List[str]:
        # Safes whose CURRENT status lists `owner_address` as an owner
        return self.filter(
            owners__contains=[owner_address],
            internal_tx__in=self.last_for_every_address().values('pk')
        ).values_list('address', flat=True)

    def last_for_every_address(self) -> QuerySet:
        return self.distinct(
            'address'  # Uses PostgreSQL `DISTINCT ON`
        ).select_related(
            'internal_tx__ethereum_tx'
        ).sorted_by_internal_tx()

    def last_for_address(self, address: str) -> Optional['SafeStatus']:
        safe_status = self.last_for_every_address().filter(
            address=address
        ).first()
        if not safe_status:
            logger.error('SafeStatus not found for address=%s', address)
        return safe_status


class SafeStatus(models.Model):
    objects = SafeStatusManager.from_queryset(SafeStatusQuerySet)()
    internal_tx = models.OneToOneField(InternalTx, on_delete=models.CASCADE, related_name='safe_status',
                                       primary_key=True)
    address = EthereumAddressField(db_index=True)
    owners = ArrayField(EthereumAddressField())
    threshold = Uint256Field()
    nonce = Uint256Field(default=0)
    master_copy = EthereumAddressField()
    fallback_handler = EthereumAddressField()
    enabled_modules = ArrayField(EthereumAddressField(), default=list)

    class Meta:
        unique_together = (('internal_tx', 'address'),)
        verbose_name_plural = 'Safe statuses'

    def __str__(self):
        return f'safe={self.address} threshold={self.threshold} owners={self.owners} nonce={self.nonce}'

    @property
    def block_number(self):
        return self.internal_tx.ethereum_tx.block_id

    def store_new(self, internal_tx: InternalTx) -> None:
        # Persist this status snapshot bound to the internal tx that produced it
        self.internal_tx = internal_tx
        return self.save()


class WebHookType(Enum):
    NEW_CONFIRMATION = 0
    PENDING_MULTISIG_TRANSACTION = 1
    EXECUTED_MULTISIG_TRANSACTION = 2
    INCOMING_ETHER = 3
    INCOMING_TOKEN = 4


class WebHookQuerySet(models.QuerySet):
    def matching_for_address(self, address: str):
        # Hooks bound to the address plus catch-all hooks (empty address)
        return self.filter(Q(address=address) | Q(address=''))


class WebHook(models.Model):
    objects = WebHookQuerySet.as_manager()
    address = EthereumAddressField(db_index=True, blank=True)
    url = models.URLField()
    # Configurable webhook types to listen to
    new_confirmation = models.BooleanField(default=True)
    pending_outgoing_transaction = models.BooleanField(default=True)
    new_executed_outgoing_transaction = models.BooleanField(default=True)
    new_incoming_transaction = models.BooleanField(default=True)

    class Meta:
        unique_together = (('address', 'url'),)

    def __str__(self):
        if self.address:
            return f'Webhook for safe={self.address} to url={self.url}'
        else:
            return f'Webhook to every address to url={self.url}'
import datetime from enum import Enum from itertools import islice from logging import getLogger from typing import Any, Dict, List, Optional, Tuple, Type, TypedDict from django.contrib.postgres.fields import ArrayField from django.contrib.postgres.indexes import GinIndex from django.db import IntegrityError, models from django.db.models import Case, JSONField, Q, QuerySet, Sum from django.db.models.expressions import (F, OuterRef, RawSQL, Subquery, Value, When) from django.db.models.signals import post_save from hexbytes import HexBytes from model_utils.models import TimeStampedModel from gnosis.eth.constants import ERC20_721_TRANSFER_TOPIC from gnosis.eth.django.models import (EthereumAddressField, HexField, Sha3HashField, Uint256Field) from gnosis.safe import SafeOperation from gnosis.safe.safe_signature import SafeSignatureType from .utils import clean_receipt_log logger = getLogger(__name__) class ConfirmationType(Enum): CONFIRMATION = 0 EXECUTION = 1 class EthereumTxCallType(Enum): # https://ethereum.stackexchange.com/questions/63743/whats-the-difference-between-type-and-calltype-in-parity-trace CALL = 0 DELEGATE_CALL = 1 CALL_CODE = 2 STATIC_CALL = 3 @staticmethod def parse_call_type(call_type: Optional[str]): if not call_type: return None call_type = call_type.lower() if call_type == 'call': return EthereumTxCallType.CALL elif call_type == 'delegatecall': return EthereumTxCallType.DELEGATE_CALL elif call_type == 'callcode': return EthereumTxCallType.CALL_CODE elif call_type == 'staticcall': return EthereumTxCallType.STATIC_CALL else: return None class EthereumTxType(Enum): CALL = 0 CREATE = 1 SELF_DESTRUCT = 2 REWARD = 3 @staticmethod def parse(tx_type: str): tx_type = tx_type.upper() if tx_type == 'CALL': return EthereumTxType.CALL elif tx_type == 'CREATE': return EthereumTxType.CREATE elif tx_type == 'SUICIDE': return EthereumTxType.SELF_DESTRUCT elif tx_type == 'REWARD': return EthereumTxType.REWARD else: raise ValueError(f'{tx_type} is not a valid 
EthereumTxType') class TransferDict(TypedDict): block_number: int transaction_hash: HexBytes to: str _from: str value: int execution_date: datetime.datetime token_id: int token_address: str class BulkCreateSignalMixin: def bulk_create(self, objs, batch_size: Optional[int] = None, ignore_conflicts: bool = False): objs = list(objs) # If not it won't be iterate later result = super().bulk_create(objs, batch_size=batch_size, ignore_conflicts=ignore_conflicts) for obj in objs: post_save.send(obj.__class__, instance=obj, created=True) return result def bulk_create_from_generator(self, objs, batch_size: int = 10000, ignore_conflicts: bool = False) -> int: """ Implementation in Django is not ok, as it will do `objs = list(objs)`. If objects come from a generator they will be brought to RAM. This approach is more friendly :return: Count of inserted elements """ assert batch_size is not None and batch_size > 0 total = 0 while True: if inserted := len(self.bulk_create(islice(objs, batch_size), ignore_conflicts=ignore_conflicts)): total += inserted else: return total # class EnsLabel(models.Model): # label_hash = Sha3HashField(unique=True) # Keccak of the label # label = models.CharField(max_length=20, blank=True) # def __str__(self): # return f'Label={self.label} with hash={self.label_hash}' class EthereumBlockManager(models.Manager): def get_or_create_from_block(self, block: Dict[str, Any], confirmed: bool = False): try: return self.get(number=block['number']) except self.model.DoesNotExist: return self.create_from_block(block, confirmed=confirmed) def create_from_block(self, block: Dict[str, Any], confirmed: bool = False) -> 'EthereumBlock': """ :param block: Block Dict returned by Web3 :param confirmed: If True we will not check for reorgs in the future :return: EthereumBlock model """ try: return super().create( number=block['number'], gas_limit=block['gasLimit'], gas_used=block['gasUsed'], timestamp=datetime.datetime.fromtimestamp(block['timestamp'], 
datetime.timezone.utc), block_hash=block['hash'], parent_hash=block['parentHash'], confirmed=confirmed, ) except IntegrityError: # The block could be created in the meantime by other task while the block was fetched from blockchain return self.get(number=block['number']) class EthereumBlockQuerySet(models.QuerySet): def not_confirmed(self, to_block_number: Optional[int] = None): queryset = self.filter(confirmed=False) if to_block_number is not None: queryset = queryset.filter(number__lte=to_block_number) return queryset.order_by('number') class EthereumBlock(models.Model): objects = EthereumBlockManager.from_queryset(EthereumBlockQuerySet)() number = models.PositiveIntegerField(primary_key=True) gas_limit = models.PositiveIntegerField() gas_used = models.PositiveIntegerField() timestamp = models.DateTimeField() block_hash = Sha3HashField(unique=True) parent_hash = Sha3HashField(unique=True) # For reorgs, True if `current_block_number` - `number` >= MIN_CONFIRMATIONS confirmed = models.BooleanField(default=False, db_index=True) def __str__(self): return f'Block number={self.number} on {self.timestamp}' def set_confirmed(self): self.confirmed = True self.save(update_fields=['confirmed']) class EthereumTxManager(models.Manager): def create_from_tx_dict(self, tx: Dict[str, Any], tx_receipt: Optional[Dict[str, Any]] = None, ethereum_block: Optional[EthereumBlock] = None) -> 'EthereumTx': data = HexBytes(tx.get('data') or tx.get('input')) return super().create( block=ethereum_block, tx_hash=HexBytes(tx['hash']).hex(), _from=tx['from'], gas=tx['gas'], gas_price=tx['gasPrice'], gas_used=tx_receipt and tx_receipt['gasUsed'], logs=tx_receipt and [clean_receipt_log(log) for log in tx_receipt.get('logs', list())], status=tx_receipt and tx_receipt.get('status'), transaction_index=tx_receipt and tx_receipt['transactionIndex'], data=data if data else None, nonce=tx['nonce'], to=tx.get('to'), value=tx['value'], ) class EthereumTx(TimeStampedModel): objects = EthereumTxManager() 
block = models.ForeignKey(EthereumBlock, on_delete=models.CASCADE, null=True, default=None, related_name='txs') # If mined tx_hash = Sha3HashField(primary_key=True) gas_used = Uint256Field(null=True, default=None) # If mined status = models.IntegerField(null=True, default=None, db_index=True) # If mined. Old txs don't have `status` logs = ArrayField(JSONField(), null=True, default=None) # If mined transaction_index = models.PositiveIntegerField(null=True, default=None) # If mined _from = EthereumAddressField(null=True, db_index=True) gas = Uint256Field() gas_price = Uint256Field() data = models.BinaryField(null=True) nonce = Uint256Field() to = EthereumAddressField(null=True, db_index=True) value = Uint256Field() def __str__(self): return '{} status={} from={} to={}'.format(self.tx_hash, self.status, self._from, self.to) @property def execution_date(self) -> Optional[datetime.datetime]: if self.block_id is not None: return self.block.timestamp return None @property def success(self) -> Optional[bool]: if self.status is not None: return self.status == 1 def update_with_block_and_receipt(self, ethereum_block: 'EthereumBlock', tx_receipt: Dict[str, Any]): if self.block is None: self.block = ethereum_block self.gas_used = tx_receipt['gasUsed'] self.logs = [clean_receipt_log(log) for log in tx_receipt.get('logs', list())] self.status = tx_receipt.get('status') self.transaction_index = tx_receipt['transactionIndex'] return self.save(update_fields=['block', 'gas_used', 'logs', 'status', 'transaction_index']) class EthereumEventQuerySet(models.QuerySet): def not_erc_20_721_events(self): return self.exclude(topic=ERC20_721_TRANSFER_TOPIC) def erc20_and_721_events(self, token_address: Optional[str] = None, address: Optional[str] = None): queryset = self.filter(topic=ERC20_721_TRANSFER_TOPIC) if token_address: queryset = queryset.filter(address=token_address) if address: queryset = queryset.filter(Q(arguments__to=address) | Q(arguments__from=address)) return queryset def 
erc20_events(self, token_address: Optional[str] = None, address: Optional[str] = None): return self.erc20_and_721_events(token_address=token_address, address=address).filter(arguments__has_key='value') def erc721_events(self, token_address: Optional[str] = None, address: Optional[str] = None): return self.erc20_and_721_events(token_address=token_address, address=address).filter(arguments__has_key='tokenId') def erc721_owned_by(self, address: str) -> List[Tuple[str, int]]: """ Returns erc721 owned by address, removing the ones sent :return: List of tuples(token_address: str, token_id: int) """ # Get all the token history erc721_events = self.erc721_events(address=address) # Get tokens received and remove tokens transferred tokens_in: Tuple[str, int] = [] tokens_out: Tuple[str, int] = [] for erc721_event in erc721_events: token_address = erc721_event.address token_id = erc721_event.arguments.get('tokenId') if token_id is None: logger.error('TokenId for ERC721 info token=%s with owner=%s can never be None', token_address, address) continue if erc721_event.arguments.get('to') == address: list_to_append = tokens_in else: list_to_append = tokens_out list_to_append.append((token_address, token_id)) for token_out in tokens_out: # Remove tokens sent from list if token_out in tokens_in: tokens_in.remove(token_out) return tokens_in class EthereumEventManager(BulkCreateSignalMixin, models.Manager): def from_decoded_event(self, decoded_event: Dict[str, Any]) -> 'EthereumEvent': """ Does not create the model. 
class EthereumEventManager(BulkCreateSignalMixin, models.Manager):
    def from_decoded_event(self, decoded_event: Dict[str, Any]) -> 'EthereumEvent':
        """
        Does not create the model. Requires that `ethereum_tx` exists
        :param decoded_event:
        :return: `EthereumEvent` instance (not stored in database)
        """
        return EthereumEvent(ethereum_tx_id=decoded_event['transactionHash'],
                             log_index=decoded_event['logIndex'],
                             address=decoded_event['address'],
                             topic=decoded_event['topics'][0],
                             topics=decoded_event['topics'],
                             arguments=decoded_event['args'])

    def erc20_tokens_used_by_address(self, address: str) -> List[str]:
        """
        :param address:
        :return: List of token addresses used by an address
        """
        # Raw query instead of `self.erc20_events(address=address).values_list('address', flat=True).distinct()`
        # so the manual Btree indexes on `(arguments->'to')::text` / `(arguments->'from')::text` are used
        address_as_postgres_text = f'"{address}"'
        events = self.raw("""SELECT DISTINCT "id", "address" FROM "history_ethereumevent"
                          WHERE ("topic" = %s
                                 AND (("arguments" -> 'to')::text = %s
                                      OR ("arguments" -> 'from')::text = %s)
                                 AND "arguments" ? 'value')
                          """, [ERC20_721_TRANSFER_TOPIC[2:], address_as_postgres_text, address_as_postgres_text])
        return [event.address for event in events]

    def erc721_tokens_used_by_address(self, address: str) -> List[str]:
        """
        :param address:
        :return: List of token addresses used by an address
        """
        # Same raw query as `erc20_tokens_used_by_address`, keyed on `tokenId`.
        # Fixed: placeholders were quoted in the SQL ('%s' and '"%s"'), which breaks
        # driver-side parameter binding -- params must be bare %s, as in the ERC20 query above
        address_as_postgres_text = f'"{address}"'
        events = self.raw("""SELECT DISTINCT "id", "address" FROM "history_ethereumevent"
                          WHERE ("topic" = %s
                                 AND (("arguments" -> 'to')::text = %s
                                      OR ("arguments" -> 'from')::text = %s)
                                 AND "arguments" ? 'tokenId')
                          """, [ERC20_721_TRANSFER_TOPIC[2:], address_as_postgres_text, address_as_postgres_text])
        return [event.address for event in events]

    def erc20_tokens_with_balance(self, address: str) -> List[Dict[str, Any]]:
        """
        :return: List of dictionaries {'token_address': str, 'balance': int}
        """
        arguments_value_field = RawSQL("(arguments->>'value')::numeric", ())
        return self.erc20_events(
            address=address
        ).values('address').annotate(
            # Incoming transfers add, outgoing transfers subtract
            balance=Sum(Case(
                When(arguments__from=address, then=-arguments_value_field),
                default=arguments_value_field,
            ))
        ).order_by('-balance').values('address', 'balance')

    def get_or_create_erc20_or_721_event(self, decoded_event: Dict[str, Any]):
        """Get or create an ERC20 (`value`) or ERC721 (`tokenId`) Transfer event.

        :raises ValueError: when the event arguments carry neither `value` nor `tokenId`
        """
        # Fixed: original condition used `or`, which raised unless BOTH keys were present --
        # but a Transfer event carries either `value` or `tokenId`, never both, so every
        # valid event raised. Reject only events that have neither key.
        if 'value' not in decoded_event['args'] and 'tokenId' not in decoded_event['args']:
            raise ValueError('Invalid ERC20 or ERC721 event %s' % decoded_event)
        return self.get_or_create(ethereum_tx_id=decoded_event['transactionHash'],
                                  log_index=decoded_event['logIndex'],
                                  defaults={'address': decoded_event['address'],
                                            'topic': decoded_event['topics'][0],
                                            'topics': decoded_event['topics'],
                                            'arguments': decoded_event['args'],
                                            })


class EthereumEvent(models.Model):
    objects = EthereumEventManager.from_queryset(EthereumEventQuerySet)()
    ethereum_tx = models.ForeignKey(EthereumTx, on_delete=models.CASCADE, related_name='events')
    log_index = models.PositiveIntegerField()
    address = EthereumAddressField(db_index=True)
    topic = Sha3HashField(db_index=True)
    topics = ArrayField(Sha3HashField())
    arguments = JSONField()

    class Meta:
        unique_together = (('ethereum_tx', 'log_index'),)
        indexes = [GinIndex(fields=['arguments'])]
        # There are also 2 indexes created manually by 0026 migration, both Btree for arguments->to and arguments->from
        # To use that indexes json queries must be rewritten to use `::text` fields

    def __str__(self):
        return f'Tx-hash={self.ethereum_tx_id} Log-index={self.log_index} Topic={self.topic} Arguments={self.arguments}'

    @property
    def created(self):
        return self.ethereum_tx.block.timestamp

    def is_erc20(self) -> bool:
        return self.topic == ERC20_721_TRANSFER_TOPIC and 'value' in self.arguments and 'to' in self.arguments

    def is_erc721(self) -> bool:
        return self.topic == ERC20_721_TRANSFER_TOPIC and 'tokenId' in self.arguments and 'to' in self.arguments
-> bool: return self.topic == ERC20_721_TRANSFER_TOPIC and 'value' in self.arguments and 'to' in self.arguments def is_erc721(self) -> bool: return self.topic == ERC20_721_TRANSFER_TOPIC and 'tokenId' in self.arguments and 'to' in self.arguments class InternalTxManager(BulkCreateSignalMixin, models.Manager): def _trace_address_to_str(self, trace_address) -> str: return ','.join([str(address) for address in trace_address]) def build_from_trace(self, trace: Dict[str, Any], ethereum_tx: EthereumTx) -> 'InternalTx': """ Build a InternalTx object from trace, but it doesn't insert it on database :param trace: :param ethereum_tx: :return: InternalTx not inserted """ data = trace['action'].get('input') or trace['action'].get('init') tx_type = EthereumTxType.parse(trace['type']) call_type = EthereumTxCallType.parse_call_type(trace['action'].get('callType')) trace_address_str = self._trace_address_to_str(trace['traceAddress']) return InternalTx( ethereum_tx=ethereum_tx, trace_address=trace_address_str, _from=trace['action'].get('from'), gas=trace['action'].get('gas', 0), data=data if data else None, to=trace['action'].get('to') or trace['action'].get('address'), value=trace['action'].get('value') or trace['action'].get('balance', 0), gas_used=(trace.get('result') or {}).get('gasUsed', 0), contract_address=(trace.get('result') or {}).get('address'), code=(trace.get('result') or {}).get('code'), output=(trace.get('result') or {}).get('output'), refund_address=trace['action'].get('refundAddress'), tx_type=tx_type.value, call_type=call_type.value if call_type else None, error=trace.get('error') ) def get_or_create_from_trace(self, trace: Dict[str, Any], ethereum_tx: EthereumTx) -> Tuple['InternalTx', bool]: tx_type = EthereumTxType.parse(trace['type']) call_type = EthereumTxCallType.parse_call_type(trace['action'].get('callType')) trace_address_str = self._trace_address_to_str(trace['traceAddress']) return self.get_or_create( ethereum_tx=ethereum_tx, 
trace_address=trace_address_str, defaults={ '_from': trace['action'].get('from'), 'gas': trace['action'].get('gas', 0), 'data': trace['action'].get('input') or trace['action'].get('init'), 'to': trace['action'].get('to') or trace['action'].get('address'), 'value': trace['action'].get('value') or trace['action'].get('balance', 0), 'gas_used': (trace.get('result') or {}).get('gasUsed', 0), 'contract_address': (trace.get('result') or {}).get('address'), 'code': (trace.get('result') or {}).get('code'), 'output': (trace.get('result') or {}).get('output'), 'refund_address': trace['action'].get('refundAddress'), 'tx_type': tx_type.value, 'call_type': call_type.value if call_type else None, 'error': trace.get('error'), } ) class InternalTxQuerySet(models.QuerySet): def ether_txs(self): return self.filter( call_type=EthereumTxCallType.CALL.value, value__gt=0 ).annotate( transaction_hash=F('ethereum_tx_id'), block_number=F('ethereum_tx__block_id'), execution_date=F('ethereum_tx__block__timestamp'), token_id=Value(None, output_field=Uint256Field()), token_address=Value(None, output_field=EthereumAddressField()), ).order_by('-ethereum_tx__block_id') def ether_txs_for_address(self, address: str): return self.ether_txs().filter(Q(to=address) | Q(_from=address)) def ether_incoming_txs_for_address(self, address: str): return self.ether_txs().filter(to=address) def token_txs(self): return EthereumEvent.objects.erc20_and_721_events().annotate( to=RawSQL("arguments->>%s", ('to',)), # Order is really important! 
_from=RawSQL("arguments->>%s", ('from',)), value=RawSQL("(arguments->>%s)::numeric", ('value',)), transaction_hash=F('ethereum_tx_id'), block_number=F('ethereum_tx__block_id'), execution_date=F('ethereum_tx__block__timestamp'), token_id=RawSQL("(arguments->>%s)::numeric", ('tokenId',)), token_address=F('address') ).order_by('-ethereum_tx__block_id') def token_txs_for_address(self, address: str): return self.token_txs().filter(Q(arguments__to=address) | Q(arguments__from=address)) def token_incoming_txs_for_address(self, address: str): return self.token_txs().filter(arguments__to=address) def ether_and_token_txs(self, address: str): tokens_queryset = self.token_txs_for_address(address) ether_queryset = self.ether_txs_for_address(address) return self.union_ether_and_token_txs(tokens_queryset, ether_queryset) def ether_and_token_incoming_txs(self, address: str): tokens_queryset = self.token_incoming_txs_for_address(address) ether_queryset = self.ether_incoming_txs_for_address(address) return self.union_ether_and_token_txs(tokens_queryset, ether_queryset) def union_ether_and_token_txs(self, tokens_queryset: QuerySet, ether_queryset: QuerySet) -> TransferDict: values = ('block_number', 'transaction_hash', 'to', '_from', 'value', 'execution_date', 'token_id', 'token_address') return ether_queryset.values(*values).union(tokens_queryset.values(*values)).order_by('-block_number') def can_be_decoded(self): """ Every InternalTx can be decoded if: - Has data - InternalTx is not errored - EthereumTx is successful (not reverted or out of gas) - CallType is a DELEGATE_CALL (to the master copy contract) - Not already decoded :return: Txs that can be decoded """ return self.exclude( data=None ).filter( call_type=EthereumTxCallType.DELEGATE_CALL.value, error=None, ethereum_tx__status=1, decoded_tx=None, ) class InternalTx(models.Model): objects = InternalTxManager.from_queryset(InternalTxQuerySet)() ethereum_tx = models.ForeignKey(EthereumTx, on_delete=models.CASCADE, 
related_name='internal_txs') _from = EthereumAddressField(null=True, db_index=True) # For SELF-DESTRUCT it can be null gas = Uint256Field() data = models.BinaryField(null=True) # `input` for Call, `init` for Create to = EthereumAddressField(null=True, db_index=True) value = Uint256Field() gas_used = Uint256Field() contract_address = EthereumAddressField(null=True, db_index=True) # Create code = models.BinaryField(null=True) # Create output = models.BinaryField(null=True) # Call refund_address = EthereumAddressField(null=True, db_index=True) # For SELF-DESTRUCT tx_type = models.PositiveSmallIntegerField(choices=[(tag.value, tag.name) for tag in EthereumTxType], db_index=True) call_type = models.PositiveSmallIntegerField(null=True, choices=[(tag.value, tag.name) for tag in EthereumTxCallType], db_index=True) # Call trace_address = models.CharField(max_length=600) # Stringified traceAddress error = models.CharField(max_length=200, null=True) class Meta: unique_together = (('ethereum_tx', 'trace_address'),) def __str__(self): if self.to: return 'Internal tx hash={} from={} to={}'.format(self.ethereum_tx_id, self._from, self.to) else: return 'Internal tx hash={} from={}'.format(self.ethereum_tx_id, self._from) @property def block_number(self): return self.ethereum_tx.block_id @property def created(self): return self.ethereum_tx.block.timestamp @property def can_be_decoded(self) -> bool: return bool(self.is_delegate_call and not self.error and self.data and self.ethereum_tx.success) @property def is_call(self): return EthereumTxType(self.tx_type) == EthereumTxType.CALL @property def is_create(self): return EthereumTxType(self.tx_type) == EthereumTxType.CREATE @property def is_decoded(self): try: return bool(self.decoded_tx) except InternalTxDecoded.DoesNotExist: return False @property def is_delegate_call(self) -> bool: if self.call_type is None: return False else: return EthereumTxCallType(self.call_type) == EthereumTxCallType.DELEGATE_CALL @property def 
    @property
    def is_ether_transfer(self) -> bool:
        # A plain CALL moving a positive amount of ether
        return self.call_type == EthereumTxCallType.CALL.value and self.value > 0

    @property
    def is_relevant(self):
        # Internal txs worth keeping: decodable Safe calls, ether transfers or contract deployments
        return self.can_be_decoded or self.is_ether_transfer or self.contract_address

    @property
    def trace_address_as_list(self) -> List[int]:
        # `trace_address` is stored stringified, e.g. '0,2,1'
        return [int(x) for x in self.trace_address.split(',')]


class InternalTxDecodedManager(BulkCreateSignalMixin, models.Manager):
    pass


class InternalTxDecodedQuerySet(models.QuerySet):
    def for_indexed_safes(self):
        """Restrict to decoded txs for Safes already indexed, plus `setup` calls
        (so brand new Safes can be indexed without relying on events)."""
        return self.filter(
            Q(internal_tx___from__in=SafeContract.objects.values('address'))  # Just Safes indexed
            | Q(function_name='setup')  # This way we can index new Safes without events
        )

    def not_processed(self):
        # Decoded txs not yet applied to the stored SafeStatus
        return self.filter(processed=False)

    def order_by_processing_queue(self):
        """
        :return: Transactions ordered to be processed. First older transactions
        """
        return self.order_by(
            'internal_tx__ethereum_tx__block_id',
            'internal_tx__ethereum_tx__transaction_index',
            'internal_tx__trace_address',
        )

    def pending_for_safes(self):
        """
        :return: Pending `InternalTxDecoded` sorted by block number and then transaction index inside the block
        """
        return self.not_processed(
        ).for_indexed_safes(
        ).select_related(
            'internal_tx__ethereum_tx__block',
        ).order_by_processing_queue()

    def pending_for_safe(self, safe_address: str):
        """
        :return: Pending `InternalTxDecoded` for one Safe, sorted by block number and then
        transaction index inside the block
        """
        return self.pending_for_safes().filter(internal_tx___from=safe_address)

    def safes_pending_to_be_processed(self) -> QuerySet:
        """
        :return: List of Safe addresses that have transactions pending to be processed
        """
        return self.not_processed().for_indexed_safes().values_list(
            'internal_tx___from', flat=True
        ).distinct('internal_tx___from')
function_name = models.CharField(max_length=256, db_index=True) arguments = JSONField() processed = models.BooleanField(default=False) class Meta: indexes = [ models.Index(name='history_decoded_processed_idx', fields=['processed'], condition=Q(processed=False)) ] verbose_name_plural = 'Internal txs decoded' def __str__(self): return f'{"Processed" if self.processed else "Not Processed"} ' \ f'fn-name={self.function_name} with arguments={self.arguments}' @property def address(self) -> str: return self.internal_tx._from @property def block_number(self) -> Type[int]: return self.internal_tx.ethereum_tx.block_id @property def tx_hash(self) -> Type[int]: return self.internal_tx.ethereum_tx_id def set_processed(self): self.processed = True return self.save(update_fields=['processed']) class MultisigTransactionManager(models.Manager): def last_nonce(self, safe: str) -> Optional[int]: """ :param safe: :return: nonce of the last executed and mined transaction. It will be None if there's no transactions or none of them is mined """ nonce_query = self.filter(safe=safe).exclude(ethereum_tx=None).order_by('-nonce').values('nonce').first() if nonce_query: return nonce_query['nonce'] def last_valid_transaction(self, safe: str) -> Optional['MultisigTransaction']: """ Find last transaction where signers match the owners registered for that Safe. Transactions out of sync have an invalid `safeNonce`, so `safeTxHash` is not valid and owners recovered from the signatures wouldn't be valid. We exclude `Approved hashes` and `Contract signatures` as that owners are not retrieved using the signature, so they will show the right owner even if `safeNonce` is not valid :param safe: :return: Last valid indexed transaction mined """ # Build list of every owner known for that Safe (even if it was deleted/replaced). 
class MultisigTransactionQuerySet(models.QuerySet):
    def executed(self):
        """Transactions already mined (their ethereum tx belongs to a block)."""
        return self.exclude(ethereum_tx__block=None)

    def not_executed(self):
        """Transactions still pending (no mined ethereum tx)."""
        return self.filter(ethereum_tx__block=None)

    def with_confirmations(self):
        """Transactions having at least one confirmation."""
        return self.exclude(confirmations__isnull=True)

    def without_confirmations(self):
        """Transactions lacking any confirmation."""
        return self.filter(confirmations__isnull=True)

    def with_confirmations_required(self):
        """
        Add confirmations required for execution when the tx was mined (threshold of the Safe at that point)
        :return: queryset with `confirmations_required: int` field
        """
        threshold_subquery = SafeStatus.objects.filter(
            internal_tx__ethereum_tx=OuterRef('ethereum_tx')
        ).sorted_reverse_by_internal_tx().values('threshold')[:1]
        return self.annotate(confirmations_required=Subquery(threshold_subquery))
    refund_receiver = EthereumAddressField(null=True)
    signatures = models.BinaryField(null=True)  # When tx is executed
    nonce = Uint256Field(db_index=True)
    failed = models.BooleanField(null=True, default=None, db_index=True)
    origin = models.CharField(null=True, default=None, max_length=100)  # To store arbitrary data on the tx
    trusted = models.BooleanField(default=False, db_index=True)  # Txs proposed by a delegate or with one confirmation

    def __str__(self):
        return f'{self.safe} - {self.nonce} - {self.safe_tx_hash}'

    @property
    def execution_date(self) -> Optional[datetime.datetime]:
        # Timestamp of the mining block, or None while the tx is pending
        if self.ethereum_tx_id and self.ethereum_tx.block_id is not None:
            return self.ethereum_tx.block.timestamp
        return None

    @property
    def executed(self) -> bool:
        # Executed means the ethereum tx exists and is mined (has a block)
        return bool(self.ethereum_tx_id and (self.ethereum_tx.block_id is not None))

    def owners(self) -> Optional[List[str]]:
        """Owners that signed this tx, or None when there are no signatures."""
        if not self.signatures:
            return None
        else:
            # TODO Get owners from signatures. Not very trivial
            return []


class ModuleTransaction(TimeStampedModel):
    """Transaction executed on a Safe through an enabled module (not through owner signatures)."""
    internal_tx = models.OneToOneField(InternalTx, on_delete=models.CASCADE, related_name='module_tx',
                                       primary_key=True)
    safe = EthereumAddressField(db_index=True)  # Just for convenience, it could be retrieved from `internal_tx`
    module = EthereumAddressField(db_index=True)  # Just for convenience, it could be retrieved from `internal_tx`
    to = EthereumAddressField(db_index=True)
    value = Uint256Field()
    data = models.BinaryField(null=True)
    operation = models.PositiveSmallIntegerField(choices=[(tag.value, tag.name) for tag in SafeOperation])
    failed = models.BooleanField(default=False)

    def __str__(self):
        if self.value:
            return f'{self.safe} - {self.to} - {self.value}'
        else:
            return f'{self.safe} - {self.to} - {HexBytes(self.data.tobytes()).hex()[:8]}'

    @property
    def execution_date(self) -> Optional[datetime.datetime]:
        if self.internal_tx.ethereum_tx_id and self.internal_tx.ethereum_tx.block_id is not None:
            return self.internal_tx.ethereum_tx.block.timestamp
        return None
class MultisigConfirmationManager(models.Manager):
    def remove_unused_confirmations(self, safe: str, current_safe_none: int, owner: str) -> int:
        """
        :return: Remove confirmations for not executed transactions with nonce higher or equal than
        the current Safe nonce for a Safe and an owner (as an owner can be an owner of multiple Safes).
        Used when an owner is removed from the Safe.
        """
        # NOTE(review): parameter name `current_safe_none` looks like a typo of
        # `current_safe_nonce`; kept as-is because keyword callers may depend on it
        return self.filter(
            multisig_transaction__ethereum_tx=None,  # Not executed
            multisig_transaction__safe=safe,
            multisig_transaction__nonce__gte=current_safe_none,
            owner=owner,
        ).delete()[0]  # QuerySet.delete() returns (total_deleted, per_model_counts)


class MultisigConfirmationQuerySet(models.QuerySet):
    def without_transaction(self):
        # Confirmations received before their MultisigTransaction was indexed
        return self.filter(multisig_transaction=None)

    def with_transaction(self):
        return self.exclude(multisig_transaction=None)


class MultisigConfirmation(TimeStampedModel):
    objects = MultisigConfirmationManager.from_queryset(MultisigConfirmationQuerySet)()
    ethereum_tx = models.ForeignKey(EthereumTx, on_delete=models.CASCADE, related_name='multisig_confirmations',
                                    null=True)  # `null=True` for signature confirmations
    multisig_transaction = models.ForeignKey(MultisigTransaction, on_delete=models.CASCADE,
                                             null=True, related_name='confirmations')
    multisig_transaction_hash = Sha3HashField(null=True,
                                              db_index=True)  # Use this while we don't have a `multisig_transaction`
    owner = EthereumAddressField()
    signature = HexField(null=True, default=None, max_length=2000)
    signature_type = models.PositiveSmallIntegerField(choices=[(tag.value, tag.name) for tag in SafeSignatureType],
                                                      db_index=True)

    class Meta:
        unique_together = (('multisig_transaction_hash', 'owner'),)
        ordering = ['created']

    def __str__(self):
        if self.multisig_transaction_id:
            return f'Confirmation of owner={self.owner} for transaction-hash={self.multisig_transaction_hash}'
        else:
            return f'Confirmation of owner={self.owner} for existing transaction={self.multisig_transaction_hash}'
from_block_number: int, block_number: int, database_field: str) -> int: """ :param addresses: Addresses to have the block number updated :param from_block_number: Make sure that no reorg has happened checking that block number was not rollbacked :param block_number: Block number to be updated :param database_field: Database field to store the block number :return: Number of entities updated """ return self.filter( **{'address__in': addresses, database_field + '__gte': from_block_number - 1, # Protect in case of reorg } ).update(**{database_field: block_number}) class MonitoredAddressQuerySet(models.QuerySet): def almost_updated(self, database_field: str, current_block_number: int, updated_blocks_behind: int, confirmations: int): return self.filter( **{database_field + '__lt': current_block_number - confirmations, database_field + '__gt': current_block_number - updated_blocks_behind}) def not_updated(self, database_field: str, current_block_number: int, confirmations: int): return self.filter( **{database_field + '__lt': current_block_number - confirmations} ) class MonitoredAddress(models.Model): objects = MonitoredAddressManager.from_queryset(MonitoredAddressQuerySet)() address = EthereumAddressField(primary_key=True) initial_block_number = models.IntegerField(default=0) # Block number when address received first tx tx_block_number = models.IntegerField(null=True, default=None, db_index=True) # Block number when last internal tx scan ended class Meta: abstract = True verbose_name_plural = 'Monitored addresses' def __str__(self): return f'Address={self.address} - Initial-block-number={self.initial_block_number}' \ f' - Tx-block-number={self.tx_block_number}' class ProxyFactory(MonitoredAddress): class Meta: verbose_name_plural = 'Proxy factories' ordering = ['tx_block_number'] class SafeMasterCopy(MonitoredAddress): version = models.CharField(max_length=20) class Meta: verbose_name_plural = 'Safe master copies' ordering = ['tx_block_number'] class 
class SafeContractManager(MonitoredAddressManager):
    pass


class SafeContract(models.Model):
    objects = SafeContractManager.from_queryset(MonitoredAddressQuerySet)()
    address = EthereumAddressField(primary_key=True)
    ethereum_tx = models.ForeignKey(EthereumTx, on_delete=models.CASCADE, related_name='safe_contracts')
    erc20_block_number = models.IntegerField(default=0, db_index=True)  # Block number of last scan of erc20

    def __str__(self):
        return f'Safe address={self.address} - ethereum-tx={self.ethereum_tx_id}'

    @property
    def created_block_number(self) -> Optional[Type[int]]:
        # Block where the deployment tx was mined, if the tx is set
        if self.ethereum_tx:
            return self.ethereum_tx.block_id


class SafeContractDelegateManager(models.Manager):
    def get_delegates_for_safe(self, address: str) -> List[str]:
        """Return delegate addresses registered for the Safe at `address`."""
        return list(self.filter(safe_contract_id=address).values_list('delegate', flat=True))


class SafeContractDelegate(models.Model):
    """
    The owners of the Safe can add users so they can propose/retrieve txs as if they were the owners of the Safe
    """
    objects = SafeContractDelegateManager()
    safe_contract = models.ForeignKey(SafeContract, on_delete=models.CASCADE, related_name='safe_contract_delegates')
    delegate = EthereumAddressField()
    delegator = EthereumAddressField()  # Owner who created the delegate
    label = models.CharField(max_length=50)
    read = models.BooleanField(default=True)  # For permissions in the future
    write = models.BooleanField(default=True)

    class Meta:
        unique_together = (('safe_contract', 'delegate'),)

    def __str__(self):
        return f'Delegate={self.delegate} for Safe={self.safe_contract_id} - Label={self.label}'


class SafeStatusManager(models.Manager):
    pass
'internal_tx__trace_address', ) def addresses_for_owner(self, owner_address: str) -> List[str]: return self.filter( owners__contains=[owner_address], internal_tx__in=self.last_for_every_address().values('pk') ).values_list('address', flat=True) def last_for_every_address(self) -> QuerySet: return self.distinct( 'address' # Uses PostgreSQL `DISTINCT ON` ).select_related( 'internal_tx__ethereum_tx' ).sorted_by_internal_tx() def last_for_address(self, address: str) -> Optional['SafeStatus']: safe_status = self.last_for_every_address().filter( address=address ).first() if not safe_status: logger.error('SafeStatus not found for address=%s', address) return safe_status class SafeStatus(models.Model): objects = SafeStatusManager.from_queryset(SafeStatusQuerySet)() internal_tx = models.OneToOneField(InternalTx, on_delete=models.CASCADE, related_name='safe_status', primary_key=True) address = EthereumAddressField(db_index=True) owners = ArrayField(EthereumAddressField()) threshold = Uint256Field() nonce = Uint256Field(default=0) master_copy = EthereumAddressField() fallback_handler = EthereumAddressField() enabled_modules = ArrayField(EthereumAddressField(), default=list) class Meta: unique_together = (('internal_tx', 'address'),) verbose_name_plural = 'Safe statuses' def __str__(self): return f'safe={self.address} threshold={self.threshold} owners={self.owners} nonce={self.nonce}' @property def block_number(self): return self.internal_tx.ethereum_tx.block_id def store_new(self, internal_tx: InternalTx) -> None: self.internal_tx = internal_tx return self.save() class WebHookType(Enum): NEW_CONFIRMATION = 0 PENDING_MULTISIG_TRANSACTION = 1 EXECUTED_MULTISIG_TRANSACTION = 2 INCOMING_ETHER = 3 INCOMING_TOKEN = 4 class WebHookQuerySet(models.QuerySet): def matching_for_address(self, address: str): return self.filter(Q(address=address) | Q(address='')) class WebHook(models.Model): objects = WebHookQuerySet.as_manager() address = EthereumAddressField(db_index=True, blank=True) 
url = models.URLField() # Configurable webhook types to listen to new_confirmation = models.BooleanField(default=True) pending_outgoing_transaction = models.BooleanField(default=True) new_executed_outgoing_transaction = models.BooleanField(default=True) new_incoming_transaction = models.BooleanField(default=True) class Meta: unique_together = (('address', 'url'),) def __str__(self): if self.address: return f'Webhook for safe={self.address} to url={self.url}' else: return f'Webhook to every address to url={self.url}'
#!/usr/bin/env python3 # # MIT License # # Copyright (c) 2020-2021 EntySec # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
#

import os
import copy

from hatvenom import HatVenom

from hatsploit.core.base.types import Types
from hatsploit.core.cli.badges import Badges
from hatsploit.core.db.importer import Importer
from hatsploit.lib.payloads import Payloads
from hatsploit.lib.sessions import Sessions
from hatsploit.lib.storage import LocalStorage


class Modules:
    """Registry, lookup and option validation for loaded hatsploit modules."""

    types = Types()
    badges = Badges()
    sessions = Sessions()
    payloads = Payloads()
    local_storage = LocalStorage()
    importer = Importer()

    def check_exist(self, name):
        """True when `name` exists in any module database."""
        return self.get_database(name) is not None

    def check_imported(self, name):
        """True when module `name` has already been imported."""
        imported_modules = self.local_storage.get("imported_modules")
        return bool(imported_modules) and name in imported_modules

    def check_current_module(self):
        """True when a module is currently selected."""
        current_module = self.local_storage.get("current_module")
        return bool(current_module) and len(current_module) > 0

    def check_if_already_used(self, module):
        """True when `module` is the module currently in use."""
        return self.check_current_module() and module == self.get_current_module_name()

    def get_module_object(self, name):
        """Return the stored module object for `name`, or None when unknown."""
        database = self.get_database(name)
        if database is None:
            return None
        return self.local_storage.get("modules")[database][name]

    def get_current_module_object(self):
        """Return the currently selected module object, or None."""
        if not self.check_current_module():
            return None
        return self.local_storage.get_array("current_module",
                                            self.local_storage.get("current_module_number"))

    def get_current_module_platform(self):
        """Return the 'Platform' detail of the current module, or None."""
        current_module = self.get_current_module_object()
        return current_module.details['Platform'] if current_module is not None else None

    def get_current_module_name(self):
        """Return the 'Module' detail (full name) of the current module, or None."""
        current_module = self.get_current_module_object()
        return current_module.details['Module'] if current_module is not None else None

    def get_database(self, name):
        """Return the database name containing module `name`, or None."""
        all_modules = self.local_storage.get("modules")
        if all_modules:
            for database, modules in all_modules.items():
                if name in modules:
                    return database
        return None

    def compare_type(self, name, value, checker, module=True):
        """Validate `value` (or every line of a `file:<path>` reference) with `checker`.

        :param name: human-readable type name used in error messages
        :param checker: predicate returning truthy for valid values
        :param module: when True, allow the `file:<path>` expansion syntax
        :return: True when valid, False (after printing an error) otherwise
        """
        value = str(value)

        if value.startswith('file:') and len(value) > 5 and module:
            file = value.split(':')[1]

            if not os.path.isfile(file):
                self.badges.print_error(f"Local file: {file}: does not exist!")
                return False

            with open(file, 'r') as f:
                for line in f.read().split('\n'):
                    if line.strip():
                        if not checker(line.strip()):
                            self.badges.print_error(f"File contains invalid value, expected valid {name}!")
                            return False
            return True

        if not checker(value):
            self.badges.print_error(f"Invalid value, expected valid {name}!")
            return False
        return True

    def compare_session(self, value_type, value):
        """Validate that `value` refers to an existing session matching the declared
        session spec, e.g. `session`, `session->meterpreter` or `session->[linux,macos]`."""
        session = value_type.lower().replace(' ', '')
        session = session.split('->')

        session_platforms = []
        session_platform = self.get_current_module_platform()
        session_type = "shell"

        # `session->X` where X is either a type or a [platform,...] list;
        # `session->X->Y` carries both, in either order
        if len(session) == 2:
            if session[1].startswith('[') and session[1].endswith(']'):
                session_platforms = session[1][1:-1].split(',')
            else:
                session_type = session[1]
        elif len(session) == 3:
            if session[1].startswith('[') and session[1].endswith(']'):
                session_platforms = session[1][1:-1].split(',')
            else:
                session_type = session[1]

            if session[2].startswith('[') and session[2].endswith(']'):
                session_platforms = session[2][1:-1].split(',')
            else:
                session_type = session[2]

        if not session_platforms:
            # No explicit platforms: fall back to the current module's platform
            if not self.sessions.check_exist(value, session_platform, session_type):
                self.badges.print_error("Invalid value, expected valid session!")
                return False
        else:
            session = 0
            for platform in session_platforms:
                if self.sessions.check_exist(value, platform.strip(), session_type):
                    session = 1
                    break

            if not session:
                self.badges.print_error("Invalid value, expected valid session!")
                return False
        return True

    def compare_types(self, value_type, value, module=True):
        """Validate `value` against the declared option type `value_type`.

        :return: True when the value is acceptable (or the type is empty/'all'),
        False otherwise (after printing an error)
        """
        # Fixed: original tested `not value_type.lower == 'all'` (missing call
        # parentheses), comparing the bound method to a string -- always truthy,
        # so the 'all' bypass was unreachable
        if not value_type or value_type.lower() == 'all':
            return True

        lowered = value_type.lower()

        # Simple scalar types dispatch to their checker
        type_checkers = {
            'mac': ("MAC", self.types.is_mac),
            'ip': ("IP", self.types.is_ip),
            'ipv4': ("IPv4", self.types.is_ipv4),
            'ipv6': ("IPv6", self.types.is_ipv6),
            'ipv4_range': ("IPv4 range", self.types.is_ipv4_range),
            'ipv6_range': ("IPv6 range", self.types.is_ipv6_range),
            'port': ("port", self.types.is_port),
            'port_range': ("port range", self.types.is_port_range),
            'number': ("number", self.types.is_number),
            'integer': ("integer", self.types.is_integer),
            'float': ("float", self.types.is_float),
            'boolean': ("boolean", self.types.is_boolean),
        }
        if lowered in type_checkers:
            name, checker = type_checkers[lowered]
            return self.compare_type(name, value, checker, module)

        if lowered == 'payload':
            current_module = self.get_current_module_object()
            module_name = self.get_current_module_name()

            module_payload = current_module.payload
            categories = module_payload['Categories']
            types = module_payload['Types']
            platforms = module_payload['Platforms']
            architectures = module_payload['Architectures']

            if self.payloads.check_module_compatible(value, categories, types, platforms, architectures):
                if self.payloads.add_payload(module_name, value):
                    return True
            # Fixed typo: was "Invalid valud"
            self.badges.print_error("Invalid value, expected valid payload!")
            return False

        if 'session' in lowered:
            value = str(value)

            if value.startswith('file:') and len(value) > 5 and module:
                file = value.split(':')[1]

                if not os.path.isfile(file):
                    self.badges.print_error(f"Local file: {file}: does not exist!")
                    return False

                with open(file, 'r') as f:
                    for line in f.read().split('\n'):
                        if line.strip():
                            if not self.compare_session(value_type, line.strip()):
                                self.badges.print_error("File contains invalid value, expected valid session!")
                                return False
                return True
            return self.compare_session(value_type, value)

        # Unknown declared types are accepted, matching previous behavior
        return True
self.badges.print_error("Unrecognized option!") else: self.badges.print_error("Unrecognized option!") else: self.badges.print_warning("No module selected.") def import_module(self, name): modules = self.get_module_object(name) try: module_object = self.importer.import_module(modules['Path']) if not self.local_storage.get("imported_modules"): self.local_storage.set("imported_modules", {}) self.local_storage.update("imported_modules", {name: module_object}) except Exception: return None return module_object def add_module(self, name): imported_modules = self.local_storage.get("imported_modules") if self.check_imported(name): module_object = imported_modules[name] self.add_to_global(module_object) else: module_object = self.import_module(name) if module_object: if hasattr(module_object, "payload"): payload_name = module_object.payload['Value'] if payload_name: self.badges.print_process(f"Using default payload {payload_name}...") if self.payloads.check_exist(payload_name): if self.payloads.add_payload(name, payload_name): self.add_to_global(module_object) return self.badges.print_error("Invalid default payload!") return self.add_to_global(module_object) else: self.badges.print_error("Failed to select module from database!") def add_to_global(self, module_object): if self.check_current_module(): self.local_storage.add_array("current_module", '') self.local_storage.set("current_module_number", self.local_storage.get("current_module_number") + 1) self.local_storage.set_array("current_module", self.local_storage.get("current_module_number"), module_object) else: self.local_storage.set("current_module", []) self.local_storage.set("current_module_number", 0) self.local_storage.add_array("current_module", '') self.local_storage.set_array("current_module", self.local_storage.get("current_module_number"), module_object) def use_module(self, module): modules_shorts = self.local_storage.get("module_shorts") if modules_shorts: if module.isdigit(): module_number = int(module) if 
module_number in modules_shorts: module = modules_shorts[module_number] if not self.check_if_already_used(module): if self.check_exist(module): self.add_module(module) else: self.badges.print_error("Invalid module!") def go_back(self): if self.check_current_module(): self.local_storage.set("current_module_number", self.local_storage.get("current_module_number") - 1) self.local_storage.set("current_module", self.local_storage.get("current_module")[0:-1]) if not self.local_storage.get("current_module"): self.local_storage.set("current_module_number", 0) def entry_to_module(self, current_module): values = [] for option in current_module.options: opt = current_module.options[option] val = str(opt['Value']) if val.startswith('file:') and len(val) > 5: file = val[5:] with open(file, 'r') as f: vals = f.read().strip() values.append(vals.split('\n')) if not values: current_module.run() return if not all(len(value) == len(values[0]) for value in values): self.badges.print_error("All files should contain equal number of values!") return save = copy.deepcopy(current_module.options) for i in range(0, len(values[0])): count = 0 for option in current_module.options: opt = current_module.options[option] val = str(opt['Value']) if val.startswith('file:') and len(val) > 5: current_module.options[option]['Value'] = values[count][i] count += 1 try: current_module.run() except (KeyboardInterrupt, EOFError): pass current_module.options = save save = copy.deepcopy(current_module.options) def run_current_module(self): if self.check_current_module(): current_module = self.get_current_module_object() current_module_name = self.get_current_module_name() current_payload = self.payloads.get_current_payload() missed = "" if hasattr(current_module, "options"): for option in current_module.options: current_option = current_module.options[option] if not current_option['Value'] and current_option['Value'] != 0 and current_option['Required']: missed += option + ', ' if current_payload: if 
hasattr(current_payload, "options"): for option in current_payload.options: current_option = current_payload.options[option] if not current_option['Value'] and current_option['Value'] != 0 and current_option['Required']: missed += option + ', ' if len(missed) > 0: self.badges.print_error(f"These options are failed to validate: {missed[:-2]}!") else: try: if current_payload: hatvenom = HatVenom() payload_data = current_payload.run() payload_details = current_payload.details executable = 'raw' for executable_format in self.types.formats: if payload_details['Platform'] in self.types.formats[executable_format]: executable = executable_format break if isinstance(payload_data, tuple): raw = hatvenom.generate('raw', 'generic', payload_data[0], payload_data[1]) payload = hatvenom.generate( executable if payload_details['Architecture'] != 'generic' else 'raw', payload_details['Architecture'], payload_data[0], payload_data[1]) else: raw = hatvenom.generate('raw', 'generic', payload_data) payload = hatvenom.generate( executable if payload_details['Architecture'] != 'generic' else 'raw', payload_details['Architecture'], payload_data) current_module.payload['Details'] = payload_details current_module.payload['Raw'] = raw current_module.payload['Payload'] = payload self.badges.print_empty() self.entry_to_module(current_module) self.badges.print_success(f"{current_module_name.split("/")[0].title()} module completed!") except (KeyboardInterrupt, EOFError): self.badges.print_warning(f"{current_module_name.split("/")[0].title()} module interrupted.") except Exception as e: self.badges.print_error(f"An error occurred in module: {str(e)}!") self.badges.print_error(f"{current_module_name.split("/")[0].title()} module failed!") if current_payload: for key in ['Details', 'Raw', 'Payload']: if key in current_module.payload: del current_module.payload[key] else: self.badges.print_warning("No module selected.")
#!/usr/bin/env python3 # # MIT License # # Copyright (c) 2020-2021 EntySec # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
#
import os
import copy

from hatvenom import HatVenom

from hatsploit.core.base.types import Types
from hatsploit.core.cli.badges import Badges
from hatsploit.core.db.importer import Importer
from hatsploit.lib.payloads import Payloads
from hatsploit.lib.sessions import Sessions
from hatsploit.lib.storage import LocalStorage


# NOTE(review): this is a second, near-identical definition of the Modules
# class that already appears earlier in this file; at import time this
# redefinition shadows the first. Confirm whether the duplication is an
# accidental concatenation and remove one copy.
class Modules:
    """Manage HatSploit modules.

    Provides lookup of modules in the local storage databases, selection of
    a "current" module (kept as a stack so ``back`` works), validation and
    assignment of module/payload options, and execution of the current
    module (optionally generating its payload with HatVenom first).
    """

    types = Types()
    badges = Badges()
    sessions = Sessions()
    payloads = Payloads()
    local_storage = LocalStorage()
    importer = Importer()

    def check_exist(self, name):
        """Return True if a module called *name* exists in any database."""
        all_modules = self.local_storage.get("modules")
        if all_modules:
            for database in all_modules:
                if name in all_modules[database]:
                    return True
        return False

    def check_imported(self, name):
        """Return True if module *name* has already been imported."""
        imported_modules = self.local_storage.get("imported_modules")
        if imported_modules and name in imported_modules:
            return True
        return False

    def check_current_module(self):
        """Return True if a module is currently selected."""
        current = self.local_storage.get("current_module")
        if current and len(current) > 0:
            return True
        return False

    def check_if_already_used(self, module):
        """Return True if *module* is already the current module."""
        if self.check_current_module():
            if module == self.get_current_module_name():
                return True
        return False

    def get_module_object(self, name):
        """Return the stored descriptor for module *name*, or None."""
        if self.check_exist(name):
            database = self.get_database(name)
            return self.local_storage.get("modules")[database][name]
        return None

    def get_current_module_object(self):
        """Return the currently selected module object, or None."""
        if self.check_current_module():
            return self.local_storage.get_array(
                "current_module",
                self.local_storage.get("current_module_number"))
        return None

    def get_current_module_platform(self):
        """Return the 'Platform' detail of the current module, or None."""
        if self.check_current_module():
            return self.local_storage.get_array(
                "current_module",
                self.local_storage.get("current_module_number")).details['Platform']
        return None

    def get_current_module_name(self):
        """Return the 'Module' detail (full name) of the current module, or None."""
        if self.check_current_module():
            return self.local_storage.get_array(
                "current_module",
                self.local_storage.get("current_module_number")).details['Module']
        return None

    def get_database(self, name):
        """Return the name of the database containing module *name*, or None."""
        all_modules = self.local_storage.get("modules")
        if all_modules:
            for database in all_modules:
                if name in all_modules[database]:
                    return database
        return None

    def compare_type(self, name, value, checker, module=True):
        """Validate *value* (or every line of a ``file:`` reference) with *checker*.

        :param name: human-readable type name used in error messages
        :param checker: predicate returning truthy for valid values
        :param module: when True, a ``file:<path>`` value is expanded and
            each non-blank line of the file is validated instead
        :return: True if valid, False otherwise (an error is printed)
        """
        value = str(value)

        if value.startswith('file:') and len(value) > 5 and module:
            file = value.split(':')[1]

            if not os.path.isfile(file):
                self.badges.print_error(f"Local file: {file}: does not exist!")
                return False

            with open(file, 'r') as f:
                for line in f.read().split('\n'):
                    if line.strip():
                        if not checker(line.strip()):
                            self.badges.print_error(f"File contains invalid value, expected valid {name}!")
                            return False
            return True

        if not checker(value):
            self.badges.print_error(f"Invalid value, expected valid {name}!")
            return False
        return True

    def compare_session(self, value_type, value):
        """Validate *value* as a session id matching a ``session->type->[platforms]`` spec.

        *value_type* may be ``session``, ``session->type``,
        ``session->[platforms]`` or ``session->type->[platforms]``; defaults
        are type ``shell`` and the current module's platform.
        """
        spec = value_type.lower().replace(' ', '').split('->')

        session_platforms = []
        session_platform = self.get_current_module_platform()
        session_type = "shell"

        if len(spec) == 2:
            if spec[1].startswith('[') and spec[1].endswith(']'):
                session_platforms = spec[1][1:-1].split(',')
            else:
                session_type = spec[1]
        elif len(spec) == 3:
            if spec[1].startswith('[') and spec[1].endswith(']'):
                session_platforms = spec[1][1:-1].split(',')
            else:
                session_type = spec[1]
            if spec[2].startswith('[') and spec[2].endswith(']'):
                session_platforms = spec[2][1:-1].split(',')
            else:
                session_type = spec[2]

        if not session_platforms:
            if not self.sessions.check_exist(value, session_platform, session_type):
                self.badges.print_error("Invalid value, expected valid session!")
                return False
        else:
            # Accept the session if it exists on any of the listed platforms.
            found = 0
            for platform in session_platforms:
                if self.sessions.check_exist(value, platform.strip(), session_type):
                    found = 1
                    break
            if not found:
                self.badges.print_error("Invalid value, expected valid session!")
                return False
        return True

    # Dispatch table for the simple scalar option types: maps the lowered
    # type keyword to (display name, checker) used by compare_type().
    _SCALAR_TYPES = {
        'mac': ("MAC", 'is_mac'),
        'ip': ("IP", 'is_ip'),
        'ipv4': ("IPv4", 'is_ipv4'),
        'ipv6': ("IPv6", 'is_ipv6'),
        'ipv4_range': ("IPv4 range", 'is_ipv4_range'),
        'ipv6_range': ("IPv6 range", 'is_ipv6_range'),
        'port': ("port", 'is_port'),
        'port_range': ("port range", 'is_port_range'),
        'number': ("number", 'is_number'),
        'integer': ("integer", 'is_integer'),
        'float': ("float", 'is_float'),
        'boolean': ("boolean", 'is_boolean'),
    }

    def compare_types(self, value_type, value, module=True):
        """Validate *value* against the declared option *value_type*.

        Unknown types (and type 'all') are accepted. 'payload' values are
        checked for compatibility with the current module and installed as
        its payload; 'session' specs are delegated to compare_session().
        """
        # FIX: the original tested ``not value_type.lower == 'all'`` — comparing
        # the bound method object to a string — so the 'all' shortcut never fired.
        if not value_type or value_type.lower() == 'all':
            return True

        lowered = value_type.lower()

        if lowered in self._SCALAR_TYPES:
            display_name, checker_name = self._SCALAR_TYPES[lowered]
            return self.compare_type(display_name, value, getattr(self.types, checker_name), module)

        if lowered == 'payload':
            current_module = self.get_current_module_object()
            module_name = self.get_current_module_name()

            module_payload = current_module.payload
            categories = module_payload['Categories']
            types = module_payload['Types']
            platforms = module_payload['Platforms']
            architectures = module_payload['Architectures']

            if self.payloads.check_module_compatible(value, categories, types,
                                                     platforms, architectures):
                if self.payloads.add_payload(module_name, value):
                    return True
            # FIX: typo "valud" -> "value" in the error message.
            self.badges.print_error("Invalid value, expected valid payload!")
            return False

        if 'session' in lowered:
            value = str(value)

            if value.startswith('file:') and len(value) > 5 and module:
                file = value.split(':')[1]

                if not os.path.isfile(file):
                    self.badges.print_error(f"Local file: {file}: does not exist!")
                    return False

                with open(file, 'r') as f:
                    for line in f.read().split('\n'):
                        if line.strip():
                            if not self.compare_session(value_type, line.strip()):
                                self.badges.print_error("File contains invalid value, expected valid session!")
                                return False
                return True

            return self.compare_session(value_type, value)

        return True

    def set_current_module_option(self, option, value):
        """Set *option* of the current module (or its payload) to *value*.

        Payload-typed options accept numeric shortcuts from the
        ``payload_shorts`` table; 'blinder' set to y/yes clears the payload.
        """
        if self.check_current_module():
            current_module = self.get_current_module_object()

            if not hasattr(current_module, "options") and not hasattr(current_module, "payload"):
                self.badges.print_warning("Module has no options.")
                return

            if hasattr(current_module, "options"):
                if option in current_module.options:
                    value_type = current_module.options[option]['Type']

                    if value_type == 'payload':
                        # Resolve numeric payload shortcut to a full name.
                        payloads_shorts = self.local_storage.get("payload_shorts")
                        if payloads_shorts:
                            if value.isdigit():
                                payload_number = int(value)
                                if payload_number in payloads_shorts:
                                    value = payloads_shorts[payload_number]

                    if self.compare_types(value_type, value):
                        self.badges.print_information(option + " ==> " + value)

                        if option.lower() == 'blinder':
                            if value.lower() in ['y', 'yes']:
                                current_module.payload['Value'] = None

                        if value_type == 'payload':
                            self.local_storage.set_module_payload(
                                "current_module",
                                self.local_storage.get("current_module_number"),
                                value
                            )
                        else:
                            self.local_storage.set_module_option(
                                "current_module",
                                self.local_storage.get("current_module_number"),
                                option,
                                value
                            )
                    return

            if hasattr(current_module, "payload"):
                # The option may belong to the currently attached payload.
                current_payload = self.payloads.get_current_payload()
                if current_payload and hasattr(current_payload, "options"):
                    if option in current_payload.options:
                        value_type = current_payload.options[option]['Type']
                        if self.compare_types(value_type, value, False):
                            self.badges.print_information(option + " ==> " + value)
                            self.local_storage.set_payload_option(
                                current_module.details['Module'],
                                current_payload.details['Payload'],
                                option, value)
                    else:
                        self.badges.print_error("Unrecognized option!")
                else:
                    self.badges.print_error("Unrecognized option!")
            else:
                self.badges.print_error("Unrecognized option!")
        else:
            self.badges.print_warning("No module selected.")

    def import_module(self, name):
        """Import module *name* from its path and cache it; None on failure."""
        modules = self.get_module_object(name)
        try:
            module_object = self.importer.import_module(modules['Path'])
            if not self.local_storage.get("imported_modules"):
                self.local_storage.set("imported_modules", {})
            self.local_storage.update("imported_modules", {name: module_object})
        except Exception:
            return None
        return module_object

    def add_module(self, name):
        """Import (or reuse) module *name* and make it the current module."""
        imported_modules = self.local_storage.get("imported_modules")

        if self.check_imported(name):
            module_object = imported_modules[name]
            self.add_to_global(module_object)
        else:
            module_object = self.import_module(name)
            if module_object:
                if hasattr(module_object, "payload"):
                    payload_name = module_object.payload['Value']
                    if payload_name:
                        self.badges.print_process(f"Using default payload {payload_name}...")
                        if self.payloads.check_exist(payload_name):
                            if self.payloads.add_payload(name, payload_name):
                                self.add_to_global(module_object)
                                return
                        self.badges.print_error("Invalid default payload!")
                        return
                self.add_to_global(module_object)
            else:
                self.badges.print_error("Failed to select module from database!")

    def add_to_global(self, module_object):
        """Push *module_object* onto the current-module stack."""
        if self.check_current_module():
            self.local_storage.add_array("current_module", '')
            self.local_storage.set("current_module_number",
                                   self.local_storage.get("current_module_number") + 1)
            self.local_storage.set_array("current_module",
                                         self.local_storage.get("current_module_number"),
                                         module_object)
        else:
            self.local_storage.set("current_module", [])
            self.local_storage.set("current_module_number", 0)
            self.local_storage.add_array("current_module", '')
            self.local_storage.set_array("current_module",
                                         self.local_storage.get("current_module_number"),
                                         module_object)

    def use_module(self, module):
        """Select *module* (full name or numeric shortcut) as current."""
        modules_shorts = self.local_storage.get("module_shorts")

        if modules_shorts:
            if module.isdigit():
                module_number = int(module)
                if module_number in modules_shorts:
                    module = modules_shorts[module_number]

        if not self.check_if_already_used(module):
            if self.check_exist(module):
                self.add_module(module)
            else:
                self.badges.print_error("Invalid module!")

    def go_back(self):
        """Pop the current module from the stack (the ``back`` command)."""
        if self.check_current_module():
            self.local_storage.set("current_module_number",
                                   self.local_storage.get("current_module_number") - 1)
            self.local_storage.set("current_module",
                                   self.local_storage.get("current_module")[0:-1])
            if not self.local_storage.get("current_module"):
                self.local_storage.set("current_module_number", 0)

    def entry_to_module(self, current_module):
        """Run *current_module*, once per row of any ``file:`` option values.

        Options whose value is ``file:<path>`` are expanded: the files must
        all contain the same number of lines, and the module is run once per
        line index with the original options restored between runs.
        """
        values = []

        for option in current_module.options:
            opt = current_module.options[option]
            val = str(opt['Value'])

            if val.startswith('file:') and len(val) > 5:
                file = val[5:]
                with open(file, 'r') as f:
                    vals = f.read().strip()
                values.append(vals.split('\n'))

        if not values:
            current_module.run()
            return

        if not all(len(value) == len(values[0]) for value in values):
            self.badges.print_error("All files should contain equal number of values!")
            return

        # Snapshot the options so each run starts from the file: originals.
        save = copy.deepcopy(current_module.options)
        for i in range(0, len(values[0])):
            count = 0
            for option in current_module.options:
                opt = current_module.options[option]
                val = str(opt['Value'])
                if val.startswith('file:') and len(val) > 5:
                    current_module.options[option]['Value'] = values[count][i]
                    count += 1
            try:
                current_module.run()
            except (KeyboardInterrupt, EOFError):
                pass

            current_module.options = save
            save = copy.deepcopy(current_module.options)

    def run_current_module(self):
        """Validate required options, build the payload if any, and run.

        Missing required options abort with an error; payload artifacts
        ('Details', 'Raw', 'Payload') are removed from the module after the
        run regardless of outcome.
        """
        if self.check_current_module():
            current_module = self.get_current_module_object()
            current_module_name = self.get_current_module_name()
            current_payload = self.payloads.get_current_payload()

            # Collect required options that are unset (0 counts as set).
            missed = ""
            if hasattr(current_module, "options"):
                for option in current_module.options:
                    current_option = current_module.options[option]
                    if not current_option['Value'] and current_option['Value'] != 0 and current_option['Required']:
                        missed += option + ', '

            if current_payload:
                if hasattr(current_payload, "options"):
                    for option in current_payload.options:
                        current_option = current_payload.options[option]
                        if not current_option['Value'] and current_option['Value'] != 0 and current_option['Required']:
                            missed += option + ', '

            if len(missed) > 0:
                self.badges.print_error(f"These options are failed to validate: {missed[:-2]}!")
            else:
                try:
                    if current_payload:
                        hatvenom = HatVenom()
                        payload_data = current_payload.run()
                        payload_details = current_payload.details

                        # Pick the executable format matching the payload platform.
                        executable = 'raw'
                        for executable_format in self.types.formats:
                            if payload_details['Platform'] in self.types.formats[executable_format]:
                                executable = executable_format
                                break

                        if isinstance(payload_data, tuple):
                            raw = hatvenom.generate('raw', 'generic',
                                                    payload_data[0], payload_data[1])
                            payload = hatvenom.generate(
                                executable if payload_details['Architecture'] != 'generic' else 'raw',
                                payload_details['Architecture'],
                                payload_data[0], payload_data[1])
                        else:
                            raw = hatvenom.generate('raw', 'generic', payload_data)
                            payload = hatvenom.generate(
                                executable if payload_details['Architecture'] != 'generic' else 'raw',
                                payload_details['Architecture'],
                                payload_data)

                        current_module.payload['Details'] = payload_details
                        current_module.payload['Raw'] = raw
                        current_module.payload['Payload'] = payload

                    self.badges.print_empty()
                    self.entry_to_module(current_module)
                    self.badges.print_success(f"{current_module_name.split('/')[0].title()} module completed!")
                except (KeyboardInterrupt, EOFError):
                    self.badges.print_warning(f"{current_module_name.split('/')[0].title()} module interrupted.")
                except Exception as e:
                    self.badges.print_error(f"An error occurred in module: {str(e)}!")
                    self.badges.print_error(f"{current_module_name.split('/')[0].title()} module failed!")

                if current_payload:
                    for key in ['Details', 'Raw', 'Payload']:
                        if key in current_module.payload:
                            del current_module.payload[key]
        else:
            self.badges.print_warning("No module selected.")
import copy import datetime import inspect import json import logging import traceback import warnings from collections import defaultdict, namedtuple from collections.abc import Hashable from typing import Any, Dict, Iterable, List, Optional, Set import pandas as pd from dateutil.parser import parse from tqdm.auto import tqdm from great_expectations import __version__ as ge_version from great_expectations.core.batch import Batch from great_expectations.core.expectation_configuration import ExpectationConfiguration from great_expectations.core.expectation_suite import ( ExpectationSuite, expectationSuiteSchema, ) from great_expectations.core.expectation_validation_result import ( ExpectationSuiteValidationResult, ExpectationValidationResult, ) from great_expectations.core.id_dict import BatchSpec from great_expectations.core.run_identifier import RunIdentifier from great_expectations.data_asset.util import recursively_convert_to_json_serializable from great_expectations.dataset import PandasDataset, SparkDFDataset, SqlAlchemyDataset from great_expectations.dataset.sqlalchemy_dataset import SqlAlchemyBatchReference from great_expectations.exceptions import ( GreatExpectationsError, InvalidExpectationConfigurationError, ) from great_expectations.execution_engine import ( ExecutionEngine, PandasExecutionEngine, SparkDFExecutionEngine, SqlAlchemyExecutionEngine, ) from great_expectations.execution_engine.pandas_batch_data import PandasBatchData from great_expectations.expectations.registry import ( get_expectation_impl, get_metric_provider, list_registered_expectation_implementations, ) from great_expectations.marshmallow__shade import ValidationError from great_expectations.types import ClassConfig from great_expectations.util import load_class, verify_dynamic_loading_support from great_expectations.validator.validation_graph import ( MetricConfiguration, MetricEdge, ValidationGraph, ) logger = logging.getLogger(__name__) logging.captureWarnings(True) class Validator: 
def __init__( self, execution_engine, interactive_evaluation=True, expectation_suite=None, expectation_suite_name=None, data_context=None, batches=None, **kwargs, ): """ Initialize the DataAsset. :param profiler (profiler class) = None: The profiler that should be run on the data_asset to build a baseline expectation suite. Note: DataAsset is designed to support multiple inheritance (e.g. PandasDataset inherits from both a Pandas DataFrame and Dataset which inherits from DataAsset), so it accepts generic *args and **kwargs arguments so that they can also be passed to other parent classes. In python 2, there isn't a clean way to include all of *args, **kwargs, and a named kwarg...so we use the inelegant solution of popping from kwargs, leaving the support for the profiler parameter not obvious from the signature. """ self._data_context = data_context self._execution_engine = execution_engine self._expose_dataframe_methods = False self._validator_config = {} if batches is None: batches = tuple() self._batches = dict() for batch in batches: assert isinstance( batch, Batch ), "batches provided to Validator must be Great Expectations Batch objects" self._execution_engine.load_batch_data(batch.id, batch.data) self._batches[batch.id] = batch if len(batches) > 1: logger.warning( f"{len(batches)} batches will be added to this Validator. 
The batch_identifiers for the active " f"batch are {self.active_batch.batch_definition["batch_identifiers"].items()}" ) self.interactive_evaluation = interactive_evaluation self._initialize_expectations( expectation_suite=expectation_suite, expectation_suite_name=expectation_suite_name, ) self._default_expectation_args = { "include_config": True, "catch_exceptions": False, "result_format": "BASIC", } self._validator_config = {} # This special state variable tracks whether a validation run is going on, which will disable # saving expectation config objects self._active_validation = False if self._data_context and hasattr( self._data_context, "_expectation_explorer_manager" ): # TODO: verify flow of default expectation arguments self.set_default_expectation_argument("include_config", True) def __dir__(self): """ This custom magic method is used to enable expectation tab completion on Validator objects. It also allows users to call Pandas.DataFrame methods on Validator objects """ validator_attrs = set(super().__dir__()) class_expectation_impls = set(list_registered_expectation_implementations()) # execution_engine_expectation_impls = ( # { # attr_name # for attr_name in self.execution_engine.__dir__() # if attr_name.startswith("expect_") # } # if self.execution_engine # else set() # ) combined_dir = ( validator_attrs | class_expectation_impls # | execution_engine_expectation_impls ) if self._expose_dataframe_methods: combined_dir | set(dir(pd.DataFrame)) return list(combined_dir) @property def expose_dataframe_methods(self): return self._expose_dataframe_methods @expose_dataframe_methods.setter def expose_dataframe_methods(self, value: bool): self._expose_dataframe_methods = value def __getattr__(self, name): if name.startswith("expect_") and get_expectation_impl(name): return self.validate_expectation(name) elif ( self._expose_dataframe_methods and isinstance(self.active_batch.data, PandasBatchData) and hasattr(pd.DataFrame, name) ): return 
getattr(self.active_batch.data.dataframe, name) else: raise AttributeError( f"'{type(self).__name__}' object has no attribute '{name}'" ) def validate_expectation(self, name): """ Given the name of an Expectation, obtains the Class-first Expectation implementation and utilizes the expectation's validate method to obtain a validation result. Also adds in the runtime configuration Args: name (str): The name of the Expectation being validated Returns: The Expectation's validation result """ def inst_expectation(*args, **kwargs): try: expectation_impl = get_expectation_impl(name) allowed_config_keys = expectation_impl.get_allowed_config_keys() expectation_kwargs = recursively_convert_to_json_serializable(kwargs) meta = None # This section uses Expectation class' legacy_method_parameters attribute to maintain support for passing # positional arguments to expectation methods legacy_arg_names = expectation_impl.legacy_method_parameters.get( name, tuple() ) for idx, arg in enumerate(args): try: arg_name = legacy_arg_names[idx] if arg_name in allowed_config_keys: expectation_kwargs[arg_name] = arg if arg_name == "meta": meta = arg except IndexError: raise InvalidExpectationConfigurationError( f"Invalid positional argument: {arg}" ) # this is used so that exceptions are caught appropriately when they occur in expectation config basic_runtime_configuration = { k: v for k, v in kwargs.items() if k in ("result_format", "include_config", "catch_exceptions") } configuration = ExpectationConfiguration( expectation_type=name, kwargs=expectation_kwargs, meta=meta ) # runtime_configuration = configuration.get_runtime_kwargs() expectation = expectation_impl(configuration) """Given an implementation and a configuration for any Expectation, returns its validation result""" if not self.interactive_evaluation and not self._active_validation: validation_result = ExpectationValidationResult( expectation_config=copy.deepcopy(expectation.configuration) ) else: validation_result = 
expectation.validate( validator=self, evaluation_parameters=self._expectation_suite.evaluation_parameters, data_context=self._data_context, runtime_configuration=basic_runtime_configuration, ) # If validate has set active_validation to true, then we do not save the config to avoid # saving updating expectation configs to the same suite during validation runs if self._active_validation is True: stored_config = configuration.get_raw_configuration() else: # Append the expectation to the config. stored_config = self._expectation_suite.add_expectation( configuration.get_raw_configuration() ) # If there was no interactive evaluation, success will not have been computed. if validation_result.success is not None: # Add a "success" object to the config stored_config.success_on_last_run = validation_result.success if self._data_context is not None: validation_result = self._data_context.update_return_obj( self, validation_result ) except Exception as err: if basic_runtime_configuration.get("catch_exceptions"): raised_exception = True exception_traceback = traceback.format_exc() exception_message = "{}: {}".format(type(err).__name__, str(err)) validation_result = ExpectationValidationResult(success=False) validation_result.exception_info = { "raised_exception": raised_exception, "exception_message": exception_message, "exception_traceback": exception_traceback, } else: raise err return validation_result inst_expectation.__name__ = name return inst_expectation @property def execution_engine(self): """Returns the execution engine being used by the validator at the given time""" return self._execution_engine def list_available_expectation_types(self): """ Returns a list of all expectations available to the validator""" keys = dir(self) return [ expectation for expectation in keys if expectation.startswith("expect_") ] def get_metrics(self, metrics: Dict[str, MetricConfiguration]) -> Dict[str, Any]: """Return a dictionary with the requested metrics""" graph = ValidationGraph() 
        # --- continuation of get_metrics(): populate defaults, build the
        # dependency graph for each requested metric, then resolve it. ---
        resolved_metrics = {}
        for metric_name, metric_configuration in metrics.items():
            provider_cls, _ = get_metric_provider(
                metric_configuration.metric_name, self.execution_engine
            )
            # Fill in any domain kwargs the caller omitted, from the provider's
            # declared defaults (mutates the caller's MetricConfiguration).
            for key in provider_cls.domain_keys:
                if (
                    key not in metric_configuration.metric_domain_kwargs
                    and key in provider_cls.default_kwarg_values
                ):
                    metric_configuration.metric_domain_kwargs[
                        key
                    ] = provider_cls.default_kwarg_values[key]
            # Likewise for value kwargs.
            for key in provider_cls.value_keys:
                if (
                    key not in metric_configuration.metric_value_kwargs
                    and key in provider_cls.default_kwarg_values
                ):
                    metric_configuration.metric_value_kwargs[
                        key
                    ] = provider_cls.default_kwarg_values[key]
            self.build_metric_dependency_graph(
                graph,
                child_node=metric_configuration,
                configuration=None,
                execution_engine=self._execution_engine,
                runtime_configuration=None,
            )
        # Resolve the whole graph; resolved values are keyed by metric id.
        self.resolve_validation_graph(graph, resolved_metrics)
        return {
            metric_name: resolved_metrics[metric_configuration.id]
            for (metric_name, metric_configuration) in metrics.items()
        }

    def get_metric(self, metric: MetricConfiguration) -> Any:
        """return the value of the requested metric."""
        return self.get_metrics({"_": metric})["_"]

    def build_metric_dependency_graph(
        self,
        graph: ValidationGraph,
        child_node: MetricConfiguration,
        configuration: Optional[ExpectationConfiguration],
        execution_engine: "ExecutionEngine",
        parent_node: Optional[MetricConfiguration] = None,
        runtime_configuration: Optional[dict] = None,
    ) -> None:
        """Obtain domain and value keys for metrics and proceeds to add these metrics to the validation graph
        until all metrics have been added.

        Recursively walks each metric's evaluation dependencies, adding a
        (parent, child) MetricEdge per dependency; a leaf metric is recorded
        as an edge with a None right-hand node so the resolver can schedule
        it immediately.
        """
        # metric_kwargs = get_metric_kwargs(metric_name)
        metric_impl = get_metric_provider(
            child_node.metric_name, execution_engine=execution_engine
        )[0]
        metric_dependencies = metric_impl.get_evaluation_dependencies(
            metric=child_node,
            configuration=configuration,
            execution_engine=execution_engine,
            runtime_configuration=runtime_configuration,
        )
        child_node.metric_dependencies = metric_dependencies

        if parent_node:
            graph.add(
                MetricEdge(
                    parent_node,
                    child_node,
                )
            )

        if len(metric_dependencies) == 0:
            # Leaf metric: edge with None marks it as immediately resolvable.
            graph.add(
                MetricEdge(
                    child_node,
                    None,
                )
            )

        else:
            for metric_dependency in metric_dependencies.values():
                # Guard against a metric declaring itself as its own dependency,
                # which would otherwise recurse forever.
                if metric_dependency.id == child_node.id:
                    logger.warning(
                        f"Metric {str(child_node.id)} has created a circular dependency"
                    )
                    continue
                self.build_metric_dependency_graph(
                    graph,
                    metric_dependency,
                    configuration,
                    execution_engine,
                    child_node,
                    runtime_configuration=runtime_configuration,
                )

    def graph_validate(
        self,
        configurations: List[ExpectationConfiguration],
        metrics: dict = None,
        runtime_configuration: dict = None,
    ) -> List[ExpectationValidationResult]:
        """Obtains validation dependencies for each metric using the implementation of their associated expectation,
        then proceeds to add these dependencies to the validation graph, supply readily available metric
        implementations to fulfill current metric requirements, and validate these metrics.

        Args:
            configurations(List[ExpectationConfiguration]): A list of needed Expectation Configurations that will
                be used to supply domain and values for metrics.
            metrics (dict): A dictionary of currently registered metrics in the registry; mutated/extended in place.
            runtime_configuration (dict): A dictionary of runtime keyword arguments, controlling semantics
                such as the result_format. ``catch_exceptions`` (default True) controls whether per-expectation
                failures become failed results or propagate.

        Returns:
            A list of ExpectationValidationResults, one per successfully processed (or failed) configuration.
        """
        graph = ValidationGraph()
        if runtime_configuration is None:
            runtime_configuration = dict()

        if runtime_configuration.get("catch_exceptions", True):
            catch_exceptions = True
        else:
            catch_exceptions = False

        processed_configurations = []
        evrs = []
        for configuration in configurations:
            # Validating
            try:
                assert (
                    configuration.expectation_type is not None
                ), "Given configuration should include expectation type"
            except AssertionError as e:
                raise InvalidExpectationConfigurationError(str(e))

            expectation_impl = get_expectation_impl(configuration.expectation_type)
            validation_dependencies = expectation_impl().get_validation_dependencies(
                configuration, self._execution_engine, runtime_configuration
            )["metrics"]

            try:
                for metric in validation_dependencies.values():
                    self.build_metric_dependency_graph(
                        graph,
                        metric,
                        configuration,
                        self._execution_engine,
                        runtime_configuration=runtime_configuration,
                    )
                processed_configurations.append(configuration)
            except Exception as err:
                # With catch_exceptions, a graph-build failure becomes a failed
                # EVR for this configuration instead of aborting the run.
                if catch_exceptions:
                    raised_exception = True
                    exception_traceback = traceback.format_exc()
                    result = ExpectationValidationResult(
                        success=False,
                        exception_info={
                            "raised_exception": raised_exception,
                            "exception_traceback": exception_traceback,
                            "exception_message": str(err),
                        },
                    )
                    evrs.append(result)
                else:
                    raise err

        if metrics is None:
            metrics = dict()

        metrics = self.resolve_validation_graph(graph, metrics, runtime_configuration)
        for configuration in processed_configurations:
            try:
                result = configuration.metrics_validate(
                    metrics,
                    execution_engine=self._execution_engine,
                    runtime_configuration=runtime_configuration,
                )
                evrs.append(result)
            except Exception as err:
                if catch_exceptions:
                    raised_exception = True
                    exception_traceback = traceback.format_exc()
                    result = ExpectationValidationResult(
                        success=False,
                        exception_info={
                            "raised_exception": raised_exception,
                            "exception_traceback": exception_traceback,
                            "exception_message": str(err),
                        },
                    )
                    evrs.append(result)
                else:
                    raise err
        return evrs

    def resolve_validation_graph(self,
        graph, metrics, runtime_configuration=None):
        # Iteratively resolve the validation graph: on each pass, compute which
        # metrics are ready (all dependencies met) vs. still blocked, resolve the
        # ready set, and repeat until nothing remains.
        done: bool = False
        pbar = None
        while not done:
            ready_metrics, needed_metrics = self._parse_validation_graph(graph, metrics)
            if pbar is None:
                # Progress bar is suppressed for trivially small graphs.
                pbar = tqdm(
                    total=len(ready_metrics) + len(needed_metrics),
                    desc="Calculating Metrics",
                    disable=len(graph._edges) < 3,
                )
                pbar.update(0)
            metrics.update(
                self._resolve_metrics(
                    execution_engine=self._execution_engine,
                    metrics_to_resolve=ready_metrics,
                    metrics=metrics,
                    runtime_configuration=runtime_configuration,
                )
            )
            pbar.update(len(ready_metrics))
            if len(ready_metrics) + len(needed_metrics) == 0:
                done = True
        pbar.close()

        return metrics

    def _parse_validation_graph(self, validation_graph, metrics):
        """Given validation graph, returns the ready and needed metrics necessary for validation using a traversal of
        validation graph (a graph structure of metric ids) edges"""
        unmet_dependency_ids = set()
        unmet_dependency = set()
        maybe_ready_ids = set()
        maybe_ready = set()

        for edge in validation_graph.edges:
            if edge.left.id not in metrics:
                if edge.right is None or edge.right.id in metrics:
                    # Dependency satisfied (or leaf) — candidate for resolution.
                    if edge.left.id not in maybe_ready_ids:
                        maybe_ready_ids.add(edge.left.id)
                        maybe_ready.add(edge.left)
                else:
                    if edge.left.id not in unmet_dependency_ids:
                        unmet_dependency_ids.add(edge.left.id)
                        unmet_dependency.add(edge.left)

        # A metric with at least one unmet dependency is not ready even if some
        # other edge marked it as a candidate.
        return maybe_ready - unmet_dependency, unmet_dependency

    def _resolve_metrics(
        self,
        execution_engine: "ExecutionEngine",
        metrics_to_resolve: Iterable[MetricConfiguration],
        metrics: Dict,
        runtime_configuration: dict = None,
    ):
        """A means of accessing the Execution Engine's resolve_metrics method, where missing metric configurations are
        resolved"""
        return execution_engine.resolve_metrics(
            metrics_to_resolve, metrics, runtime_configuration
        )

    def _initialize_expectations(
        self,
        expectation_suite: ExpectationSuite = None,
        expectation_suite_name: str = None,
    ):
        """Instantiates `_expectation_suite` as empty by default or with a specified expectation `config`.
        In addition, this always sets the `default_expectation_args` to:
            `include_config`: False,
            `catch_exceptions`: False,
            `output_format`: 'BASIC'

        By default, initializes data_asset_type to the name of the implementing class, but subclasses
        that have interoperable semantics (e.g. Dataset) may override that parameter to clarify their
        interoperability.

        Args:
            expectation_suite (json): \
                A json-serializable expectation config. \
                If None, creates default `_expectation_suite` with an empty list of expectations and \
                key value `data_asset_name` as `data_asset_name`.

            expectation_suite_name (string): \
                The name to assign to the `expectation_suite.expectation_suite_name`

        Returns:
            None
        """
        # Checking type of expectation_suite.
        # Check for expectation_suite_name is already done by ExpectationSuiteIdentifier
        if expectation_suite and not isinstance(expectation_suite, ExpectationSuite):
            raise TypeError(
                "expectation_suite must be of type ExpectationSuite, not {}".format(
                    type(expectation_suite)
                )
            )
        if expectation_suite is not None:
            if isinstance(expectation_suite, dict):
                expectation_suite = expectationSuiteSchema.load(expectation_suite)
            else:
                # Deep-copy so this Validator's mutations never leak back to
                # the caller's suite object.
                expectation_suite = copy.deepcopy(expectation_suite)
            self._expectation_suite = expectation_suite

            if expectation_suite_name is not None:
                if (
                    self._expectation_suite.expectation_suite_name
                    != expectation_suite_name
                ):
                    logger.warning(
                        "Overriding existing expectation_suite_name {n1} with new name {n2}".format(
                            n1=self._expectation_suite.expectation_suite_name,
                            n2=expectation_suite_name,
                        )
                    )
                self._expectation_suite.expectation_suite_name = expectation_suite_name
        else:
            if expectation_suite_name is None:
                expectation_suite_name = "default"
            self._expectation_suite = ExpectationSuite(
                expectation_suite_name=expectation_suite_name
            )

        # Record which engine produced this suite (class name only).
        self._expectation_suite.execution_engine_type = type(
            self.execution_engine
        ).__name__

    def append_expectation(self, expectation_config):
        """This method is a thin wrapper for ExpectationSuite.append_expectation"""
        # Deprecated pass-through; see ExpectationSuite.add_expectation.
        warnings.warn(
            "append_expectation is deprecated, and will be removed in a future release. "
            + "Please use ExpectationSuite.add_expectation instead.",
            DeprecationWarning,
        )
        self._expectation_suite.append_expectation(expectation_config)

    def find_expectation_indexes(
        self,
        expectation_configuration: ExpectationConfiguration,
        match_type: str = "domain",
    ) -> List[int]:
        """This method is a thin wrapper for ExpectationSuite.find_expectation_indexes"""
        warnings.warn(
            "find_expectation_indexes is deprecated, and will be removed in a future release. "
            + "Please use ExpectationSuite.find_expectation_indexes instead.",
            DeprecationWarning,
        )
        return self._expectation_suite.find_expectation_indexes(
            expectation_configuration=expectation_configuration, match_type=match_type
        )

    def find_expectations(
        self,
        expectation_configuration: ExpectationConfiguration,
        match_type: str = "domain",
    ) -> List[ExpectationConfiguration]:
        """This method is a thin wrapper for ExpectationSuite.find_expectations()"""
        warnings.warn(
            "find_expectations is deprecated, and will be removed in a future release. "
            + "Please use ExpectationSuite.find_expectation_indexes instead.",
            DeprecationWarning,
        )
        return self._expectation_suite.find_expectations(
            expectation_configuration=expectation_configuration, match_type=match_type
        )

    def remove_expectation(
        self,
        expectation_configuration: ExpectationConfiguration,
        match_type: str = "domain",
        remove_multiple_matches: bool = False,
    ) -> List[ExpectationConfiguration]:
        """This method is a thin wrapper for ExpectationSuite.remove()"""
        warnings.warn(
            "DataAsset.remove_expectations is deprecated, and will be removed in a future release. "
            + "Please use ExpectationSuite.remove_expectation instead.",
            DeprecationWarning,
        )
        return self._expectation_suite.remove_expectation(
            expectation_configuration=expectation_configuration,
            match_type=match_type,
            remove_multiple_matches=remove_multiple_matches,
        )

    def set_config_value(self, key, value):
        """Setter for config value"""
        self._validator_config[key] = value

    def get_config_value(self, key):
        """Getter for config value"""
        return self._validator_config.get(key)

    def load_batch(self, batch_list: List[Batch]):
        for batch in batch_list:
            self._execution_engine.load_batch_data(batch.id, batch.data)
            self._batches[batch.id] = batch
            # We set the active_batch_id in each iteration of the loop to keep in sync with the active_batch_id for the
            # execution_engine. The final active_batch_id will be that of the final batch loaded.
            self.active_batch_id = batch.id

        return batch_list

    @property
    def batches(self) -> Dict[str, Batch]:
        """Getter for batches"""
        return self._batches

    @property
    def loaded_batch_ids(self) -> List[str]:
        # Delegates to the engine: ids of batch data currently loaded there.
        return self.execution_engine.loaded_batch_data_ids

    @property
    def active_batch(self) -> Batch:
        """Getter for active batch"""
        active_batch_id: str = self.execution_engine.active_batch_data_id
        batch: Batch = self.batches.get(active_batch_id) if active_batch_id else None
        return batch

    @property
    def active_batch_spec(self) -> Optional[BatchSpec]:
        """Getter for active batch's batch_spec"""
        if not self.active_batch:
            return None
        else:
            return self.active_batch.batch_spec

    @property
    def active_batch_id(self) -> str:
        """Getter for active batch id"""
        return self.execution_engine.active_batch_data_id

    @active_batch_id.setter
    def active_batch_id(self, batch_id: str):
        # Every batch tracked here should already be loaded in the engine.
        assert set(self.batches.keys()).issubset(set(self.loaded_batch_ids))
        available_batch_ids: Set[str] = set(self.batches.keys()).union(
            set(self.loaded_batch_ids)
        )
        if batch_id not in available_batch_ids:
            raise ValueError(
                f"""batch_id {batch_id} not found in loaded batches.  Batches must first be loaded before they can be \
set as active.
"""
            )
        else:
            self.execution_engine._active_batch_data_id = batch_id

    @property
    def active_batch_markers(self):
        """Getter for active batch's batch markers"""
        if not self.active_batch:
            return None
        else:
            return self.active_batch.batch_markers

    @property
    def active_batch_definition(self):
        """Getter for the active batch's batch definition"""
        if not self.active_batch:
            return None
        else:
            return self.active_batch.batch_definition

    def discard_failing_expectations(self):
        """Removes any expectations from the validator where the validation has failed"""
        res = self.validate(only_return_failures=True).results
        if any(res):
            for item in res:
                self.remove_expectation(
                    expectation_configuration=item.expectation_config,
                    match_type="runtime",
                )
            warnings.warn("Removed %s expectations that were 'False'" % len(res))

    def get_default_expectation_arguments(self):
        """Fetch default expectation arguments for this data_asset

        Returns:
            A dictionary containing all the current default expectation arguments for a data_asset

            Ex::

                {
                    "include_config" : True,
                    "catch_exceptions" : False,
                    "result_format" : 'BASIC'
                }

        See also:
            set_default_expectation_arguments
        """
        return self._default_expectation_args

    @property
    def default_expectation_args(self):
        """A getter for default Expectation arguments"""
        return self._default_expectation_args

    def set_default_expectation_argument(self, argument, value):
        """
        Set a default expectation argument for this data_asset

        Args:
            argument (string): The argument to be replaced
            value : The New argument to use for replacement

        Returns:
            None

        See also:
            get_default_expectation_arguments
        """
        self._default_expectation_args[argument] = value

    def get_expectations_config(
        self,
        discard_failed_expectations=True,
        discard_result_format_kwargs=True,
        discard_include_config_kwargs=True,
        discard_catch_exceptions_kwargs=True,
        suppress_warnings=False,
    ):
        """
        Returns an expectation configuration, providing an option to discard failed
        expectation and discard/include different result aspects, such as exceptions and result format.
        """
        warnings.warn(
            "get_expectations_config is deprecated, and will be removed in a future release. "
            + "Please use get_expectation_suite instead.",
            DeprecationWarning,
        )
        return self.get_expectation_suite(
            discard_failed_expectations,
            discard_result_format_kwargs,
            discard_include_config_kwargs,
            discard_catch_exceptions_kwargs,
            suppress_warnings,
        )

    def get_expectation_suite(
        self,
        discard_failed_expectations=True,
        discard_result_format_kwargs=True,
        discard_include_config_kwargs=True,
        discard_catch_exceptions_kwargs=True,
        suppress_warnings=False,
        suppress_logging=False,
    ):
        """Returns _expectation_config as a JSON object, and perform some cleaning along the way.

        Args:
            discard_failed_expectations (boolean): \
                Only include expectations with success_on_last_run=True in the exported config.  Defaults to `True`.
            discard_result_format_kwargs (boolean): \
                In returned expectation objects, suppress the `result_format` parameter. Defaults to `True`.
            discard_include_config_kwargs (boolean): \
                In returned expectation objects, suppress the `include_config` parameter. Defaults to `True`.
            discard_catch_exceptions_kwargs (boolean): \
                In returned expectation objects, suppress the `catch_exceptions` parameter.  Defaults to `True`.
            suppress_warnings (boolean): \
                If true, do not include warnings in logging information about the operation.
            suppress_logging (boolean): \
                If true, do not create a log entry (useful when using get_expectation_suite programmatically)

        Returns:
            An expectation suite.

        Note:
            get_expectation_suite does not affect the underlying expectation suite at all. The returned suite is a \
            copy of _expectation_suite, not the original object.
        """
        # Work on a deep copy so the live suite is never mutated by the
        # filtering below.
        expectation_suite = copy.deepcopy(self._expectation_suite)
        expectations = expectation_suite.expectations

        discards = defaultdict(int)

        if discard_failed_expectations:
            new_expectations = []

            for expectation in expectations:
                # Note: This is conservative logic.
                # Instead of retaining expectations IFF success==True, it discard expectations IFF success==False.
                # In cases where expectation.success is missing or None, expectations are *retained*.
                # Such a case could occur if expectations were loaded from a config file and never run.
                if expectation.success_on_last_run is False:
                    discards["failed_expectations"] += 1
                else:
                    new_expectations.append(expectation)

            expectations = new_expectations

        message = "\t%d expectation(s) included in expectation_suite." % len(
            expectations
        )

        if discards["failed_expectations"] > 0 and not suppress_warnings:
            message += (
                " Omitting %d expectation(s) that failed when last run; set "
                "discard_failed_expectations=False to include them."
                % discards["failed_expectations"]
            )

        for expectation in expectations:
            # FIXME: Factor this out into a new function. The logic is duplicated in remove_expectation,
            # which calls _copy_and_clean_up_expectation
            expectation.success_on_last_run = None

            if discard_result_format_kwargs:
                if "result_format" in expectation.kwargs:
                    del expectation.kwargs["result_format"]
                    discards["result_format"] += 1

            if discard_include_config_kwargs:
                if "include_config" in expectation.kwargs:
                    del expectation.kwargs["include_config"]
                    discards["include_config"] += 1

            if discard_catch_exceptions_kwargs:
                if "catch_exceptions" in expectation.kwargs:
                    del expectation.kwargs["catch_exceptions"]
                    discards["catch_exceptions"] += 1

        settings_message = ""

        if discards["result_format"] > 0 and not suppress_warnings:
            settings_message += " result_format"

        if discards["include_config"] > 0 and not suppress_warnings:
            settings_message += " include_config"

        if discards["catch_exceptions"] > 0 and not suppress_warnings:
            settings_message += " catch_exceptions"

        if (
            len(settings_message) > 1
        ):  # Only add this if we added one of the settings above.
            settings_message += " settings filtered."

        expectation_suite.expectations = expectations
        if not suppress_logging:
            logger.info(message + settings_message)
        return expectation_suite

    def save_expectation_suite(
        self,
        filepath=None,
        discard_failed_expectations=True,
        discard_result_format_kwargs=True,
        discard_include_config_kwargs=True,
        discard_catch_exceptions_kwargs=True,
        suppress_warnings=False,
    ):
        """Writes ``_expectation_config`` to a JSON file.

           Writes the DataAsset's expectation config to the specified JSON ``filepath``. Failing expectations \
           can be excluded from the JSON expectations config with ``discard_failed_expectations``. The kwarg key-value \
           pairs :ref:`result_format`, :ref:`include_config`, and :ref:`catch_exceptions` are optionally excluded from \
           the JSON expectations config.

           Args:
               filepath (string): \
                   The location and name to write the JSON config file to.
               discard_failed_expectations (boolean): \
                   If True, excludes expectations that do not return ``success = True``.
\
                   If False, all expectations are written to the JSON config file.
               discard_result_format_kwargs (boolean): \
                   If True, the :ref:`result_format` attribute for each expectation is not written to the JSON config \
                   file.
               discard_include_config_kwargs (boolean): \
                   If True, the :ref:`include_config` attribute for each expectation is not written to the JSON config \
                   file.
               discard_catch_exceptions_kwargs (boolean): \
                   If True, the :ref:`catch_exceptions` attribute for each expectation is not written to the JSON \
                   config file.
               suppress_warnings (boolean): \
                   If True, all warnings raised by Great Expectations, as a result of dropped expectations, are \
                   suppressed.
        """
        # Build the (filtered) suite first, then pick a sink: data context
        # takes precedence only when no explicit filepath is given.
        expectation_suite = self.get_expectation_suite(
            discard_failed_expectations,
            discard_result_format_kwargs,
            discard_include_config_kwargs,
            discard_catch_exceptions_kwargs,
            suppress_warnings,
        )
        if filepath is None and self._data_context is not None:
            self._data_context.save_expectation_suite(expectation_suite)
        elif filepath is not None:
            with open(filepath, "w") as outfile:
                json.dump(
                    expectationSuiteSchema.dump(expectation_suite),
                    outfile,
                    indent=2,
                    sort_keys=True,
                )
        else:
            raise ValueError(
                "Unable to save config: filepath or data_context must be available."
            )

    def validate(
        self,
        expectation_suite=None,
        run_id=None,
        data_context=None,
        evaluation_parameters=None,
        catch_exceptions=True,
        result_format=None,
        only_return_failures=False,
        run_name=None,
        run_time=None,
    ):
        """Generates a JSON-formatted report describing the outcome of all expectations.

        Use the default expectation_suite=None to validate the expectations config associated with the DataAsset.

        Args:
            expectation_suite (json or None): \
                If None, uses the expectations config generated with the DataAsset during the current session. \
                If a JSON file, validates those expectations.
            run_name (str): \
                Used to identify this validation result as part of a collection of validations. \
                See DataContext for more information.
            data_context (DataContext): \
                A datacontext object to use as part of validation for binding evaluation parameters and \
                registering validation results.
            evaluation_parameters (dict or None): \
                If None, uses the evaluation_paramters from the expectation_suite provided or as part of the \
                data_asset. If a dict, uses the evaluation parameters in the dictionary.
            catch_exceptions (boolean): \
                If True, exceptions raised by tests will not end validation and will be described in the returned \
                report.
            result_format (string or None): \
                If None, uses the default value ('BASIC' or as specified). \
                If string, the returned expectation output follows the specified format ('BOOLEAN_ONLY','BASIC', \
                etc.).
            only_return_failures (boolean): \
                If True, expectation results are only returned when ``success = False`` \

        Returns:
            A JSON-formatted dictionary containing a list of the validation results. \
            An example of the returned format::

            {
              "results": [
                {
                  "unexpected_list": [unexpected_value_1, unexpected_value_2],
                  "expectation_type": "expect_*",
                  "kwargs": {
                    "column": "Column_Name",
                    "output_format": "SUMMARY"
                  },
                  "success": true,
                  "raised_exception": false,
                  "exception_traceback": null
                },
                {
                  ... (Second expectation results)
                },
                ... (More expectations results)
              ],
              "success": true,
              "statistics": {
                "evaluated_expectations": n,
                "successful_expectations": m,
                "unsuccessful_expectations": n - m,
                "success_percent": m / n
              }
            }

        Notes:
           If the configuration object was built with a different version of great expectations then the \
           current environment. If no version was found in the configuration file.

        Raises:
           AttributeError - if 'catch_exceptions'=None and an expectation throws an AttributeError
        """
        try:
            # UTC timestamp for this validation run, recorded in result meta.
            validation_time = datetime.datetime.now(datetime.timezone.utc).strftime(
                "%Y%m%dT%H%M%S.%fZ"
            )
            # run_id is mutually exclusive with run_name/run_time.
            assert not (run_id and run_name) and not (
                run_id and run_time
            ), "Please provide either a run_id or run_name and/or run_time."
            # Normalize the run identifier: legacy string run_ids (deprecated),
            # dicts, or explicit run_name/run_time all collapse into a RunIdentifier.
            if isinstance(run_id, str) and not run_name:
                warnings.warn(
                    "String run_ids will be deprecated in the future. Please provide a run_id of type "
                    "RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name "
                    "and run_time (both optional). Instead of providing a run_id, you may also provide"
                    "run_name and run_time separately.",
                    DeprecationWarning,
                )
                # Best effort: a string run_id that parses as a datetime becomes run_time.
                try:
                    run_time = parse(run_id)
                except (ValueError, TypeError):
                    pass
                run_id = RunIdentifier(run_name=run_id, run_time=run_time)
            elif isinstance(run_id, dict):
                run_id = RunIdentifier(**run_id)
            elif not isinstance(run_id, RunIdentifier):
                run_id = RunIdentifier(run_name=run_name, run_time=run_time)

            # While a validation is active, expectation configs are not saved
            # back to the suite (see inst_expectation in validate_expectation).
            self._active_validation = True

            if result_format is None:
                result_format = {"result_format": "BASIC"}

            # If a different validation data context was provided, override
            validate__data_context = self._data_context
            if data_context is None and self._data_context is not None:
                data_context = self._data_context
            elif data_context is not None:
                # temporarily set self._data_context so it is used inside the expectation decorator
                self._data_context = data_context

            # Resolve the expectation suite: default to this validator's suite,
            # or load it from a file path / accept an ExpectationSuite object.
            if expectation_suite is None:
                expectation_suite = self.get_expectation_suite(
                    discard_failed_expectations=False,
                    discard_result_format_kwargs=False,
                    discard_include_config_kwargs=False,
                    discard_catch_exceptions_kwargs=False,
                )
            elif isinstance(expectation_suite, str):
                try:
                    with open(expectation_suite) as infile:
                        expectation_suite = expectationSuiteSchema.loads(infile.read())
                except ValidationError:
                    raise
                except OSError:
                    raise GreatExpectationsError(
                        "Unable to load expectation suite: IO error while reading %s"
                        % expectation_suite
                    )
            elif not isinstance(expectation_suite, ExpectationSuite):
                logger.error(
                    "Unable to validate using the provided value for expectation suite; does it need to be "
                    "loaded from a dictionary?"
                )
                if getattr(data_context, "_usage_statistics_handler", None):
                    handler = data_context._usage_statistics_handler
                    handler.send_usage_message(
                        event="data_asset.validate",
                        event_payload=handler._batch_anonymizer.anonymize_batch_info(
                            self
                        ),
                        success=False,
                    )
                return ExpectationValidationResult(success=False)

            # Evaluation parameter priority is
            # 1. from provided parameters
            # 2. from expectation configuration
            # 3. from data context
            # So, we load them in reverse order

            if data_context is not None:
                runtime_evaluation_parameters = (
                    data_context.evaluation_parameter_store.get_bind_params(run_id)
                )
            else:
                runtime_evaluation_parameters = {}

            if expectation_suite.evaluation_parameters:
                runtime_evaluation_parameters.update(
                    expectation_suite.evaluation_parameters
                )

            if evaluation_parameters is not None:
                runtime_evaluation_parameters.update(evaluation_parameters)

            # Convert evaluation parameters to be json-serializable
            runtime_evaluation_parameters = recursively_convert_to_json_serializable(
                runtime_evaluation_parameters
            )

            # Warn if our version is different from the version in the configuration
            # TODO: Deprecate "great_expectations.__version__"
            # NOTE(review): suite_ge_version is computed but never used in the
            # visible remainder of this method — the version-mismatch warning it
            # was presumably meant to drive appears to be missing. Confirm.
            suite_ge_version = expectation_suite.meta.get(
                "great_expectations_version"
            ) or expectation_suite.meta.get("great_expectations.__version__")

            # Group expectations by column
            columns = {}

            for expectation in expectation_suite.expectations:
                # Substitute bound evaluation parameters into each expectation's kwargs.
                expectation.process_evaluation_parameters(
                    evaluation_parameters=runtime_evaluation_parameters,
                    interactive_evaluation=self.interactive_evaluation,
                    data_context=self._data_context,
                )
                if "column" in expectation.kwargs and isinstance(
                    expectation.kwargs["column"], Hashable
                ):
                    column = expectation.kwargs["column"]
                else:
                    # Sentinel bucket for table-level (non-column) expectations.
                    column = "_nocolumn"
                if column not in columns:
                    columns[column] = []
                columns[column].append(expectation)

            # Flatten the per-column groups so same-column expectations run adjacently.
            expectations_to_evaluate = []
            for col in columns:
                expectations_to_evaluate.extend(columns[col])

            results = self.graph_validate(
                expectations_to_evaluate,
                runtime_configuration={
                    "catch_exceptions": catch_exceptions,
                    "result_format": result_format,
                },
            )
            statistics = _calc_validation_statistics(results)

            if only_return_failures:
                abbrev_results = []
                for exp in results:
                    if not exp.success:
                        abbrev_results.append(exp)
                results = abbrev_results

            expectation_suite_name = expectation_suite.expectation_suite_name

            result = ExpectationSuiteValidationResult(
                results=results,
                success=statistics.success,
                statistics={
                    "evaluated_expectations": statistics.evaluated_expectations,
                    "successful_expectations": statistics.successful_expectations,
                    "unsuccessful_expectations": statistics.unsuccessful_expectations,
                    "success_percent": statistics.success_percent,
                },
                evaluation_parameters=runtime_evaluation_parameters,
                meta={
                    "great_expectations_version": ge_version,
                    "expectation_suite_name": expectation_suite_name,
                    "run_id": run_id,
                    "batch_spec": self.active_batch_spec,
                    "batch_markers": self.active_batch_markers,
                    "active_batch_definition": self.active_batch_definition,
                    "validation_time": validation_time,
                },
            )

            # Restore the original data context (it may have been temporarily overridden above).
            self._data_context = validate__data_context
        # NOTE(review): `e` below is bound but unused; the handler only reports and re-raises.
        except Exception as e:
            if getattr(data_context, "_usage_statistics_handler", None):
                handler = data_context._usage_statistics_handler
                handler.send_usage_message(
                    event="data_asset.validate",
                    event_payload=handler._batch_anonymizer.anonymize_batch_info(self),
                    success=False,
                )
            raise
        finally:
            self._active_validation = False

        # Success path: emit a usage-statistics event before returning the result.
        if getattr(data_context, "_usage_statistics_handler", None):
            handler = data_context._usage_statistics_handler
            handler.send_usage_message(
                event="data_asset.validate",
                event_payload=handler._batch_anonymizer.anonymize_batch_info(self),
                success=True,
            )
        return result

    def get_evaluation_parameter(self, parameter_name, default_value=None):
        """
        Get an evaluation parameter value that has been stored in the suite's
        evaluation_parameters.

        Args:
            parameter_name (string): The name of the parameter to retrieve.
            default_value (any): The default value to be returned if the parameter is not found.

        Returns:
            The current value of the evaluation parameter, or ``default_value``.
        """
        if parameter_name in self._expectation_suite.evaluation_parameters:
            return self._expectation_suite.evaluation_parameters[parameter_name]
        else:
            return default_value

    def set_evaluation_parameter(self, parameter_name, parameter_value):
        """
        Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate
        parameterized expectations.

        Args:
            parameter_name (string): The name of the kwarg to be replaced at evaluation time
            parameter_value (any): The value to be used
        """
        self._expectation_suite.evaluation_parameters.update(
            {parameter_name: parameter_value}
        )

    def add_citation(
        self,
        comment,
        batch_spec=None,
        batch_markers=None,
        batch_definition=None,
        citation_date=None,
    ):
        """Adds a citation to an existing Expectation Suite within the validator.

        Any of batch_spec / batch_markers / batch_definition left as None
        defaults to the corresponding value from the validator's active batch.
        """
        if batch_spec is None:
            batch_spec = self.batch_spec
        if batch_markers is None:
            batch_markers = self.active_batch_markers
        if batch_definition is None:
            batch_definition = self.active_batch_definition
        self._expectation_suite.add_citation(
            comment,
            batch_spec=batch_spec,
            batch_markers=batch_markers,
            batch_definition=batch_definition,
            citation_date=citation_date,
        )

    @property
    def expectation_suite_name(self):
        """Gets the current expectation_suite name of this data_asset as stored in the expectations configuration."""
        return self._expectation_suite.expectation_suite_name

    @expectation_suite_name.setter
    def expectation_suite_name(self, expectation_suite_name):
        """Sets the expectation_suite name of this data_asset as stored in the expectations configuration."""
        self._expectation_suite.expectation_suite_name = expectation_suite_name

    def test_expectation_function(self, function, *args, **kwargs):
        """Test a generic expectation function

        Args:
            function (func): The function to be tested. (Must be a valid expectation function.)
            *args : Positional arguments to be passed the the function
            **kwargs : Keyword arguments to be passed the the function

        Returns:
            A JSON-serializable expectation result object.

        Notes:
            This function is a thin layer to allow quick testing of new expectation functions, without having to \
            define custom classes, etc. To use developed expectations from the command-line tool, you will still need \
            to define custom classes, etc.

            Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information.
        """
        # Drop `self` from the argspec before wrapping with the expectation decorator.
        argspec = inspect.getfullargspec(function)[0][1:]

        new_function = self.expectation(argspec)(function)
        return new_function(self, *args, **kwargs)

    def columns(self, domain_kwargs: Optional[Dict[str, Any]] = None) -> List[str]:
        """Return the column names of the active batch via the "table.columns" metric.

        Args:
            domain_kwargs: Metric domain kwargs; defaults to the active batch id.

        Returns:
            A list of column names.
        """
        if domain_kwargs is None:
            domain_kwargs = {
                "batch_id": self.execution_engine.active_batch_data_id,
            }

        columns: List[str] = self.get_metric(
            metric=MetricConfiguration(
                metric_name="table.columns",
                metric_domain_kwargs=domain_kwargs,
            )
        )

        return columns

    def head(
        self,
        n_rows: Optional[int] = 5,
        domain_kwargs: Optional[Dict[str, Any]] = None,
        fetch_all: Optional[bool] = False,
    ) -> pd.DataFrame:
        """Return the first ``n_rows`` of the active batch as a pandas DataFrame.

        Uses the "table.head" metric; Spark rows are converted via ``asDict``.

        Args:
            n_rows: Number of rows to fetch (ignored when ``fetch_all`` is True by
                the underlying metric — presumably; confirm against the metric impl).
            domain_kwargs: Metric domain kwargs; defaults to the active batch id.
            fetch_all: Whether to fetch all rows rather than just ``n_rows``.

        Raises:
            GreatExpectationsError: If the execution engine type is unsupported.
        """
        if domain_kwargs is None:
            domain_kwargs = {
                "batch_id": self.execution_engine.active_batch_data_id,
            }

        data: Any = self.get_metric(
            metric=MetricConfiguration(
                metric_name="table.head",
                metric_domain_kwargs=domain_kwargs,
                metric_value_kwargs={
                    "n_rows": n_rows,
                    "fetch_all": fetch_all,
                },
            )
        )

        df: pd.DataFrame
        if isinstance(
            self.execution_engine, (PandasExecutionEngine, SqlAlchemyExecutionEngine)
        ):
            df = pd.DataFrame(data=data)
        elif isinstance(self.execution_engine, SparkDFExecutionEngine):
            # Spark returns Row objects; convert each to a plain dict first.
            rows: List[Dict[str, Any]] = [datum.asDict() for datum in data]
            df = pd.DataFrame(data=rows)
        else:
            raise GreatExpectationsError(
                "Unsupported or unknown ExecutionEngine type encountered in Validator class."
            )

        return df.reset_index(drop=True, inplace=False)


# Summary statistics produced by _calc_validation_statistics for one validation run.
ValidationStatistics = namedtuple(
    "ValidationStatistics",
    [
        "evaluated_expectations",
        "successful_expectations",
        "unsuccessful_expectations",
        "success_percent",
        "success",
    ],
)


def _calc_validation_statistics(validation_results):
    """
    Calculate summary statistics for the validation results and
    return a ``ValidationStatistics`` namedtuple.

    ``success_percent`` is None (not NaN) when no expectations were evaluated.
    """
    # calc stats
    successful_expectations = sum(exp.success for exp in validation_results)
    evaluated_expectations = len(validation_results)
    unsuccessful_expectations = evaluated_expectations - successful_expectations
    success = successful_expectations == evaluated_expectations
    try:
        success_percent = successful_expectations / evaluated_expectations * 100
    except ZeroDivisionError:
        # success_percent = float("nan")
        success_percent = None

    return ValidationStatistics(
        successful_expectations=successful_expectations,
        evaluated_expectations=evaluated_expectations,
        unsuccessful_expectations=unsuccessful_expectations,
        success=success,
        success_percent=success_percent,
    )


class BridgeValidator:
    """This is currently helping bridge APIs"""

    def __init__(self, batch, expectation_suite, expectation_engine=None, **kwargs):
        """Builds an expectation_engine object using an expectation suite and a batch, with the expectation engine being
        determined either by the user or by the type of batch data (pandas dataframe, SqlAlchemy table, etc.)

        Args:
            batch (Batch): A Batch in Pandas, Spark, or SQL format
            expectation_suite (ExpectationSuite): The Expectation Suite available to the validator within the current
            Data Context
            expectation_engine (ExecutionEngine): The current Execution Engine being utilized. If this is not set,
            it is determined by the type of data within the given batch
        """
        self.batch = batch
        self.expectation_suite = expectation_suite

        # An engine given as a dict or ClassConfig is dynamically loaded by name.
        if isinstance(expectation_engine, dict):
            expectation_engine = ClassConfig(**expectation_engine)

        if isinstance(expectation_engine, ClassConfig):
            module_name = expectation_engine.module_name or "great_expectations.dataset"
            verify_dynamic_loading_support(module_name=module_name)
            expectation_engine = load_class(
                class_name=expectation_engine.class_name, module_name=module_name
            )

        self.expectation_engine = expectation_engine

        if self.expectation_engine is None:
            # Guess the engine from the batch data type: pandas, then SQLAlchemy, then Spark.
            try:
                import pandas as pd

                if isinstance(batch.data, pd.DataFrame):
                    self.expectation_engine = PandasDataset
            except ImportError:
                pass
        if self.expectation_engine is None:
            if isinstance(batch.data, SqlAlchemyBatchReference):
                self.expectation_engine = SqlAlchemyDataset

        if self.expectation_engine is None:
            try:
                import pyspark

                if isinstance(batch.data, pyspark.sql.DataFrame):
                    self.expectation_engine = SparkDFDataset
            except ImportError:
                pass

        if self.expectation_engine is None:
            raise ValueError(
                "Unable to identify expectation_engine. It must be a subclass of DataAsset."
            )

        self.init_kwargs = kwargs

    def get_dataset(self):
        """
        Bridges between Execution Engines in providing access to the batch data. Validates that Dataset classes
        contain proper type of data (i.e. a Pandas Dataset does not contain SqlAlchemy data)

        Returns:
            An instance of ``self.expectation_engine`` wrapping the batch data.

        Raises:
            ValueError: If the batch data type does not match the engine class.
        """
        if issubclass(self.expectation_engine, PandasDataset):
            import pandas as pd

            # NOTE(review): this branch reads self.batch["data"] while every
            # other branch uses self.batch.data — confirm both access paths are
            # equivalent on the Batch type used here.
            if not isinstance(self.batch["data"], pd.DataFrame):
                raise ValueError(
                    "PandasDataset expectation_engine requires a Pandas Dataframe for its batch"
                )

            return self.expectation_engine(
                self.batch.data,
                expectation_suite=self.expectation_suite,
                batch_kwargs=self.batch.batch_kwargs,
                batch_parameters=self.batch.batch_parameters,
                batch_markers=self.batch.batch_markers,
                data_context=self.batch.data_context,
                **self.init_kwargs,
                **self.batch.batch_kwargs.get("dataset_options", {}),
            )

        elif issubclass(self.expectation_engine, SqlAlchemyDataset):
            if not isinstance(self.batch.data, SqlAlchemyBatchReference):
                raise ValueError(
                    "SqlAlchemyDataset expectation_engine requires a SqlAlchemyBatchReference for its batch"
                )

            init_kwargs = self.batch.data.get_init_kwargs()
            init_kwargs.update(self.init_kwargs)
            return self.expectation_engine(
                batch_kwargs=self.batch.batch_kwargs,
                batch_parameters=self.batch.batch_parameters,
                batch_markers=self.batch.batch_markers,
                data_context=self.batch.data_context,
                expectation_suite=self.expectation_suite,
                **init_kwargs,
                **self.batch.batch_kwargs.get("dataset_options", {}),
            )

        elif issubclass(self.expectation_engine, SparkDFDataset):
            import pyspark

            if not isinstance(self.batch.data, pyspark.sql.DataFrame):
                raise ValueError(
                    "SparkDFDataset expectation_engine requires a spark DataFrame for its batch"
                )

            return self.expectation_engine(
                spark_df=self.batch.data,
                expectation_suite=self.expectation_suite,
                batch_kwargs=self.batch.batch_kwargs,
                batch_parameters=self.batch.batch_parameters,
                batch_markers=self.batch.batch_markers,
                data_context=self.batch.data_context,
                **self.init_kwargs,
                **self.batch.batch_kwargs.get("dataset_options", {}),
            )
import copy import datetime import inspect import json import logging import traceback import warnings from collections import defaultdict, namedtuple from collections.abc import Hashable from typing import Any, Dict, Iterable, List, Optional, Set import pandas as pd from dateutil.parser import parse from tqdm.auto import tqdm from great_expectations import __version__ as ge_version from great_expectations.core.batch import Batch from great_expectations.core.expectation_configuration import ExpectationConfiguration from great_expectations.core.expectation_suite import ( ExpectationSuite, expectationSuiteSchema, ) from great_expectations.core.expectation_validation_result import ( ExpectationSuiteValidationResult, ExpectationValidationResult, ) from great_expectations.core.id_dict import BatchSpec from great_expectations.core.run_identifier import RunIdentifier from great_expectations.data_asset.util import recursively_convert_to_json_serializable from great_expectations.dataset import PandasDataset, SparkDFDataset, SqlAlchemyDataset from great_expectations.dataset.sqlalchemy_dataset import SqlAlchemyBatchReference from great_expectations.exceptions import ( GreatExpectationsError, InvalidExpectationConfigurationError, ) from great_expectations.execution_engine import ( ExecutionEngine, PandasExecutionEngine, SparkDFExecutionEngine, SqlAlchemyExecutionEngine, ) from great_expectations.execution_engine.pandas_batch_data import PandasBatchData from great_expectations.expectations.registry import ( get_expectation_impl, get_metric_provider, list_registered_expectation_implementations, ) from great_expectations.marshmallow__shade import ValidationError from great_expectations.types import ClassConfig from great_expectations.util import load_class, verify_dynamic_loading_support from great_expectations.validator.validation_graph import ( MetricConfiguration, MetricEdge, ValidationGraph, ) logger = logging.getLogger(__name__) logging.captureWarnings(True) class Validator: 
    def __init__(
        self,
        execution_engine,
        interactive_evaluation=True,
        expectation_suite=None,
        expectation_suite_name=None,
        data_context=None,
        batches=None,
        **kwargs,
    ):
        """
        Initialize the DataAsset.

        :param profiler (profiler class) = None: The profiler that should
            be run on the data_asset to build a baseline expectation suite.

        Note: DataAsset is designed to support multiple inheritance (e.g. PandasDataset inherits from both a
        Pandas DataFrame and Dataset which inherits from DataAsset), so it accepts generic *args and **kwargs arguments
        so that they can also be passed to other parent classes. In python 2, there isn't a clean way to include all of
        *args, **kwargs, and a named kwarg...so we use the inelegant solution of popping from kwargs, leaving the
        support for the profiler parameter not obvious from the signature.
        """
        self._data_context = data_context
        self._execution_engine = execution_engine
        self._expose_dataframe_methods = False
        self._validator_config = {}

        if batches is None:
            batches = tuple()

        # Maps batch id -> Batch; batch data is also loaded into the execution engine.
        self._batches = dict()

        for batch in batches:
            assert isinstance(
                batch, Batch
            ), "batches provided to Validator must be Great Expectations Batch objects"
            self._execution_engine.load_batch_data(batch.id, batch.data)
            self._batches[batch.id] = batch

        if len(batches) > 1:
            logger.warning(
                f"{len(batches)} batches will be added to this Validator. The batch_identifiers for the active "
                f"batch are {self.active_batch.batch_definition['batch_identifiers'].items()}"
            )

        self.interactive_evaluation = interactive_evaluation
        self._initialize_expectations(
            expectation_suite=expectation_suite,
            expectation_suite_name=expectation_suite_name,
        )
        self._default_expectation_args = {
            "include_config": True,
            "catch_exceptions": False,
            "result_format": "BASIC",
        }
        self._validator_config = {}

        # This special state variable tracks whether a validation run is going on, which will disable
        # saving expectation config objects
        self._active_validation = False
        if self._data_context and hasattr(
            self._data_context, "_expectation_explorer_manager"
        ):
            # TODO: verify flow of default expectation arguments
            self.set_default_expectation_argument("include_config", True)

    def __dir__(self):
        """
        This custom magic method is used to enable expectation tab completion on Validator objects.
        It also allows users to call Pandas.DataFrame methods on Validator objects
        """
        validator_attrs = set(super().__dir__())
        class_expectation_impls = set(list_registered_expectation_implementations())
        # execution_engine_expectation_impls = (
        #     {
        #         attr_name
        #         for attr_name in self.execution_engine.__dir__()
        #         if attr_name.startswith("expect_")
        #     }
        #     if self.execution_engine
        #     else set()
        # )

        combined_dir = (
            validator_attrs
            | class_expectation_impls
            # | execution_engine_expectation_impls
        )

        if self._expose_dataframe_methods:
            # NOTE(review): the result of this union is discarded — DataFrame
            # method names never reach the returned list. Likely intended
            # `combined_dir |= set(dir(pd.DataFrame))`. Confirm and fix.
            combined_dir | set(dir(pd.DataFrame))

        return list(combined_dir)

    @property
    def expose_dataframe_methods(self):
        """Whether __getattr__ forwards unknown names to the active batch's DataFrame."""
        return self._expose_dataframe_methods

    @expose_dataframe_methods.setter
    def expose_dataframe_methods(self, value: bool):
        self._expose_dataframe_methods = value

    def __getattr__(self, name):
        # expect_* names dispatch to dynamically-built expectation methods;
        # optionally, unknown names fall through to the pandas DataFrame.
        if name.startswith("expect_") and get_expectation_impl(name):
            return self.validate_expectation(name)
        elif (
            self._expose_dataframe_methods
            and isinstance(self.active_batch.data, PandasBatchData)
            and hasattr(pd.DataFrame, name)
        ):
            return getattr(self.active_batch.data.dataframe, name)
        else:
            raise AttributeError(
                f"'{type(self).__name__}'  object has no attribute '{name}'"
            )

    def validate_expectation(self, name):
        """
        Given the name of an Expectation, obtains the Class-first Expectation implementation and utilizes the
        expectation's validate method to obtain a validation result. Also adds in the runtime configuration

        Args:
            name (str): The name of the Expectation being validated

        Returns:
            The Expectation's validation result
        """

        def inst_expectation(*args, **kwargs):
            # Builds an ExpectationConfiguration from the call arguments, runs
            # the expectation, and (outside active validation) stores the config
            # on the suite.
            try:
                expectation_impl = get_expectation_impl(name)
                allowed_config_keys = expectation_impl.get_allowed_config_keys()

                expectation_kwargs = recursively_convert_to_json_serializable(kwargs)

                meta = None

                # This section uses Expectation class' legacy_method_parameters attribute to maintain support for passing
                # positional arguments to expectation methods
                legacy_arg_names = expectation_impl.legacy_method_parameters.get(
                    name, tuple()
                )
                for idx, arg in enumerate(args):
                    try:
                        arg_name = legacy_arg_names[idx]
                        if arg_name in allowed_config_keys:
                            expectation_kwargs[arg_name] = arg
                        if arg_name == "meta":
                            meta = arg
                    except IndexError:
                        raise InvalidExpectationConfigurationError(
                            f"Invalid positional argument: {arg}"
                        )

                # this is used so that exceptions are caught appropriately when they occur in expectation config
                basic_runtime_configuration = {
                    k: v
                    for k, v in kwargs.items()
                    if k in ("result_format", "include_config", "catch_exceptions")
                }

                configuration = ExpectationConfiguration(
                    expectation_type=name, kwargs=expectation_kwargs, meta=meta
                )

                # runtime_configuration = configuration.get_runtime_kwargs()
                expectation = expectation_impl(configuration)
                """Given an implementation and a configuration for any Expectation, returns its validation result"""

                if not self.interactive_evaluation and not self._active_validation:
                    validation_result = ExpectationValidationResult(
                        expectation_config=copy.deepcopy(expectation.configuration)
                    )
                else:
                    validation_result = expectation.validate(
                        validator=self,
                        evaluation_parameters=self._expectation_suite.evaluation_parameters,
                        data_context=self._data_context,
                        runtime_configuration=basic_runtime_configuration,
                    )

                # If validate has set active_validation to true, then we do not save the config to avoid
                # saving updating expectation configs to the same suite during validation runs
                if self._active_validation is True:
                    stored_config = configuration.get_raw_configuration()
                else:
                    # Append the expectation to the config.
                    stored_config = self._expectation_suite.add_expectation(
                        configuration.get_raw_configuration()
                    )

                # If there was no interactive evaluation, success will not have been computed.
                if validation_result.success is not None:
                    # Add a "success" object to the config
                    stored_config.success_on_last_run = validation_result.success

                if self._data_context is not None:
                    validation_result = self._data_context.update_return_obj(
                        self, validation_result
                    )
            except Exception as err:
                # NOTE(review): basic_runtime_configuration is assigned partway
                # through the try block above; an exception raised before that
                # assignment (e.g. InvalidExpectationConfigurationError) makes
                # this reference raise NameError instead. Confirm and guard.
                if basic_runtime_configuration.get("catch_exceptions"):
                    raised_exception = True
                    exception_traceback = traceback.format_exc()
                    exception_message = "{}: {}".format(type(err).__name__, str(err))

                    validation_result = ExpectationValidationResult(success=False)

                    validation_result.exception_info = {
                        "raised_exception": raised_exception,
                        "exception_message": exception_message,
                        "exception_traceback": exception_traceback,
                    }

                else:
                    raise err
            return validation_result

        # Expose the wrapper under the expectation's own name for introspection.
        inst_expectation.__name__ = name

        return inst_expectation

    @property
    def execution_engine(self):
        """Returns the execution engine being used by the validator at the given time"""
        return self._execution_engine

    def list_available_expectation_types(self):
        """Returns a list of all expectations available to the validator"""
        keys = dir(self)
        return [
            expectation for expectation in keys if expectation.startswith("expect_")
        ]

    def get_metrics(self, metrics: Dict[str, MetricConfiguration]) -> Dict[str, Any]:
        """Return a dictionary with the requested metrics, keyed like the input.

        Fills in default domain/value kwargs from each metric provider, builds a
        dependency graph, and resolves it.
        """
        graph = ValidationGraph()
        resolved_metrics = {}
        for metric_name, metric_configuration in metrics.items():
            provider_cls, _ = get_metric_provider(
                metric_configuration.metric_name, self.execution_engine
            )
            # Backfill provider defaults for any domain/value kwargs the caller omitted.
            for key in provider_cls.domain_keys:
                if (
                    key not in metric_configuration.metric_domain_kwargs
                    and key in provider_cls.default_kwarg_values
                ):
                    metric_configuration.metric_domain_kwargs[
                        key
                    ] = provider_cls.default_kwarg_values[key]
            for key in provider_cls.value_keys:
                if (
                    key not in metric_configuration.metric_value_kwargs
                    and key in provider_cls.default_kwarg_values
                ):
                    metric_configuration.metric_value_kwargs[
                        key
                    ] = provider_cls.default_kwarg_values[key]
            self.build_metric_dependency_graph(
                graph,
                child_node=metric_configuration,
                configuration=None,
                execution_engine=self._execution_engine,
                runtime_configuration=None,
            )
        self.resolve_validation_graph(graph, resolved_metrics)
        return {
            metric_name: resolved_metrics[metric_configuration.id]
            for (metric_name, metric_configuration) in metrics.items()
        }

    def get_metric(self, metric: MetricConfiguration) -> Any:
        """return the value of the requested metric."""
        return self.get_metrics({"_": metric})["_"]

    def build_metric_dependency_graph(
        self,
        graph: ValidationGraph,
        child_node: MetricConfiguration,
        configuration: Optional[ExpectationConfiguration],
        execution_engine: "ExecutionEngine",
        parent_node: Optional[MetricConfiguration] = None,
        runtime_configuration: Optional[dict] = None,
    ) -> None:
        """Obtain domain and value keys for metrics and proceeds to add these metrics to the validation graph
        until all metrics have been added.

        Recurses through each metric's evaluation dependencies; a metric with no
        dependencies is added as a leaf edge (right side None). Self-referential
        dependencies are logged and skipped to avoid infinite recursion.
        """

        # metric_kwargs = get_metric_kwargs(metric_name)
        metric_impl = get_metric_provider(
            child_node.metric_name, execution_engine=execution_engine
        )[0]
        metric_dependencies = metric_impl.get_evaluation_dependencies(
            metric=child_node,
            configuration=configuration,
            execution_engine=execution_engine,
            runtime_configuration=runtime_configuration,
        )
        child_node.metric_dependencies = metric_dependencies

        if parent_node:
            graph.add(
                MetricEdge(
                    parent_node,
                    child_node,
                )
            )

        if len(metric_dependencies) == 0:
            graph.add(
                MetricEdge(
                    child_node,
                    None,
                )
            )

        else:
            for metric_dependency in metric_dependencies.values():
                if metric_dependency.id == child_node.id:
                    logger.warning(
                        f"Metric {str(child_node.id)} has created a circular dependency"
                    )
                    continue
                self.build_metric_dependency_graph(
                    graph,
                    metric_dependency,
                    configuration,
                    execution_engine,
                    child_node,
                    runtime_configuration=runtime_configuration,
                )

    def graph_validate(
        self,
        configurations: List[ExpectationConfiguration],
        metrics: dict = None,
        runtime_configuration: dict = None,
    ) -> List[ExpectationValidationResult]:
        """Obtains validation dependencies for each metric using the implementation of their associated expectation,
        then proceeds to add these dependencies to the validation graph, supply readily available metric implementations
        to fulfill current metric requirements, and validate these metrics.

        Args:
            configurations(List[ExpectationConfiguration]): A list of needed Expectation Configurations that will
            be used to supply domain and values for metrics.
            metrics (dict): A dictionary of currently registered metrics in the registry
            runtime_configuration (dict): A dictionary of runtime keyword arguments, controlling semantics
            such as the result_format.

        Returns:
            A list of ExpectationValidationResults, one per configuration (plus
            error results for configurations that failed when catch_exceptions
            is enabled).
        """
        graph = ValidationGraph()
        if runtime_configuration is None:
            runtime_configuration = dict()

        # catch_exceptions defaults to True unless explicitly disabled.
        if runtime_configuration.get("catch_exceptions", True):
            catch_exceptions = True
        else:
            catch_exceptions = False

        processed_configurations = []
        evrs = []
        for configuration in configurations:
            # Validating
            try:
                assert (
                    configuration.expectation_type is not None
                ), "Given configuration should include expectation type"
            except AssertionError as e:
                raise InvalidExpectationConfigurationError(str(e))

            expectation_impl = get_expectation_impl(configuration.expectation_type)
            validation_dependencies = expectation_impl().get_validation_dependencies(
                configuration, self._execution_engine, runtime_configuration
            )["metrics"]

            try:
                for metric in validation_dependencies.values():
                    self.build_metric_dependency_graph(
                        graph,
                        metric,
                        configuration,
                        self._execution_engine,
                        runtime_configuration=runtime_configuration,
                    )
                processed_configurations.append(configuration)
            except Exception as err:
                if catch_exceptions:
                    raised_exception = True
                    exception_traceback = traceback.format_exc()
                    result = ExpectationValidationResult(
                        success=False,
                        exception_info={
                            "raised_exception": raised_exception,
                            "exception_traceback": exception_traceback,
                            "exception_message": str(err),
                        },
                    )
                    evrs.append(result)
                else:
                    raise err

        if metrics is None:
            metrics = dict()

        # Resolve every metric in the graph, then validate each configuration against them.
        metrics = self.resolve_validation_graph(graph, metrics, runtime_configuration)
        for configuration in processed_configurations:
            try:
                result = configuration.metrics_validate(
                    metrics,
                    execution_engine=self._execution_engine,
                    runtime_configuration=runtime_configuration,
                )
                evrs.append(result)
            except Exception as err:
                if catch_exceptions:
                    raised_exception = True
                    exception_traceback = traceback.format_exc()

                    result = ExpectationValidationResult(
                        success=False,
                        exception_info={
                            "raised_exception": raised_exception,
                            "exception_traceback": exception_traceback,
                            "exception_message": str(err),
                        },
                    )
                    evrs.append(result)
                else:
                    raise err
        return evrs

    def resolve_validation_graph(self, graph, metrics, runtime_configuration=None):
        """Iteratively resolve all metrics in the graph, mutating and returning `metrics`.

        Each pass resolves the currently "ready" metrics (those whose
        dependencies are already in `metrics`) until nothing remains.
        Progress is shown with tqdm; note the progress-bar total is fixed on
        the first pass, so it may not reflect metrics discovered later.
        """
        done: bool = False
        pbar = None
        while not done:
            ready_metrics, needed_metrics = self._parse_validation_graph(graph, metrics)
            if pbar is None:
                pbar = tqdm(
                    total=len(ready_metrics) + len(needed_metrics),
                    desc="Calculating Metrics",
                    disable=len(graph._edges) < 3,
                )
                pbar.update(0)
            metrics.update(
                self._resolve_metrics(
                    execution_engine=self._execution_engine,
                    metrics_to_resolve=ready_metrics,
                    metrics=metrics,
                    runtime_configuration=runtime_configuration,
                )
            )
            pbar.update(len(ready_metrics))
            if len(ready_metrics) + len(needed_metrics) == 0:
                done = True
        pbar.close()
        return metrics

    def _parse_validation_graph(self, validation_graph, metrics):
        """Given validation graph, returns the ready and needed metrics necessary for validation using a traversal of
        validation graph (a graph structure of metric ids) edges"""
        unmet_dependency_ids = set()
        unmet_dependency = set()
        maybe_ready_ids = set()
        maybe_ready = set()

        for edge in validation_graph.edges:
            if edge.left.id not in metrics:
                if edge.right is None or edge.right.id in metrics:
                    if edge.left.id not in maybe_ready_ids:
                        maybe_ready_ids.add(edge.left.id)
                        maybe_ready.add(edge.left)
                else:
                    if edge.left.id not in unmet_dependency_ids:
                        unmet_dependency_ids.add(edge.left.id)
                        unmet_dependency.add(edge.left)

        # A metric is truly ready only if no edge marked it as having an unmet dependency.
        return maybe_ready - unmet_dependency, unmet_dependency

    def _resolve_metrics(
        self,
        execution_engine: "ExecutionEngine",
        metrics_to_resolve: Iterable[MetricConfiguration],
        metrics: Dict,
        runtime_configuration: dict = None,
    ):
        """A means of accessing the Execution Engine's resolve_metrics method, where missing metric configurations are
        resolved"""
        return execution_engine.resolve_metrics(
            metrics_to_resolve, metrics, runtime_configuration
        )

    def _initialize_expectations(
        self,
        expectation_suite: ExpectationSuite = None,
        expectation_suite_name: str = None,
    ):
        """Instantiates `_expectation_suite` as empty by default or with a specified expectation `config`.
        In addition, this always sets the `default_expectation_args` to:
            `include_config`: False,
            `catch_exceptions`: False,
            `output_format`: 'BASIC'

        By default, initializes data_asset_type to the name of the implementing class, but subclasses
        that have interoperable semantics (e.g. Dataset) may override that parameter to clarify their
        interoperability.

        Args:
            expectation_suite (json): \
                A json-serializable expectation config. \
                If None, creates default `_expectation_suite` with an empty list of expectations and \
                key value `data_asset_name` as `data_asset_name`.

            expectation_suite_name (string): \
                The name to assign to the `expectation_suite.expectation_suite_name`

        Returns:
            None
        """
        # Checking type of expectation_suite.
        # Check for expectation_suite_name is already done by ExpectationSuiteIdentifier
        if expectation_suite and not isinstance(expectation_suite, ExpectationSuite):
            raise TypeError(
                "expectation_suite must be of type ExpectationSuite, not {}".format(
                    type(expectation_suite)
                )
            )
        if expectation_suite is not None:
            if isinstance(expectation_suite, dict):
                expectation_suite = expectationSuiteSchema.load(expectation_suite)
            else:
                # Deep-copy so the validator's mutations don't leak into the caller's suite.
                expectation_suite = copy.deepcopy(expectation_suite)
            self._expectation_suite = expectation_suite

            if expectation_suite_name is not None:
                if (
                    self._expectation_suite.expectation_suite_name
                    != expectation_suite_name
                ):
                    logger.warning(
                        "Overriding existing expectation_suite_name {n1} with new name {n2}".format(
                            n1=self._expectation_suite.expectation_suite_name,
                            n2=expectation_suite_name,
                        )
                    )
                self._expectation_suite.expectation_suite_name = expectation_suite_name

        else:
            if expectation_suite_name is None:
                expectation_suite_name = "default"
            self._expectation_suite = ExpectationSuite(
                expectation_suite_name=expectation_suite_name
            )

        self._expectation_suite.execution_engine_type = type(
            self.execution_engine
        ).__name__

    def append_expectation(self, expectation_config):
        """This method is a thin wrapper for ExpectationSuite.append_expectation"""
        warnings.warn(
            "append_expectation is deprecated, and will be removed in a future release. "
            + "Please use ExpectationSuite.add_expectation instead.",
            DeprecationWarning,
        )
        self._expectation_suite.append_expectation(expectation_config)

    def find_expectation_indexes(
        self,
        expectation_configuration: ExpectationConfiguration,
        match_type: str = "domain",
    ) -> List[int]:
        """This method is a thin wrapper for ExpectationSuite.find_expectation_indexes"""
        warnings.warn(
            "find_expectation_indexes is deprecated, and will be removed in a future release. "
            + "Please use ExpectationSuite.find_expectation_indexes instead.",
            DeprecationWarning,
        )
        return self._expectation_suite.find_expectation_indexes(
            expectation_configuration=expectation_configuration, match_type=match_type
        )

    def find_expectations(
        self,
        expectation_configuration: ExpectationConfiguration,
        match_type: str = "domain",
    ) -> List[ExpectationConfiguration]:
        """This method is a thin wrapper for ExpectationSuite.find_expectations()"""
        warnings.warn(
            "find_expectations is deprecated, and will be removed in a future release. "
            + "Please use ExpectationSuite.find_expectation_indexes instead.",
            DeprecationWarning,
        )
        return self._expectation_suite.find_expectations(
            expectation_configuration=expectation_configuration, match_type=match_type
        )

    def remove_expectation(
        self,
        expectation_configuration: ExpectationConfiguration,
        match_type: str = "domain",
        remove_multiple_matches: bool = False,
    ) -> List[ExpectationConfiguration]:
        """This method is a thin wrapper for ExpectationSuite.remove()"""
        warnings.warn(
            "DataAsset.remove_expectations is deprecated, and will be removed in a future release. "
            + "Please use ExpectationSuite.remove_expectation instead.",
            DeprecationWarning,
        )
        return self._expectation_suite.remove_expectation(
            expectation_configuration=expectation_configuration,
            match_type=match_type,
            remove_multiple_matches=remove_multiple_matches,
        )

    def set_config_value(self, key, value):
        """Setter for config value"""
        self._validator_config[key] = value

    def get_config_value(self, key):
        """Getter for config value"""
        return self._validator_config.get(key)

    def load_batch(self, batch_list: List[Batch]):
        """Load each batch's data into the execution engine and register it on this validator."""
        for batch in batch_list:
            self._execution_engine.load_batch_data(batch.id, batch.data)
            self._batches[batch.id] = batch
            # We set the active_batch_id in each iteration of the loop to keep in sync with the active_batch_id for the
            # execution_engine. The final active_batch_id will be that of the final batch loaded.
            self.active_batch_id = batch.id

        return batch_list

    @property
    def batches(self) -> Dict[str, Batch]:
        """Getter for batches"""
        return self._batches

    @property
    def loaded_batch_ids(self) -> List[str]:
        """Ids of all batch data currently loaded in the execution engine."""
        return self.execution_engine.loaded_batch_data_ids

    @property
    def active_batch(self) -> Batch:
        """Getter for active batch"""
        active_batch_id: str = self.execution_engine.active_batch_data_id
        batch: Batch = self.batches.get(active_batch_id) if active_batch_id else None
        return batch

    @property
    def active_batch_spec(self) -> Optional[BatchSpec]:
        """Getter for active batch's batch_spec"""
        if not self.active_batch:
            return None
        else:
            return self.active_batch.batch_spec

    @property
    def active_batch_id(self) -> str:
        """Getter for active batch id"""
        return self.execution_engine.active_batch_data_id

    @active_batch_id.setter
    def active_batch_id(self, batch_id: str):
        # Every batch known to the validator must already be loaded in the engine.
        assert set(self.batches.keys()).issubset(set(self.loaded_batch_ids))
        available_batch_ids: Set[str] = set(self.batches.keys()).union(
            set(self.loaded_batch_ids)
        )
        if batch_id not in available_batch_ids:
            raise ValueError(
                f"""batch_id {batch_id} not found in loaded batches.  Batches must first be loaded before they can be \
set as active.
"""
            )
        else:
            self.execution_engine._active_batch_data_id = batch_id

    @property
    def active_batch_markers(self):
        """Getter for active batch's batch markers"""
        if not self.active_batch:
            return None
        else:
            return self.active_batch.batch_markers

    @property
    def active_batch_definition(self):
        """Getter for the active batch's batch definition"""
        if not self.active_batch:
            return None
        else:
            return self.active_batch.batch_definition

    def discard_failing_expectations(self):
        """Removes any expectations from the validator where the validation has failed"""
        res = self.validate(only_return_failures=True).results
        if any(res):
            for item in res:
                self.remove_expectation(
                    expectation_configuration=item.expectation_config,
                    match_type="runtime",
                )
            warnings.warn("Removed %s expectations that were 'False'" % len(res))

    def get_default_expectation_arguments(self):
        """Fetch default expectation arguments for this data_asset

        Returns:
            A dictionary containing all the current default expectation arguments for a data_asset

            Ex::

                {
                    "include_config" : True,
                    "catch_exceptions" : False,
                    "result_format" : 'BASIC'
                }

        See also:
            set_default_expectation_arguments
        """
        return self._default_expectation_args

    @property
    def default_expectation_args(self):
        """A getter for default Expectation arguments"""
        return self._default_expectation_args

    def set_default_expectation_argument(self, argument, value):
        """
        Set a default expectation argument for this data_asset

        Args:
            argument (string): The argument to be replaced
            value : The New argument to use for replacement

        Returns:
            None

        See also:
            get_default_expectation_arguments
        """
        self._default_expectation_args[argument] = value

    def get_expectations_config(
        self,
        discard_failed_expectations=True,
        discard_result_format_kwargs=True,
        discard_include_config_kwargs=True,
        discard_catch_exceptions_kwargs=True,
        suppress_warnings=False,
    ):
        """
        Returns an expectation configuration, providing an option to discard failed
expectation and discard/ include' different result aspects, such as exceptions and result format. """ warnings.warn( "get_expectations_config is deprecated, and will be removed in a future release. " + "Please use get_expectation_suite instead.", DeprecationWarning, ) return self.get_expectation_suite( discard_failed_expectations, discard_result_format_kwargs, discard_include_config_kwargs, discard_catch_exceptions_kwargs, suppress_warnings, ) def get_expectation_suite( self, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False, suppress_logging=False, ): """Returns _expectation_config as a JSON object, and perform some cleaning along the way. Args: discard_failed_expectations (boolean): \ Only include expectations with success_on_last_run=True in the exported config. Defaults to `True`. discard_result_format_kwargs (boolean): \ In returned expectation objects, suppress the `result_format` parameter. Defaults to `True`. discard_include_config_kwargs (boolean): \ In returned expectation objects, suppress the `include_config` parameter. Defaults to `True`. discard_catch_exceptions_kwargs (boolean): \ In returned expectation objects, suppress the `catch_exceptions` parameter. Defaults to `True`. suppress_warnings (boolean): \ If true, do not include warnings in logging information about the operation. suppress_logging (boolean): \ If true, do not create a log entry (useful when using get_expectation_suite programmatically) Returns: An expectation suite. Note: get_expectation_suite does not affect the underlying expectation suite at all. The returned suite is a \ copy of _expectation_suite, not the original object. 
""" expectation_suite = copy.deepcopy(self._expectation_suite) expectations = expectation_suite.expectations discards = defaultdict(int) if discard_failed_expectations: new_expectations = [] for expectation in expectations: # Note: This is conservative logic. # Instead of retaining expectations IFF success==True, it discard expectations IFF success==False. # In cases where expectation.success is missing or None, expectations are *retained*. # Such a case could occur if expectations were loaded from a config file and never run. if expectation.success_on_last_run is False: discards["failed_expectations"] += 1 else: new_expectations.append(expectation) expectations = new_expectations message = "\t%d expectation(s) included in expectation_suite." % len( expectations ) if discards["failed_expectations"] > 0 and not suppress_warnings: message += ( " Omitting %d expectation(s) that failed when last run; set " "discard_failed_expectations=False to include them." % discards["failed_expectations"] ) for expectation in expectations: # FIXME: Factor this out into a new function. 
The logic is duplicated in remove_expectation, # which calls _copy_and_clean_up_expectation expectation.success_on_last_run = None if discard_result_format_kwargs: if "result_format" in expectation.kwargs: del expectation.kwargs["result_format"] discards["result_format"] += 1 if discard_include_config_kwargs: if "include_config" in expectation.kwargs: del expectation.kwargs["include_config"] discards["include_config"] += 1 if discard_catch_exceptions_kwargs: if "catch_exceptions" in expectation.kwargs: del expectation.kwargs["catch_exceptions"] discards["catch_exceptions"] += 1 settings_message = "" if discards["result_format"] > 0 and not suppress_warnings: settings_message += " result_format" if discards["include_config"] > 0 and not suppress_warnings: settings_message += " include_config" if discards["catch_exceptions"] > 0 and not suppress_warnings: settings_message += " catch_exceptions" if ( len(settings_message) > 1 ): # Only add this if we added one of the settings above. settings_message += " settings filtered." expectation_suite.expectations = expectations if not suppress_logging: logger.info(message + settings_message) return expectation_suite def save_expectation_suite( self, filepath=None, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False, ): """Writes ``_expectation_config`` to a JSON file. Writes the DataAsset's expectation config to the specified JSON ``filepath``. Failing expectations \ can be excluded from the JSON expectations config with ``discard_failed_expectations``. The kwarg key-value \ pairs :ref:`result_format`, :ref:`include_config`, and :ref:`catch_exceptions` are optionally excluded from \ the JSON expectations config. Args: filepath (string): \ The location and name to write the JSON config file to. discard_failed_expectations (boolean): \ If True, excludes expectations that do not return ``success = True``. 
\ If False, all expectations are written to the JSON config file. discard_result_format_kwargs (boolean): \ If True, the :ref:`result_format` attribute for each expectation is not written to the JSON config \ file. discard_include_config_kwargs (boolean): \ If True, the :ref:`include_config` attribute for each expectation is not written to the JSON config \ file. discard_catch_exceptions_kwargs (boolean): \ If True, the :ref:`catch_exceptions` attribute for each expectation is not written to the JSON \ config file. suppress_warnings (boolean): \ It True, all warnings raised by Great Expectations, as a result of dropped expectations, are \ suppressed. """ expectation_suite = self.get_expectation_suite( discard_failed_expectations, discard_result_format_kwargs, discard_include_config_kwargs, discard_catch_exceptions_kwargs, suppress_warnings, ) if filepath is None and self._data_context is not None: self._data_context.save_expectation_suite(expectation_suite) elif filepath is not None: with open(filepath, "w") as outfile: json.dump( expectationSuiteSchema.dump(expectation_suite), outfile, indent=2, sort_keys=True, ) else: raise ValueError( "Unable to save config: filepath or data_context must be available." ) def validate( self, expectation_suite=None, run_id=None, data_context=None, evaluation_parameters=None, catch_exceptions=True, result_format=None, only_return_failures=False, run_name=None, run_time=None, ): """Generates a JSON-formatted report describing the outcome of all expectations. Use the default expectation_suite=None to validate the expectations config associated with the DataAsset. Args: expectation_suite (json or None): \ If None, uses the expectations config generated with the DataAsset during the current session. \ If a JSON file, validates those expectations. run_name (str): \ Used to identify this validation result as part of a collection of validations. \ See DataContext for more information. 
data_context (DataContext): \ A datacontext object to use as part of validation for binding evaluation parameters and \ registering validation results. evaluation_parameters (dict or None): \ If None, uses the evaluation_paramters from the expectation_suite provided or as part of the \ data_asset. If a dict, uses the evaluation parameters in the dictionary. catch_exceptions (boolean): \ If True, exceptions raised by tests will not end validation and will be described in the returned \ report. result_format (string or None): \ If None, uses the default value ('BASIC' or as specified). \ If string, the returned expectation output follows the specified format ('BOOLEAN_ONLY','BASIC', \ etc.). only_return_failures (boolean): \ If True, expectation results are only returned when ``success = False`` \ Returns: A JSON-formatted dictionary containing a list of the validation results. \ An example of the returned format:: { "results": [ { "unexpected_list": [unexpected_value_1, unexpected_value_2], "expectation_type": "expect_*", "kwargs": { "column": "Column_Name", "output_format": "SUMMARY" }, "success": true, "raised_exception: false. "exception_traceback": null }, { ... (Second expectation results) }, ... (More expectations results) ], "success": true, "statistics": { "evaluated_expectations": n, "successful_expectations": m, "unsuccessful_expectations": n - m, "success_percent": m / n } } Notes: If the configuration object was built with a different version of great expectations then the \ current environment. If no version was found in the configuration file. Raises: AttributeError - if 'catch_exceptions'=None and an expectation throws an AttributeError """ try: validation_time = datetime.datetime.now(datetime.timezone.utc).strftime( "%Y%m%dT%H%M%S.%fZ" ) assert not (run_id and run_name) and not ( run_id and run_time ), "Please provide either a run_id or run_name and/or run_time." 
if isinstance(run_id, str) and not run_name: warnings.warn( "String run_ids will be deprecated in the future. Please provide a run_id of type " "RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name " "and run_time (both optional). Instead of providing a run_id, you may also provide" "run_name and run_time separately.", DeprecationWarning, ) try: run_time = parse(run_id) except (ValueError, TypeError): pass run_id = RunIdentifier(run_name=run_id, run_time=run_time) elif isinstance(run_id, dict): run_id = RunIdentifier(**run_id) elif not isinstance(run_id, RunIdentifier): run_id = RunIdentifier(run_name=run_name, run_time=run_time) self._active_validation = True if result_format is None: result_format = {"result_format": "BASIC"} # If a different validation data context was provided, override validate__data_context = self._data_context if data_context is None and self._data_context is not None: data_context = self._data_context elif data_context is not None: # temporarily set self._data_context so it is used inside the expectation decorator self._data_context = data_context if expectation_suite is None: expectation_suite = self.get_expectation_suite( discard_failed_expectations=False, discard_result_format_kwargs=False, discard_include_config_kwargs=False, discard_catch_exceptions_kwargs=False, ) elif isinstance(expectation_suite, str): try: with open(expectation_suite) as infile: expectation_suite = expectationSuiteSchema.loads(infile.read()) except ValidationError: raise except OSError: raise GreatExpectationsError( "Unable to load expectation suite: IO error while reading %s" % expectation_suite ) elif not isinstance(expectation_suite, ExpectationSuite): logger.error( "Unable to validate using the provided value for expectation suite; does it need to be " "loaded from a dictionary?" 
) if getattr(data_context, "_usage_statistics_handler", None): handler = data_context._usage_statistics_handler handler.send_usage_message( event="data_asset.validate", event_payload=handler._batch_anonymizer.anonymize_batch_info( self ), success=False, ) return ExpectationValidationResult(success=False) # Evaluation parameter priority is # 1. from provided parameters # 2. from expectation configuration # 3. from data context # So, we load them in reverse order if data_context is not None: runtime_evaluation_parameters = ( data_context.evaluation_parameter_store.get_bind_params(run_id) ) else: runtime_evaluation_parameters = {} if expectation_suite.evaluation_parameters: runtime_evaluation_parameters.update( expectation_suite.evaluation_parameters ) if evaluation_parameters is not None: runtime_evaluation_parameters.update(evaluation_parameters) # Convert evaluation parameters to be json-serializable runtime_evaluation_parameters = recursively_convert_to_json_serializable( runtime_evaluation_parameters ) # Warn if our version is different from the version in the configuration # TODO: Deprecate "great_expectations.__version__" suite_ge_version = expectation_suite.meta.get( "great_expectations_version" ) or expectation_suite.meta.get("great_expectations.__version__") # Group expectations by column columns = {} for expectation in expectation_suite.expectations: expectation.process_evaluation_parameters( evaluation_parameters=runtime_evaluation_parameters, interactive_evaluation=self.interactive_evaluation, data_context=self._data_context, ) if "column" in expectation.kwargs and isinstance( expectation.kwargs["column"], Hashable ): column = expectation.kwargs["column"] else: column = "_nocolumn" if column not in columns: columns[column] = [] columns[column].append(expectation) expectations_to_evaluate = [] for col in columns: expectations_to_evaluate.extend(columns[col]) results = self.graph_validate( expectations_to_evaluate, runtime_configuration={ 
"catch_exceptions": catch_exceptions, "result_format": result_format, }, ) statistics = _calc_validation_statistics(results) if only_return_failures: abbrev_results = [] for exp in results: if not exp.success: abbrev_results.append(exp) results = abbrev_results expectation_suite_name = expectation_suite.expectation_suite_name result = ExpectationSuiteValidationResult( results=results, success=statistics.success, statistics={ "evaluated_expectations": statistics.evaluated_expectations, "successful_expectations": statistics.successful_expectations, "unsuccessful_expectations": statistics.unsuccessful_expectations, "success_percent": statistics.success_percent, }, evaluation_parameters=runtime_evaluation_parameters, meta={ "great_expectations_version": ge_version, "expectation_suite_name": expectation_suite_name, "run_id": run_id, "batch_spec": self.active_batch_spec, "batch_markers": self.active_batch_markers, "active_batch_definition": self.active_batch_definition, "validation_time": validation_time, }, ) self._data_context = validate__data_context except Exception as e: if getattr(data_context, "_usage_statistics_handler", None): handler = data_context._usage_statistics_handler handler.send_usage_message( event="data_asset.validate", event_payload=handler._batch_anonymizer.anonymize_batch_info(self), success=False, ) raise finally: self._active_validation = False if getattr(data_context, "_usage_statistics_handler", None): handler = data_context._usage_statistics_handler handler.send_usage_message( event="data_asset.validate", event_payload=handler._batch_anonymizer.anonymize_batch_info(self), success=True, ) return result def get_evaluation_parameter(self, parameter_name, default_value=None): """ Get an evaluation parameter value that has been stored in meta. Args: parameter_name (string): The name of the parameter to store. default_value (any): The default value to be returned if the parameter is not found. Returns: The current value of the evaluation parameter. 
""" if parameter_name in self._expectation_suite.evaluation_parameters: return self._expectation_suite.evaluation_parameters[parameter_name] else: return default_value def set_evaluation_parameter(self, parameter_name, parameter_value): """ Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate parameterized expectations. Args: parameter_name (string): The name of the kwarg to be replaced at evaluation time parameter_value (any): The value to be used """ self._expectation_suite.evaluation_parameters.update( {parameter_name: parameter_value} ) def add_citation( self, comment, batch_spec=None, batch_markers=None, batch_definition=None, citation_date=None, ): """Adds a citation to an existing Expectation Suite within the validator""" if batch_spec is None: batch_spec = self.batch_spec if batch_markers is None: batch_markers = self.active_batch_markers if batch_definition is None: batch_definition = self.active_batch_definition self._expectation_suite.add_citation( comment, batch_spec=batch_spec, batch_markers=batch_markers, batch_definition=batch_definition, citation_date=citation_date, ) @property def expectation_suite_name(self): """Gets the current expectation_suite name of this data_asset as stored in the expectations configuration.""" return self._expectation_suite.expectation_suite_name @expectation_suite_name.setter def expectation_suite_name(self, expectation_suite_name): """Sets the expectation_suite name of this data_asset as stored in the expectations configuration.""" self._expectation_suite.expectation_suite_name = expectation_suite_name def test_expectation_function(self, function, *args, **kwargs): """Test a generic expectation function Args: function (func): The function to be tested. (Must be a valid expectation function.) *args : Positional arguments to be passed the the function **kwargs : Keyword arguments to be passed the the function Returns: A JSON-serializable expectation result object. 
Notes: This function is a thin layer to allow quick testing of new expectation functions, without having to \ define custom classes, etc. To use developed expectations from the command-line tool, you will still need \ to define custom classes, etc. Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information. """ argspec = inspect.getfullargspec(function)[0][1:] new_function = self.expectation(argspec)(function) return new_function(self, *args, **kwargs) def columns(self, domain_kwargs: Optional[Dict[str, Any]] = None) -> List[str]: if domain_kwargs is None: domain_kwargs = { "batch_id": self.execution_engine.active_batch_data_id, } columns: List[str] = self.get_metric( metric=MetricConfiguration( metric_name="table.columns", metric_domain_kwargs=domain_kwargs, ) ) return columns def head( self, n_rows: Optional[int] = 5, domain_kwargs: Optional[Dict[str, Any]] = None, fetch_all: Optional[bool] = False, ) -> pd.DataFrame: if domain_kwargs is None: domain_kwargs = { "batch_id": self.execution_engine.active_batch_data_id, } data: Any = self.get_metric( metric=MetricConfiguration( metric_name="table.head", metric_domain_kwargs=domain_kwargs, metric_value_kwargs={ "n_rows": n_rows, "fetch_all": fetch_all, }, ) ) df: pd.DataFrame if isinstance( self.execution_engine, (PandasExecutionEngine, SqlAlchemyExecutionEngine) ): df = pd.DataFrame(data=data) elif isinstance(self.execution_engine, SparkDFExecutionEngine): rows: List[Dict[str, Any]] = [datum.asDict() for datum in data] df = pd.DataFrame(data=rows) else: raise GreatExpectationsError( "Unsupported or unknown ExecutionEngine type encountered in Validator class." 
)

        return df.reset_index(drop=True, inplace=False)


# Summary statistics describing the outcome of one validation run.
ValidationStatistics = namedtuple(
    "ValidationStatistics",
    [
        "evaluated_expectations",
        "successful_expectations",
        "unsuccessful_expectations",
        "success_percent",
        "success",
    ],
)


def _calc_validation_statistics(validation_results):
    """
    Calculate summary statistics for the validation results and
    return ``ValidationStatistics``.
    """
    # calc stats
    successful_expectations = sum(exp.success for exp in validation_results)
    evaluated_expectations = len(validation_results)
    unsuccessful_expectations = evaluated_expectations - successful_expectations
    # Overall success requires every evaluated expectation to have succeeded.
    success = successful_expectations == evaluated_expectations
    try:
        success_percent = successful_expectations / evaluated_expectations * 100
    except ZeroDivisionError:
        # An empty result set has no meaningful percentage.
        # success_percent = float("nan")
        success_percent = None

    return ValidationStatistics(
        successful_expectations=successful_expectations,
        evaluated_expectations=evaluated_expectations,
        unsuccessful_expectations=unsuccessful_expectations,
        success=success,
        success_percent=success_percent,
    )


class BridgeValidator:
    """This is currently helping bridge APIs"""

    def __init__(self, batch, expectation_suite, expectation_engine=None, **kwargs):
        """Builds an expectation_engine object using an expectation suite and a batch, with the expectation engine being
        determined either by the user or by the type of batch data (pandas dataframe, SqlAlchemy table, etc.)

        Args:
            batch (Batch): A Batch in Pandas, Spark, or SQL format
            expectation_suite (ExpectationSuite): The Expectation Suite available to the validator within the current Data
                Context
            expectation_engine (ExecutionEngine): The current Execution Engine being utilized. 
If this is not set, it is determined by the type of data within the given batch """ self.batch = batch self.expectation_suite = expectation_suite if isinstance(expectation_engine, dict): expectation_engine = ClassConfig(**expectation_engine) if isinstance(expectation_engine, ClassConfig): module_name = expectation_engine.module_name or "great_expectations.dataset" verify_dynamic_loading_support(module_name=module_name) expectation_engine = load_class( class_name=expectation_engine.class_name, module_name=module_name ) self.expectation_engine = expectation_engine if self.expectation_engine is None: # Guess the engine try: import pandas as pd if isinstance(batch.data, pd.DataFrame): self.expectation_engine = PandasDataset except ImportError: pass if self.expectation_engine is None: if isinstance(batch.data, SqlAlchemyBatchReference): self.expectation_engine = SqlAlchemyDataset if self.expectation_engine is None: try: import pyspark if isinstance(batch.data, pyspark.sql.DataFrame): self.expectation_engine = SparkDFDataset except ImportError: pass if self.expectation_engine is None: raise ValueError( "Unable to identify expectation_engine. It must be a subclass of DataAsset." ) self.init_kwargs = kwargs def get_dataset(self): """ Bridges between Execution Engines in providing access to the batch data. Validates that Dataset classes contain proper type of data (i.e. 
a Pandas Dataset does not contain SqlAlchemy data) """ if issubclass(self.expectation_engine, PandasDataset): import pandas as pd if not isinstance(self.batch["data"], pd.DataFrame): raise ValueError( "PandasDataset expectation_engine requires a Pandas Dataframe for its batch" ) return self.expectation_engine( self.batch.data, expectation_suite=self.expectation_suite, batch_kwargs=self.batch.batch_kwargs, batch_parameters=self.batch.batch_parameters, batch_markers=self.batch.batch_markers, data_context=self.batch.data_context, **self.init_kwargs, **self.batch.batch_kwargs.get("dataset_options", {}), ) elif issubclass(self.expectation_engine, SqlAlchemyDataset): if not isinstance(self.batch.data, SqlAlchemyBatchReference): raise ValueError( "SqlAlchemyDataset expectation_engine requires a SqlAlchemyBatchReference for its batch" ) init_kwargs = self.batch.data.get_init_kwargs() init_kwargs.update(self.init_kwargs) return self.expectation_engine( batch_kwargs=self.batch.batch_kwargs, batch_parameters=self.batch.batch_parameters, batch_markers=self.batch.batch_markers, data_context=self.batch.data_context, expectation_suite=self.expectation_suite, **init_kwargs, **self.batch.batch_kwargs.get("dataset_options", {}), ) elif issubclass(self.expectation_engine, SparkDFDataset): import pyspark if not isinstance(self.batch.data, pyspark.sql.DataFrame): raise ValueError( "SparkDFDataset expectation_engine requires a spark DataFrame for its batch" ) return self.expectation_engine( spark_df=self.batch.data, expectation_suite=self.expectation_suite, batch_kwargs=self.batch.batch_kwargs, batch_parameters=self.batch.batch_parameters, batch_markers=self.batch.batch_markers, data_context=self.batch.data_context, **self.init_kwargs, **self.batch.batch_kwargs.get("dataset_options", {}), )
""" BSD 3-Clause License Copyright (c) 2021, Netskope OSS All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""" """Chronicle plugin.""" import time import datetime import requests import json import re from typing import List from jsonpath import jsonpath from netskope.common.utils import add_user_agent from netskope.common.utils import AlertsHelper from netskope.integrations.cls.plugin_base import ( PluginBase, ValidationResult, PushResult, ) from .utils.chronicle_client import ( ChronicleClient, ) from .utils.chronicle_helper import ( get_chronicle_mappings, ) from .utils.chronicle_udm_generator import ( # NOQA: E501 UDMGenerator, ) from .utils.chronicle_exceptions import ( MappingValidationError, EmptyExtensionError, FieldNotFoundError, ) from .utils.chronicle_validator import ( ChronicleValidator, ) class ChroniclePlugin(PluginBase): """The Chronicle plugin implementation class.""" def validate(self, configuration: dict) -> ValidationResult: """Validate the configuration parameters dict.""" chronicle_validator = ChronicleValidator(self.logger) if ( "base_url" not in configuration or type(configuration["base_url"]) != str or not configuration["base_url"].strip() ): self.logger.error( "Chronicle Plugin: Validation error occurred. Error: " "Invalid URL found in the configuration parameters." ) return ValidationResult( success=False, message="Invalid URL provided." ) # validating api key if ( "api_key" not in configuration or not configuration["api_key"].strip() or type(configuration["api_key"]) != str ): self.logger.error( "Plugin Chronicle: Validation error occurred. Error: \ Invalid API key found in the configuration parameters." ) return ValidationResult( success=False, message="Invalid API key provided." ) try: self._validate_auth(configuration) except Exception: self.logger.error( "Chronicle Plugin: Validation error occurred. Error: " "Connection to Chronicle platform is not established." ) return ValidationResult( success=False, message="Error occurred while establishing connection with Chronicle server. 
" "Make sure you have provided valid base url and API Token", ) # validating mapping file mappings = self.mappings.get("jsonData", None) try: mappings = json.loads(mappings) except json.decoder.JSONDecodeError as err: self.logger.error( f"Chronicle Plugin: error occurred decoding of json file: {err}" ) return ValidationResult( success=False, message=f"Invalid Chronicle attribute mapping provided. {err}", ) if type( mappings ) != dict or not chronicle_validator.validate_chronicle_map(mappings): self.logger.error( "Chronicle Plugin: Validation error occurred. Error: " "Invalid Chronicle attribute mapping found in the configuration parameters." ) return ValidationResult( success=False, message="Invalid Chronicle attribute mapping provided.", ) # validating valid extensions if ( "valid_extensions" not in configuration or type(configuration["valid_extensions"]) != str or not configuration["valid_extensions"].strip() or not chronicle_validator.validate_valid_extensions( configuration["valid_extensions"] ) ): self.logger.error( "Chronicle Plugin: Validation error occurred. Error: " "Invalid extensions found in the configuration parameters." " Check heading names of Valid Extensions." ) return ValidationResult( success=False, message="Invalid extensions provided." ) return ValidationResult(success=True, message="Validation successful.") def _validate_auth(self, configuration: dict) -> ValidationResult: """Validate API key by making REST API call.""" try: response = requests.get( f"{configuration["base_url"].strip().strip("/")}/v1/logtypes", params={"key": configuration["api_key"].strip()}, headers=add_user_agent() ) response.raise_for_status() if response.status_code == 200 or response.status_code == 201: return ValidationResult( success=True, message="Validation successful." ) except Exception as ex: self.logger.error( "Chronicle: Could not validate authentication credentials." ) self.logger.error( re.sub(r"key=(.*?) 
", "key=******** ", str(repr(ex))) ) return ValidationResult( success=False, message="Error occurred while validating account credentials.", ) def push(self, transformed_data, data_type, subtype) -> PushResult: """Push the transformed_data to the 3rd party platform. Args: transformed_data (list): The transformed data to be ingested. data_type (str): The type of data to be ingested (alert/event) subtype (str): The subtype of data to be ingested \ (DLP, anomaly etc. in case of alerts) Returns: PushResult: Result indicating ingesting outcome and message """ logger = self.logger logger.info( "Chronicle Plugin: Starting Pushing data for Chronicle plugin." ) self.chronicle_client = ChronicleClient( self.configuration, self.logger ) try: self.chronicle_client._api_request(transformed_data) except Exception as e: self.logger.error(f"Error while pushing to Chronicle Plugin: {e}") raise logger.info( "Chronicle Plugin: Finished Pushing data for Chronicle plugin." ) return def get_mapping_value_from_json_path(self, data, json_path): """To Fetch the value from given JSON object using given JSON path. Args: data: JSON object from which the value is to be fetched json_path: JSON path indicating the path of the value in given JSON Returns: fetched value. """ return jsonpath(data, json_path) def get_mapping_value_from_field(self, data, field): """To Fetch the value from given field. Args: data: JSON object from which the value is to be fetched field: Field whose value is to be fetched Returns: fetched value. """ return ( data[field] if data[field] or isinstance(data[field], int) else "null" ) def get_subtype_mapping(self, mappings, subtype): """To Retrieve subtype mappings case insensitively. Args: mappings: Mapping JSON from which subtypes are to be retrieved subtype: Subtype (e.g. 
DLP for alerts) for which the mapping is to be fetched Returns: Fetched mapping JSON object """ mappings = {k.lower(): v for k, v in mappings.items()} if subtype.lower() in mappings: return mappings[subtype.lower()] else: return mappings[subtype.upper()] def get_headers(self, header_mappings, data, data_type, subtype): """To Create a dictionary of UDM headers from given header mappings. Args: subtype: Subtype for which the headers are being transformed data_type: Data type for which the headers are being transformed header_mappings: UDM header mapping with Netskope fields data: The alert/event for which the UDM header is being generated Returns: header dict """ headers = {} helper = AlertsHelper() tenant = helper.get_tenant_cls(self.source) mapping_variables = {"$tenant_name": tenant.name} try: headers[ "metadata.event_timestamp" ] = datetime.datetime.utcfromtimestamp( data.get("timestamp", time.time()) ).strftime( "%Y-%m-%dT%H:%M:%SZ" ) except Exception: raise missing_fields = [] # Iterate over mapped headers for udm_header, header_mapping in header_mappings.items(): try: headers[udm_header] = self.get_field_value_from_data( header_mapping, data, data_type, subtype, False ) # Handle variable mappings if ( isinstance(headers[udm_header], str) and headers[udm_header].lower() in mapping_variables ): headers[udm_header] = mapping_variables[ headers[udm_header].lower() ] except FieldNotFoundError as err: missing_fields.append(str(err)) return headers def get_extensions(self, extension_mappings, data, data_type, subtype): """Fetch extensions from given mappings. 
Args: subtype: Subtype for which the headers are being transformed data_type: Data type for which the headers are being transformed extension_mappings: Mapping of extensions data: The data to be transformed Returns: extensions (dict) """ extension = {} missing_fields = [] # Iterate over mapped extensions for udm_extension, extension_mapping in extension_mappings.items(): try: extension[udm_extension] = self.get_field_value_from_data( extension_mapping, data, data_type, subtype, is_json_path="is_json_path" in extension_mapping, ) except FieldNotFoundError as err: missing_fields.append(str(err)) return extension def get_field_value_from_data( self, extension_mapping, data, data_type, subtype, is_json_path=False ): """To Fetch the value of extension based on "mapping" and "default". Args: extension_mapping: Dict containing "mapping" and "default" fields data: Data instance retrieved from Netskope subtype: Subtype for which the extension are being transformed data_type: Data type for which the headers are being transformed is_json_path: Whether the mapped value is JSON path or direct field name Returns: Fetched values of extension --------------------------------------------------------------------- Mapping | Response | Retrieved Value ----------------------| | default | Mapping | | --------------------------------------------------------------------- P | P | P | Mapped P | P | NP | Default P | NP | P | Default NP | P | P | Mapped P | NP | NP | Default NP | P | NP | - NP | NP | P | - (Not possible) NP | NP | NP | - (Not possible) ----------------------------------------------------------------------- """ if ( "mapping_field" in extension_mapping and extension_mapping["mapping_field"] ): if is_json_path: # If mapping field specified by JSON path is present in data, # map that field, else skip by raising # exception: value = self.get_mapping_value_from_json_path( data, extension_mapping["mapping_field"] ) if value: return ",".join([str(val) for val in value]) else: raise 
FieldNotFoundError( extension_mapping["mapping_field"] ) else: # TODO: Add merging feild logic # If mapping is present in data, map that field, # else skip by raising exception field_list = extension_mapping["mapping_field"].split("-") if len(field_list) == 1: if ( extension_mapping["mapping_field"] in data ): # case #1 and case #4 return self.get_mapping_value_from_field( data, extension_mapping["mapping_field"] ) elif "default_value" in extension_mapping: # If mapped value is not found in response and default # is mapped, map the default value (case #2) return extension_mapping["default_value"] else: # case #6 raise FieldNotFoundError( extension_mapping["mapping_field"] ) out_list = [] for field in field_list: field = field.strip(" ") field = field.strip("[]") if field == "NULL": out_list.append("NULL") elif field in data: # case #1 and case #4 out_list.append( self.get_mapping_value_from_field(data, field) ) elif "default_value" in extension_mapping: # If mapped value is not found in response and default # is mapped, map the default value (case #2) return extension_mapping["default_value"] else: # case #6 raise FieldNotFoundError( extension_mapping["mapping_field"] ) return " - ".join(out_list) else: # If mapping is not present, 'default_value' must be there # because of validation (case #3 and case #5) return extension_mapping["default_value"] def transform(self, raw_data, data_type, subtype) -> List: """Transform the raw data into target platform supported data formats. Args: raw_data (list): The raw data to be tranformed. data_type (str): The type of data to be ingested (alert/event) subtype (str): The subtype of data to be ingested (DLP, anomaly etc. in case of alerts) Raises: NotImplementedError: If the method is not implemented. Returns: List: list of transformed data. """ try: udm_version, chronicle_mappings = get_chronicle_mappings( self.mappings, data_type ) except KeyError as err: self.logger.error( "Error in chronicle mapping file. 
Error: {}.".format(str(err)) ) raise except MappingValidationError as err: self.logger.error(str(err)) raise except Exception as err: self.logger.error( f"An error occurred while mapping data using given json " f"mappings. Error: {str(err)}." ) raise transformed_data = [] udm_generator = UDMGenerator( self.configuration["valid_extensions"], udm_version, self.logger, ) for data in raw_data: # First retrieve the mapping of subtype being transformed try: subtype_mapping = self.get_subtype_mapping( chronicle_mappings[data_type], subtype ) except Exception: self.logger.error( f"Error occurred while retrieving mappings for subtype" f" '{subtype}'. Transformation of current record will be" f" skipped." ) continue # Generating the UDM header try: header = self.get_headers( subtype_mapping["header"], data, data_type, subtype ) except Exception as err: self.logger.error( f"[{data_type}][{subtype}]: Error occurred while creating " f"UDM header: {str(err)}. Transformation of " f"current record will be skipped." ) continue try: extension = self.get_extensions( subtype_mapping["extension"], data, data_type, subtype ) except Exception as err: self.logger.error( f"[{data_type}][{subtype}]: Error occurred while creating" f" UDM extension: {str(err)}." f" Transformation of the current record will be skipped." ) continue try: transformed_data.append( udm_generator.get_udm_event( header, extension, data_type, subtype ) ) # pass except EmptyExtensionError: self.logger.error( "[{}][{}]: Got empty extension during transformation." "Transformation of current record will be skipped.".format( data_type, subtype ) ) except Exception as err: self.logger.error( "[{}][{}]: An error occurred during transformation." " Error: {}.".format(data_type, subtype, str(err)) ) return transformed_data
""" BSD 3-Clause License Copyright (c) 2021, Netskope OSS All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""" """Chronicle plugin.""" import time import datetime import requests import json import re from typing import List from jsonpath import jsonpath from netskope.common.utils import add_user_agent from netskope.common.utils import AlertsHelper from netskope.integrations.cls.plugin_base import ( PluginBase, ValidationResult, PushResult, ) from .utils.chronicle_client import ( ChronicleClient, ) from .utils.chronicle_helper import ( get_chronicle_mappings, ) from .utils.chronicle_udm_generator import ( # NOQA: E501 UDMGenerator, ) from .utils.chronicle_exceptions import ( MappingValidationError, EmptyExtensionError, FieldNotFoundError, ) from .utils.chronicle_validator import ( ChronicleValidator, ) class ChroniclePlugin(PluginBase): """The Chronicle plugin implementation class.""" def validate(self, configuration: dict) -> ValidationResult: """Validate the configuration parameters dict.""" chronicle_validator = ChronicleValidator(self.logger) if ( "base_url" not in configuration or type(configuration["base_url"]) != str or not configuration["base_url"].strip() ): self.logger.error( "Chronicle Plugin: Validation error occurred. Error: " "Invalid URL found in the configuration parameters." ) return ValidationResult( success=False, message="Invalid URL provided." ) # validating api key if ( "api_key" not in configuration or not configuration["api_key"].strip() or type(configuration["api_key"]) != str ): self.logger.error( "Plugin Chronicle: Validation error occurred. Error: \ Invalid API key found in the configuration parameters." ) return ValidationResult( success=False, message="Invalid API key provided." ) try: self._validate_auth(configuration) except Exception: self.logger.error( "Chronicle Plugin: Validation error occurred. Error: " "Connection to Chronicle platform is not established." ) return ValidationResult( success=False, message="Error occurred while establishing connection with Chronicle server. 
" "Make sure you have provided valid base url and API Token", ) # validating mapping file mappings = self.mappings.get("jsonData", None) try: mappings = json.loads(mappings) except json.decoder.JSONDecodeError as err: self.logger.error( f"Chronicle Plugin: error occurred decoding of json file: {err}" ) return ValidationResult( success=False, message=f"Invalid Chronicle attribute mapping provided. {err}", ) if type( mappings ) != dict or not chronicle_validator.validate_chronicle_map(mappings): self.logger.error( "Chronicle Plugin: Validation error occurred. Error: " "Invalid Chronicle attribute mapping found in the configuration parameters." ) return ValidationResult( success=False, message="Invalid Chronicle attribute mapping provided.", ) # validating valid extensions if ( "valid_extensions" not in configuration or type(configuration["valid_extensions"]) != str or not configuration["valid_extensions"].strip() or not chronicle_validator.validate_valid_extensions( configuration["valid_extensions"] ) ): self.logger.error( "Chronicle Plugin: Validation error occurred. Error: " "Invalid extensions found in the configuration parameters." " Check heading names of Valid Extensions." ) return ValidationResult( success=False, message="Invalid extensions provided." ) return ValidationResult(success=True, message="Validation successful.") def _validate_auth(self, configuration: dict) -> ValidationResult: """Validate API key by making REST API call.""" try: response = requests.get( f"{configuration['base_url'].strip().strip('/')}/v1/logtypes", params={"key": configuration["api_key"].strip()}, headers=add_user_agent() ) response.raise_for_status() if response.status_code == 200 or response.status_code == 201: return ValidationResult( success=True, message="Validation successful." ) except Exception as ex: self.logger.error( "Chronicle: Could not validate authentication credentials." ) self.logger.error( re.sub(r"key=(.*?) 
", "key=******** ", str(repr(ex))) ) return ValidationResult( success=False, message="Error occurred while validating account credentials.", ) def push(self, transformed_data, data_type, subtype) -> PushResult: """Push the transformed_data to the 3rd party platform. Args: transformed_data (list): The transformed data to be ingested. data_type (str): The type of data to be ingested (alert/event) subtype (str): The subtype of data to be ingested \ (DLP, anomaly etc. in case of alerts) Returns: PushResult: Result indicating ingesting outcome and message """ logger = self.logger logger.info( "Chronicle Plugin: Starting Pushing data for Chronicle plugin." ) self.chronicle_client = ChronicleClient( self.configuration, self.logger ) try: self.chronicle_client._api_request(transformed_data) except Exception as e: self.logger.error(f"Error while pushing to Chronicle Plugin: {e}") raise logger.info( "Chronicle Plugin: Finished Pushing data for Chronicle plugin." ) return def get_mapping_value_from_json_path(self, data, json_path): """To Fetch the value from given JSON object using given JSON path. Args: data: JSON object from which the value is to be fetched json_path: JSON path indicating the path of the value in given JSON Returns: fetched value. """ return jsonpath(data, json_path) def get_mapping_value_from_field(self, data, field): """To Fetch the value from given field. Args: data: JSON object from which the value is to be fetched field: Field whose value is to be fetched Returns: fetched value. """ return ( data[field] if data[field] or isinstance(data[field], int) else "null" ) def get_subtype_mapping(self, mappings, subtype): """To Retrieve subtype mappings case insensitively. Args: mappings: Mapping JSON from which subtypes are to be retrieved subtype: Subtype (e.g. 
DLP for alerts) for which the mapping is to be fetched Returns: Fetched mapping JSON object """ mappings = {k.lower(): v for k, v in mappings.items()} if subtype.lower() in mappings: return mappings[subtype.lower()] else: return mappings[subtype.upper()] def get_headers(self, header_mappings, data, data_type, subtype): """To Create a dictionary of UDM headers from given header mappings. Args: subtype: Subtype for which the headers are being transformed data_type: Data type for which the headers are being transformed header_mappings: UDM header mapping with Netskope fields data: The alert/event for which the UDM header is being generated Returns: header dict """ headers = {} helper = AlertsHelper() tenant = helper.get_tenant_cls(self.source) mapping_variables = {"$tenant_name": tenant.name} try: headers[ "metadata.event_timestamp" ] = datetime.datetime.utcfromtimestamp( data.get("timestamp", time.time()) ).strftime( "%Y-%m-%dT%H:%M:%SZ" ) except Exception: raise missing_fields = [] # Iterate over mapped headers for udm_header, header_mapping in header_mappings.items(): try: headers[udm_header] = self.get_field_value_from_data( header_mapping, data, data_type, subtype, False ) # Handle variable mappings if ( isinstance(headers[udm_header], str) and headers[udm_header].lower() in mapping_variables ): headers[udm_header] = mapping_variables[ headers[udm_header].lower() ] except FieldNotFoundError as err: missing_fields.append(str(err)) return headers def get_extensions(self, extension_mappings, data, data_type, subtype): """Fetch extensions from given mappings. 
Args: subtype: Subtype for which the headers are being transformed data_type: Data type for which the headers are being transformed extension_mappings: Mapping of extensions data: The data to be transformed Returns: extensions (dict) """ extension = {} missing_fields = [] # Iterate over mapped extensions for udm_extension, extension_mapping in extension_mappings.items(): try: extension[udm_extension] = self.get_field_value_from_data( extension_mapping, data, data_type, subtype, is_json_path="is_json_path" in extension_mapping, ) except FieldNotFoundError as err: missing_fields.append(str(err)) return extension def get_field_value_from_data( self, extension_mapping, data, data_type, subtype, is_json_path=False ): """To Fetch the value of extension based on "mapping" and "default". Args: extension_mapping: Dict containing "mapping" and "default" fields data: Data instance retrieved from Netskope subtype: Subtype for which the extension are being transformed data_type: Data type for which the headers are being transformed is_json_path: Whether the mapped value is JSON path or direct field name Returns: Fetched values of extension --------------------------------------------------------------------- Mapping | Response | Retrieved Value ----------------------| | default | Mapping | | --------------------------------------------------------------------- P | P | P | Mapped P | P | NP | Default P | NP | P | Default NP | P | P | Mapped P | NP | NP | Default NP | P | NP | - NP | NP | P | - (Not possible) NP | NP | NP | - (Not possible) ----------------------------------------------------------------------- """ if ( "mapping_field" in extension_mapping and extension_mapping["mapping_field"] ): if is_json_path: # If mapping field specified by JSON path is present in data, # map that field, else skip by raising # exception: value = self.get_mapping_value_from_json_path( data, extension_mapping["mapping_field"] ) if value: return ",".join([str(val) for val in value]) else: raise 
FieldNotFoundError( extension_mapping["mapping_field"] ) else: # TODO: Add merging feild logic # If mapping is present in data, map that field, # else skip by raising exception field_list = extension_mapping["mapping_field"].split("-") if len(field_list) == 1: if ( extension_mapping["mapping_field"] in data ): # case #1 and case #4 return self.get_mapping_value_from_field( data, extension_mapping["mapping_field"] ) elif "default_value" in extension_mapping: # If mapped value is not found in response and default # is mapped, map the default value (case #2) return extension_mapping["default_value"] else: # case #6 raise FieldNotFoundError( extension_mapping["mapping_field"] ) out_list = [] for field in field_list: field = field.strip(" ") field = field.strip("[]") if field == "NULL": out_list.append("NULL") elif field in data: # case #1 and case #4 out_list.append( self.get_mapping_value_from_field(data, field) ) elif "default_value" in extension_mapping: # If mapped value is not found in response and default # is mapped, map the default value (case #2) return extension_mapping["default_value"] else: # case #6 raise FieldNotFoundError( extension_mapping["mapping_field"] ) return " - ".join(out_list) else: # If mapping is not present, 'default_value' must be there # because of validation (case #3 and case #5) return extension_mapping["default_value"] def transform(self, raw_data, data_type, subtype) -> List: """Transform the raw data into target platform supported data formats. Args: raw_data (list): The raw data to be tranformed. data_type (str): The type of data to be ingested (alert/event) subtype (str): The subtype of data to be ingested (DLP, anomaly etc. in case of alerts) Raises: NotImplementedError: If the method is not implemented. Returns: List: list of transformed data. """ try: udm_version, chronicle_mappings = get_chronicle_mappings( self.mappings, data_type ) except KeyError as err: self.logger.error( "Error in chronicle mapping file. 
Error: {}.".format(str(err)) ) raise except MappingValidationError as err: self.logger.error(str(err)) raise except Exception as err: self.logger.error( f"An error occurred while mapping data using given json " f"mappings. Error: {str(err)}." ) raise transformed_data = [] udm_generator = UDMGenerator( self.configuration["valid_extensions"], udm_version, self.logger, ) for data in raw_data: # First retrieve the mapping of subtype being transformed try: subtype_mapping = self.get_subtype_mapping( chronicle_mappings[data_type], subtype ) except Exception: self.logger.error( f"Error occurred while retrieving mappings for subtype" f" '{subtype}'. Transformation of current record will be" f" skipped." ) continue # Generating the UDM header try: header = self.get_headers( subtype_mapping["header"], data, data_type, subtype ) except Exception as err: self.logger.error( f"[{data_type}][{subtype}]: Error occurred while creating " f"UDM header: {str(err)}. Transformation of " f"current record will be skipped." ) continue try: extension = self.get_extensions( subtype_mapping["extension"], data, data_type, subtype ) except Exception as err: self.logger.error( f"[{data_type}][{subtype}]: Error occurred while creating" f" UDM extension: {str(err)}." f" Transformation of the current record will be skipped." ) continue try: transformed_data.append( udm_generator.get_udm_event( header, extension, data_type, subtype ) ) # pass except EmptyExtensionError: self.logger.error( "[{}][{}]: Got empty extension during transformation." "Transformation of current record will be skipped.".format( data_type, subtype ) ) except Exception as err: self.logger.error( "[{}][{}]: An error occurred during transformation." " Error: {}.".format(data_type, subtype, str(err)) ) return transformed_data
""" Digirule assembler support. :author: Athanasios Anastasiou :date: June 2020 """ from .digirule import Digirule from .exceptions import DgtoolsErrorSymbolAlreadyDefined, DgtoolsErrorSymbolUndefined, DgtoolsErrorASMSyntaxError import pyparsing import functools class DgAssembler: def __init__(self, digirule_cls): if not issubclass(digirule_cls, Digirule): raise TypeError(f"Expected Digirule, received {type(digirule_cls)}") # Action functions to convert valid string literals to numbers char2num = lambda toks:ord(toks[0][1:-1]) & 0xFF uchar2num = lambda toks:int(toks[0]) & 0xFF buchar2num = lambda toks:int(toks[0],2) & 0xFF xuchar2num = lambda toks:int(toks[0],16) & 0xFF # An identifier for labels and symbols. It must be at least 1 character, start with a letter or number and # can include the underscore. identifier = pyparsing.Regex(r"[a-zA-Z_][a-zA-Z0-9_]*") # A literal can be: # * An integer (4, -14,52), # * A binary number (0b100, 0b1110, 0b110100) # * A hexadecimal number (0x4, 0x0E, 0x34) # * A single character ("A","J","8", anything from space to tilde on the ascii table). literal_char = pyparsing.Regex(r"\"[ -~]\"|'[ -~]'").setParseAction(char2num) # TODO: LOW, Rename uchar, as it is not a uchar anymore. This is a remnant. literal_uchar = pyparsing.Regex(r"[-]?[0-9][0-9]?[0-9]?").setParseAction(uchar2num) literal_buchar = pyparsing.Regex(r"0b[0|1]+").setParseAction(buchar2num) literal_xuchar = pyparsing.Regex(r"0x[0-9A-F][0-9A-F]?").setParseAction(xuchar2num) literal = literal_char ^ literal_uchar ^ literal_buchar ^ literal_xuchar # Opcodes can accept literals or identifiers (.EQU or labels) as opcodes. 
literal_or_identifier = pyparsing.Group(literal("literal") ^ identifier("symbol"))("value_type") existing_defs = {"identifier":identifier, "literal_char":literal_char, "literal_uchar":literal_uchar, "literal_buchar":literal_buchar, "literal_xuchar":literal_xuchar, "literal":literal, "literal_or_identifier":literal_or_identifier} # Existing defs are passed down in case the digirule ASM code needs to specialise instructions asm_statement = digirule_cls.get_asm_statement_def(existing_defs) # Assembler directives # label: Defines a label dir_label = pyparsing.Group(identifier("idf") + pyparsing.Suppress(":"))("def_label") # .DB A static coma delimited list of byte defs # H = lambda x:[((x >> (k*8)) & 0xFF) for k in range(math.ceil(math.log2(abs(x))/8)-1,-1,-1)] dir_db_str = pyparsing.quotedString().setParseAction(lambda s,loc,tok:[(ord(u) & 0xFF) for u in tok[0][1:-1]]) dir_db_values = pyparsing.delimitedList(pyparsing.Group(literal("literal") ^ \ identifier("symbol") ^ \ dir_db_str("string"))) dir_db = pyparsing.Group(pyparsing.Regex(".DB")("cmd") + dir_db_values("values"))("def_db") # .EQU A "symbol" (that in the future would be able to evaluate to a proper macro. dir_equ = pyparsing.Group(pyparsing.Regex(".EQU")("cmd") + \ identifier("idf") + \ pyparsing.Suppress("=") + \ literal("value"))("def_equ") # A directive statement dir_statement = pyparsing.Group(dir_label ^ dir_db ^ dir_equ) # Comments # A line of ASM code is either a comment or code with an optional inline comment prog_or_dir_statement = pyparsing.Group(asm_statement ^ dir_statement)("prog_dir_statement") dir_comment = pyparsing.Group(pyparsing.Suppress("#") + pyparsing.restOfLine("text"))("def_comment") dir_code_comment = pyparsing.Group(dir_comment ^ (prog_or_dir_statement + pyparsing.Optional(dir_comment))) program = pyparsing.OneOrMore(dir_code_comment) # In the end, ignore the comments. 
program.ignore(dir_comment) self._parser = program def text_to_ast(self, asm_code_text): try: parsed_code = self._parser.parseString(asm_code_text, parseAll=True) except pyparsing.ParseException as e: raise DgtoolsErrorASMSyntaxError(f"line {e.lineno}, col {e.col}: " f" {e.line}: " f"Syntax Error: {e.args[2]}") return parsed_code def asm_ast_to_obj(self, parsed_code): """ Transforms the parsed AST to a binary for the Digirule target architecture :param asm: Parsed ASM text, EXCLUDING COMMENT tags. :type asm: list<pyparsing.ParseElement> :returns: A dictionary of compiled code, symbols and variable offsets or the parsexception at failure :rtype: dict<"program":list<uint8>, "labels":dict<str, int>>, "symbols":dict<str,int>>, pyparsing.ParseException """ mem = [] labels = {} symbols = {} # Read through the code and load it to memory # While doing that, keep track of where labels and symbols appear. These will be substituted # in the second pass. for a_line in parsed_code: command, arguments = list(a_line["prog_dir_statement"][0].items())[0] if command == "def_label": # Tie the label to where it points to if arguments["idf"] not in labels: labels[arguments["idf"]] = len(mem) else: raise DgtoolsErrorSymbolAlreadyDefined(f"Label {arguments["idf"]} is getting redefined.") elif command == "def_db": # .DB defines raw data that are simply dumped where they appear. If a label is not set to a # data block, it cannot be referenced. #value_data = list(map(lambda x:x[0] if "string" not in x else [u for u in x],arguments["values"])) #import pdb #pdb.set_trace() #mem.extend(value_data) for a_val in arguments["values"]: mem.extend(a_val) elif command == "def_equ": if arguments["idf"] not in symbols: symbols[arguments["idf"]] = arguments["value"] else: raise DgtoolsErrorSymbolAlreadyDefined(f"Symbol {arguments["idf"]} is getting redefined") else: # It's an instruction. 
The opcode of the instruction has already been recognised, # but we need to grab the operands wherever they are available inst_data = command.split(":") instruction_code = int(inst_data[0]) instruction_num_op = int(inst_data[1]) mem.append(instruction_code) mem.extend(list(map(lambda x:x[0], arguments[1:(1+instruction_num_op)]))) # The first pass produces an intermediate object that still contains symbolic references. # This second pass here substitutes those references and produces the final object. symbol_offsets = {} subst_entries = filter(lambda x:type(x[1]) is str, enumerate(mem)) for an_entry in subst_entries: if an_entry[1] in labels: mem[an_entry[0]] = labels[an_entry[1]] elif an_entry[1] in symbols: # Note where the symbol is used if an_entry[1] not in symbol_offsets: symbol_offsets[an_entry[1]] = [] if an_entry[0] not in symbol_offsets[an_entry[1]]: symbol_offsets[an_entry[1]].append(an_entry[0]) # Make the substitution mem[an_entry[0]] = symbols[an_entry[1]] else: raise DgtoolsErrorSymbolUndefined(f"Symbol {an_entry[1]} not found.") return {"program":mem, "labels":labels}
""" Digirule assembler support. :author: Athanasios Anastasiou :date: June 2020 """ from .digirule import Digirule from .exceptions import DgtoolsErrorSymbolAlreadyDefined, DgtoolsErrorSymbolUndefined, DgtoolsErrorASMSyntaxError import pyparsing import functools class DgAssembler: def __init__(self, digirule_cls): if not issubclass(digirule_cls, Digirule): raise TypeError(f"Expected Digirule, received {type(digirule_cls)}") # Action functions to convert valid string literals to numbers char2num = lambda toks:ord(toks[0][1:-1]) & 0xFF uchar2num = lambda toks:int(toks[0]) & 0xFF buchar2num = lambda toks:int(toks[0],2) & 0xFF xuchar2num = lambda toks:int(toks[0],16) & 0xFF # An identifier for labels and symbols. It must be at least 1 character, start with a letter or number and # can include the underscore. identifier = pyparsing.Regex(r"[a-zA-Z_][a-zA-Z0-9_]*") # A literal can be: # * An integer (4, -14,52), # * A binary number (0b100, 0b1110, 0b110100) # * A hexadecimal number (0x4, 0x0E, 0x34) # * A single character ("A","J","8", anything from space to tilde on the ascii table). literal_char = pyparsing.Regex(r"\"[ -~]\"|'[ -~]'").setParseAction(char2num) # TODO: LOW, Rename uchar, as it is not a uchar anymore. This is a remnant. literal_uchar = pyparsing.Regex(r"[-]?[0-9][0-9]?[0-9]?").setParseAction(uchar2num) literal_buchar = pyparsing.Regex(r"0b[0|1]+").setParseAction(buchar2num) literal_xuchar = pyparsing.Regex(r"0x[0-9A-F][0-9A-F]?").setParseAction(xuchar2num) literal = literal_char ^ literal_uchar ^ literal_buchar ^ literal_xuchar # Opcodes can accept literals or identifiers (.EQU or labels) as opcodes. 
literal_or_identifier = pyparsing.Group(literal("literal") ^ identifier("symbol"))("value_type") existing_defs = {"identifier":identifier, "literal_char":literal_char, "literal_uchar":literal_uchar, "literal_buchar":literal_buchar, "literal_xuchar":literal_xuchar, "literal":literal, "literal_or_identifier":literal_or_identifier} # Existing defs are passed down in case the digirule ASM code needs to specialise instructions asm_statement = digirule_cls.get_asm_statement_def(existing_defs) # Assembler directives # label: Defines a label dir_label = pyparsing.Group(identifier("idf") + pyparsing.Suppress(":"))("def_label") # .DB A static coma delimited list of byte defs # H = lambda x:[((x >> (k*8)) & 0xFF) for k in range(math.ceil(math.log2(abs(x))/8)-1,-1,-1)] dir_db_str = pyparsing.quotedString().setParseAction(lambda s,loc,tok:[(ord(u) & 0xFF) for u in tok[0][1:-1]]) dir_db_values = pyparsing.delimitedList(pyparsing.Group(literal("literal") ^ \ identifier("symbol") ^ \ dir_db_str("string"))) dir_db = pyparsing.Group(pyparsing.Regex(".DB")("cmd") + dir_db_values("values"))("def_db") # .EQU A "symbol" (that in the future would be able to evaluate to a proper macro. dir_equ = pyparsing.Group(pyparsing.Regex(".EQU")("cmd") + \ identifier("idf") + \ pyparsing.Suppress("=") + \ literal("value"))("def_equ") # A directive statement dir_statement = pyparsing.Group(dir_label ^ dir_db ^ dir_equ) # Comments # A line of ASM code is either a comment or code with an optional inline comment prog_or_dir_statement = pyparsing.Group(asm_statement ^ dir_statement)("prog_dir_statement") dir_comment = pyparsing.Group(pyparsing.Suppress("#") + pyparsing.restOfLine("text"))("def_comment") dir_code_comment = pyparsing.Group(dir_comment ^ (prog_or_dir_statement + pyparsing.Optional(dir_comment))) program = pyparsing.OneOrMore(dir_code_comment) # In the end, ignore the comments. 
program.ignore(dir_comment) self._parser = program def text_to_ast(self, asm_code_text): try: parsed_code = self._parser.parseString(asm_code_text, parseAll=True) except pyparsing.ParseException as e: raise DgtoolsErrorASMSyntaxError(f"line {e.lineno}, col {e.col}: " f" {e.line}: " f"Syntax Error: {e.args[2]}") return parsed_code def asm_ast_to_obj(self, parsed_code): """ Transforms the parsed AST to a binary for the Digirule target architecture :param asm: Parsed ASM text, EXCLUDING COMMENT tags. :type asm: list<pyparsing.ParseElement> :returns: A dictionary of compiled code, symbols and variable offsets or the parsexception at failure :rtype: dict<"program":list<uint8>, "labels":dict<str, int>>, "symbols":dict<str,int>>, pyparsing.ParseException """ mem = [] labels = {} symbols = {} # Read through the code and load it to memory # While doing that, keep track of where labels and symbols appear. These will be substituted # in the second pass. for a_line in parsed_code: command, arguments = list(a_line["prog_dir_statement"][0].items())[0] if command == "def_label": # Tie the label to where it points to if arguments["idf"] not in labels: labels[arguments["idf"]] = len(mem) else: raise DgtoolsErrorSymbolAlreadyDefined(f"Label {arguments['idf']} is getting redefined.") elif command == "def_db": # .DB defines raw data that are simply dumped where they appear. If a label is not set to a # data block, it cannot be referenced. #value_data = list(map(lambda x:x[0] if "string" not in x else [u for u in x],arguments["values"])) #import pdb #pdb.set_trace() #mem.extend(value_data) for a_val in arguments["values"]: mem.extend(a_val) elif command == "def_equ": if arguments["idf"] not in symbols: symbols[arguments["idf"]] = arguments["value"] else: raise DgtoolsErrorSymbolAlreadyDefined(f"Symbol {arguments['idf']} is getting redefined") else: # It's an instruction. 
The opcode of the instruction has already been recognised, # but we need to grab the operands wherever they are available inst_data = command.split(":") instruction_code = int(inst_data[0]) instruction_num_op = int(inst_data[1]) mem.append(instruction_code) mem.extend(list(map(lambda x:x[0], arguments[1:(1+instruction_num_op)]))) # The first pass produces an intermediate object that still contains symbolic references. # This second pass here substitutes those references and produces the final object. symbol_offsets = {} subst_entries = filter(lambda x:type(x[1]) is str, enumerate(mem)) for an_entry in subst_entries: if an_entry[1] in labels: mem[an_entry[0]] = labels[an_entry[1]] elif an_entry[1] in symbols: # Note where the symbol is used if an_entry[1] not in symbol_offsets: symbol_offsets[an_entry[1]] = [] if an_entry[0] not in symbol_offsets[an_entry[1]]: symbol_offsets[an_entry[1]].append(an_entry[0]) # Make the substitution mem[an_entry[0]] = symbols[an_entry[1]] else: raise DgtoolsErrorSymbolUndefined(f"Symbol {an_entry[1]} not found.") return {"program":mem, "labels":labels}
import numpy as np
import pandas as pd
import json
import requests
from shapely.geometry import LineString
from sklearn.cluster import KMeans
import time
import os

from geocoding.config import Config


def query_api(query, fpath):
    """
    Queries Overpass API for *query* and dumps the JSON response to *fpath*.

    Args:
        query (str): The query to be passed to API
        fpath (str): File path to write the API response

    Returns:
        int: 0 on success, 1 when the response could not be decoded as JSON
            (caller is expected to retry with a greater timeout)
    """
    status = 0
    overpass_url = 'http://overpass-api.de/api/interpreter'
    try:
        response = requests.get(overpass_url, params={'data': query}).json()
        with open(fpath, 'w') as f:
            json.dump(response, f)
    except ValueError:
        # Overpass returns a non-JSON body (e.g. an HTML error page) when the
        # query timed out or the server is overloaded.
        print('Overpass api error: Trying again with a greater timeout...')
        time.sleep(3)
        status = 1
    return status


def parse_streets(fpath):
    """
    Parses the API response from *fpath* and converts it to a dataframe.

    Args:
        fpath (str): File path to read

    Returns:
        pandas.DataFrame: Contains all streets as well as their geometries,
            or None when the response contains no elements
    """
    # Helper function
    def convert_to_wkt_geometry(row):
        # Degenerate ways (fewer than 2 points) map to None and are
        # removed by the dropna() below.
        lons = [p['lon'] for p in row['geometry']]
        lats = [p['lat'] for p in row['geometry']]
        if len(lons) < 2 or len(lats) < 2:
            return None
        return LineString(list(zip(lons, lats)))

    with open(fpath, encoding='utf-8') as f:
        streets = json.load(f)['elements']
    if not streets:
        return None

    data = [(street['id'], street['geometry']) for street in streets]
    cols = ['id', 'geometry']
    street_df = pd.DataFrame(data=data, columns=cols)
    street_df['geometry'] = street_df.apply(convert_to_wkt_geometry, axis=1)
    street_df = street_df.dropna()
    return street_df


def extract_streets(points, path):
    """
    A wrapper function that administrates the streets download.

    Args:
        points (numpy.ndarray): Contains the data points that define the area
            to extract from Overpass API
        path (str): Path to write

    Returns:
        None
    """
    labels = cluster_points(points)
    clusters_bboxes = get_clusters_bboxes(points, labels)
    # Scratch file for the raw API responses, reused for every bbox.
    tmp_json_path = os.path.join(path, 'osm_streets.json')
    street_dfs = []
    for cluster, bbox in clusters_bboxes.items():
        print('Getting bbox', cluster + 1, 'out of', len(clusters_bboxes))
        cell_street_df = download_cell(bbox, tmp_json_path)
        if cell_street_df is not None:
            print('Number of streets:', len(cell_street_df))
            street_dfs.append(cell_street_df)
        else:
            print('Number of streets:', 0)
    # delete the scratch file
    if os.path.exists(tmp_json_path):
        os.remove(tmp_json_path)
    street_df = pd.concat(street_dfs, ignore_index=True)
    street_df.drop_duplicates(subset='id', inplace=True)
    # FIX: this was f'{os.path.join(path, 'osm_streets.csv')}' — nested single
    # quotes inside a single-quoted f-string are a SyntaxError before
    # Python 3.12, and the f-string wrapper was redundant anyway.
    street_df.to_csv(os.path.join(path, 'osm_streets.csv'),
                     columns=['id', 'geometry'], index=False)
    print(f'Extracted {len(street_df.index)} unique streets')


def download_cell(cell, fpath):
    """
    Downloads *cell* from Overpass API, writes results in *fpath* and then
    parses them into a pandas.DataFrame.

    Args:
        cell (list): Contains the bounding box coords
        fpath (str): Path to write results and then to read from in order to
            parse them

    Returns:
        pandas.DataFrame: Contains all street elements included in *cell*
    """
    west, south, east, north = cell
    counter = 0
    status = 1
    # Retry with a progressively larger timeout until the query succeeds or
    # the retry budget (Config.max_overpass_tries) is exhausted.
    while status and (counter < Config.max_overpass_tries):
        counter += 1
        query = (
            f'[out:json][timeout:{Config.osm_timeout * counter}];'
            f'way["highway"]["highway"!~"^(cycleway)$"]'
            f'({south},{west},{north},{east});'
            'out geom;')
        status = query_api(query, fpath)
    if status:
        print('Overpass api error: Exiting.')
        exit()
    return parse_streets(fpath)


def cluster_points(X):
    """
    Clusters points given in *X*.

    Args:
        X (numpy.ndarray): Contains the points to be clustered

    Returns:
        numpy.ndarray: The predicted clusters labels
    """
    n_clusters = int(Config.clusters_pct * X.shape[0])
    kmeans = KMeans(
        n_clusters=n_clusters,
        random_state=Config.seed_no,
        n_init=20,
        max_iter=500,
    ).fit(X)
    labels = kmeans.predict(X)
    return labels


def get_clusters_bboxes(X, labels):
    """
    Extracts a bounding box for each one of the clusters.

    Args:
        X (numpy.ndarray): Contains the clustered points
        labels (numpy.ndarray): Contains the cluster label for each point in
            *X*

    Returns:
        dict: Contains the cluster labels as keys and the corresponding
            bounding box as values
    """
    bboxes = {}
    for i in range(len(set(labels))):
        # Renamed from 'cluster_points' to avoid shadowing the module-level
        # cluster_points() function.
        pts = np.vstack([p for j, p in enumerate(X) if labels[j] == i])
        xmin, ymin = pts.min(axis=0) - Config.osm_buffer
        xmax, ymax = pts.max(axis=0) + Config.osm_buffer
        bboxes[i] = [xmin, ymin, xmax, ymax]
    return bboxes
import numpy as np
import pandas as pd
import json
import requests
from shapely.geometry import LineString
from sklearn.cluster import KMeans
import time
import os

from geocoding.config import Config


def query_api(query, fpath):
    """
    Queries Overpass API for *query* and dumps the JSON response to *fpath*.

    Args:
        query (str): The query to be passed to API
        fpath (str): File path to write the API response

    Returns:
        int: 0 on success, 1 when the response could not be decoded as JSON
            (caller is expected to retry with a greater timeout)
    """
    status = 0
    overpass_url = 'http://overpass-api.de/api/interpreter'
    try:
        response = requests.get(overpass_url, params={'data': query}).json()
        with open(fpath, 'w') as f:
            json.dump(response, f)
    except ValueError:
        # Overpass returns a non-JSON body (e.g. an HTML error page) when the
        # query timed out or the server is overloaded.
        print('Overpass api error: Trying again with a greater timeout...')
        time.sleep(3)
        status = 1
    return status


def parse_streets(fpath):
    """
    Parses the API response from *fpath* and converts it to a dataframe.

    Args:
        fpath (str): File path to read

    Returns:
        pandas.DataFrame: Contains all streets as well as their geometries,
            or None when the response contains no elements
    """
    # Helper function
    def convert_to_wkt_geometry(row):
        # Degenerate ways (fewer than 2 points) map to None and are
        # removed by the dropna() below.
        lons = [p['lon'] for p in row['geometry']]
        lats = [p['lat'] for p in row['geometry']]
        if len(lons) < 2 or len(lats) < 2:
            return None
        return LineString(list(zip(lons, lats)))

    with open(fpath, encoding='utf-8') as f:
        streets = json.load(f)['elements']
    if not streets:
        return None

    data = [(street['id'], street['geometry']) for street in streets]
    cols = ['id', 'geometry']
    street_df = pd.DataFrame(data=data, columns=cols)
    street_df['geometry'] = street_df.apply(convert_to_wkt_geometry, axis=1)
    street_df = street_df.dropna()
    return street_df


def extract_streets(points, path):
    """
    A wrapper function that administrates the streets download.

    Args:
        points (numpy.ndarray): Contains the data points that define the area
            to extract from Overpass API
        path (str): Path to write

    Returns:
        None
    """
    labels = cluster_points(points)
    clusters_bboxes = get_clusters_bboxes(points, labels)
    # Scratch file for the raw API responses, computed once instead of three
    # times as before.
    tmp_json_path = os.path.join(path, "osm_streets.json")
    street_dfs = []
    for cluster, bbox in clusters_bboxes.items():
        print('Getting bbox', cluster + 1, 'out of', len(clusters_bboxes))
        cell_street_df = download_cell(bbox, tmp_json_path)
        if cell_street_df is not None:
            print('Number of streets:', len(cell_street_df))
            street_dfs.append(cell_street_df)
        else:
            print('Number of streets:', 0)
    # delete the scratch file
    if os.path.exists(tmp_json_path):
        os.remove(tmp_json_path)
    street_df = pd.concat(street_dfs, ignore_index=True)
    street_df.drop_duplicates(subset='id', inplace=True)
    # FIX: dropped the redundant f-string wrapper around os.path.join.
    street_df.to_csv(os.path.join(path, "osm_streets.csv"),
                     columns=['id', 'geometry'], index=False)
    print(f'Extracted {len(street_df.index)} unique streets')


def download_cell(cell, fpath):
    """
    Downloads *cell* from Overpass API, writes results in *fpath* and then
    parses them into a pandas.DataFrame.

    Args:
        cell (list): Contains the bounding box coords
        fpath (str): Path to write results and then to read from in order to
            parse them

    Returns:
        pandas.DataFrame: Contains all street elements included in *cell*
    """
    west, south, east, north = cell
    counter = 0
    status = 1
    # Retry with a progressively larger timeout until the query succeeds or
    # the retry budget (Config.max_overpass_tries) is exhausted.
    while status and (counter < Config.max_overpass_tries):
        counter += 1
        query = (
            f'[out:json][timeout:{Config.osm_timeout * counter}];'
            f'way["highway"]["highway"!~"^(cycleway)$"]'
            f'({south},{west},{north},{east});'
            'out geom;')
        status = query_api(query, fpath)
    if status:
        print('Overpass api error: Exiting.')
        exit()
    return parse_streets(fpath)


def cluster_points(X):
    """
    Clusters points given in *X*.

    Args:
        X (numpy.ndarray): Contains the points to be clustered

    Returns:
        numpy.ndarray: The predicted clusters labels
    """
    n_clusters = int(Config.clusters_pct * X.shape[0])
    kmeans = KMeans(
        n_clusters=n_clusters,
        random_state=Config.seed_no,
        n_init=20,
        max_iter=500,
    ).fit(X)
    labels = kmeans.predict(X)
    return labels


def get_clusters_bboxes(X, labels):
    """
    Extracts a bounding box for each one of the clusters.

    Args:
        X (numpy.ndarray): Contains the clustered points
        labels (numpy.ndarray): Contains the cluster label for each point in
            *X*

    Returns:
        dict: Contains the cluster labels as keys and the corresponding
            bounding box as values
    """
    bboxes = {}
    for i in range(len(set(labels))):
        # Renamed from 'cluster_points' to avoid shadowing the module-level
        # cluster_points() function.
        pts = np.vstack([p for j, p in enumerate(X) if labels[j] == i])
        xmin, ymin = pts.min(axis=0) - Config.osm_buffer
        xmax, ymax = pts.max(axis=0) + Config.osm_buffer
        bboxes[i] = [xmin, ymin, xmax, ymax]
    return bboxes
import logging
import smtplib
import urllib.parse
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

from itsdangerous import URLSafeTimedSerializer
from flask import current_app

from server.services.messaging.template_service import get_template


class SMTPService:
    """Sends templated verification and notification emails over SMTP."""

    @staticmethod
    def send_verification_email(to_address: str, username: str):
        """ Sends a verification email with a unique token so we can verify user owns this email address """
        # TODO these could be localised if needed, in the future
        html_template = get_template("email_verification_en.html")
        text_template = get_template("email_verification_en.txt")

        verification_url = SMTPService._generate_email_verification_url(
            to_address, username
        )

        # NOTE(review): "[VEFIFICATION_LINK]" is misspelled, but it must match
        # the placeholder used in the template files — do not "fix" one side
        # without the other.
        html_template = html_template.replace("[USERNAME]", username)
        html_template = html_template.replace("[VEFIFICATION_LINK]", verification_url)
        text_template = text_template.replace("[USERNAME]", username)
        text_template = text_template.replace("[VEFIFICATION_LINK]", verification_url)

        subject = "HOT Tasking Manager - Email Verification"
        SMTPService._send_mesage(to_address, subject, html_template, text_template)

        return True

    @staticmethod
    def send_email_alert(to_address: str, username: str):
        """ Send an email to user to alert them they have a new message"""
        current_app.logger.debug(f"Test if email required {to_address}")
        if not to_address:
            return False  # Many users will not have supplied email address so return

        # TODO these could be localised if needed, in the future
        html_template = get_template("message_alert_en.html")
        text_template = get_template("message_alert_en.txt")

        # FIX: the original nested double quotes inside a double-quoted
        # f-string (f"{current_app.config["APP_BASE_URL"]}/inbox"), which is a
        # SyntaxError before Python 3.12.
        inbox_url = f"{current_app.config['APP_BASE_URL']}/inbox"

        html_template = html_template.replace("[USERNAME]", username)
        html_template = html_template.replace("[PROFILE_LINK]", inbox_url)
        text_template = text_template.replace("[USERNAME]", username)
        text_template = text_template.replace("[PROFILE_LINK]", inbox_url)

        subject = "You have a new message on the HOT Tasking Manager"
        SMTPService._send_mesage(to_address, subject, html_template, text_template)

        return True

    @staticmethod
    def _send_mesage(
        to_address: str, subject: str, html_message: str, text_message: str
    ):
        """ Helper sends SMTP message """
        # NOTE(review): method name keeps its historical "mesage" typo so any
        # existing external callers are not broken.
        from_address = current_app.config["EMAIL_FROM_ADDRESS"]
        msg = MIMEMultipart("alternative")
        msg["Subject"] = subject
        msg["From"] = from_address
        msg["To"] = to_address

        # Record the MIME types of both parts - text/plain and text/html.
        part1 = MIMEText(text_message, "plain")
        part2 = MIMEText(html_message, "html")
        msg.attach(part1)
        msg.attach(part2)

        current_app.logger.debug(f"Sending email via SMTP {to_address}")
        if current_app.config["LOG_LEVEL"] == logging.DEBUG:
            # In debug mode we only log the message instead of sending it.
            current_app.logger.debug(msg.as_string())
        else:
            sender = SMTPService._init_smtp_client()
            # FIX: close the SMTP connection even when sendmail raises — the
            # original leaked the connection on failure.
            try:
                sender.sendmail(from_address, to_address, msg.as_string())
            finally:
                sender.quit()
        current_app.logger.debug(f"Email sent {to_address}")

    @staticmethod
    def _init_smtp_client():
        """ Initialise SMTP client from app settings """
        smtp_settings = current_app.config["SMTP_SETTINGS"]
        sender = smtplib.SMTP(smtp_settings["host"], port=smtp_settings["smtp_port"])
        sender.starttls()
        sender.login(smtp_settings["smtp_user"], smtp_settings["smtp_password"])
        return sender

    @staticmethod
    def _generate_email_verification_url(email_address: str, user_name: str):
        """ Generate email verification url with unique token """
        # NOTE(review): falls back to a hard-coded signing key when no secret
        # key is configured — acceptable only in test mode; confirm this can
        # never happen in production.
        entropy = current_app.secret_key if current_app.secret_key else "un1testingmode"

        serializer = URLSafeTimedSerializer(entropy)
        token = serializer.dumps(email_address.lower())

        base_url = current_app.config["APP_BASE_URL"]
        verification_params = {"token": token, "username": user_name}
        verification_url = "{0}/api/auth/email?{1}".format(
            base_url, urllib.parse.urlencode(verification_params)
        )
        return verification_url
import logging
import smtplib
import urllib.parse
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

from itsdangerous import URLSafeTimedSerializer
from flask import current_app

from server.services.messaging.template_service import get_template


class SMTPService:
    """Sends templated verification and notification emails over SMTP."""

    @staticmethod
    def send_verification_email(to_address: str, username: str):
        """ Sends a verification email with a unique token so we can verify user owns this email address """
        # TODO these could be localised if needed, in the future
        html_template = get_template("email_verification_en.html")
        text_template = get_template("email_verification_en.txt")

        verification_url = SMTPService._generate_email_verification_url(
            to_address, username
        )

        # NOTE(review): "[VEFIFICATION_LINK]" is misspelled, but it must match
        # the placeholder used in the template files — do not "fix" one side
        # without the other.
        html_template = html_template.replace("[USERNAME]", username)
        html_template = html_template.replace("[VEFIFICATION_LINK]", verification_url)
        text_template = text_template.replace("[USERNAME]", username)
        text_template = text_template.replace("[VEFIFICATION_LINK]", verification_url)

        subject = "HOT Tasking Manager - Email Verification"
        SMTPService._send_mesage(to_address, subject, html_template, text_template)

        return True

    @staticmethod
    def send_email_alert(to_address: str, username: str):
        """ Send an email to user to alert them they have a new message"""
        current_app.logger.debug(f"Test if email required {to_address}")
        if not to_address:
            return False  # Many users will not have supplied email address so return

        # TODO these could be localised if needed, in the future
        html_template = get_template("message_alert_en.html")
        text_template = get_template("message_alert_en.txt")

        inbox_url = f"{current_app.config['APP_BASE_URL']}/inbox"

        html_template = html_template.replace("[USERNAME]", username)
        html_template = html_template.replace("[PROFILE_LINK]", inbox_url)
        text_template = text_template.replace("[USERNAME]", username)
        text_template = text_template.replace("[PROFILE_LINK]", inbox_url)

        subject = "You have a new message on the HOT Tasking Manager"
        SMTPService._send_mesage(to_address, subject, html_template, text_template)

        return True

    @staticmethod
    def _send_mesage(
        to_address: str, subject: str, html_message: str, text_message: str
    ):
        """ Helper sends SMTP message """
        # NOTE(review): method name keeps its historical "mesage" typo so any
        # existing external callers are not broken.
        from_address = current_app.config["EMAIL_FROM_ADDRESS"]
        msg = MIMEMultipart("alternative")
        msg["Subject"] = subject
        msg["From"] = from_address
        msg["To"] = to_address

        # Record the MIME types of both parts - text/plain and text/html.
        part1 = MIMEText(text_message, "plain")
        part2 = MIMEText(html_message, "html")
        msg.attach(part1)
        msg.attach(part2)

        current_app.logger.debug(f"Sending email via SMTP {to_address}")
        if current_app.config["LOG_LEVEL"] == logging.DEBUG:
            # In debug mode we only log the message instead of sending it.
            current_app.logger.debug(msg.as_string())
        else:
            sender = SMTPService._init_smtp_client()
            # FIX: close the SMTP connection even when sendmail raises — the
            # original leaked the connection on failure.
            try:
                sender.sendmail(from_address, to_address, msg.as_string())
            finally:
                sender.quit()
        current_app.logger.debug(f"Email sent {to_address}")

    @staticmethod
    def _init_smtp_client():
        """ Initialise SMTP client from app settings """
        smtp_settings = current_app.config["SMTP_SETTINGS"]
        sender = smtplib.SMTP(smtp_settings["host"], port=smtp_settings["smtp_port"])
        sender.starttls()
        sender.login(smtp_settings["smtp_user"], smtp_settings["smtp_password"])
        return sender

    @staticmethod
    def _generate_email_verification_url(email_address: str, user_name: str):
        """ Generate email verification url with unique token """
        # NOTE(review): falls back to a hard-coded signing key when no secret
        # key is configured — acceptable only in test mode; confirm this can
        # never happen in production.
        entropy = current_app.secret_key if current_app.secret_key else "un1testingmode"

        serializer = URLSafeTimedSerializer(entropy)
        token = serializer.dumps(email_address.lower())

        base_url = current_app.config["APP_BASE_URL"]
        verification_params = {"token": token, "username": user_name}
        verification_url = "{0}/api/auth/email?{1}".format(
            base_url, urllib.parse.urlencode(verification_params)
        )
        return verification_url
"""Module for initializing data sources.""" from datetime import datetime from typing import Optional, Union, List, Tuple, Dict from uta_tools import logger from uta_tools.schemas import Assembly, GenomicData, TranscriptExonData, ResidueMode, \ GenomicDataResponse, ServiceMeta, TranscriptExonDataResponse from uta_tools.data_sources import MANETranscript, MANETranscriptMappings,\ SeqRepoAccess, TranscriptMappings, UTADatabase, GeneNormalizer from uta_tools import SEQREPO_DATA_PATH, \ TRANSCRIPT_MAPPINGS_PATH, LRG_REFSEQGENE_PATH, MANE_SUMMARY_PATH, \ UTA_DB_URL from uta_tools.version import __version__ class UTATools: """Class to initialize data sources.""" def __init__(self, seqrepo_data_path: str = SEQREPO_DATA_PATH, transcript_file_path: str = TRANSCRIPT_MAPPINGS_PATH, lrg_refseqgene_path: str = LRG_REFSEQGENE_PATH, mane_data_path: str = MANE_SUMMARY_PATH, db_url: str = UTA_DB_URL, db_pwd: str = "", gene_db_url: str = "", gene_db_region: str = "us-east-2" ) -> None: """Initialize UTATools class :param str seqrepo_data_path: The path to the seqrepo directory. 
:param str transcript_file_path: The path to transcript_mappings.tsv :param str lrg_refseqgene_path: The path to LRG_RefSeqGene :param str mane_data_path: Path to RefSeq MANE summary data :param str db_url: PostgreSQL connection URL Format: `driver://user:pass@host/database/schema` :param str db_pwd: User's password for uta database :param str gene_db_url: URL to gene normalizer dynamodb :param str gene_db_region: AWS region for gene normalizer db """ self.seqrepo_access = SeqRepoAccess( seqrepo_data_path=seqrepo_data_path) self.transcript_mappings = TranscriptMappings( transcript_file_path=transcript_file_path, lrg_refseqgene_path=lrg_refseqgene_path) self.mane_transcript_mappings = MANETranscriptMappings( mane_data_path=mane_data_path) self.uta_db = UTADatabase(db_url=db_url, db_pwd=db_pwd) gene_normalizer = GeneNormalizer(gene_db_url, gene_db_region) self.mane_transcript = MANETranscript( self.seqrepo_access, self.transcript_mappings, self.mane_transcript_mappings, self.uta_db, gene_normalizer) @staticmethod def service_meta() -> ServiceMeta: """Return ServiceMeta for uta_tools :return: ServiceMeta object """ return ServiceMeta( version=__version__, response_datetime=datetime.now() ) @staticmethod def _return_warnings( resp: Union[GenomicDataResponse, TranscriptExonDataResponse], warning_msg: str) -> Union[GenomicDataResponse, TranscriptExonDataResponse]: # noqa: E501 """Add warnings to response object :param Union[GenomicDataResponse, TranscriptExonDataResponse] resp: Response object :param str warning_msg: Warning message on why `transcript_exon_data` or `genomic_data` field is None :return: Response object with warning message """ logger.warning(warning_msg) resp.warnings.append(warning_msg) return resp async def transcript_to_genomic_coordinates( self, gene: Optional[str] = None, transcript: Optional[str] = None, exon_start: Optional[int] = None, exon_start_offset: Optional[int] = 0, # noqa: E501 exon_end: Optional[int] = None, exon_end_offset: Optional[int] 
= 0, **kwargs) -> GenomicDataResponse: """Get genomic data given transcript data. Will liftover to GRCh38 coordinates if possible. :param Optional[str] gene: Gene symbol :param Optional[str] transcript: Transcript accession :param Optional[int] exon_start: Starting transcript exon number :param Optional[int] exon_end: Ending transcript exon number :param Optional[int] exon_start_offset: Starting exon offset :param Optional[int] exon_end_offset: Ending exon offset :return: Genomic data (inter-residue coordinates) """ resp = GenomicDataResponse( genomic_data=None, warnings=[], service_meta=self.service_meta() ) if not transcript: return self._return_warnings(resp, "Must provide `transcript`") else: transcript = transcript.strip() if exon_start is None and exon_end is None: return self._return_warnings( resp, "Must provide either `exon_start` or `exon_end`") if gene: gene = gene.upper().strip() if exon_start and exon_end: if exon_start > exon_end: return self._return_warnings( resp, f"Start exon {exon_start} is greater than end exon {exon_end}" # noqa: E501 ) tx_exons, warning = await self.uta_db.get_tx_exons(transcript) if not tx_exons: return self._return_warnings(resp, warning) tx_exon_coords, warning = self.uta_db.get_tx_exon_coords( transcript, tx_exons, exon_start, exon_end) if not tx_exon_coords: return self._return_warnings(resp, warning) tx_exon_start, tx_exon_end = tx_exon_coords alt_ac_start_end, warning = await self.uta_db.get_alt_ac_start_and_end( transcript, tx_exon_start, tx_exon_end, gene=gene) if not alt_ac_start_end: return self._return_warnings(resp, warning) alt_ac_start, alt_ac_end = alt_ac_start_end gene = alt_ac_start[0] if alt_ac_start else alt_ac_end[0] chromosome = alt_ac_start[1] if alt_ac_start else alt_ac_end[1] if gene is None or chromosome is None: return self._return_warnings( resp, "Unable to retrieve `gene` or `chromosome` from " "genomic start or end data") start = alt_ac_start[3] if alt_ac_start else None end = alt_ac_end[2] if 
alt_ac_end else None strand = alt_ac_start[4] if alt_ac_start else alt_ac_end[4] # Using none since could set to 0 start_exits = start is not None end_exists = end is not None if strand == -1: start_offset = exon_start_offset * -1 if start_exits else None end_offset = exon_end_offset * -1 if end_exists else None else: start_offset = exon_start_offset if start_exits else None end_offset = exon_end_offset if end_exists else None start = start + start_offset if start_exits else None end = end + end_offset if end_exists else None resp.genomic_data = GenomicData( gene=gene, chr=chromosome, start=start, end=end, exon_start=exon_start if start_exits else None, exon_start_offset=exon_start_offset if start_exits else None, exon_end=exon_end if end_exists else None, exon_end_offset=exon_end_offset if end_exists else None, transcript=transcript, strand=strand ) return resp async def genomic_to_transcript_exon_coordinates( self, chromosome: Union[str, int], start: Optional[int] = None, end: Optional[int] = None, strand: Optional[int] = None, transcript: Optional[str] = None, gene: Optional[str] = None, residue_mode: ResidueMode = ResidueMode.RESIDUE, **kwargs) -> GenomicDataResponse: """Get transcript data for genomic data. MANE Transcript data will be returned iff `transcript` is not supplied. `gene` must be supplied in order to retrieve MANE Transcript data. Liftovers genomic coordinates to GRCh38 :param str chromosome: Chromosome. Must either give chromosome number (i.e. `1`) or accession (i.e. `NC_000001.11`). :param int start: Start genomic position :param int end: End genomic position :param str strand: Strand. Must be either `-1` or `1`. :param str transcript: The transcript to use. If this is not given, we will try the following transcripts: MANE Select, MANE Clinical Plus, Longest Remaining Compatible Transcript :param str gene: Gene symbol :param str residue_mode: Default is `resiude` (1-based). Must be either `residue` or `inter-residue` (0-based). 
:return: Genomic data (inter-residue coordinates) """ resp = GenomicDataResponse( genomic_data=None, warnings=[], service_meta=self.service_meta() ) if start is None and end is None: return self._return_warnings( resp, "Must provide either `start` or `end`") params = {key: None for key in GenomicData.__fields__.keys()} if gene is not None: gene = gene.upper().strip() if start: if residue_mode == ResidueMode.RESIDUE: start -= 1 start_data = await self._genomic_to_transcript_exon_coordinate( chromosome, start, strand=strand, transcript=transcript, gene=gene, is_start=True, residue_mode=ResidueMode.INTER_RESIDUE ) if start_data.transcript_exon_data: start_data = start_data.transcript_exon_data.dict() else: return self._return_warnings(resp, start_data.warnings[0]) else: start_data = None if end: if residue_mode == ResidueMode.RESIDUE: end -= 1 end_data = await self._genomic_to_transcript_exon_coordinate( chromosome, end, strand=strand, transcript=transcript, gene=gene, is_start=False, residue_mode=ResidueMode.INTER_RESIDUE ) if end_data.transcript_exon_data: end_data = end_data.transcript_exon_data.dict() else: return self._return_warnings(resp, end_data.warnings[0]) else: end_data = None for field in ["transcript", "gene", "chr", "strand"]: if start_data: if end_data: if start_data[field] != end_data[field]: msg = f"Start `{field}`, {start_data[field]}, does " \ f"not match End `{field}`, {end_data[field]}" return self._return_warnings(resp, msg) params[field] = start_data[field] else: params[field] = end_data[field] if gene and gene != params["gene"]: msg = f"Input gene, {gene}, does not match expected output" \ f"gene, {params["gene"]}" return self._return_warnings(resp, msg) for label, data in [("start", start_data), ("end", end_data)]: if data: params[label] = data["pos"] params[f"exon_{label}"] = data["exon"] params[f"exon_{label}_offset"] = data["exon_offset"] resp.genomic_data = GenomicData(**params) return resp async def 
_genomic_to_transcript_exon_coordinate( self, chromosome: Union[str, int], pos: int, strand: int = None, transcript: str = None, gene: str = None, is_start: bool = True, residue_mode: ResidueMode = ResidueMode.RESIDUE) -> TranscriptExonDataResponse: # noqa: E501 """Convert individual genomic data to transcript data :param str chromosome: Chromosome. Must either give chromosome number (i.e. `1`) or accession (i.e. `NC_000001.11`). :param int pos: Genomic position :param str strand: Strand. Must be either `-1` or `1`. :param str transcript: The transcript to use. If this is not given, we will try the following transcripts: MANE Select, MANE Clinical Plus, Longest Remaining Compatible Transcript :param str gene: Gene symbol :param bool is_start: `True` if `pos` is start position. `False` if `pos` is end position. :param str residue_mode: Default is `resiude` (1-based). Must be either `residue` or `inter-residue` (0-based). :return: Transcript data (inter-residue coordinates) """ resp = TranscriptExonDataResponse( transcript_exon_data=None, warnings=[], service_meta=self.service_meta() ) if transcript is None and gene is None: return self._return_warnings( resp, "Must provide either `gene` or `transcript`" ) params = {key: None for key in TranscriptExonData.__fields__.keys()} try: # Check if just chromosome is given. 
If it is, we should # convert this to the correct accession version if chromosome == "X": chromosome = 23 elif chromosome == "Y": chromosome = 24 else: chromosome = int(chromosome) except ValueError: # Check if valid accession is given if not await self.uta_db.validate_genomic_ac(chromosome): return self._return_warnings( resp, f"Invalid chromosome: {chromosome}") if isinstance(chromosome, str): # Accession given genes_alt_acs, warning = \ await self.uta_db.chr_to_gene_and_accessions( chromosome, pos, strand=strand, alt_ac=chromosome, gene=gene) else: # Number given genes_alt_acs, warning = \ await self.uta_db.chr_to_gene_and_accessions( chromosome, pos, strand=strand, alt_ac=None, gene=gene) if not genes_alt_acs: return self._return_warnings(resp, warning) gene_alt_ac, warning = self._get_gene_and_alt_ac(genes_alt_acs, gene) if not gene_alt_ac: return self._return_warnings(resp, warning) gene, alt_ac = gene_alt_ac if transcript is None: warnings = await self._set_mane_genomic_data( params, gene, alt_ac, pos, strand, is_start, residue_mode) if warnings: return self._return_warnings(resp, warnings) else: params["transcript"] = transcript params["gene"] = gene params["pos"] = pos params["chr"] = alt_ac warning = await self._set_genomic_data(params, strand, is_start) if warning: return self._return_warnings(resp, warning) resp.transcript_exon_data = TranscriptExonData(**params) return resp @staticmethod def _get_gene_and_alt_ac( genes_alt_acs: Dict, gene: Optional[str] ) -> Tuple[Optional[Tuple[str, str]], Optional[str]]: """Return gene genomic accession :param Dict genes_alt_acs: Dictionary containing genes and genomic accessions :param Optional[str] gene: Gene symbol :return: [Gene, Genomic accession] if both exist """ alt_acs = genes_alt_acs["alt_acs"] len_alt_acs = len(alt_acs) if len_alt_acs > 1: return None, f"Found more than one accessions: {alt_acs}" elif len_alt_acs == 0: return None, "No genomic accessions found" alt_ac = next(iter(alt_acs)) genes = 
genes_alt_acs["genes"] len_genes = len(genes) input_gene = gene output_gene = None if len_genes == 1: output_gene = next(iter(genes)) elif len_genes > 1: return None, f"Found more than one gene: {genes}" elif len_genes == 0: return None, "No genes found" if input_gene is not None: if output_gene != input_gene.upper(): return None, f"Input gene, {input_gene}, does not match " \ f"expected output gene, {output_gene}" gene = output_gene if output_gene else input_gene return (gene, alt_ac), None async def _set_mane_genomic_data( self, params: Dict, gene: str, alt_ac: str, pos: int, strand: int, is_start: bool, residue_mode: str ) -> Optional[str]: """Set genomic data in `params` found from MANE. :param Dict params: Parameters for response :param str gene: Gene symbol :param str alt_ac: Genomic accession :param int pos: Genomic position :param int strand: Strand :param bool is_start: `True` if `pos` is start position. `False` if `pos` is end position. :param str residue_mode: Residue mode for start/end positions Must be either `inter-residue` or `residue` :return: Warnings if found """ mane_data = await self.mane_transcript.get_mane_transcript( alt_ac, pos, "g", gene=gene, try_longest_compatible=True, residue_mode=residue_mode ) if not mane_data: msg = f"Unable to find mane data for {alt_ac} with position {pos}" if gene: msg += f" on gene {gene}" logger.warning(msg) return msg if mane_data["strand"] == "-": mane_data["strand"] = -1 elif mane_data["strand"] == "+": mane_data["strand"] = 1 params["gene"] = mane_data["gene"] params["transcript"] = mane_data["refseq"] if mane_data["refseq"] \ else mane_data["ensembl"] if mane_data["ensembl"] else None tx_exons = await self._structure_exons(params["transcript"]) if not tx_exons: return f"Unable to get exons for {params["transcript"]}" tx_pos = mane_data["pos"][0] + mane_data["coding_start_site"] params["exon"] = self._get_exon_number(tx_exons, tx_pos) try: tx_exon = tx_exons[params["exon"] - 1] except IndexError: msg = 
f"{params["transcript"]} with position {tx_pos} "\ f"does not exist on exons: {tx_exons}" logger.warning(msg) return msg strand_to_use = strand if strand is not None else mane_data["strand"] params["strand"] = strand_to_use self._set_exon_offset(params, tx_exon[0], tx_exon[1], tx_pos, is_start=is_start, strand=strand_to_use) # Need to check if we need to change pos for liftover genomic_data, warnings = await self.uta_db.get_alt_ac_start_or_end( params["transcript"], tx_pos, tx_pos, gene) if genomic_data is None: return warnings params["chr"] = genomic_data[1] genomic_coords = genomic_data[2], genomic_data[3] genomic_pos = genomic_coords[1] if is_start else genomic_coords[0] params["pos"] = genomic_pos - params["exon_offset"] if \ strand_to_use == -1 else genomic_pos + params["exon_offset"] return None async def _set_genomic_data(self, params: Dict, strand: int, is_start: bool) -> Optional[str]: """Set genomic data in `params`. :param Dict params: Parameters for response :param int strand: Strand :param bool is_start: `True` if `pos` is start position. `False` if `pos` is end position. 
:return: Warnings if found """ # We should always try to liftover grch38_ac = await self.uta_db.get_newest_assembly_ac(params["chr"]) if not grch38_ac: return f"Invalid genomic accession: {params["chr"]}" grch38_ac = grch38_ac[0][0] if grch38_ac != params["chr"]: # Liftover to 38 descr = await self.uta_db.get_chr_assembly(params["chr"]) if descr is None: return f"Unable to get chromosome and assembly for " \ f"{params["chr"]}" chromosome_number, assembly = descr liftover_data = self.uta_db.get_liftover( chromosome_number, params["pos"], Assembly.GRCH38) if liftover_data is None: return f"Position {params["pos"]} does not exist on " \ f"chromosome {chromosome_number}" params["pos"] = liftover_data[1] params["chr"] = grch38_ac tx_exons = await self._structure_exons(params["transcript"]) if not tx_exons: return f"Unable to get exons for {params["transcript"]}" data = await self.uta_db.get_tx_exon_aln_v_data( params["transcript"], params["pos"], params["pos"], alt_ac=params["chr"], use_tx_pos=False) if len(data) != 1: return f"Must find exactly one row for genomic data, " \ f"but found: {len(data)}" # Find exon number data = data[0] data_exons = data[2], data[3] i = 1 found_tx_exon = False for exon in tx_exons: if data_exons == exon: found_tx_exon = True break i += 1 if not found_tx_exon: # Either first or last i = 1 if data_exons == (0, tx_exons[0][1]) else i - 1 params["exon"] = i strand_to_use = strand if strand is not None else data[7] params["strand"] = strand_to_use self._set_exon_offset(params, data[5], data[6], params["pos"], is_start=is_start, strand=strand_to_use) return None @staticmethod def _set_exon_offset(params: Dict, start: int, end: int, pos: int, is_start: bool, strand: int) -> None: """Set `exon_offset` in params. 
:param Dict params: Parameters for response :param int start: Start exon coord (can be transcript or genomic) :param int end: End exon coord (can be transcript or genomic) :param int pos: Position change (can be transcript or genomic) :param bool is_start: `True` if `pos` is start position. `False` if `pos` is end position :param int strand: Strand """ if is_start: if strand == -1: params["exon_offset"] = end - pos else: params["exon_offset"] = pos - end else: if strand == -1: params["exon_offset"] = start - pos else: params["exon_offset"] = pos - start async def _structure_exons(self, transcript: str) -> List[Tuple[int, int]]: """Structure exons as list of tuples. :param str transcript: Transcript accession :return: List of tuples containing transcript exon coordinates """ result = list() tx_exons, _ = await self.uta_db.get_tx_exons(transcript) if not tx_exons: return result for tx_exon in tx_exons: coords = tx_exon.split(",") result.append((int(coords[0]), int(coords[1]))) return result @staticmethod def _get_exon_number(tx_exons: List, tx_pos: int) -> int: """Find exon number. :param List tx_exons: List of exon coordinates :param int tx_pos: Transcript position change :return: Exon number associated to transcript position change """ i = 1 for coords in tx_exons: if coords[0] <= tx_pos <= coords[1]: break i += 1 return i
"""Module for initializing data sources."""
from datetime import datetime
from typing import Optional, Union, List, Tuple, Dict

from uta_tools import logger
from uta_tools.schemas import Assembly, GenomicData, TranscriptExonData, \
    ResidueMode, GenomicDataResponse, ServiceMeta, \
    TranscriptExonDataResponse
from uta_tools.data_sources import MANETranscript, MANETranscriptMappings, \
    SeqRepoAccess, TranscriptMappings, UTADatabase, GeneNormalizer
from uta_tools import SEQREPO_DATA_PATH, TRANSCRIPT_MAPPINGS_PATH, \
    LRG_REFSEQGENE_PATH, MANE_SUMMARY_PATH, UTA_DB_URL
from uta_tools.version import __version__


class UTATools:
    """Class to initialize data sources."""

    def __init__(self, seqrepo_data_path: str = SEQREPO_DATA_PATH,
                 transcript_file_path: str = TRANSCRIPT_MAPPINGS_PATH,
                 lrg_refseqgene_path: str = LRG_REFSEQGENE_PATH,
                 mane_data_path: str = MANE_SUMMARY_PATH,
                 db_url: str = UTA_DB_URL, db_pwd: str = "",
                 gene_db_url: str = "", gene_db_region: str = "us-east-2"
                 ) -> None:
        """Initialize UTATools class

        :param str seqrepo_data_path: The path to the seqrepo directory.
        :param str transcript_file_path: The path to transcript_mappings.tsv
        :param str lrg_refseqgene_path: The path to LRG_RefSeqGene
        :param str mane_data_path: Path to RefSeq MANE summary data
        :param str db_url: PostgreSQL connection URL
            Format: `driver://user:pass@host/database/schema`
        :param str db_pwd: User's password for uta database
        :param str gene_db_url: URL to gene normalizer dynamodb
        :param str gene_db_region: AWS region for gene normalizer db
        """
        self.seqrepo_access = SeqRepoAccess(
            seqrepo_data_path=seqrepo_data_path)
        self.transcript_mappings = TranscriptMappings(
            transcript_file_path=transcript_file_path,
            lrg_refseqgene_path=lrg_refseqgene_path)
        self.mane_transcript_mappings = MANETranscriptMappings(
            mane_data_path=mane_data_path)
        self.uta_db = UTADatabase(db_url=db_url, db_pwd=db_pwd)
        gene_normalizer = GeneNormalizer(gene_db_url, gene_db_region)
        self.mane_transcript = MANETranscript(
            self.seqrepo_access, self.transcript_mappings,
            self.mane_transcript_mappings, self.uta_db,
            gene_normalizer)

    @staticmethod
    def service_meta() -> ServiceMeta:
        """Return ServiceMeta for uta_tools

        :return: ServiceMeta object
        """
        return ServiceMeta(
            version=__version__,
            response_datetime=datetime.now()
        )

    @staticmethod
    def _return_warnings(
            resp: Union[GenomicDataResponse, TranscriptExonDataResponse],
            warning_msg: str) -> Union[GenomicDataResponse, TranscriptExonDataResponse]:  # noqa: E501
        """Add warnings to response object

        :param Union[GenomicDataResponse, TranscriptExonDataResponse] resp:
            Response object
        :param str warning_msg: Warning message on why
            `transcript_exon_data` or `genomic_data` field is None
        :return: Response object with warning message
        """
        logger.warning(warning_msg)
        resp.warnings.append(warning_msg)
        return resp

    async def transcript_to_genomic_coordinates(
            self, gene: Optional[str] = None,
            transcript: Optional[str] = None,
            exon_start: Optional[int] = None,
            exon_start_offset: Optional[int] = 0,
            exon_end: Optional[int] = None,
            exon_end_offset: Optional[int] = 0,
            **kwargs) -> GenomicDataResponse:
        """Get genomic data given transcript data.
        Will liftover to GRCh38 coordinates if possible.

        :param Optional[str] gene: Gene symbol
        :param Optional[str] transcript: Transcript accession
        :param Optional[int] exon_start: Starting transcript exon number
        :param Optional[int] exon_end: Ending transcript exon number
        :param Optional[int] exon_start_offset: Starting exon offset
        :param Optional[int] exon_end_offset: Ending exon offset
        :return: Genomic data (inter-residue coordinates)
        """
        resp = GenomicDataResponse(
            genomic_data=None, warnings=[], service_meta=self.service_meta())
        if not transcript:
            return self._return_warnings(resp, "Must provide `transcript`")
        else:
            transcript = transcript.strip()

        if exon_start is None and exon_end is None:
            return self._return_warnings(
                resp, "Must provide either `exon_start` or `exon_end`")
        if gene:
            gene = gene.upper().strip()

        if exon_start and exon_end:
            if exon_start > exon_end:
                return self._return_warnings(
                    resp,
                    f"Start exon {exon_start} is greater than end exon {exon_end}"  # noqa: E501
                )

        tx_exons, warning = await self.uta_db.get_tx_exons(transcript)
        if not tx_exons:
            return self._return_warnings(resp, warning)

        tx_exon_coords, warning = self.uta_db.get_tx_exon_coords(
            transcript, tx_exons, exon_start, exon_end)
        if not tx_exon_coords:
            return self._return_warnings(resp, warning)
        tx_exon_start, tx_exon_end = tx_exon_coords

        alt_ac_start_end, warning = await self.uta_db.get_alt_ac_start_and_end(
            transcript, tx_exon_start, tx_exon_end, gene=gene)
        if not alt_ac_start_end:
            return self._return_warnings(resp, warning)
        alt_ac_start, alt_ac_end = alt_ac_start_end

        gene = alt_ac_start[0] if alt_ac_start else alt_ac_end[0]
        chromosome = alt_ac_start[1] if alt_ac_start else alt_ac_end[1]
        if gene is None or chromosome is None:
            return self._return_warnings(
                resp, "Unable to retrieve `gene` or `chromosome` from "
                      "genomic start or end data")

        start = alt_ac_start[3] if alt_ac_start else None
        end = alt_ac_end[2] if alt_ac_end else None
        strand = alt_ac_start[4] if alt_ac_start else alt_ac_end[4]

        # Using none since could set to 0
        start_exists = start is not None
        end_exists = end is not None

        if strand == -1:
            # Negative strand: offsets move in the opposite genomic direction
            start_offset = exon_start_offset * -1 if start_exists else None
            end_offset = exon_end_offset * -1 if end_exists else None
        else:
            start_offset = exon_start_offset if start_exists else None
            end_offset = exon_end_offset if end_exists else None
        start = start + start_offset if start_exists else None
        end = end + end_offset if end_exists else None

        resp.genomic_data = GenomicData(
            gene=gene,
            chr=chromosome,
            start=start,
            end=end,
            exon_start=exon_start if start_exists else None,
            exon_start_offset=exon_start_offset if start_exists else None,
            exon_end=exon_end if end_exists else None,
            exon_end_offset=exon_end_offset if end_exists else None,
            transcript=transcript,
            strand=strand
        )
        return resp

    async def genomic_to_transcript_exon_coordinates(
            self, chromosome: Union[str, int], start: Optional[int] = None,
            end: Optional[int] = None, strand: Optional[int] = None,
            transcript: Optional[str] = None, gene: Optional[str] = None,
            residue_mode: ResidueMode = ResidueMode.RESIDUE,
            **kwargs) -> GenomicDataResponse:
        """Get transcript data for genomic data.
        MANE Transcript data will be returned iff `transcript` is not
        supplied. `gene` must be supplied in order to retrieve
        MANE Transcript data.
        Liftovers genomic coordinates to GRCh38

        :param str chromosome: Chromosome. Must either give chromosome number
            (i.e. `1`) or accession (i.e. `NC_000001.11`).
        :param int start: Start genomic position
        :param int end: End genomic position
        :param str strand: Strand. Must be either `-1` or `1`.
        :param str transcript: The transcript to use. If this is not given,
            we will try the following transcripts: MANE Select, MANE Clinical
            Plus, Longest Remaining Compatible Transcript
        :param str gene: Gene symbol
        :param str residue_mode: Default is `residue` (1-based).
            Must be either `residue` or `inter-residue` (0-based).
        :return: Genomic data (inter-residue coordinates)
        """
        resp = GenomicDataResponse(
            genomic_data=None, warnings=[], service_meta=self.service_meta())
        if start is None and end is None:
            return self._return_warnings(
                resp, "Must provide either `start` or `end`")

        params = {key: None for key in GenomicData.__fields__.keys()}
        if gene is not None:
            gene = gene.upper().strip()

        if start:
            if residue_mode == ResidueMode.RESIDUE:
                # Normalize to inter-residue (0-based) before converting
                start -= 1
            start_data = await self._genomic_to_transcript_exon_coordinate(
                chromosome, start, strand=strand, transcript=transcript,
                gene=gene, is_start=True,
                residue_mode=ResidueMode.INTER_RESIDUE)
            if start_data.transcript_exon_data:
                start_data = start_data.transcript_exon_data.dict()
            else:
                return self._return_warnings(resp, start_data.warnings[0])
        else:
            start_data = None

        if end:
            if residue_mode == ResidueMode.RESIDUE:
                end -= 1
            end_data = await self._genomic_to_transcript_exon_coordinate(
                chromosome, end, strand=strand, transcript=transcript,
                gene=gene, is_start=False,
                residue_mode=ResidueMode.INTER_RESIDUE)
            if end_data.transcript_exon_data:
                end_data = end_data.transcript_exon_data.dict()
            else:
                return self._return_warnings(resp, end_data.warnings[0])
        else:
            end_data = None

        for field in ["transcript", "gene", "chr", "strand"]:
            if start_data:
                if end_data:
                    if start_data[field] != end_data[field]:
                        msg = f"Start `{field}`, {start_data[field]}, does " \
                              f"not match End `{field}`, {end_data[field]}"
                        return self._return_warnings(resp, msg)
                params[field] = start_data[field]
            else:
                params[field] = end_data[field]

        if gene and gene != params["gene"]:
            # BUGFIX: original concatenation produced "outputgene"
            # (missing space between the two string fragments)
            msg = f"Input gene, {gene}, does not match expected output " \
                  f"gene, {params['gene']}"
            return self._return_warnings(resp, msg)

        for label, data in [("start", start_data), ("end", end_data)]:
            if data:
                params[label] = data["pos"]
                params[f"exon_{label}"] = data["exon"]
                params[f"exon_{label}_offset"] = data["exon_offset"]
        resp.genomic_data = GenomicData(**params)
        return resp

    async def _genomic_to_transcript_exon_coordinate(
            self, chromosome: Union[str, int], pos: int, strand: int = None,
            transcript: str = None, gene: str = None, is_start: bool = True,
            residue_mode: ResidueMode = ResidueMode.RESIDUE) -> TranscriptExonDataResponse:  # noqa: E501
        """Convert individual genomic data to transcript data

        :param str chromosome: Chromosome. Must either give chromosome number
            (i.e. `1`) or accession (i.e. `NC_000001.11`).
        :param int pos: Genomic position
        :param str strand: Strand. Must be either `-1` or `1`.
        :param str transcript: The transcript to use. If this is not given,
            we will try the following transcripts: MANE Select, MANE Clinical
            Plus, Longest Remaining Compatible Transcript
        :param str gene: Gene symbol
        :param bool is_start: `True` if `pos` is start position.
            `False` if `pos` is end position.
        :param str residue_mode: Default is `residue` (1-based).
            Must be either `residue` or `inter-residue` (0-based).
        :return: Transcript data (inter-residue coordinates)
        """
        resp = TranscriptExonDataResponse(
            transcript_exon_data=None, warnings=[],
            service_meta=self.service_meta())

        if transcript is None and gene is None:
            return self._return_warnings(
                resp, "Must provide either `gene` or `transcript`")

        params = {key: None for key in TranscriptExonData.__fields__.keys()}

        try:
            # Check if just chromosome is given. If it is, we should
            # convert this to the correct accession version
            if chromosome == "X":
                chromosome = 23
            elif chromosome == "Y":
                chromosome = 24
            else:
                chromosome = int(chromosome)
        except ValueError:
            # Check if valid accession is given
            if not await self.uta_db.validate_genomic_ac(chromosome):
                return self._return_warnings(
                    resp, f"Invalid chromosome: {chromosome}")

        if isinstance(chromosome, str):
            # Accession given
            genes_alt_acs, warning = \
                await self.uta_db.chr_to_gene_and_accessions(
                    chromosome, pos, strand=strand, alt_ac=chromosome,
                    gene=gene)
        else:
            # Number given
            genes_alt_acs, warning = \
                await self.uta_db.chr_to_gene_and_accessions(
                    chromosome, pos, strand=strand, alt_ac=None, gene=gene)
        if not genes_alt_acs:
            return self._return_warnings(resp, warning)

        gene_alt_ac, warning = self._get_gene_and_alt_ac(genes_alt_acs, gene)
        if not gene_alt_ac:
            return self._return_warnings(resp, warning)
        gene, alt_ac = gene_alt_ac

        if transcript is None:
            warnings = await self._set_mane_genomic_data(
                params, gene, alt_ac, pos, strand, is_start, residue_mode)
            if warnings:
                return self._return_warnings(resp, warnings)
        else:
            params["transcript"] = transcript
            params["gene"] = gene
            params["pos"] = pos
            params["chr"] = alt_ac
            warning = await self._set_genomic_data(params, strand, is_start)
            if warning:
                return self._return_warnings(resp, warning)

        resp.transcript_exon_data = TranscriptExonData(**params)
        return resp

    @staticmethod
    def _get_gene_and_alt_ac(
            genes_alt_acs: Dict, gene: Optional[str]
    ) -> Tuple[Optional[Tuple[str, str]], Optional[str]]:
        """Return gene genomic accession

        :param Dict genes_alt_acs: Dictionary containing genes and
            genomic accessions
        :param Optional[str] gene: Gene symbol
        :return: [Gene, Genomic accession] if both exist
        """
        alt_acs = genes_alt_acs["alt_acs"]
        len_alt_acs = len(alt_acs)
        if len_alt_acs > 1:
            return None, f"Found more than one accessions: {alt_acs}"
        elif len_alt_acs == 0:
            return None, "No genomic accessions found"
        alt_ac = next(iter(alt_acs))

        genes = genes_alt_acs["genes"]
        len_genes = len(genes)
        input_gene = gene
        output_gene = None
        if len_genes == 1:
            output_gene = next(iter(genes))
        elif len_genes > 1:
            return None, f"Found more than one gene: {genes}"
        elif len_genes == 0:
            return None, "No genes found"

        if input_gene is not None:
            if output_gene != input_gene.upper():
                return None, f"Input gene, {input_gene}, does not match " \
                             f"expected output gene, {output_gene}"

        gene = output_gene if output_gene else input_gene
        return (gene, alt_ac), None

    async def _set_mane_genomic_data(
            self, params: Dict, gene: str, alt_ac: str, pos: int, strand: int,
            is_start: bool, residue_mode: str
    ) -> Optional[str]:
        """Set genomic data in `params` found from MANE.

        :param Dict params: Parameters for response
        :param str gene: Gene symbol
        :param str alt_ac: Genomic accession
        :param int pos: Genomic position
        :param int strand: Strand
        :param bool is_start: `True` if `pos` is start position.
            `False` if `pos` is end position.
        :param str residue_mode: Residue mode for start/end positions
            Must be either `inter-residue` or `residue`
        :return: Warnings if found
        """
        mane_data = await self.mane_transcript.get_mane_transcript(
            alt_ac, pos, "g", gene=gene,
            try_longest_compatible=True, residue_mode=residue_mode)
        if not mane_data:
            msg = f"Unable to find mane data for {alt_ac} with position {pos}"
            if gene:
                msg += f" on gene {gene}"
            logger.warning(msg)
            return msg

        if mane_data["strand"] == "-":
            mane_data["strand"] = -1
        elif mane_data["strand"] == "+":
            mane_data["strand"] = 1

        params["gene"] = mane_data["gene"]
        params["transcript"] = mane_data["refseq"] if mane_data["refseq"] \
            else mane_data["ensembl"] if mane_data["ensembl"] else None
        tx_exons = await self._structure_exons(params["transcript"])
        if not tx_exons:
            return f"Unable to get exons for {params['transcript']}"
        tx_pos = mane_data["pos"][0] + mane_data["coding_start_site"]
        params["exon"] = self._get_exon_number(tx_exons, tx_pos)

        try:
            tx_exon = tx_exons[params["exon"] - 1]
        except IndexError:
            msg = f"{params['transcript']} with position {tx_pos} " \
                  f"does not exist on exons: {tx_exons}"
            logger.warning(msg)
            return msg

        strand_to_use = strand if strand is not None else mane_data["strand"]
        params["strand"] = strand_to_use
        self._set_exon_offset(params, tx_exon[0], tx_exon[1], tx_pos,
                              is_start=is_start, strand=strand_to_use)

        # Need to check if we need to change pos for liftover
        genomic_data, warnings = await self.uta_db.get_alt_ac_start_or_end(
            params["transcript"], tx_pos, tx_pos, gene)
        if genomic_data is None:
            return warnings

        params["chr"] = genomic_data[1]
        genomic_coords = genomic_data[2], genomic_data[3]
        genomic_pos = genomic_coords[1] if is_start else genomic_coords[0]
        params["pos"] = genomic_pos - params["exon_offset"] if \
            strand_to_use == -1 else genomic_pos + params["exon_offset"]
        return None

    async def _set_genomic_data(self, params: Dict, strand: int,
                                is_start: bool) -> Optional[str]:
        """Set genomic data in `params`.

        :param Dict params: Parameters for response
        :param int strand: Strand
        :param bool is_start: `True` if `pos` is start position.
            `False` if `pos` is end position.
        :return: Warnings if found
        """
        # We should always try to liftover
        grch38_ac = await self.uta_db.get_newest_assembly_ac(params["chr"])
        if not grch38_ac:
            return f"Invalid genomic accession: {params['chr']}"

        grch38_ac = grch38_ac[0][0]
        if grch38_ac != params["chr"]:
            # Liftover to 38
            descr = await self.uta_db.get_chr_assembly(params["chr"])
            if descr is None:
                return f"Unable to get chromosome and assembly for " \
                       f"{params['chr']}"

            chromosome_number, assembly = descr
            liftover_data = self.uta_db.get_liftover(
                chromosome_number, params["pos"], Assembly.GRCH38)
            if liftover_data is None:
                return f"Position {params['pos']} does not exist on " \
                       f"chromosome {chromosome_number}"

            params["pos"] = liftover_data[1]
            params["chr"] = grch38_ac

        tx_exons = await self._structure_exons(params["transcript"])
        if not tx_exons:
            return f"Unable to get exons for {params['transcript']}"

        data = await self.uta_db.get_tx_exon_aln_v_data(
            params["transcript"], params["pos"], params["pos"],
            alt_ac=params["chr"], use_tx_pos=False)
        if len(data) != 1:
            return f"Must find exactly one row for genomic data, " \
                   f"but found: {len(data)}"

        # Find exon number
        data = data[0]
        data_exons = data[2], data[3]
        i = 1
        found_tx_exon = False
        for exon in tx_exons:
            if data_exons == exon:
                found_tx_exon = True
                break
            i += 1
        if not found_tx_exon:
            # Either first or last
            i = 1 if data_exons == (0, tx_exons[0][1]) else i - 1
        params["exon"] = i

        strand_to_use = strand if strand is not None else data[7]
        params["strand"] = strand_to_use
        self._set_exon_offset(params, data[5], data[6], params["pos"],
                              is_start=is_start, strand=strand_to_use)
        return None

    @staticmethod
    def _set_exon_offset(params: Dict, start: int, end: int, pos: int,
                         is_start: bool, strand: int) -> None:
        """Set `exon_offset` in params.

        :param Dict params: Parameters for response
        :param int start: Start exon coord (can be transcript or genomic)
        :param int end: End exon coord (can be transcript or genomic)
        :param int pos: Position change (can be transcript or genomic)
        :param bool is_start: `True` if `pos` is start position.
            `False` if `pos` is end position
        :param int strand: Strand
        """
        if is_start:
            if strand == -1:
                params["exon_offset"] = end - pos
            else:
                params["exon_offset"] = pos - end
        else:
            if strand == -1:
                params["exon_offset"] = start - pos
            else:
                params["exon_offset"] = pos - start

    async def _structure_exons(self, transcript: str) -> List[Tuple[int, int]]:
        """Structure exons as list of tuples.

        :param str transcript: Transcript accession
        :return: List of tuples containing transcript exon coordinates
        """
        result = list()
        tx_exons, _ = await self.uta_db.get_tx_exons(transcript)
        if not tx_exons:
            return result
        for tx_exon in tx_exons:
            coords = tx_exon.split(",")
            result.append((int(coords[0]), int(coords[1])))
        return result

    @staticmethod
    def _get_exon_number(tx_exons: List, tx_pos: int) -> int:
        """Find exon number.

        :param List tx_exons: List of exon coordinates
        :param int tx_pos: Transcript position change
        :return: Exon number associated to transcript position change
        """
        i = 1
        for coords in tx_exons:
            if coords[0] <= tx_pos <= coords[1]:
                break
            i += 1
        return i
"""This is a cog for a discord.py bot.
It will add some management commands to a bot.

Commands:
    version         show the hash of the latest commit
    load            load an extension / cog
    unload          unload an extension / cog
    reload          reload an extension / cog
    cogs            show currently active extensions / cogs
    list            make felix compute a list
    └ duplicates    find duplicate usernames
    pull            pull latest changes from github (superuser only)
    error           print the traceback of the last unhandled error to chat

Only users that have an admin role can use the commands.
"""
import subprocess
import json
import traceback
import typing
from datetime import datetime, timezone
from os import path, listdir
from discord import Activity, Embed, Member
from discord.ext import commands


class Management(commands.Cog, name='Management'):
    def __init__(self, client):
        self.client = client
        self.reload_config()

    async def cog_check(self, ctx):
        # All commands in this cog are admin-only
        return self.client.user_is_admin(ctx.author)

    @commands.Cog.listener()
    async def on_ready(self):
        loaded = self.client.extensions
        unloaded = [x for x in self.crawl_cogs() if x not in loaded]
        # Cogs without extra in their name should be loaded at startup so if
        # any cog without "extra" in it's name is unloaded here -> Error in cog
        if any('extra' not in cog_name for cog_name in unloaded):
            activity_name = 'ERROR in cog'
            activity_type = 3
        else:
            felix_version = self.get_version_info()[0][:7]
            activity_name = f'on {felix_version}'
            activity_type = 0
        await self.client.change_presence(
            activity=Activity(name=activity_name, type=activity_type)
        )

    @commands.Cog.listener()
    async def on_member_join(self, member):
        if self.client.flood_mode:
            return
        await self.client.main_guild.system_channel.send(
            f'Welcome to the Engineer Man Discord Server, {member.mention}\n'
            'Please take a moment to review the server rules in <#484103976296644608>, '
            'say `felix help` to learn about available commands, '
            'and finally, please be kind and decent to one another and enjoy your stay.'
        )

    # ----------------------------------------------
    # Error handler
    # ----------------------------------------------
    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        if isinstance(error, commands.CommandNotFound):
            return
        if isinstance(error, commands.CommandOnCooldown):
            await ctx.send(error)
            return
        if isinstance(error, commands.MissingRequiredArgument):
            par = str(error.param)
            missing = par.split(": ")[0]
            if ':' in par:
                missing_type = ' (' + str(par).split(": ")[1] + ')'
            else:
                missing_type = ''
            await ctx.send(
                f'Missing parameter: `{missing}{missing_type}`' +
                f'\nIf you are not sure how to use the command, try running ' +
                f'`felix help {ctx.command.qualified_name}`'
            )
            return
        if isinstance(error, commands.CheckFailure):
            await ctx.send('Sorry, you are not allowed to run this command.')
            return
        if isinstance(error, commands.BadArgument):
            # It's in an embed to prevent mentions from working
            embed = Embed(
                title='Error',
                description=str(error),
                color=0x2ECC71
            )
            await ctx.send(embed=embed)
            return
        if isinstance(error, commands.UnexpectedQuoteError):
            await ctx.send('`Unexpected quote encountered`')
            return
        # In case of an unhandled error -> Save the error + current datetime +
        # ctx + original text so it can be accessed later with the error command
        await ctx.send('Sorry, something went wrong. The Error was saved - we will look into it.')
        # NOTE(review): this appends a 4-tuple while get_version_info appends a
        # 3-tuple to last_errors — confirm the error cog handles both arities
        self.client.last_errors.append((error, datetime.utcnow(), ctx, ctx.message.content))
        await self.client.change_presence(
            activity=Activity(name='ERROR encountered', url=None, type=3)
        )
        print(f'Ignoring exception in command {ctx.command}:', flush=True)
        traceback.print_exception(
            type(error), error, error.__traceback__
        )
        print('-------------------------------------------------------------',
              flush=True)

    def reload_config(self):
        with open("../config.json") as conffile:
            self.client.config = json.load(conffile)

    def get_version_info(self):
        """Return (commit hash, iso date) of the current local git HEAD."""
        version = 'unknown'
        date = 'unknown'
        try:
            gitlog = subprocess.check_output(
                ['git', 'log', '-n', '1', '--date=iso']).decode()
            for line in gitlog.split('\n'):
                if line.startswith('commit'):
                    version = line.split(' ')[1]
                elif line.startswith('Date'):
                    date = line[5:].strip()
                    date = date.replace(' +', '+').replace(' ', 'T')
                else:
                    pass
        except Exception as e:
            self.client.last_errors.append((e, datetime.utcnow(), None))
            raise e
        return (version, date)

    async def get_remote_commits(self):
        """Return (number of commits behind origin/master, recent commit data)."""
        last_commit = self.get_version_info()[0]
        # BUGFIX: was a pointless f-string with no placeholders
        ext = '?per_page=10&sha=master'
        repo = 'engineer-man/felix'
        nxt = f'https://api.github.com/repos/{repo}/commits{ext}'
        repo_data = []
        repo_shas = []
        while last_commit not in repo_shas:
            async with self.client.session.get(nxt) as response:
                r = await response.json()
            repo_data += r
            repo_shas = [x['sha'] for x in repo_data]
            try:
                nxt = r.links['next']['url']
            except Exception:
                # BUGFIX: was a bare except (would even swallow
                # KeyboardInterrupt); stop paginating on any lookup failure
                nxt = ''
        num_comm = repo_shas.index(last_commit)
        return (num_comm, repo_data[0:(num_comm if num_comm > 10 else 10)])

    def crawl_cogs(self, directory='cogs'):
        """Recursively collect dotted module paths of all cogs under directory."""
        cogs = []
        for element in listdir(directory):
            if element == 'samples':
                continue
            abs_el = path.join(directory, element)
            if path.isdir(abs_el):
                cogs += self.crawl_cogs(abs_el)
            else:
                filename, ext = path.splitext(element)
                if ext == '.py':
                    dot_dir = directory.replace('\\', '.')
                    dot_dir = dot_dir.replace('/', '.')
                    cogs.append(f'{dot_dir}.' + filename)
        return cogs

    # ----------------------------------------------
    # Function to disply the version
    # ----------------------------------------------
    @commands.command(
        name='version',
        brief='Show current version of felix',
        description='Show current version and changelog of felix',
        hidden=True,
    )
    async def version(self, ctx):
        # await ctx.trigger_typing()
        version, date = self.get_version_info()
        num_commits, remote_data = await self.get_remote_commits()
        status = "I am up to date with 'origin/master'"
        changelog = 'Changelog:\n'
        if num_commits:
            # BUGFIX: inner subscripts reused the outer double quote inside the
            # f-string (SyntaxError before Python 3.12)
            status = f"I am [{num_commits}] commits behind 'origin/master'" \
                     f" [{remote_data[0]['commit']['author']['date']}]"
        for i, commit in enumerate(remote_data):
            commitmessage = commit['commit']['message']
            if 'merge pull' in commitmessage.lower():
                continue
            changelog += ('+ ' if i < num_commits else '* ') \
                + commitmessage.split('\n')[0] + '\n'
        await ctx.send(
            # BUGFIX: "].from [" -> "] from [" in the displayed message
            f'```css\nCurrent Version: [{version[:7]}] from [{date}]' +
            f'\n{status}``````diff\n{changelog}```'
        )

    # ----------------------------------------------
    # Function to load extensions
    # ----------------------------------------------
    @commands.command(
        name='load',
        brief='Load bot extension',
        description='Load bot extension\n\nExample: felix load cogs.stats',
        hidden=True,
    )
    async def load_extension(self, ctx, extension_name):
        target_extension = None
        for cog_name in self.crawl_cogs():
            if extension_name in cog_name:
                target_extension = cog_name
                break
        # BUGFIX: without this guard, an unmatched name raised
        # NameError (target_extension unbound)
        if target_extension is None:
            await ctx.send(
                f'```css\nNo extension matching [{extension_name}] found.```'
            )
            return
        try:
            self.client.load_extension(target_extension)
        except Exception as e:
            self.client.last_errors.append((e, datetime.utcnow(), ctx))
            await ctx.send(f'```py\n{type(e).__name__}: {str(e)}\n```')
            return
        await ctx.send(f'```css\nExtension [{target_extension}] loaded.```')

    # ----------------------------------------------
    # Function to unload extensions
    # ----------------------------------------------
    @commands.command(
        name='unload',
        brief='Unload bot extension',
        description='Unload bot extension\n\nExample: felix unload cogs.stats',
        hidden=True,
    )
    async def unload_extension(self, ctx, extension_name):
        target_extension = None
        for cog_name in self.client.extensions:
            if extension_name in cog_name:
                target_extension = cog_name
                break
        # BUGFIX: guard against no match (target_extension was unbound)
        if target_extension is None:
            return
        if target_extension.lower() in 'cogs.management':
            await ctx.send(
                f"```diff\n- {target_extension} can't be unloaded" +
                f"\n+ try felix reload {target_extension}!```"
            )
            return
        if self.client.extensions.get(target_extension) is None:
            return
        self.client.unload_extension(target_extension)
        await ctx.send(f'```css\nExtension [{target_extension}] unloaded.```')

    # ----------------------------------------------
    # Function to reload extensions
    # ----------------------------------------------
    @commands.command(
        name='reload',
        brief='Reload bot extension',
        description='Reload bot extension\n\nExample: felix reload cogs.stats',
        hidden=True,
        aliases=['re']
    )
    async def reload_extension(self, ctx, extension_name):
        target_extensions = []
        if extension_name == 'all':
            # Reload this cog first, then every other loaded extension
            target_extensions = [__name__] + \
                [x for x in self.client.extensions if not x == __name__]
        else:
            for cog_name in self.client.extensions:
                if extension_name in cog_name:
                    target_extensions = [cog_name]
                    break
        if not target_extensions:
            return
        result = []
        for ext in target_extensions:
            try:
                self.client.reload_extension(ext)
                result.append(f'Extension [{ext}] reloaded.')
            except Exception as e:
                self.client.last_errors.append((e, datetime.utcnow(), ctx))
                result.append(f'#ERROR loading [{ext}]')
                continue
        result = '\n'.join(result)
        await ctx.send(f'```css\n{result}```')

    # ----------------------------------------------
    # Function to get bot extensions
    # ----------------------------------------------
    @commands.command(
        name='cogs',
        brief='Get loaded cogs',
        description='Get loaded cogs',
        aliases=['extensions'],
        hidden=True,
    )
    async def print_cogs(self, ctx):
        loaded = self.client.extensions
        unloaded = [x for x in self.crawl_cogs() if x not in loaded]
        response = ['\n[Loaded extensions]'] + ['\n  ' + x for x in loaded]
        response += ['\n[Unloaded extensions]'] + \
            ['\n  ' + x for x in unloaded]
        # BUGFIX: was f'```css{''.join(response)}```' — reusing the outer
        # single quote inside the f-string is a SyntaxError before 3.12
        await ctx.send('```css' + ''.join(response) + '```')
        return True

    # ----------------------------------------------
    # Function Group to clear channel of messages
    # ----------------------------------------------
    @commands.group(
        invoke_without_command=True,
        name='list',
        hidden=True
    )
    async def _list(self, ctx):
        """List stuff"""
        await ctx.send_help('list')
        return True

    @_list.command(
        name='duplicates'
    )
    async def duplicates(self, ctx):
        """List duplicate usernames"""
        name_count = {}
        aka = {}
        pages = []
        usernames = [(x.name, x.display_name) for x in ctx.guild.members]
        l_max = max([len(x[0]) for x in usernames]) + 1
        for name, display_name in usernames:
            name_count[name] = name_count.get(name, 0) + 1
            if not name == display_name:
                aka[name] = aka.get(name, []) + [display_name]
        page = []
        for key, value in sorted(name_count.items(), key=lambda x: x[1],
                                 reverse=True):
            if value == 1:
                break
            if len('\n'.join(page)) > 1900:
                pages.append(page)
                page = []
            a = ', '.join(aka.get(key, []))
            page.append(f'{value}x {key.ljust(l_max)}' + f'aka: {a}' * bool(a))
        if page:
            pages.append(page)
        if not pages:
            await ctx.send('No duplicate usernames found')
        for n, page in enumerate(pages):
            await ctx.send(f'{n+1}/{len(pages)}\n```' + '\n'.join(page) + '```')

    @_list.command(
        name='earliest'
    )
    async def earliest(self, ctx, n: int = 50, start: int = 0):
        """List earliest Members"""
        sorted_members = sorted(self.client.main_guild.members,
                                key=lambda x: x.joined_at)
        await ctx.send(
            '```\n' +
            # BUGFIX: strftime quotes reused the outer f-string quote
            '\n'.join(f'{x.name} ({x.joined_at.strftime("%Y-%m-%d")})'
                      for x in sorted_members[start:start+n]) +
            '\n```'
        )

    @_list.command(
        name='oldest'
    )
    async def oldest(self, ctx, n: int = 50, start: int = 0):
        """List oldest Member accounts"""
        sorted_members = sorted(self.client.main_guild.members,
                                key=lambda x: x.created_at)
        await ctx.send(
            '```\n' +
            '\n'.join(f'{x.name} ({x.created_at.strftime("%Y-%m-%d")})'
                      for x in sorted_members[start:start+n]) +
            '\n```'
        )

    # ----------------------------------------------
    # Function to get the date
a member joined # ---------------------------------------------- @commands.command( name='joined', hidden=True, ) async def joined(self, ctx, members: commands.Greedy[Member]): """Print the date a member joined""" if not members: raise commands.BadArgument('Please specify at least 1 member') #await ctx.trigger_typing() result = [] now = datetime.now(tz=timezone.utc) for member in members: join = member.joined_at if not join: result.append(f'No join date found for {member.name}') continue difference = now - join result.append( f'{member.name} joined [{join.isoformat().split('.')[0]}] - ' f'[{difference.days}] days and ' f'[{difference.seconds / 3600:.1f}] hours ago' ) if not result: return await ctx.send('```css\n' + '\n'.join(result) + '\n```') @commands.group( invoke_without_command=True, name='error', hidden=True, aliases=['errors'] ) async def error(self, ctx, n: typing.Optional[int] = None): """Show a concise list of stored errors""" if n is not None: await self.print_traceback(ctx, n) return NUM_ERRORS_PER_PAGE = 15 error_log = self.client.last_errors if not error_log: await ctx.send('Error log is empty') return response = [f'```css\nNumber of stored errors: {len(error_log)}'] for i, exc_tuple in enumerate(error_log): exc, date, error_source, *_ = exc_tuple call_info = ( f'CMD: {error_source.invoked_with}' if isinstance(error_source, commands.Context) else 'outside command' ) response.append( f'{i}: [' + date.isoformat().split('.')[0] + '] - [' + call_info + f']\nException: {exc}' ) if i % NUM_ERRORS_PER_PAGE == NUM_ERRORS_PER_PAGE-1: response.append('```') await ctx.send('\n'.join(response)) response = [f'```css'] if len(response) > 1: response.append('```') await ctx.send('\n'.join(response)) @error.command( name='clear', aliases=['delete'], ) async def error_clear(self, ctx, n: int = None): """Clear error with index [n]""" if n is None: self.client.last_errors = [] await ctx.send('Error log cleared') else: self.client.last_errors.pop(n) await 
ctx.send(f'Deleted error #{n}') @error.command( name='traceback', aliases=['tb'], ) async def error_traceback(self, ctx, n: int = None): """Print the traceback of error [n] from the error log""" await self.print_traceback(ctx, n) async def print_traceback(self, ctx, n): error_log = self.client.last_errors if not error_log: await ctx.send('Error log is empty') return if n is None: await ctx.send('Please specify an error index') await self.client.get_command('error').invoke(ctx) return if n >= len(error_log) or n < 0: await ctx.send('Error index does not exist') return exc, date, error_source, orig_content = error_log[n] delta = (datetime.utcnow() - date).total_seconds() hours = int(delta // 3600) seconds = int(delta - (hours * 3600)) delta_str = f'{hours} hours and {seconds} seconds ago' tb = ''.join( traceback.format_exception(type(exc), exc, exc.__traceback__) ) response = [f'`Error occured {delta_str}`'] if error_source is not None: response.append( f'`Server:{error_source.guild.name} | Channel: {error_source.channel.name}`' ) response.append( f'`User: {error_source.author.name}#{error_source.author.discriminator}`' ) if isinstance(error_source, commands.Context): response.append(f'`Command: {error_source.invoked_with}`') response.append(error_source.message.jump_url) else: response.append(f'`Command: No Command`') response.append(error_source.jump_url) response.append(f'```python\n') num_chars = sum(len(line) for line in response) for line in tb.split('\n'): num_chars += len(line) response.append(line) if num_chars > 1900: response.append('```') await ctx.send('\n'.join(response)) response = ['```python\n'] num_chars = 0 response.append('```') await ctx.send('\n'.join(response)) if error_source is not None: e = Embed(title='Full command that caused the error:', description=orig_content) e.set_footer(text=error_source.author.display_name, icon_url=error_source.author.display_avatar) await ctx.send(embed=e) def setup(client): client.add_cog(Management(client))
"""This is a cog for a discord.py bot. It will add some management commands to a bot. Commands: version show the hash of the latest commit load load an extension / cog unload unload an extension / cog reload reload an extension / cog cogs show currently active extensions / cogs list make felix compute a list └duplicates find duplicate usernames pull pull latest changes from github (superuser only) error print the traceback of the last unhandled error to chat Only users that have an admin role can use the commands. """ import subprocess import json import traceback import typing from datetime import datetime, timezone from os import path, listdir from discord import Activity, Embed, Member from discord.ext import commands class Management(commands.Cog, name='Management'): def __init__(self, client): self.client = client self.reload_config() async def cog_check(self, ctx): return self.client.user_is_admin(ctx.author) @commands.Cog.listener() async def on_ready(self): loaded = self.client.extensions unloaded = [x for x in self.crawl_cogs() if x not in loaded] # Cogs without extra in their name should be loaded at startup so if # any cog without "extra" in it's name is unloaded here -> Error in cog if any('extra' not in cog_name for cog_name in unloaded): activity_name = 'ERROR in cog' activity_type = 3 else: felix_version = self.get_version_info()[0][:7] activity_name = f'on {felix_version}' activity_type = 0 await self.client.change_presence( activity=Activity(name=activity_name, type=activity_type) ) @commands.Cog.listener() async def on_member_join(self, member): if self.client.flood_mode: return await self.client.main_guild.system_channel.send( f'Welcome to the Engineer Man Discord Server, {member.mention}\n' 'Please take a moment to review the server rules in <#484103976296644608>, ' 'say `felix help` to learn about available commands, ' 'and finally, please be kind and decent to one another and enjoy your stay.' 
) # ---------------------------------------------- # Error handler # ---------------------------------------------- @commands.Cog.listener() async def on_command_error(self, ctx, error): if isinstance(error, commands.CommandNotFound): return if isinstance(error, commands.CommandOnCooldown): await ctx.send(error) return if isinstance(error, commands.MissingRequiredArgument): par = str(error.param) missing = par.split(": ")[0] if ':' in par: missing_type = ' (' + str(par).split(": ")[1] + ')' else: missing_type = '' await ctx.send( f'Missing parameter: `{missing}{missing_type}`' + f'\nIf you are not sure how to use the command, try running ' + f'`felix help {ctx.command.qualified_name}`' ) return if isinstance(error, commands.CheckFailure): await ctx.send('Sorry, you are not allowed to run this command.') return if isinstance(error, commands.BadArgument): # It's in an embed to prevent mentions from working embed = Embed( title='Error', description=str(error), color=0x2ECC71 ) await ctx.send(embed=embed) return if isinstance(error, commands.UnexpectedQuoteError): await ctx.send('`Unexpected quote encountered`') return # In case of an unhandled error -> Save the error + current datetime + ctx + original text # so it can be accessed later with the error command await ctx.send('Sorry, something went wrong. 
The Error was saved - we will look into it.') self.client.last_errors.append((error, datetime.utcnow(), ctx, ctx.message.content)) await self.client.change_presence( activity=Activity(name='ERROR encountered', url=None, type=3) ) print(f'Ignoring exception in command {ctx.command}:', flush=True) traceback.print_exception( type(error), error, error.__traceback__ ) print('-------------------------------------------------------------', flush=True) def reload_config(self): with open("../config.json") as conffile: self.client.config = json.load(conffile) def get_version_info(self): version = 'unknown' date = 'unknown' try: gitlog = subprocess.check_output( ['git', 'log', '-n', '1', '--date=iso']).decode() for line in gitlog.split('\n'): if line.startswith('commit'): version = line.split(' ')[1] elif line.startswith('Date'): date = line[5:].strip() date = date.replace(' +', '+').replace(' ', 'T') else: pass except Exception as e: self.client.last_errors.append((e, datetime.utcnow(), None)) raise e return (version, date) async def get_remote_commits(self): last_commit = self.get_version_info()[0] ext = f'?per_page=10&sha=master' repo = 'engineer-man/felix' nxt = f'https://api.github.com/repos/{repo}/commits{ext}' repo_data = [] repo_shas = [] while last_commit not in repo_shas: async with self.client.session.get(nxt) as response: r = await response.json() repo_data += r repo_shas = [x['sha'] for x in repo_data] try: nxt = r.links['next']['url'] except: nxt = '' num_comm = repo_shas.index(last_commit) return (num_comm, repo_data[0:(num_comm if num_comm > 10 else 10)]) def crawl_cogs(self, directory='cogs'): cogs = [] for element in listdir(directory): if element == 'samples': continue abs_el = path.join(directory, element) if path.isdir(abs_el): cogs += self.crawl_cogs(abs_el) else: filename, ext = path.splitext(element) if ext == '.py': dot_dir = directory.replace('\\', '.') dot_dir = dot_dir.replace('/', '.') cogs.append(f'{dot_dir}.' 
+ filename) return cogs # ---------------------------------------------- # Function to disply the version # ---------------------------------------------- @commands.command( name='version', brief='Show current version of felix', description='Show current version and changelog of felix', hidden=True, ) async def version(self, ctx): #await ctx.trigger_typing() version, date = self.get_version_info() num_commits, remote_data = await self.get_remote_commits() status = "I am up to date with 'origin/master'" changelog = 'Changelog:\n' if num_commits: status = f"I am [{num_commits}] commits behind 'origin/master'"\ f" [{remote_data[0]['commit']['author']['date']}]" for i, commit in enumerate(remote_data): commitmessage = commit['commit']['message'] if 'merge pull' in commitmessage.lower(): continue changelog += ('+ ' if i < num_commits else '* ') \ + commitmessage.split('\n')[0] + '\n' await ctx.send( f'```css\nCurrent Version: [{version[:7]}].from [{date}]' + f'\n{status}``````diff\n{changelog}```' ) # ---------------------------------------------- # Function to load extensions # ---------------------------------------------- @commands.command( name='load', brief='Load bot extension', description='Load bot extension\n\nExample: felix load cogs.stats', hidden=True, ) async def load_extension(self, ctx, extension_name): for cog_name in self.crawl_cogs(): if extension_name in cog_name: target_extension = cog_name break try: self.client.load_extension(target_extension) except Exception as e: self.client.last_errors.append((e, datetime.utcnow(), ctx)) await ctx.send(f'```py\n{type(e).__name__}: {str(e)}\n```') return await ctx.send(f'```css\nExtension [{target_extension}] loaded.```') # ---------------------------------------------- # Function to unload extensions # ---------------------------------------------- @commands.command( name='unload', brief='Unload bot extension', description='Unload bot extension\n\nExample: felix unload cogs.stats', hidden=True, ) async def 
unload_extension(self, ctx, extension_name):
        # Find the first loaded extension whose dotted path contains the
        # requested (possibly partial) name.
        for cog_name in self.client.extensions:
            if extension_name in cog_name:
                target_extension = cog_name
                break
        # NOTE(review): if nothing matched, target_extension is unbound and
        # the next line raises NameError - confirm intended behaviour.
        if target_extension.lower() in 'cogs.management':
            # This cog hosts the load/unload commands, so it may only be
            # reloaded, never unloaded.
            await ctx.send(
                f"```diff\n- {target_extension} can't be unloaded"
                + f"\n+ try felix reload {target_extension}!```"
            )
            return
        if self.client.extensions.get(target_extension) is None:
            return
        self.client.unload_extension(target_extension)
        await ctx.send(f'```css\nExtension [{target_extension}] unloaded.```')

    # ----------------------------------------------
    # Function to reload extensions
    # ----------------------------------------------
    @commands.command(
        name='reload',
        brief='Reload bot extension',
        description='Reload bot extension\n\nExample: felix reload cogs.stats',
        hidden=True,
        aliases=['re']
    )
    async def reload_extension(self, ctx, extension_name):
        # 'all' reloads this cog first, then every other loaded extension.
        target_extensions = []
        if extension_name == 'all':
            target_extensions = [__name__] + \
                [x for x in self.client.extensions if not x == __name__]
        else:
            for cog_name in self.client.extensions:
                if extension_name in cog_name:
                    target_extensions = [cog_name]
                    break
        if not target_extensions:
            return
        result = []
        for ext in target_extensions:
            try:
                self.client.reload_extension(ext)
                result.append(f'Extension [{ext}] reloaded.')
            except Exception as e:
                # Keep the failure in the shared error log for later review.
                # NOTE(review): this stores a 3-tuple while print_traceback
                # unpacks 4 values - confirm.
                self.client.last_errors.append((e, datetime.utcnow(), ctx))
                result.append(f'#ERROR loading [{ext}]')
                continue
        result = '\n'.join(result)
        await ctx.send(f'```css\n{result}```')

    # ----------------------------------------------
    # Function to get bot extensions
    # ----------------------------------------------
    @commands.command(
        name='cogs',
        brief='Get loaded cogs',
        description='Get loaded cogs',
        aliases=['extensions'],
        hidden=True,
    )
    async def print_cogs(self, ctx):
        # Show loaded extensions alongside the ones found on disk by
        # crawl_cogs that are not currently loaded.
        loaded = self.client.extensions
        unloaded = [x for x in self.crawl_cogs() if x not in loaded]
        response = ['\n[Loaded extensions]'] + ['\n ' + x for x in loaded]
        response += ['\n[Unloaded extensions]'] + \
            ['\n ' + x for x in unloaded]
        await ctx.send(f'```css{"".join(response)}```')
        return True

    # ----------------------------------------------
    # Function Group to clear channel of messages
    # ----------------------------------------------
    @commands.group(
        invoke_without_command=True,
        name='list',
        hidden=True
    )
    async def _list(self, ctx):
        """List stuff"""
        await ctx.send_help('list')
        return True

    @_list.command(
        name='duplicates'
    )
    async def duplicates(self, ctx):
        """List duplicate usernames"""
        name_count = {}
        aka = {}
        pages = []
        usernames = [(x.name, x.display_name) for x in ctx.guild.members]
        # Column width used to align names in the output.
        l_max = max([len(x[0]) for x in usernames]) + 1
        for name, display_name in usernames:
            name_count[name] = name_count.get(name, 0) + 1
            if not name == display_name:
                aka[name] = aka.get(name, []) + [display_name]
        page = []
        # Names sorted by frequency; the first unique name ends the scan.
        for key, value in sorted(name_count.items(), key=lambda x: x[1], reverse=True):
            if value == 1:
                break
            if len('\n'.join(page)) > 1900:
                # Stay under Discord's 2000 character message limit.
                pages.append(page)
                page = []
            a = ', '.join(aka.get(key, []))
            page.append(f'{value}x {key.ljust(l_max)}' + f'aka: {a}' * bool(a))
        if page:
            pages.append(page)
        if not pages:
            await ctx.send('No duplicate usernames found')
        for n, page in enumerate(pages):
            await ctx.send(f'{n+1}/{len(pages)}\n```' + '\n'.join(page) + '```')

    @_list.command(
        name='earliest'
    )
    async def earliest(self, ctx, n: int = 50, start: int = 0):
        """List earliest Members"""
        sorted_members = sorted(self.client.main_guild.members,
                                key=lambda x: x.joined_at)
        await ctx.send(
            '```\n' +
            '\n'.join(f'{x.name} ({x.joined_at.strftime("%Y-%m-%d")})'
                      for x in sorted_members[start:start+n]) +
            '\n```'
        )

    @_list.command(
        name='oldest'
    )
    async def oldest(self, ctx, n: int = 50, start: int = 0):
        """List oldest Member accounts"""
        sorted_members = sorted(self.client.main_guild.members,
                                key=lambda x: x.created_at)
        await ctx.send(
            '```\n' +
            '\n'.join(f'{x.name} ({x.created_at.strftime("%Y-%m-%d")})'
                      for x in sorted_members[start:start+n]) +
            '\n```'
        )

    # ----------------------------------------------
    # Function to get the date a member joined
    # ----------------------------------------------
    @commands.command(
        name='joined',
        hidden=True,
    )
    async def joined(self, ctx, members: commands.Greedy[Member]):
        """Print the date a member joined"""
        if not members:
            raise commands.BadArgument('Please specify at least 1 member')
        #await ctx.trigger_typing()
        result = []
        now = datetime.now(tz=timezone.utc)
        for member in members:
            join = member.joined_at
            if not join:
                result.append(f'No join date found for {member.name}')
                continue
            difference = now - join
            # split(".") trims microseconds from the ISO timestamp.
            result.append(
                f'{member.name} joined [{join.isoformat().split(".")[0]}] - '
                f'[{difference.days}] days and '
                f'[{difference.seconds / 3600:.1f}] hours ago'
            )
        if not result:
            return
        await ctx.send('```css\n' + '\n'.join(result) + '\n```')

    @commands.group(
        invoke_without_command=True,
        name='error',
        hidden=True,
        aliases=['errors']
    )
    async def error(self, ctx, n: typing.Optional[int] = None):
        """Show a concise list of stored errors"""
        if n is not None:
            # A bare index argument is a shortcut for 'error traceback n'.
            await self.print_traceback(ctx, n)
            return
        NUM_ERRORS_PER_PAGE = 15
        error_log = self.client.last_errors
        if not error_log:
            await ctx.send('Error log is empty')
            return
        response = [f'```css\nNumber of stored errors: {len(error_log)}']
        for i, exc_tuple in enumerate(error_log):
            # *_ tolerates entries with or without the stored message text.
            exc, date, error_source, *_ = exc_tuple
            call_info = (
                f'CMD: {error_source.invoked_with}'
                if isinstance(error_source, commands.Context) else 'outside command'
            )
            response.append(
                f'{i}: ['
                + date.isoformat().split('.')[0]
                + '] - ['
                + call_info
                + f']\nException: {exc}'
            )
            if i % NUM_ERRORS_PER_PAGE == NUM_ERRORS_PER_PAGE-1:
                # Flush a full page and open the next code block.
                response.append('```')
                await ctx.send('\n'.join(response))
                response = [f'```css']
        if len(response) > 1:
            response.append('```')
            await ctx.send('\n'.join(response))

    @error.command(
        name='clear',
        aliases=['delete'],
    )
    async def error_clear(self, ctx, n: int = None):
        """Clear error with index [n]"""
        if n is None:
            self.client.last_errors = []
            await ctx.send('Error log cleared')
        else:
            # NOTE(review): an out-of-range index raises IndexError here.
            self.client.last_errors.pop(n)
            await ctx.send(f'Deleted error #{n}')

    @error.command(
        name='traceback',
        aliases=['tb'],
    )
    async def error_traceback(self, ctx, n: int = None):
        """Print the traceback of error [n] from the error log"""
        await self.print_traceback(ctx, n)

    async def print_traceback(self, ctx, n):
        # Send the stored traceback for error [n], chunked so each message
        # stays under Discord's 2000 character limit.
        error_log = self.client.last_errors
        if not error_log:
            await ctx.send('Error log is empty')
            return
        if n is None:
            await ctx.send('Please specify an error index')
            await self.client.get_command('error').invoke(ctx)
            return
        if n >= len(error_log) or n < 0:
            await ctx.send('Error index does not exist')
            return
        # NOTE(review): entries appended as 3-tuples (e.g. by
        # reload_extension above) would raise ValueError here - confirm.
        exc, date, error_source, orig_content = error_log[n]
        delta = (datetime.utcnow() - date).total_seconds()
        hours = int(delta // 3600)
        seconds = int(delta - (hours * 3600))
        delta_str = f'{hours} hours and {seconds} seconds ago'
        tb = ''.join(
            traceback.format_exception(type(exc), exc, exc.__traceback__)
        )
        response = [f'`Error occured {delta_str}`']
        if error_source is not None:
            response.append(
                f'`Server:{error_source.guild.name} | Channel: {error_source.channel.name}`'
            )
            response.append(
                f'`User: {error_source.author.name}#{error_source.author.discriminator}`'
            )
            if isinstance(error_source, commands.Context):
                response.append(f'`Command: {error_source.invoked_with}`')
                response.append(error_source.message.jump_url)
            else:
                response.append(f'`Command: No Command`')
                response.append(error_source.jump_url)
        response.append(f'```python\n')
        num_chars = sum(len(line) for line in response)
        for line in tb.split('\n'):
            num_chars += len(line)
            response.append(line)
            if num_chars > 1900:
                response.append('```')
                await ctx.send('\n'.join(response))
                response = ['```python\n']
                num_chars = 0
        response.append('```')
        await ctx.send('\n'.join(response))
        if error_source is not None:
            e = Embed(title='Full command that caused the error:',
                      description=orig_content)
            e.set_footer(text=error_source.author.display_name,
                         icon_url=error_source.author.display_avatar)
            await ctx.send(embed=e)


def setup(client):
    # Entry point used by discord.py's load_extension machinery.
    client.add_cog(Management(client))
from importlib import import_module

from app.objects.secondclass.c_fact import Fact
from app.objects.secondclass.c_relationship import Relationship
from app.objects.secondclass.c_rule import Rule
from app.service.interfaces.i_knowledge_svc import KnowledgeServiceInterface
from app.utility.base_knowledge_svc import BaseKnowledgeService
from app.utility.base_service import BaseService


class KnowledgeService(KnowledgeServiceInterface, BaseService):
    """Facade that forwards fact/relationship/rule operations to a pluggable
    knowledge-module backend configured under 'app.knowledge_svc.module',
    falling back to BaseKnowledgeService when loading fails."""

    def __init__(self):
        self.log = self.add_service('knowledge_svc', self)
        target_module = self.get_config('app.knowledge_svc.module')
        try:
            # NOTE(review): the empty module_info dict always raises KeyError
            # inside _load_module, so this currently always falls back to
            # the default backend - confirm the intended payload.
            self.__loaded_knowledge_module = self._load_module(target_module, {})
        except Exception as e:
            # The original re-read the config inside the f-string while
            # reusing the outer double quote - a SyntaxError before
            # Python 3.12. Log the value fetched above instead.
            self.log.warning(f"Unable to properly load knowledge service module "
                             f"{target_module} ({e}). Reverting to default.")
            self.__loaded_knowledge_module = BaseKnowledgeService()

    @staticmethod
    def _load_module(module_type, module_info):
        """Import module_info['module'] and instantiate its attribute named
        *module_type*, passing module_info to the constructor."""
        module = import_module(module_info['module'])
        return getattr(module, module_type)(module_info)

    # -- Fact API --

    async def add_fact(self, fact, constraints=None):
        """Add a new fact to the knowledge service (no-op for non-Fact input)."""
        if isinstance(fact, Fact):
            return self.__loaded_knowledge_module._add_fact(fact, constraints)

    async def update_fact(self, criteria, updates):
        """Update an existing fact."""
        return self.__loaded_knowledge_module._update_fact(criteria, updates)

    async def get_facts(self, criteria, restrictions=None):
        """Return facts sorted and filtered by the given criteria.

        Filters on input (values, groupings) as well as underlying
        mechanisms such as fact mutexes.
        """
        return self.__loaded_knowledge_module._get_facts(criteria, restrictions)

    async def delete_fact(self, criteria):
        """Delete an existing fact based on the provided information."""
        return self.__loaded_knowledge_module._delete_fact(criteria)

    async def get_meta_facts(self, meta_fact=None, agent=None, group=None):
        """Return the complete set of facts for a meta-fact construct."""
        return self.__loaded_knowledge_module._get_meta_facts(meta_fact, agent, group)

    async def get_fact_origin(self, fact):
        """Retrieve the specific origin of a fact.

        If it was learned in the current operation, parse through links to
        identify the host it was discovered on.
        """
        return self.__loaded_knowledge_module._get_fact_origin(fact)

    # -- Relationships API --

    async def get_relationships(self, criteria, restrictions=None):
        """Retrieve relationships from the knowledge service."""
        return self.__loaded_knowledge_module._get_relationships(criteria, restrictions)

    async def add_relationship(self, relationship, constraints=None):
        """Add a relationship (no-op for non-Relationship input)."""
        if isinstance(relationship, Relationship):
            return self.__loaded_knowledge_module._add_relationship(relationship, constraints)

    async def update_relationship(self, criteria, updates):
        """Update a relationship."""
        return self.__loaded_knowledge_module._update_relationship(criteria, updates)

    async def delete_relationship(self, criteria):
        """Remove a relationship from the knowledge service."""
        return self.__loaded_knowledge_module._delete_relationship(criteria)

    # --- Rule API ---

    async def add_rule(self, rule, constraints=None):
        """Add a rule to the knowledge service (no-op for non-Rule input).

        rule.action is one of [DENY, ALLOW, EXCLUSIVE, EXCLUSIVE_TRAIT,
        EXCLUSIVE_VALUE]. 'EXCLUSIVE_*' actions denote that the trait/value
        is made mutually exclusive in its use to the agent/group/operation
        it is specified for: the fact is bound to a mutex and only one
        action can use it at any one time.
        """
        if isinstance(rule, Rule):
            return self.__loaded_knowledge_module._add_rule(rule, constraints)

    async def get_rules(self, criteria, restrictions=None):
        """Retrieve rules from the knowledge service.

        NOTE(review): 'restrictions' is accepted but not forwarded to the
        backend - confirm whether it should be passed through.
        """
        return self.__loaded_knowledge_module._get_rules(criteria)

    async def delete_rule(self, criteria):
        """Delete a rule from the knowledge service."""
        return self.__loaded_knowledge_module._delete_rule(criteria)

    # --- New Inferencing API ---
    # NOT IMPLEMENTED YET

    async def similar_facts(self, fact, agent, group):
        """Return facts that are close to the supplied fact.

        Ex:
        - other facts for an agent with given trait/value
        - other facts for the group/agent
        - other facts with same value
        """
        return self.__loaded_knowledge_module._similar_facts(fact, agent, group)

    async def fact_value_distribution(self, criteria, agent=None, group=None):
        """Return the value distribution for the given fact, optionally
        filtered down to an agent/group.

        Ex: fact value distribution for 'host.user.name' on group
        'workstations':
        --> [{'admin': .4}, {'system': .4}, {'michael': .1}, {'workstation1': .1}]
        """
        return self.__loaded_knowledge_module._fact_value_distribution(criteria, agent, group)

    async def best_guess(self, criteria, agent=None, group=None):
        """Wrapper around fact_value_distribution returning the most
        probable value only."""
        return self.__loaded_knowledge_module._best_guess(criteria, agent, group)

    async def best_facts(self, agent=None, group=None, metric='usage_success'):
        """Return the best facts based on the requested metric
        (e.g. 'usage_success', 'most_recent_success', ...).

        NOTE(review): this forwards to _best_guess - it looks like it should
        call a _best_facts backend hook; confirm before changing.
        """
        return self.__loaded_knowledge_module._best_guess(agent, group, metric)

    async def save_state(self):
        """Persist knowledge service state to disk."""
        return await self.__loaded_knowledge_module._save_state()

    async def restore_state(self):
        """Restore knowledge service state from disk."""
        return await self.__loaded_knowledge_module._restore_state()
from importlib import import_module

from app.objects.secondclass.c_fact import Fact
from app.objects.secondclass.c_relationship import Relationship
from app.objects.secondclass.c_rule import Rule
from app.service.interfaces.i_knowledge_svc import KnowledgeServiceInterface
from app.utility.base_knowledge_svc import BaseKnowledgeService
from app.utility.base_service import BaseService


class KnowledgeService(KnowledgeServiceInterface, BaseService):
    """Facade over a pluggable knowledge-store backing module.

    Every public coroutine delegates to the loaded backing module; if the
    configured module cannot be loaded, BaseKnowledgeService is used.
    """

    def __init__(self):
        self.log = self.add_service('knowledge_svc', self)
        target_module = self.get_config('app.knowledge_svc.module')
        try:
            # NOTE(review): _load_module() indexes module_info['module'], so
            # passing {} here always raises and falls back to the default --
            # confirm the expected shape of the 'app.knowledge_svc.module'
            # config entry.
            self.__loaded_knowledge_module = self._load_module(target_module, {})
        except Exception as e:
            self.log.warning(f"Unable to properly load knowledge service module "
                             f"{self.get_config('app.knowledge_svc.module')} ({e}). Reverting to default.")
            self.__loaded_knowledge_module = BaseKnowledgeService()

    @staticmethod
    def _load_module(module_type, module_info):
        """Import module_info['module'] and instantiate its `module_type` class."""
        module = import_module(module_info['module'])
        return getattr(module, module_type)(module_info)

    # -- Fact API --

    async def add_fact(self, fact, constraints=None):
        """Add a new fact to the knowledge service.

        Returns None silently when `fact` is not a Fact.
        """
        if isinstance(fact, Fact):
            return self.__loaded_knowledge_module._add_fact(fact, constraints)

    async def update_fact(self, criteria, updates):
        """Update an existing fact."""
        return self.__loaded_knowledge_module._update_fact(criteria, updates)

    async def get_facts(self, criteria, restrictions=None):
        """Sort and filter facts based on input (values, groupings) as well as
        underlying mechanisms such as fact mutexes."""
        return self.__loaded_knowledge_module._get_facts(criteria, restrictions)

    async def delete_fact(self, criteria):
        """Delete an existing fact based on the provided information."""
        return self.__loaded_knowledge_module._delete_fact(criteria)

    async def get_meta_facts(self, meta_fact=None, agent=None, group=None):
        """Return the complete set of facts associated with a meta-fact construct."""
        return self.__loaded_knowledge_module._get_meta_facts(meta_fact, agent, group)

    async def get_fact_origin(self, fact):
        """Retrieve the specific origin of a fact.

        If it was learned in the current operation, parse through links to
        identify the host it was discovered on.
        """
        return self.__loaded_knowledge_module._get_fact_origin(fact)

    # -- Relationships API --

    async def get_relationships(self, criteria, restrictions=None):
        """Retrieve relationships from the knowledge service."""
        return self.__loaded_knowledge_module._get_relationships(criteria, restrictions)

    async def add_relationship(self, relationship, constraints=None):
        """Add a relationship to the knowledge service.

        Returns None silently when `relationship` is not a Relationship.
        """
        if isinstance(relationship, Relationship):
            return self.__loaded_knowledge_module._add_relationship(relationship, constraints)

    async def update_relationship(self, criteria, updates):
        """Update a relationship."""
        return self.__loaded_knowledge_module._update_relationship(criteria, updates)

    async def delete_relationship(self, criteria):
        """Remove a relationship from the knowledge service."""
        return self.__loaded_knowledge_module._delete_relationship(criteria)

    # --- Rule API ---

    async def add_rule(self, rule, constraints=None):
        """Add a rule to the knowledge service.

        Args:
            rule: rule.action is one of [DENY, ALLOW, EXCLUSIVE,
                EXCLUSIVE_TRAIT, EXCLUSIVE_VALUE]. 'EXCLUSIVE_*' actions
                denote that the trait/value will be made mutually exclusive
                in its use to the agent/group/operation it is specified for.
                Essentially a fact is bound to a mutex, and only one action
                can be using the fact at any one time.
        """
        if isinstance(rule, Rule):
            return self.__loaded_knowledge_module._add_rule(rule, constraints)

    async def get_rules(self, criteria, restrictions=None):
        """Retrieve rules from the knowledge service.

        NOTE(review): `restrictions` is accepted but not forwarded to the
        backing module -- confirm whether that is intentional.
        """
        return self.__loaded_knowledge_module._get_rules(criteria)

    async def delete_rule(self, criteria):
        """Delete a rule from the knowledge service."""
        return self.__loaded_knowledge_module._delete_rule(criteria)

    # --- New Inferencing API ---
    # NOT IMPLEMENTED YET

    async def similar_facts(self, fact, agent, group):
        """Return facts that are close to the supplied fact.

        Ex:
            - other facts for an agent with given trait/value
            - other facts for the group/agent
            - other facts with same value
        """
        return self.__loaded_knowledge_module._similar_facts(fact, agent, group)

    async def fact_value_distribution(self, criteria, agent=None, group=None):
        """Return the value distribution for the given fact, optionally
        filtered down to agent/group.

        Ex: fact value distribution for 'host.user.name' on group 'workstations':
            --> [{'admin': .4}, {'system': .4}, {'michael': .1}, {'workstation1': .1}]
        """
        return self.__loaded_knowledge_module._fact_value_distribution(criteria, agent, group)

    async def best_guess(self, criteria, agent=None, group=None):
        """Wrapper around fact_value_distribution; return the most probable value."""
        return self.__loaded_knowledge_module._best_guess(criteria, agent, group)

    async def best_facts(self, agent=None, group=None, metric='usage_success'):
        """Return the best facts based on the requested metric.

        Args:
            metric: one of ['usage_success', 'most_recent_success', ...]
        """
        # BUG FIX: previously delegated to _best_guess(agent, group, metric),
        # a copy-paste from best_guess() that passed `agent` where `criteria`
        # is expected. Delegate to the matching _best_facts instead.
        return self.__loaded_knowledge_module._best_facts(agent, group, metric)

    async def save_state(self):
        """Save knowledge service state to disk."""
        return await self.__loaded_knowledge_module._save_state()

    async def restore_state(self):
        """Restore knowledge service state from disk."""
        return await self.__loaded_knowledge_module._restore_state()
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")

from System import *
from QuantConnect import *
from QuantConnect.Orders import *
from QuantConnect.Algorithm import *
from QuantConnect.Algorithm.Framework import *
from QuantConnect.Algorithm.Framework.Selection import *
from Alphas.RsiAlphaModel import RsiAlphaModel
from Portfolio.EqualWeightingPortfolioConstructionModel import EqualWeightingPortfolioConstructionModel
from Execution.VolumeWeightedAveragePriceExecutionModel import VolumeWeightedAveragePriceExecutionModel
from datetime import timedelta

### <summary>
### Regression algorithm for the VolumeWeightedAveragePriceExecutionModel.
### This algorithm shows how the execution model works to split up orders and
### submit them only when the price is on the favorable side of the intraday VWAP.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="using quantconnect" />
### <meta name="tag" content="trading and orders" />
class VolumeWeightedAveragePriceExecutionModelRegressionAlgorithm(QCAlgorithmFramework):
    '''Regression algorithm for the VolumeWeightedAveragePriceExecutionModel.
    This algorithm shows how the execution model works to split up orders and
    submit them only when the price is on the favorable side of the intraday VWAP.'''

    def Initialize(self):
        """Configure universe, alpha, portfolio construction and VWAP execution."""
        self.UniverseSettings.Resolution = Resolution.Minute

        self.SetStartDate(2013,10,7)
        self.SetEndDate(2013,10,11)
        self.SetCash(1000000)

        self.SetUniverseSelection(ManualUniverseSelectionModel([
            Symbol.Create('AIG', SecurityType.Equity, Market.USA),
            Symbol.Create('BAC', SecurityType.Equity, Market.USA),
            Symbol.Create('IBM', SecurityType.Equity, Market.USA),
            Symbol.Create('SPY', SecurityType.Equity, Market.USA)
        ]))

        # using hourly rsi to generate more insights
        self.SetAlpha(RsiAlphaModel(14, Resolution.Hour))
        self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())
        self.SetExecution(VolumeWeightedAveragePriceExecutionModel())

        self.InsightsGenerated += self.OnInsightsGenerated

    def OnInsightsGenerated(self, algorithm, data):
        # BUG FIX: the original reused double quotes inside a double-quoted
        # f-string (f"... {", ".join(...)}"), which is a SyntaxError before
        # Python 3.12. Use single quotes for the separator literal.
        self.Log(f"{self.Time}: {', '.join(str(x) for x in data.Insights)}")

    def OnOrderEvent(self, orderEvent):
        self.Log(f"{self.Time}: {orderEvent}")
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")

from System import *
from QuantConnect import *
from QuantConnect.Orders import *
from QuantConnect.Algorithm import *
from QuantConnect.Algorithm.Framework import *
from QuantConnect.Algorithm.Framework.Selection import *
from Alphas.RsiAlphaModel import RsiAlphaModel
from Portfolio.EqualWeightingPortfolioConstructionModel import EqualWeightingPortfolioConstructionModel
from Execution.VolumeWeightedAveragePriceExecutionModel import VolumeWeightedAveragePriceExecutionModel
from datetime import timedelta

### <summary>
### Regression algorithm for the VolumeWeightedAveragePriceExecutionModel.
### This algorithm shows how the execution model works to split up orders and
### submit them only when the price is on the favorable side of the intraday VWAP.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="using quantconnect" />
### <meta name="tag" content="trading and orders" />
class VolumeWeightedAveragePriceExecutionModelRegressionAlgorithm(QCAlgorithmFramework):
    '''Regression algorithm for the VolumeWeightedAveragePriceExecutionModel.
    This algorithm shows how the execution model works to split up orders and
    submit them only when the price is on the favorable side of the intraday VWAP.'''

    def Initialize(self):
        # Minute-resolution universe over four US equities, fixed one-week
        # backtest window.
        self.UniverseSettings.Resolution = Resolution.Minute

        self.SetStartDate(2013,10,7)
        self.SetEndDate(2013,10,11)
        self.SetCash(1000000)

        self.SetUniverseSelection(ManualUniverseSelectionModel([
            Symbol.Create('AIG', SecurityType.Equity, Market.USA),
            Symbol.Create('BAC', SecurityType.Equity, Market.USA),
            Symbol.Create('IBM', SecurityType.Equity, Market.USA),
            Symbol.Create('SPY', SecurityType.Equity, Market.USA)
        ]))

        # using hourly rsi to generate more insights
        self.SetAlpha(RsiAlphaModel(14, Resolution.Hour))
        self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())
        self.SetExecution(VolumeWeightedAveragePriceExecutionModel())

        # Log every batch of insights the framework emits.
        self.InsightsGenerated += self.OnInsightsGenerated

    def OnInsightsGenerated(self, algorithm, data):
        # Log a comma-separated summary of the generated insights.
        self.Log(f"{self.Time}: {', '.join(str(x) for x in data.Insights)}")

    def OnOrderEvent(self, orderEvent):
        # Log each order event (VWAP execution splits orders into many fills).
        self.Log(f"{self.Time}: {orderEvent}")
import abc
import importlib
import json
import math
import os
import random
import sys
import time
import datetime

import colorama
import click
import infirunner.capsule
import numpy as np
import scipy.stats as sps
from colorama import Style, Fore
from statsmodels.nonparametric.api import KDEMultivariate

from infirunner.util import make_trial_id, UniformBernoulli
from infirunner.generator import Generator
from infirunner.watch import ExperimentWatcher, FastTSVTail

colorama.init()


def int_ceil(x):
    """Ceiling as a plain int."""
    return int(math.ceil(x))


def int_floor(x):
    """Floor as a plain int."""
    return int(math.floor(x))


def log_print(*args):
    """Print to stderr with a dimmed timestamp prefix."""
    print(Fore.LIGHTBLACK_EX + f'[{datetime.datetime.now()}]' + Style.RESET_ALL,
          *args, file=sys.stderr)


# Note that the metric is always assumed to be optimized towards minimum.
class BracketElement:
    """One (trial, budget) slot inside a Hyperband bracket round."""

    def __init__(self, bracket, round, budget, metric, trial, active, promoted):
        self.bracket = bracket
        self.round = round
        self.budget = budget
        self.metric = metric
        self.trial = trial
        self.active = active
        self.promoted = promoted

    def __eq__(self, other):
        return self is other or (self.bracket == other.bracket and
                                 self.round == other.round and
                                 self.budget == other.budget and
                                 self.metric == other.metric and
                                 self.trial == other.trial and
                                 self.active == other.active and
                                 self.promoted == other.promoted)

    def __repr__(self):
        return (f'<BracketElement bracket={self.bracket} round={self.round} budget={self.budget} '
                f'metric={self.metric} trial={self.trial} active={self.active} promoted={self.promoted}>')

    def serialize(self):
        # BUG FIX: 'metric' was previously omitted from the serialized dict,
        # so deserialize() raised KeyError on any round-tripped element,
        # breaking state reload via load_hyperband_data().
        return {
            'bracket': self.bracket,
            'round': self.round,
            'budget': self.budget,
            'metric': self.metric,
            'trial': self.trial,
            'active': self.active,
            'promoted': self.promoted
        }

    @staticmethod
    def deserialize(data):
        return BracketElement(bracket=data['bracket'],
                              round=data['round'],
                              budget=data['budget'],
                              # .get() tolerates old save files without 'metric'
                              metric=data.get('metric'),
                              trial=data['trial'],
                              active=data['active'],
                              promoted=data['promoted'])


class ParamGen(abc.ABC):
    """Base parameter generator bound to the experiment's active capsule."""

    def __init__(self, module, experiment_dir):
        importlib.import_module(module)
        self.capsule = infirunner.capsule.active_capsule
        self.experiment_dir = experiment_dir

    def get_next_parameter(self):
        """Sample a fresh random parameter set from the capsule."""
        return self.capsule.gen_params(use_default=False, skip_const=True)


class RandomParamGen(ParamGen):
    pass


class BOHBParamGen(ParamGen):
    """BOHB-style model-based generator: TPE ratio of good/bad KDE densities."""

    def __init__(self, module, experiment_dir, random_ratio, random_sample_size,
                 guided_ratio, guided_sample_size, result_size_threshold=None,
                 good_ratio=0.15, early_stop_ratio=1 / math.e, model_cache_time=30,
                 mode='minimize', min_bandwidth=1e-3,
                 bandwidth_estimation='normal_reference', bandwidth_factor=3.):
        super().__init__(module, experiment_dir)
        assert 0 <= random_ratio <= 1
        assert 0 <= guided_ratio <= 1
        assert 0 <= random_ratio + guided_ratio <= 1
        sample_params = super().get_next_parameter()
        # Need at least dim+1 finished trials before the KDE model is usable.
        if result_size_threshold is None:
            result_size_threshold = len(sample_params) + 1
        else:
            result_size_threshold = max(len(sample_params) + 1, result_size_threshold)
        log_print(Fore.LIGHTBLACK_EX + 'model-based threshold is', result_size_threshold)
        self.random_dice = UniformBernoulli(random_ratio)
        self.guided_dice = UniformBernoulli(guided_ratio / (1 - random_ratio))
        self.good_ratio = good_ratio
        self.early_stop_ratio = early_stop_ratio
        self.random_sample_size = random_sample_size
        self.guided_sample_size = guided_sample_size
        self.result_size_threshold = result_size_threshold
        self.model_cache_time = model_cache_time
        self.last_stats_collect_time = 0.
        self.last_stats = (None, None)
        self.last_models = (None, None)
        self.is_maximize = mode == 'maximize'
        self.min_bandwidth = min_bandwidth
        self.bandwidth_estimation = bandwidth_estimation
        self.bandwidth_factor = bandwidth_factor
        self.kde_vartypes, self.kde_data_encoder, self.kde_data_decoder, self.kde_data_bounds = \
            self.make_kde_helpers()

    def make_kde_helpers(self):
        """Build (var_types, encoder, decoder, bounds) for the KDE space."""
        param_keys = list(super().get_next_parameter().keys())
        param_keys.sort()
        param_gen = self.capsule.param_gen
        var_types = ''.join(param_gen[key].var_type for key in param_keys)
        data_bounds = [param_gen[key].encoded_bounds() for key in param_keys]

        def data_encoder(data):
            return [param_gen[key].encode_as_numerical(data[key]) for key in param_keys]

        def data_decoder(data, old_params):
            ret = {}
            for idx, key in enumerate(param_keys):
                decoded = param_gen[key].decode_from_numerical(data[idx])
                # NOTE(review): this resamples unordered ('u') params when the
                # decoded value already differs from the old one -- the
                # condition looks like it may have been intended as `==`;
                # behavior preserved, confirm with the author.
                if var_types[idx] == 'u' and decoded != old_params[key]:
                    while True:
                        decoded = param_gen[key].get_next_value()
                        if decoded != old_params[key]:
                            break
                ret[key] = decoded
            return ret

        return var_types, data_encoder, data_decoder, data_bounds

    def get_trial_params(self, trial):
        """Load the saved parameter dict of a trial from its last_state.json."""
        with open(os.path.join(self.experiment_dir, trial, 'last_state.json'),
                  'r', encoding='utf-8') as f:
            old_params = json.load(f)['params']
        return old_params

    def guided_modify_parameter(self, trial, model):
        """Perturb a good trial's params with (truncated) normals scaled by the KDE bandwidths."""
        old_params = self.get_trial_params(trial)
        old_params_encoded = self.kde_data_encoder(old_params)
        new_params_encoded = []
        for old_param, bw_, (lbound, ubound), vartype in zip(
                old_params_encoded, model.bw, self.kde_data_bounds, self.kde_vartypes):
            bw = bw_ * self.bandwidth_factor
            if lbound is None or ubound is None:
                new_params = np.random.normal(loc=old_param, scale=bw)
            else:
                # truncnorm bounds are expressed in standard deviations from loc
                new_params = sps.truncnorm.rvs((lbound - old_param) / bw,
                                               (ubound - old_param) / bw,
                                               loc=old_param, scale=bw)
            new_params_encoded.append(new_params)
        return self.kde_data_decoder(new_params_encoded, old_params)

    def get_suggested_next_parameter(self, goods, bads):
        """Propose the candidate maximizing log p_good(x) - log p_bad(x)."""
        good_model, bad_model = self.last_models
        if good_model is None or bad_model is None:
            good_model = KDEMultivariate(data=[self.kde_data_encoder(self.get_trial_params(t)) for t, _ in goods],
                                         var_type=self.kde_vartypes,
                                         bw=self.bandwidth_estimation)
            bad_model = KDEMultivariate(data=[self.kde_data_encoder(self.get_trial_params(t)) for t, _ in bads],
                                        var_type=self.kde_vartypes,
                                        bw=self.bandwidth_estimation)
            good_model.bw = np.clip(good_model.bw, self.min_bandwidth, None)
            bad_model.bw = np.clip(bad_model.bw, self.min_bandwidth, None)
            self.last_models = good_model, bad_model
        best_score = float('-inf')
        best_candidate = None
        use_guided = self.guided_dice()
        for _ in range(self.guided_sample_size if use_guided else self.random_sample_size):
            if use_guided:
                next_param = self.guided_modify_parameter(random.choice(goods)[0], good_model)
            else:
                next_param = super().get_next_parameter()
            good_score = np.log(np.clip(good_model.pdf(self.kde_data_encoder(next_param)), 1e-32, None))
            bad_score = np.log(np.clip(bad_model.pdf(self.kde_data_encoder(next_param)), 1e-32, None))
            score = good_score - bad_score
            if score > best_score:
                best_score = score
                best_candidate = next_param
        log_print(Fore.LIGHTBLACK_EX + 'proposing', 'guided' if use_guided else 'sieved',
                  'parameter with score', best_score)
        return best_candidate

    def collect_stats(self):
        """Split finished trials of the highest usable budget into goods/bads (cached)."""
        now = time.time()
        if now - self.last_stats_collect_time > self.model_cache_time:
            metrics = list(self.get_all_budget_metrics().items())
            metrics.sort(key=lambda x: x[0], reverse=True)
            goods = None
            bads = None
            for budget, trial_data in metrics:
                bads_ = [(trial, metric) for trial, metric in trial_data
                         if metric is None or not math.isfinite(metric)]
                goods_ = [(trial, metric) for trial, metric in trial_data
                          if metric is not None and math.isfinite(metric)]
                if len(goods_) >= self.result_size_threshold and len(goods_) + len(bads_) > self.result_size_threshold:
                    goods_.sort(key=lambda x: x[1], reverse=self.is_maximize)
                    good_size = int_ceil(len(goods_) * self.good_ratio)
                    # worst finished trials pad the bads pool up to the threshold
                    bads_ = list(reversed(goods_))[
                            :max(len(goods_) - good_size, self.result_size_threshold - len(bads_))] + bads_
                    goods_ = goods_[:max(good_size, self.result_size_threshold)]
                    log_print(Fore.LIGHTBLACK_EX + f'collected stats for budget {budget} with {len(goods_)} goods, '
                                                   f'{len(bads_)} bads')
                    log_print(Fore.LIGHTBLACK_EX + f'best good: {goods_[0][1]:10.4f}, best bad: {bads_[0][1]:10.4f}')
                    goods = goods_
                    bads = bads_
                    break
            if self.last_stats != (goods, bads):
                self.last_stats = (goods, bads)
                self.last_models = (None, None)
            self.last_stats_collect_time = now
            return goods, bads
        else:
            return self.last_stats

    def get_all_budget_metrics(self):
        """Map budget -> [(trial_dir, metric)], propagating nan metrics to higher budgets."""
        active_dirs = []
        for parent, dirs, files in os.walk(self.experiment_dir):
            active_dirs = dirs
            break
        metrics = {}
        nan_metrics = []
        for dir in active_dirs:
            try:
                with open(os.path.join(self.experiment_dir, dir, 'metric.tsv'), 'rb') as f:
                    for l in f:
                        budget, metric_time, metric_res = l.split(b'\t')
                        budget = int(budget)
                        budget_metric = metrics.setdefault(budget, [])
                        metric = float(metric_res)
                        budget_metric.append((dir, metric))
                        if not math.isfinite(metric):
                            nan_metrics.append((budget, dir))
            except FileNotFoundError:
                continue
        for budget in metrics.keys():
            for trial_budget, dir in nan_metrics:
                if budget > trial_budget:
                    metrics[budget].append((dir, float('nan')))
        return metrics

    def should_terminate_trials(self, trials):
        """Return the subset of `trials` whose metric is worse than the
        early-stop threshold of the previous budget level."""
        if not trials:
            return []
        trials = set(trials)
        should_terminate = set()
        metrics = self.get_all_budget_metrics()
        good_metrics_threshold = {}
        for budget, trials_data in metrics.items():
            budget_data = []
            for _, metric in trials_data:
                if math.isfinite(metric):
                    budget_data.append(metric)
            if len(budget_data) > self.result_size_threshold:
                budget_data.sort()  # ascending sort
                budget_data = budget_data[:int_ceil(len(budget_data) * self.early_stop_ratio)]
                good_metrics_threshold[budget] = budget_data[-1]
        for budget, trials_data in sorted(metrics.items(), key=lambda x: x[0], reverse=True):
            if budget == 0:
                continue
            prev_threshold = good_metrics_threshold.get(budget - 1)
            if prev_threshold is None:
                continue
            for trial, metric in trials_data:
                if trial not in trials:
                    continue
                if metric < prev_threshold:
                    continue
                log_print(Fore.LIGHTBLACK_EX + 'adding trial', trial, 'to termination list.',
                          'budget', budget, 'metric', metric,
                          'threshold', prev_threshold)
                should_terminate.add(trial)
        return list(should_terminate)

    def get_next_parameter(self):
        """Random sample with probability random_ratio, else model-based proposal."""
        if self.random_dice():
            log_print(Fore.LIGHTBLACK_EX + 'generate random parameter because dice says so')
            return super().get_next_parameter()
        else:
            goods, bads = self.collect_stats()
            if goods and bads:
                return self.get_suggested_next_parameter(goods, bads)
            else:
                log_print(Fore.LIGHTBLACK_EX + 'generate random parameter because not enough samples')
                return super().get_next_parameter()


class Hyperband:
    """Successive-halving bracket bookkeeping for one Hyperband iteration."""

    def __init__(self, min_budget, max_budget, reset_nan_trial=True, reduction_ratio=math.e):
        self.min_budget = min_budget
        self.max_budget = max_budget
        self.reduction_ratio = reduction_ratio
        self.bracket_max = int_floor(math.log(max_budget / min_budget, reduction_ratio))
        self.brackets = self.make_brackets()
        self.cur_bracket_idx = 0
        self.cur_round_idx = 0
        self.reset_nan_trial = reset_nan_trial

    def serialize(self):
        return {
            'min_budget': self.min_budget,
            'max_budget': self.max_budget,
            'reduction_ratio': self.reduction_ratio,
            'bracket_max': self.bracket_max,
            'brackets': [[[btrial.serialize() for btrial in round] for round in bracket]
                         for bracket in self.brackets],
            'cur_bracket_idx': self.cur_bracket_idx,
            'cur_round_idx': self.cur_round_idx,
            'reset_nan_trial': self.reset_nan_trial
        }

    @staticmethod
    def deserialize(data):
        self = Hyperband(min_budget=data['min_budget'],
                         max_budget=data['max_budget'],
                         reduction_ratio=data['reduction_ratio'],
                         reset_nan_trial=data['reset_nan_trial'])
        self.brackets = [[[BracketElement.deserialize(btrial) for btrial in round] for round in bracket]
                         for bracket in data['brackets']]
        self.cur_round_idx = data['cur_round_idx']
        self.cur_bracket_idx = data['cur_bracket_idx']
        self.bracket_max = data['bracket_max']
        return self

    def pprint_brackets(self):
        """Log a human-readable summary of every bracket/round."""
        for bracket_idx, bracket in enumerate(self.brackets):
            log_print(Fore.LIGHTBLACK_EX + f'bracket {bracket_idx}:')
            for round_idx, round in enumerate(bracket):
                actives = [e for e in round if e.active]
                dones = [e for e in round if e.metric is not None]
                good_dones = [e for e in dones if math.isfinite(e.metric)]
                if not round:
                    continue
                budget = round[0].budget
                to_print = (f'\tround {round_idx:1}: {len(round):3} trials with {budget:3} budgets, ' +
                            f'{len(actives):3} active, {len(dones):3} complete')
                if good_dones:
                    best_metric = min(e.metric for e in good_dones)
                    best_trial = [e.trial for e in dones if e.metric == best_metric][0]
                    to_print += f', {best_metric:10.4f} best {best_trial}'
                else:
                    to_print = Fore.LIGHTBLACK_EX + to_print
                log_print(to_print)

    def make_brackets(self):
        """Create the successive-halving bracket/round/slot structure."""
        brackets = []
        for bracket_idx in range(self.bracket_max, -1, -1):
            bracket = []
            init_n_trials = (self.bracket_max + 1) / (bracket_idx + 1) * (self.reduction_ratio ** bracket_idx)
            init_budget = self.max_budget / (self.reduction_ratio ** bracket_idx)
            for i in range(bracket_idx + 1):
                n_trials = int_ceil(init_n_trials / (self.reduction_ratio ** i))
                if i == bracket_idx:
                    budget = self.max_budget
                elif bracket_idx == self.bracket_max and i == 0:
                    budget = self.min_budget
                    n_trials = int_ceil(self.max_budget / self.min_budget)
                else:
                    budget = int_ceil(init_budget * (self.reduction_ratio ** i))
                bracket_trials = []
                for _ in range(n_trials):
                    bracket_trials.append(BracketElement(bracket=self.bracket_max - bracket_idx,
                                                         round=i,
                                                         budget=budget,
                                                         metric=None,
                                                         trial=None,
                                                         active=False,
                                                         promoted=False))
                bracket.append(bracket_trials)
            brackets.append(bracket)
        return brackets

    def is_round_complete(self, bracket_id, round_id):
        return all(e.trial is not None and not e.active for e in self.brackets[bracket_id][round_id])

    def is_complete(self):
        return all(e.trial is not None and not e.active for e in self.brackets[-1][-1])

    def request_trial(self):
        """Return the next BracketElement to run.

        - raises StopIteration when all brackets are complete (or cannot
          continue for lack of promotable trials);
        - returns None when the caller should wait;
        - otherwise returns a BracketElement; if its .trial is None the
          caller is responsible for filling it.
        """
        if self.cur_bracket_idx > self.bracket_max:
            self.mark_all_brackets()
            log_print(Fore.LIGHTGREEN_EX + 'All brackets complete')
            raise StopIteration
        cur_bracket = self.brackets[self.cur_bracket_idx]
        cur_round = cur_bracket[self.cur_round_idx]
        inactive_without_trial = [e for e in cur_round if e.trial is None and not e.active]
        if inactive_without_trial:
            ret = inactive_without_trial[0]
            if self.cur_round_idx == 0:
                # first round: caller generates a brand-new trial
                ret.active = True
                return ret
            else:
                # later rounds: promote the best unpromoted finisher of the
                # previous round into this slot
                last_round_completed_trials = [e for e in cur_bracket[self.cur_round_idx - 1]
                                               if e.metric is not None and math.isfinite(e.metric)
                                               and not e.promoted]
                if last_round_completed_trials:
                    last_round_completed_trials.sort(key=lambda e: e.metric)
                    best_available_trial = last_round_completed_trials[0]
                    best_available_trial.promoted = True
                    log_print(Fore.LIGHTBLACK_EX + 'promote best available trial',
                              best_available_trial.trial, best_available_trial.metric,
                              '(worst is', last_round_completed_trials[-1].metric, ')')
                    ret.trial = best_available_trial.trial
                    ret.active = True
                    # if no trial is present, the caller is responsible for filling it
                    return ret
                elif not self.is_round_complete(self.cur_bracket_idx, self.cur_round_idx - 1):
                    return None
                else:
                    log_print(Fore.LIGHTRED_EX + 'Insufficient previous rounders to continue', id(self))
                    self.mark_all_brackets()
                    raise StopIteration
        if self.is_round_complete(self.cur_bracket_idx, self.cur_round_idx):
            if self.cur_round_idx == len(cur_bracket) - 1:
                self.cur_round_idx = 0
                self.cur_bracket_idx += 1
            else:
                self.cur_round_idx += 1
            log_print(Fore.LIGHTBLACK_EX + str(id(self)), 'proceed to bracket', self.cur_bracket_idx,
                      'round', self.cur_round_idx)
            return self.request_trial()
        else:
            return None

    def mark_all_brackets(self):
        self.brackets = [self.mark_bracket_failed(bracket) for bracket in self.brackets]

    def mark_bracket_failed(self, bracket):
        cleaned_bracket = [self.mark_round_failed(round) for round in bracket]
        return cleaned_bracket

    def mark_round_failed(self, round):
        # drop slots that never received a trial
        return [t for t in round if t.trial is not None]

    def report_trial(self, bracket_idx, round_idx, trial, metric):
        """Record a finished trial's metric; optionally reset nan trials."""
        # mark inactive, set metric
        # make verdict for all completed rounds
        requested_round = self.brackets[bracket_idx][round_idx]
        requested_element = None
        for el in requested_round:
            if el.trial == trial:
                requested_element = el
        assert requested_element
        requested_element.metric = metric
        requested_element.active = False
        if math.isfinite(metric):
            log_print('hyperband received report', bracket_idx, round_idx, trial, metric)
        else:
            log_print(Fore.LIGHTRED_EX + 'hyperband received report', bracket_idx, round_idx, trial, metric)
            # reset first rounder null results
            # NOTE(review): reconstructed from mangled source as applying to
            # any non-finite report -- confirm whether this was meant to be
            # restricted to round 0 ("first rounder").
            if self.reset_nan_trial:
                log_print(Fore.LIGHTRED_EX + 'nan trial')
                requested_element.trial = None
                requested_element.metric = None


class HyperbandDriver:
    """Polls the experiment dir, feeds trials to Hyperband instances and
    persists their state."""

    def __init__(self, experiment_dir, trial_generator, param_generator,
                 min_budget, max_budget, reduction_ratio, sleep_interval,
                 max_hyperbands, mode, reset_nan_trial,
                 early_stop_min_budget, early_stop_threshold):
        self.experiment_dir = experiment_dir
        self.min_budget = min_budget
        self.max_budget = max_budget
        self.reduction_ratio = reduction_ratio
        self.sleep_interval = sleep_interval
        self.watcher = ExperimentWatcher(experiment_dir)
        self.hyperbands = []
        self.watch_active_trials = []
        self.trial_generator = trial_generator
        self.param_generator = param_generator
        self.max_hyperbands = max_hyperbands
        self.is_maximize = mode == 'maximize'
        self.reset_nan_trial = reset_nan_trial
        self.early_stop_min_budget = early_stop_min_budget
        # internally metrics are always minimized, so flip the threshold sign
        # in maximize mode
        self.early_stop_threshold = ((-early_stop_threshold if self.is_maximize else early_stop_threshold)
                                     if early_stop_threshold is not None else float('inf'))

    def generate_new_trial(self, end_budget, n_gpu=1):
        """Create a brand-new trial with freshly sampled params."""
        params = self.param_generator.get_next_parameter()
        new_id = make_trial_id()
        log_print(Fore.LIGHTGREEN_EX + f'generate new trial {new_id} with budget 0 -> {end_budget}')
        self.trial_generator.change_capsule_trial_id(new_id)
        self.trial_generator.save_start_state(start_budget=0, end_budget=end_budget,
                                              n_gpu=n_gpu, params=params)
        return new_id

    def amend_trial(self, old_trial, end_budget, n_gpu=1):
        """Extend an existing (promoted) trial to a larger budget."""
        self.trial_generator.change_capsule_trial_id(old_trial)
        new_state = self.trial_generator.amend_start_state(end_budget=end_budget, n_gpu=n_gpu)
        # BUG FIX: the original nested single quotes inside a single-quoted
        # f-string (f'...{new_state['start_budget']}...'), which is a
        # SyntaxError before Python 3.12.
        log_print(
            Fore.LIGHTBLUE_EX + f"amended trial {old_trial} with budget "
            f"{new_state['start_budget']} -> {new_state['end_budget']}, params",
            new_state['params'])

    def get_next_hyperband_trial(self):
        """Return (hyperband_idx, BracketElement) for the next runnable trial,
        growing the hyperband pool on demand; (None, None) when saturated."""
        for hyperband_idx, hyperband in enumerate(self.hyperbands):
            try:
                new_trial = hyperband.request_trial()
            except StopIteration:
                continue
            if new_trial is not None:
                if new_trial.trial is None:
                    new_trial.trial = self.generate_new_trial(new_trial.budget)
                else:
                    self.amend_trial(new_trial.trial, new_trial.budget)
                return hyperband_idx, new_trial
        if len(self.hyperbands) < self.max_hyperbands:
            self.hyperbands.append(Hyperband(min_budget=self.min_budget,
                                             max_budget=self.max_budget,
                                             reduction_ratio=self.reduction_ratio,
                                             reset_nan_trial=self.reset_nan_trial))
            return self.get_next_hyperband_trial()
        return None, None

    def get_available_slots(self):
        """Sum the worker slot counts advertised via slots_* files."""
        slot_files = []
        for parent, dirs, files in os.walk(self.experiment_dir):
            slot_files = [f for f in files if f.startswith('slots_')]
            break
        total_slots = 0
        for slot_file in slot_files:
            with open(os.path.join(self.experiment_dir, slot_file), 'rb') as f:
                total_slots += int(f.read().strip())
        return total_slots

    def check_for_completed_trials(self):
        """Report finished watched trials to their hyperband and unwatch them."""
        completed_trials = []
        # NOTE(review): assumes watcher.poll(...)['trials'] yields (key, info)
        # pairs -- confirm against ExperimentWatcher.
        watcher_result = {k: v for k, v in
                          self.watcher.poll(slots=False,
                                            only=[t.trial for _, t in self.watch_active_trials],
                                            fields=False)['trials']
                          if not v['active']}
        for hyperband_idx, trial in self.watch_active_trials:
            if trial.trial in watcher_result:
                completed_trials.append(trial)
                trial_result = watcher_result[trial.trial]
                log_print(Fore.LIGHTBLACK_EX + f'obtained watcher result for {trial.trial}')
                # a budget mismatch or a missing metric both count as failure
                if trial_result['budget'] != trial.budget:
                    trial_result['metric'] = float('nan')
                if trial_result['metric'] is None:
                    trial_result['metric'] = float('nan')
                metric = trial_result['metric']
                if self.is_maximize:
                    metric = -metric
                self.hyperbands[hyperband_idx].report_trial(trial.bracket, trial.round, trial.trial, metric)
        self.watch_active_trials = [t for t in self.watch_active_trials if t[1] not in completed_trials]

    def early_stop_trials(self):
        """Request termination of running trials that crossed the early-stop threshold."""
        if self.early_stop_min_budget is None:
            return
        for _, trial_info in self.watch_active_trials:
            try:
                with FastTSVTail(os.path.join(self.experiment_dir, trial_info.trial, 'metric.tsv')) as f:
                    budget, _, metric_res = f.tail()
                    budget = int(budget)
                    metric = float(metric_res)
                    if self.is_maximize:
                        metric = -metric
                    if budget >= self.early_stop_min_budget and metric > self.early_stop_threshold:
                        log_print(Fore.RED + 'requesting early stopping of trial', trial_info.trial,
                                  'budget', budget, 'metric', metric)
                        # the presence of a 'terminate' file signals the runner to stop
                        open(os.path.join(self.experiment_dir, trial_info.trial, 'terminate'), 'ab').close()
            except FileNotFoundError:
                continue

    def start_trials(self):
        """Fill every available worker slot with a new/promoted trial."""
        n_slots = self.get_available_slots()
        for _ in range(n_slots):
            hyperband_idx, new_trial = self.get_next_hyperband_trial()
            if new_trial is not None:
                # launch new trial
                # add to trials being watched
                log_print(Fore.LIGHTBLACK_EX + f'watching trial {new_trial.trial} of band {hyperband_idx}')
                self.watch_active_trials.append((hyperband_idx, new_trial))

    def save_hyberband_data(self):
        """Persist driver state to hyperbands.json (note: legacy 'hyberbands' key)."""
        with open(os.path.join(self.experiment_dir, 'hyperbands.json'), 'w', encoding='utf-8') as f:
            h_data = {
                'active': [(idx, trial.serialize()) for idx, trial in self.watch_active_trials],
                'hyberbands': [hyperband.serialize() for hyperband in self.hyperbands]
            }
            json.dump(h_data, f, ensure_ascii=False, allow_nan=True, indent=2)

    def load_hyperband_data(self, path):
        """Restore driver state written by save_hyberband_data()."""
        log_print('loading hyperbands data from', path)
        with open(path, 'r', encoding='utf_8') as f:
            data = json.load(f)
        # BUG FIX: the previous implementation iterated the top-level dict
        # (yielding its keys) instead of data['active'], read the wrong key
        # ('hyperbands' although save_hyberband_data() writes 'hyberbands'),
        # and stored (idx, Hyperband) tuples where the rest of the driver
        # expects plain Hyperband objects.
        self.watch_active_trials = [(idx, BracketElement.deserialize(trial))
                                    for idx, trial in data['active']]
        self.hyperbands = [Hyperband.deserialize(hb) for hb in data['hyberbands']]

    def start(self):
        """Main polling loop: early-stop, harvest, launch, persist, repeat."""
        last_watching = set()
        while True:
            self.early_stop_trials()
            self.check_for_completed_trials()
            self.start_trials()
            cur_watching = set(t.trial for _, t in self.watch_active_trials)
            if last_watching != cur_watching:
                for idx, hb in enumerate(self.hyperbands):
                    log_print(Fore.LIGHTBLACK_EX + '----- Hyperband', idx, id(hb), '-----')
                    hb.pprint_brackets()
                self.save_hyberband_data()
                last_watching = cur_watching
            if len(self.hyperbands) == self.max_hyperbands and all(hb.is_complete() for hb in self.hyperbands):
                break
            time.sleep(self.sleep_interval)


@click.command()
@click.option('--exp-path', required=True)
@click.option('--module', required=True)
@click.option('--min-budget', type=int, default=1)
@click.option('--max-budget', type=int, required=True)
@click.option('--sleep-interval', type=float, default=20.)
@click.option('--max-hyperbands', type=int, default=5)
@click.option('--reduction-ratio', type=float, default=math.e)
@click.option('--mode', type=click.Choice(['maximize', 'minimize']), default='minimize')
@click.option('--bohb-random-ratio', type=float, default=0.2)
@click.option('--bohb-guided-ratio', type=float, default=0.6)
@click.option('--bohb-random-size', type=int, default=64)
@click.option('--bohb-guided-size', type=int, default=64)
@click.option('--bohb-result-size-threshold', type=int)
@click.option('--bohb-good-ratio', type=float, default=0.30)
@click.option('--bohb-model-cache-time', type=float, default=900.)
@click.option('--bohb-min-bandwidth', type=float, default=1e-3)
@click.option('--bohb-bandwidth-estimation', default='normal_reference')
@click.option('--bohb-bandwidth-factor', type=float, default=3)
@click.option('--reset-nan-trial/--no-reset-nan-trial', default=True)
@click.option('--early-stop-min-budget', type=int)
@click.option('--early-stop-threshold', type=float)
@click.option('--load')
def run(module, exp_path, min_budget, max_budget, reduction_ratio, max_hyperbands, sleep_interval, mode,
        bohb_random_ratio, bohb_guided_ratio, bohb_random_size, bohb_guided_size,
        bohb_result_size_threshold, bohb_good_ratio, bohb_model_cache_time,
        bohb_min_bandwidth, bohb_bandwidth_estimation, bohb_bandwidth_factor,
        reset_nan_trial, load, early_stop_min_budget, early_stop_threshold):
    """CLI entry point: build the BOHB generator and run the Hyperband driver."""
    exp_path = os.path.abspath(exp_path)
    trial_gen = Generator(module, exp_path)
    param_gen = BOHBParamGen(module, exp_path,
                             random_ratio=bohb_random_ratio,
                             random_sample_size=bohb_random_size,
                             guided_ratio=bohb_guided_ratio,
                             guided_sample_size=bohb_guided_size,
                             result_size_threshold=bohb_result_size_threshold,
                             good_ratio=bohb_good_ratio,
                             model_cache_time=bohb_model_cache_time,
                             mode=mode,
                             min_bandwidth=bohb_min_bandwidth,
                             bandwidth_estimation=bohb_bandwidth_estimation,
                             bandwidth_factor=bohb_bandwidth_factor,
                             early_stop_ratio=1. / reduction_ratio)
    driver = HyperbandDriver(experiment_dir=exp_path,
                             trial_generator=trial_gen,
                             param_generator=param_gen,
                             min_budget=min_budget,
                             max_budget=max_budget,
                             reduction_ratio=reduction_ratio,
                             sleep_interval=sleep_interval,
                             max_hyperbands=max_hyperbands,
                             mode=mode,
                             reset_nan_trial=reset_nan_trial,
                             early_stop_min_budget=early_stop_min_budget,
                             early_stop_threshold=early_stop_threshold)
    if load is not None:
        driver.load_hyperband_data(load)
    driver.start()


if __name__ == '__main__':
    run()
import abc
import importlib
import json
import math
import os
import random
import sys
import time
import datetime

import colorama
import click
import infirunner.capsule
import numpy as np
import scipy.stats as sps
from colorama import Style, Fore
from statsmodels.nonparametric.api import KDEMultivariate

from infirunner.util import make_trial_id, UniformBernoulli
from infirunner.generator import Generator
from infirunner.watch import ExperimentWatcher, FastTSVTail

colorama.init()


def int_ceil(x):
    """Ceiling of x as a plain int."""
    return int(math.ceil(x))


def int_floor(x):
    """Floor of x as a plain int."""
    return int(math.floor(x))


def log_print(*args):
    """Print a timestamped message to stderr (dim timestamp prefix)."""
    print(Fore.LIGHTBLACK_EX + f'[{datetime.datetime.now()}]' + Style.RESET_ALL, *args, file=sys.stderr)


# Note that the metric is always assumed to be optimized towards minimum.
class BracketElement:
    """One trial slot of a Hyperband bracket round.

    Attributes mirror the constructor arguments: bracket/round position,
    assigned budget, reported metric (None until reported), trial id
    (None until a trial is assigned), and the active/promoted flags.
    """

    def __init__(self, bracket, round, budget, metric, trial, active, promoted):
        self.bracket = bracket
        self.round = round
        self.budget = budget
        self.metric = metric
        self.trial = trial
        self.active = active
        self.promoted = promoted

    def __eq__(self, other):
        return self is other or (self.bracket == other.bracket and
                                 self.round == other.round and
                                 self.budget == other.budget and
                                 self.metric == other.metric and
                                 self.trial == other.trial and
                                 self.active == other.active and
                                 self.promoted == other.promoted)

    def __repr__(self):
        return (f'<BracketElement bracket={self.bracket} round={self.round} budget={self.budget} '
                f'metric={self.metric} trial={self.trial} active={self.active} promoted={self.promoted}>')

    def serialize(self):
        """Return a JSON-serializable dict round-trippable via deserialize().

        BUG FIX: 'metric' was previously omitted here although deserialize()
        requires it, so reloading saved state raised KeyError.
        """
        return {
            'bracket': self.bracket,
            'round': self.round,
            'budget': self.budget,
            'metric': self.metric,
            'trial': self.trial,
            'active': self.active,
            'promoted': self.promoted
        }

    @staticmethod
    def deserialize(data):
        """Rebuild a BracketElement from serialize() output.

        Uses .get('metric') so that state files written before the
        serialize() fix (which lacked the key) still load, with metric None.
        """
        return BracketElement(bracket=data['bracket'],
                              round=data['round'],
                              budget=data['budget'],
                              metric=data.get('metric'),
                              trial=data['trial'],
                              active=data['active'],
                              promoted=data['promoted'])


class ParamGen(abc.ABC):
    """Base parameter generator bound to an experiment's capsule module."""

    def __init__(self, module, experiment_dir):
        # Importing the module registers the active capsule as a side effect.
        importlib.import_module(module)
        self.capsule = infirunner.capsule.active_capsule
        self.experiment_dir = experiment_dir

    def get_next_parameter(self):
        """Sample a fresh random parameter set from the capsule."""
        return self.capsule.gen_params(use_default=False, skip_const=True)


class RandomParamGen(ParamGen):
    pass


class BOHBParamGen(ParamGen):
    """BOHB-style parameter generator mixing random and KDE-guided sampling."""

    def __init__(self, module, experiment_dir,
                 random_ratio, random_sample_size,
                 guided_ratio, guided_sample_size,
                 result_size_threshold=None,
                 good_ratio=0.15,
                 early_stop_ratio=1 / math.e,
                 model_cache_time=30,
                 mode='minimize',
                 min_bandwidth=1e-3,
                 bandwidth_estimation='normal_reference',
                 bandwidth_factor=3.):
        super().__init__(module, experiment_dir)
        assert 0 <= random_ratio <= 1
        assert 0 <= guided_ratio <= 1
        assert 0 <= random_ratio + guided_ratio <= 1
        sample_params = super().get_next_parameter()
        # Need at least dim+1 observations before the KDE model is usable.
        if result_size_threshold is None:
            result_size_threshold = len(sample_params) + 1
        else:
            result_size_threshold = max(len(sample_params) + 1, result_size_threshold)
        log_print(Fore.LIGHTBLACK_EX + 'model-based threshold is', result_size_threshold)
        self.random_dice = UniformBernoulli(random_ratio)
        # Conditional probability of guided sampling given we did not go random.
        self.guided_dice = UniformBernoulli(guided_ratio / (1 - random_ratio))
        self.good_ratio = good_ratio
        self.early_stop_ratio = early_stop_ratio
        self.random_sample_size = random_sample_size
        self.guided_sample_size = guided_sample_size
        self.result_size_threshold = result_size_threshold
        self.model_cache_time = model_cache_time
        self.last_stats_collect_time = 0.
self.last_stats = (None, None) self.last_models = (None, None) self.is_maximize = mode == 'maximize' self.min_bandwidth = min_bandwidth self.bandwidth_estimation = bandwidth_estimation self.bandwidth_factor = bandwidth_factor self.kde_vartypes, self.kde_data_encoder, self.kde_data_decoder, self.kde_data_bounds = self.make_kde_helpers() def make_kde_helpers(self): param_keys = list(super().get_next_parameter().keys()) param_keys.sort() param_gen = self.capsule.param_gen var_types = ''.join(param_gen[key].var_type for key in param_keys) data_bounds = [param_gen[key].encoded_bounds() for key in param_keys] def data_encoder(data): return [param_gen[key].encode_as_numerical(data[key]) for key in param_keys] def data_decoder(data, old_params): ret = {} for idx, key in enumerate(param_keys): decoded = param_gen[key].decode_from_numerical(data[idx]) if var_types[idx] == 'u' and decoded != old_params[key]: while True: decoded = param_gen[key].get_next_value() if decoded != old_params[key]: break ret[key] = decoded return ret return var_types, data_encoder, data_decoder, data_bounds def get_trial_params(self, trial): with open(os.path.join(self.experiment_dir, trial, f'last_state.json'), 'r', encoding='utf-8') as f: old_params = json.load(f)['params'] return old_params def guided_modify_parameter(self, trial, model): old_params = self.get_trial_params(trial) old_params_encoded = self.kde_data_encoder(old_params) new_params_encoded = [] for old_param, bw_, (lbound, ubound), vartype in zip(old_params_encoded, model.bw, self.kde_data_bounds, self.kde_vartypes): bw = bw_ * self.bandwidth_factor if lbound is None or ubound is None: new_params = np.random.normal(loc=old_param, scale=bw) else: new_params = sps.truncnorm.rvs((lbound - old_param) / bw, (ubound - old_param) / bw, loc=old_param, scale=bw) new_params_encoded.append(new_params) return self.kde_data_decoder(new_params_encoded, old_params) def get_suggested_next_parameter(self, goods, bads): good_model, bad_model = 
self.last_models if good_model is None or bad_model is None: good_model = KDEMultivariate(data=[self.kde_data_encoder(self.get_trial_params(t)) for t, _ in goods], var_type=self.kde_vartypes, bw=self.bandwidth_estimation) bad_model = KDEMultivariate(data=[self.kde_data_encoder(self.get_trial_params(t)) for t, _ in bads], var_type=self.kde_vartypes, bw=self.bandwidth_estimation) good_model.bw = np.clip(good_model.bw, self.min_bandwidth, None) bad_model.bw = np.clip(bad_model.bw, self.min_bandwidth, None) self.last_models = good_model, bad_model best_score = float('-inf') best_candidate = None use_guided = self.guided_dice() for _ in range(self.guided_sample_size if use_guided else self.random_sample_size): if use_guided: next_param = self.guided_modify_parameter(random.choice(goods)[0], good_model) else: next_param = super().get_next_parameter() good_score = np.log(np.clip(good_model.pdf(self.kde_data_encoder(next_param)), 1e-32, None)) bad_score = np.log(np.clip(bad_model.pdf(self.kde_data_encoder(next_param)), 1e-32, None)) score = good_score - bad_score if score > best_score: best_score = score best_candidate = next_param log_print(Fore.LIGHTBLACK_EX + 'proposing', 'guided' if use_guided else 'sieved', 'parameter with score', best_score) return best_candidate def collect_stats(self): now = time.time() if now - self.last_stats_collect_time > self.model_cache_time: metrics = list(self.get_all_budget_metrics().items()) metrics.sort(key=lambda x: x[0], reverse=True) goods = None bads = None for budget, trial_data in metrics: bads_ = [(trial, metric) for trial, metric in trial_data if metric is None or not math.isfinite(metric)] goods_ = [(trial, metric) for trial, metric in trial_data if metric is not None and math.isfinite(metric)] if len(goods_) >= self.result_size_threshold and len(goods_) + len(bads_) > self.result_size_threshold: goods_.sort(key=lambda x: x[1], reverse=self.is_maximize) good_size = int_ceil(len(goods_) * self.good_ratio) bads_ = 
list(reversed(goods_))[ :max(len(goods_) - good_size, self.result_size_threshold - len(bads_))] + bads_ goods_ = goods_[:max(good_size, self.result_size_threshold)] log_print(Fore.LIGHTBLACK_EX + f'collected stats for budget {budget} with {len(goods_)} goods, ' f'{len(bads_)} bads') log_print(Fore.LIGHTBLACK_EX + f'best good: {goods_[0][1]:10.4f}, best bad: {bads_[0][1]:10.4f}') goods = goods_ bads = bads_ break if self.last_stats != (goods, bads): self.last_stats = (goods, bads) self.last_models = (None, None) self.last_stats_collect_time = now return goods, bads else: return self.last_stats def get_all_budget_metrics(self): active_dirs = [] for parent, dirs, files in os.walk(self.experiment_dir): active_dirs = dirs break metrics = {} nan_metrics = [] for dir in active_dirs: try: with open(os.path.join(self.experiment_dir, dir, 'metric.tsv'), 'rb') as f: for l in f: budget, metric_time, metric_res = l.split(b'\t') budget = int(budget) budget_metric = metrics.setdefault(budget, []) metric = float(metric_res) budget_metric.append((dir, metric)) if not math.isfinite(metric): nan_metrics.append((budget, dir)) except FileNotFoundError: continue for budget in metrics.keys(): for trial_budget, dir in nan_metrics: if budget > trial_budget: metrics[budget].append((dir, float('nan'))) return metrics def should_terminate_trials(self, trials): if not trials: return [] trials = set(trials) should_terminate = set() metrics = self.get_all_budget_metrics() good_metrics_threshold = {} for budget, trials_data in metrics.items(): budget_data = [] for _, metric in trials_data: if math.isfinite(metric): budget_data.append(metric) if len(budget_data) > self.result_size_threshold: budget_data.sort() # ascending sort budget_data = budget_data[:int_ceil(len(budget_data) * self.early_stop_ratio)] good_metrics_threshold[budget] = budget_data[-1] for budget, trials_data in sorted(metrics.items(), key=lambda x: x[0], reverse=True): if budget == 0: continue prev_threshold = 
good_metrics_threshold.get(budget - 1) if prev_threshold is None: continue for trial, metric in trials_data: if trial not in trials: continue if metric < prev_threshold: continue log_print(Fore.LIGHTBLACK_EX + 'adding trial', trial, 'to termination list.', 'budget', budget, 'metric', metric, 'threshold', prev_threshold) should_terminate.add(trial) return list(should_terminate) def get_next_parameter(self): if self.random_dice(): log_print(Fore.LIGHTBLACK_EX + 'generate random parameter because dice says so') return super().get_next_parameter() else: goods, bads = self.collect_stats() if goods and bads: return self.get_suggested_next_parameter(goods, bads) else: log_print(Fore.LIGHTBLACK_EX + 'generate random parameter because not enough samples') return super().get_next_parameter() class Hyperband: def __init__(self, min_budget, max_budget, reset_nan_trial=True, reduction_ratio=math.e): self.min_budget = min_budget self.max_budget = max_budget self.reduction_ratio = reduction_ratio self.bracket_max = int_floor(math.log(max_budget / min_budget, reduction_ratio)) self.brackets = self.make_brackets() self.cur_bracket_idx = 0 self.cur_round_idx = 0 self.reset_nan_trial = reset_nan_trial def serialize(self): return { 'min_budget': self.min_budget, 'max_budget': self.max_budget, 'reduction_ratio': self.reduction_ratio, 'bracket_max': self.bracket_max, 'brackets': [[[btrial.serialize() for btrial in round] for round in bracket] for bracket in self.brackets], 'cur_bracket_idx': self.cur_bracket_idx, 'cur_round_idx': self.cur_round_idx, 'reset_nan_trial': self.reset_nan_trial } @staticmethod def deserialize(data): self = Hyperband(min_budget=data['min_budget'], max_budget=data['max_budget'], reduction_ratio=data['reduction_ratio'], reset_nan_trial=data['reset_nan_trial']) self.brackets = [[[BracketElement.deserialize(btrial) for btrial in round] for round in bracket] for bracket in data['brackets']] self.cur_round_idx = data['cur_round_idx'] self.cur_bracket_idx = 
data['cur_bracket_idx'] self.bracket_max = data['bracket_max'] return self def pprint_brackets(self): for bracket_idx, bracket in enumerate(self.brackets): log_print(Fore.LIGHTBLACK_EX + f'bracket {bracket_idx}:') for round_idx, round in enumerate(bracket): actives = [e for e in round if e.active] dones = [e for e in round if e.metric is not None] good_dones = [e for e in dones if math.isfinite(e.metric)] if not round: continue budget = round[0].budget to_print = (f'\tround {round_idx:1}: {len(round):3} trials with {budget:3} budgets, ' + f'{len(actives):3} active, {len(dones):3} complete') if good_dones: best_metric = min(e.metric for e in good_dones) best_trial = [e.trial for e in dones if e.metric == best_metric][0] to_print += f', {best_metric:10.4f} best {best_trial}' else: to_print = Fore.LIGHTBLACK_EX + to_print log_print(to_print) def make_brackets(self): brackets = [] for bracket_idx in range(self.bracket_max, -1, -1): bracket = [] init_n_trials = (self.bracket_max + 1) / (bracket_idx + 1) * (self.reduction_ratio ** bracket_idx) init_budget = self.max_budget / (self.reduction_ratio ** bracket_idx) for i in range(bracket_idx + 1): n_trials = int_ceil(init_n_trials / (self.reduction_ratio ** i)) if i == bracket_idx: budget = self.max_budget elif bracket_idx == self.bracket_max and i == 0: budget = self.min_budget n_trials = int_ceil(self.max_budget / self.min_budget) else: budget = int_ceil(init_budget * (self.reduction_ratio ** i)) bracket_trials = [] for _ in range(n_trials): bracket_trials.append(BracketElement(bracket=self.bracket_max - bracket_idx, round=i, budget=budget, metric=None, trial=None, active=False, promoted=False)) bracket.append(bracket_trials) brackets.append(bracket) return brackets def is_round_complete(self, bracket_id, round_id): return all(e.trial is not None and not e.active for e in self.brackets[bracket_id][round_id]) def is_complete(self): return all(e.trial is not None and not e.active for e in self.brackets[-1][-1]) def 
request_trial(self): # if all brackets are complete, raise StopIteration # if caller should wait, return None # return: BracketElement, note that if el.trial is empty the caller is responsible for filling it if self.cur_bracket_idx > self.bracket_max: self.mark_all_brackets() log_print(Fore.LIGHTGREEN_EX + 'All brackets complete') raise StopIteration cur_bracket = self.brackets[self.cur_bracket_idx] cur_round = cur_bracket[self.cur_round_idx] inactive_without_trial = [e for e in cur_round if e.trial is None and not e.active] if inactive_without_trial: ret = inactive_without_trial[0] if self.cur_round_idx == 0: ret.active = True return ret else: last_round_completed_trials = [e for e in cur_bracket[self.cur_round_idx - 1] if e.metric is not None and math.isfinite(e.metric) and not e.promoted] if last_round_completed_trials: last_round_completed_trials.sort(key=lambda e: e.metric) best_available_trial = last_round_completed_trials[0] best_available_trial.promoted = True log_print(Fore.LIGHTBLACK_EX + 'promote best available trial', best_available_trial.trial, best_available_trial.metric, '(worst is', last_round_completed_trials[-1].metric, ')') ret.trial = best_available_trial.trial ret.active = True # if no trial is present, the caller is responsible for filling it it return ret elif not self.is_round_complete(self.cur_bracket_idx, self.cur_round_idx - 1): return None else: log_print(Fore.LIGHTRED_EX + 'Insufficient previous rounders to continue', id(self)) self.mark_all_brackets() raise StopIteration if self.is_round_complete(self.cur_bracket_idx, self.cur_round_idx): if self.cur_round_idx == len(cur_bracket) - 1: self.cur_round_idx = 0 self.cur_bracket_idx += 1 else: self.cur_round_idx += 1 log_print(Fore.LIGHTBLACK_EX + str(id(self)), 'proceed to bracket', self.cur_bracket_idx, 'round', self.cur_round_idx) return self.request_trial() else: return None def mark_all_brackets(self): self.brackets = [self.mark_bracket_failed(bracket) for bracket in self.brackets] def 
mark_bracket_failed(self, bracket): cleaned_bracket = [self.mark_round_failed(round) for round in bracket] return cleaned_bracket def mark_round_failed(self, round): return [t for t in round if t.trial is not None] def report_trial(self, bracket_idx, round_idx, trial, metric): # mark inactive, set metric # make verdict for all completed rounds requested_round = self.brackets[bracket_idx][round_idx] requested_element = None for el in requested_round: if el.trial == trial: requested_element = el assert requested_element requested_element.metric = metric requested_element.active = False if math.isfinite(metric): log_print('hyperband received report', bracket_idx, round_idx, trial, metric) else: log_print(Fore.LIGHTRED_EX + 'hyperband received report', bracket_idx, round_idx, trial, metric) # reset first rounder null results if self.reset_nan_trial: log_print(Fore.LIGHTRED_EX + 'nan trial') requested_element.trial = None requested_element.metric = None class HyperbandDriver: def __init__(self, experiment_dir, trial_generator, param_generator, min_budget, max_budget, reduction_ratio, sleep_interval, max_hyperbands, mode, reset_nan_trial, early_stop_min_budget, early_stop_threshold): self.experiment_dir = experiment_dir self.min_budget = min_budget self.max_budget = max_budget self.reduction_ratio = reduction_ratio self.sleep_interval = sleep_interval self.watcher = ExperimentWatcher(experiment_dir) self.hyperbands = [] self.watch_active_trials = [] self.trial_generator = trial_generator self.param_generator = param_generator self.max_hyperbands = max_hyperbands self.is_maximize = mode == 'maximize' self.reset_nan_trial = reset_nan_trial self.early_stop_min_budget = early_stop_min_budget self.early_stop_threshold = ((-early_stop_threshold if self.is_maximize else early_stop_threshold) if early_stop_threshold is not None else float('inf')) def generate_new_trial(self, end_budget, n_gpu=1): params = self.param_generator.get_next_parameter() new_id = make_trial_id() 
log_print(Fore.LIGHTGREEN_EX + f'generate new trial {new_id} with budget 0 -> {end_budget}') self.trial_generator.change_capsule_trial_id(new_id) self.trial_generator.save_start_state(start_budget=0, end_budget=end_budget, n_gpu=n_gpu, params=params) return new_id def amend_trial(self, old_trial, end_budget, n_gpu=1): self.trial_generator.change_capsule_trial_id(old_trial) new_state = self.trial_generator.amend_start_state(end_budget=end_budget, n_gpu=n_gpu) log_print( Fore.LIGHTBLUE_EX + f'amended trial {old_trial} with budget ' f'{new_state["start_budget"]} -> {new_state["end_budget"]}, params', new_state['params']) def get_next_hyperband_trial(self): for hyperband_idx, hyperband in enumerate(self.hyperbands): try: new_trial = hyperband.request_trial() except StopIteration: continue if new_trial is not None: if new_trial.trial is None: new_trial.trial = self.generate_new_trial(new_trial.budget) else: self.amend_trial(new_trial.trial, new_trial.budget) return hyperband_idx, new_trial if len(self.hyperbands) < self.max_hyperbands: self.hyperbands.append(Hyperband(min_budget=self.min_budget, max_budget=self.max_budget, reduction_ratio=self.reduction_ratio, reset_nan_trial=self.reset_nan_trial)) return self.get_next_hyperband_trial() return None, None def get_available_slots(self): slot_files = [] for parent, dirs, files in os.walk(self.experiment_dir): slot_files = [f for f in files if f.startswith('slots_')] break total_slots = 0 for slot_file in slot_files: with open(os.path.join(self.experiment_dir, slot_file), 'rb') as f: total_slots += int(f.read().strip()) return total_slots def check_for_completed_trials(self): completed_trials = [] watcher_result = {k: v for k, v in self.watcher.poll(slots=False, only=[t.trial for _, t in self.watch_active_trials], fields=False)['trials'] if not v['active']} for hyperband_idx, trial in self.watch_active_trials: if trial.trial in watcher_result: completed_trials.append(trial) trial_result = watcher_result[trial.trial] 
log_print(Fore.LIGHTBLACK_EX + f'obtained watcher result for {trial.trial}') if trial_result['budget'] != trial.budget: trial_result['metric'] = float('nan') if trial_result['metric'] is None: trial_result['metric'] = float('nan') metric = trial_result['metric'] if self.is_maximize: metric = -metric self.hyperbands[hyperband_idx].report_trial(trial.bracket, trial.round, trial.trial, metric) self.watch_active_trials = [t for t in self.watch_active_trials if t[1] not in completed_trials] def early_stop_trials(self): if self.early_stop_min_budget is None: return for _, trial_info in self.watch_active_trials: try: with FastTSVTail(os.path.join(self.experiment_dir, trial_info.trial, 'metric.tsv')) as f: budget, _, metric_res = f.tail() budget = int(budget) metric = float(metric_res) if self.is_maximize: metric = -metric if budget >= self.early_stop_min_budget and metric > self.early_stop_threshold: log_print(Fore.RED + 'requesting early stopping of trial', trial_info.trial, 'budget', budget, 'metric', metric) open(os.path.join(self.experiment_dir, trial_info.trial, 'terminate'), 'ab').close() except FileNotFoundError: continue def start_trials(self): n_slots = self.get_available_slots() for _ in range(n_slots): hyperband_idx, new_trial = self.get_next_hyperband_trial() if new_trial is not None: # launch new trial # add to trials being watched log_print(Fore.LIGHTBLACK_EX + f'watching trial {new_trial.trial} of band {hyperband_idx}') self.watch_active_trials.append((hyperband_idx, new_trial)) def save_hyberband_data(self): with open(os.path.join(self.experiment_dir, 'hyperbands.json'), 'w', encoding='utf-8') as f: h_data = { 'active': [(idx, trial.serialize()) for idx, trial in self.watch_active_trials], 'hyberbands': [hyperband.serialize() for hyperband in self.hyperbands] } json.dump(h_data, f, ensure_ascii=False, allow_nan=True, indent=2) def load_hyperband_data(self, path): log_print('loading hyperbands data from', path) with open(path, 'r', encoding='utf_8') as f: 
data = json.load(f) self.watch_active_trials = [BracketElement.deserialize(trial) for trial in data] self.hyperbands = [(idx, Hyperband.deserialize(hb)) for idx, hb in data['hyperbands']] def start(self): last_watching = set() while True: self.early_stop_trials() self.check_for_completed_trials() self.start_trials() cur_watching = set(t.trial for _, t in self.watch_active_trials) if last_watching != cur_watching: for idx, hb in enumerate(self.hyperbands): log_print(Fore.LIGHTBLACK_EX + '----- Hyperband', idx, id(hb), '-----') hb.pprint_brackets() self.save_hyberband_data() last_watching = cur_watching if len(self.hyperbands) == self.max_hyperbands and all(hb.is_complete() for hb in self.hyperbands): break time.sleep(self.sleep_interval) @click.command() @click.option('--exp-path', required=True) @click.option('--module', required=True) @click.option('--min-budget', type=int, default=1) @click.option('--max-budget', type=int, required=True) @click.option('--sleep-interval', type=float, default=20.) @click.option('--max-hyperbands', type=int, default=5) @click.option('--reduction-ratio', type=float, default=math.e) @click.option('--mode', type=click.Choice(['maximize', 'minimize']), default='minimize') @click.option('--bohb-random-ratio', type=float, default=0.2) @click.option('--bohb-guided-ratio', type=float, default=0.6) @click.option('--bohb-random-size', type=int, default=64) @click.option('--bohb-guided-size', type=int, default=64) @click.option('--bohb-result-size-threshold', type=int) @click.option('--bohb-good-ratio', type=float, default=0.30) @click.option('--bohb-model-cache-time', type=float, default=900.) 
@click.option('--bohb-min-bandwidth', type=float, default=1e-3) @click.option('--bohb-bandwidth-estimation', default='normal_reference') @click.option('--bohb-bandwidth-factor', type=float, default=3) @click.option('--reset-nan-trial/--no-reset-nan-trial', default=True) @click.option('--early-stop-min-budget', type=int) @click.option('--early-stop-threshold', type=float) @click.option('--load') def run(module, exp_path, min_budget, max_budget, reduction_ratio, max_hyperbands, sleep_interval, mode, bohb_random_ratio, bohb_guided_ratio, bohb_random_size, bohb_guided_size, bohb_result_size_threshold, bohb_good_ratio, bohb_model_cache_time, bohb_min_bandwidth, bohb_bandwidth_estimation, bohb_bandwidth_factor, reset_nan_trial, load, early_stop_min_budget, early_stop_threshold): exp_path = os.path.abspath(exp_path) trial_gen = Generator(module, exp_path) param_gen = BOHBParamGen(module, exp_path, random_ratio=bohb_random_ratio, random_sample_size=bohb_random_size, guided_ratio=bohb_guided_ratio, guided_sample_size=bohb_guided_size, result_size_threshold=bohb_result_size_threshold, good_ratio=bohb_good_ratio, model_cache_time=bohb_model_cache_time, mode=mode, min_bandwidth=bohb_min_bandwidth, bandwidth_estimation=bohb_bandwidth_estimation, bandwidth_factor=bohb_bandwidth_factor, early_stop_ratio=1. / reduction_ratio) driver = HyperbandDriver(experiment_dir=exp_path, trial_generator=trial_gen, param_generator=param_gen, min_budget=min_budget, max_budget=max_budget, reduction_ratio=reduction_ratio, sleep_interval=sleep_interval, max_hyperbands=max_hyperbands, mode=mode, reset_nan_trial=reset_nan_trial, early_stop_min_budget=early_stop_min_budget, early_stop_threshold=early_stop_threshold) if load is not None: driver.load_hyperband_data(load) driver.start() if __name__ == '__main__': run()
#!/usr/bin/env python import rospy from std_msgs.msg import String import speech_recognition as sr from blessed import Terminal from texttable import Texttable from spacy.matcher import Matcher import en_core_web_sm from inference import * import json nlp = en_core_web_sm.load() nlp.Defaults.stop_words -= {'give', 'Give', 'put', 'Put', 'this', 'This', 'that', 'That', 'here', 'Here', 'there', 'There'} matcher = Matcher(nlp.vocab) term = Terminal() # r = sr.Recognizer() # mic = sr.Microphone(device_index=9) # with mic as source: # audio = r.listen(source) # instruction = r.recognize_google(audio) # TODO: Add docstring # TODO: Add language pattern. class VerbalInstruction: """ Class for the verbal instruction node. It records the verbal instruction, transcribes and publishes it. """ def __init__(self, device_index: int, mode='both', print_=True): """ Initializes the instance. Parameters ---------- device_index : int, optional Index for the audion input that the speech recognizer would listen to """ self.speech_recognizer = sr.Recognizer() # self.mic = sr.Microphone(device_index=device_index) self.instruction_list = [] self.counter = 1 self.mode = mode self.print = print_ self.params = {} rospy.init_node("verbal_instruction_pub_node", anonymous=False) self.verbal_instruction_pub = rospy.Publisher( "/verbal_instruction", String, queue_size=10 ) self.instruction_msg = "" # Run the publisher after initiation self.count = 0 self.commands = [ "put the small red towels in the basket", "Give me the red onions", "Go left", "Grab the large green box on your right", "Put the jar on the table"] self.run() def run(self): """ Runs the publisher node. Publishes verbal instructions received from the user. """ # print('\n\n\n\n\n\n\n\n\n\n\n\n\n') rate = rospy.Rate(1) # 1hz while not rospy.is_shutdown(): # with self.mic as source: # audio = self.speech_recognizer.listen(source) # # if no instruction is received, go to the # # next iteration. 
# try: # self.instruction_msg = self.speech_recognizer.recognize_google(audio) # if self.instruction_msg in ["exit", "quit"]: # rospy.signal_shutdown("Exiting") # except sr.UnknownValueError: # rospy.loginfo("Speaker is quiet") # continue # print(term.on_dodgerblue4(f'{term.springgreen}Verbal command: {term.deepskyblue}"{self.instruction_msg}" {term.normal}')) if self.count < 5: self.instruction_msg = self.commands[self.count] self.count += 1 # if self.count == 5: # rospy.signal_shutdown("Exiting") if self.mode == 'pattern': self.params = self.extract_object_info(self.instruction_msg) elif self.mode == 'nn': self.params = predict(model, self.instruction_msg) self.params['no'] = self.counter - 1 elif self.mode == 'both': self.params['pattern'] = self.extract_object_info(self.instruction_msg) self.params['nn'] = predict(model, self.instruction_msg) self.params['nn']['no'] = self.counter self.counter += 1 if self.print: print(term.on_dodgerblue4(f'{term.springgreen}Verbal command: {term.deepskyblue}"{self.instruction_msg}" {term.normal}')) if self.mode == 'both': print(self.params['pattern']) print(self.params['nn']) else: print(self.params) self.verbal_instruction_pub.publish(json.dumps(self.params)) rate.sleep() def extract_object_info(self, instruction_msg, visualize=False): matcher.add("action", [[{"POS": "VERB"},{"POS": "PRON", "OP": "*"},{},{"POS": "ADJ", "OP": "*"},{"POS": "NOUN"}]]) matcher.add("navigation", [[{"LEMMA": {"IN": ["go", "come", "move", "turn"]}}]]) matcher.add("attr", [[{"TAG": "JJ", "OP": "+"}, {"POS": "NOUN"}]]) matcher.add("pos", [[{"LEMMA": {"IN": ["right", "left", "front", "back"]}}]]) doc = nlp(instruction_msg) matches = matcher(doc) object_info = {} object_name = "None" action = "None" attr = [] pos = "None" navigation = "None" is_navigation = False for match_id, start, end in matches: string_id = nlp.vocab.strings[match_id] # print(string_id) if string_id == "action": object_name = doc[end-1].text action = doc[start].text if string_id == 
"navigation": is_navigation = True navigation = doc[start].text if string_id == "attr": attr.append(doc[start].text) if string_id == "pos": pos = doc[start].text object_info["no"] = self.counter object_info["object"] = object_name object_info["action"] = navigation if is_navigation else action object_info["attr"] = "None" if len(attr) == 0 else attr object_info["pos"] = pos if visualize: if ('exit' and 'instruction') not in instruction_msg: # print(f'{term.cyan3}Object: {term.purple3}{object_info['object']} \n{term.cyan3}Action: {term.purple3}{object_info['action']} \n{term.cyan3}Attributes: {term.purple3}{object_info['attr']} \n{term.cyan3}Position: {term.purple3}{object_info['pos']} \n') print(f'{term.cyan3}Object: {term.purple3}{object_info['object']} {term.maroon2}| {term.cyan3}Action: {term.purple3}{object_info['action']} {term.maroon2}| {term.cyan3}Attributes: {term.purple3}{object_info['attr']} {term.maroon2}| {term.cyan3}Position: {term.purple3}{object_info['pos']} \n') self.instruction_list.append(object_info) elif 'instruction' in instruction_msg: table = Texttable() instructions_params = [ list(item.values()) for i, item in enumerate(self.instruction_list)] table.set_cols_align(["l", "l", "l", "l", "l"]) table.set_cols_valign(["m", "m","m", "m", "m"]) table.add_rows([["NO", "Object", "Action", "Attributes", "Position"], *instructions_params]) print(f'{term.purple3}{table.draw()}') print() return object_info if __name__ == "__main__": VerbalInstruction(device_index=1, mode='both') # Give me the plate # Bring me that red cup # Go left # Grab the large green box on your right # Put the jar on the table
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
import speech_recognition as sr
from blessed import Terminal
from texttable import Texttable
from spacy.matcher import Matcher
import en_core_web_sm
from inference import *
import json

nlp = en_core_web_sm.load()
# Keep deictic/command words that spaCy would normally drop as stop words,
# since they carry meaning in verbal robot instructions.
nlp.Defaults.stop_words -= {'give', 'Give', 'put', 'Put', 'this', 'This',
                            'that', 'That', 'here', 'Here', 'there', 'There'}
matcher = Matcher(nlp.vocab)
term = Terminal()

# Reference snippet for live microphone capture (disabled while testing
# with the canned command list below):
# r = sr.Recognizer()
# mic = sr.Microphone(device_index=9)
# with mic as source:
#     audio = r.listen(source)
# instruction = r.recognize_google(audio)

# TODO: Add language pattern.


class VerbalInstruction:
    """
    Class for the verbal instruction node.

    It records the verbal instruction, transcribes it, and publishes the
    parsed parameters as a JSON-encoded dict on ``/verbal_instruction``.
    """

    def __init__(self, device_index: int, mode: str = 'both', print_: bool = True):
        """
        Initializes the instance and immediately starts publishing.

        Parameters
        ----------
        device_index : int
            Index for the audio input that the speech recognizer would
            listen to (unused while the canned command list is active).
        mode : str
            'pattern' for rule-based extraction, 'nn' for the neural
            model, or 'both' to publish both parses.
        print_ : bool
            Whether to echo each command and its parse to the terminal.
        """
        self.speech_recognizer = sr.Recognizer()
        # self.mic = sr.Microphone(device_index=device_index)
        self.instruction_list = []
        self.counter = 1
        self.mode = mode
        self.print = print_
        self.params = {}
        rospy.init_node("verbal_instruction_pub_node", anonymous=False)
        self.verbal_instruction_pub = rospy.Publisher(
            "/verbal_instruction", String, queue_size=10
        )
        self.instruction_msg = ""
        # Canned commands used in place of the microphone while testing.
        self.count = 0
        self.commands = [
            "put the small red towels in the basket",
            "Give me the red onions",
            "Go left",
            "Grab the large green box on your right",
            "Put the jar on the table",
        ]
        # Run the publisher after initiation
        self.run()

    def run(self):
        """
        Runs the publisher node.

        Publishes verbal instructions received from the user at 1 Hz until
        ROS shuts down.
        """
        rate = rospy.Rate(1)  # 1hz
        while not rospy.is_shutdown():
            # Live speech capture (disabled while replaying canned commands):
            # with self.mic as source:
            #     audio = self.speech_recognizer.listen(source)
            #     # if no instruction is received, go to the next iteration.
            # try:
            #     self.instruction_msg = self.speech_recognizer.recognize_google(audio)
            #     if self.instruction_msg in ["exit", "quit"]:
            #         rospy.signal_shutdown("Exiting")
            # except sr.UnknownValueError:
            #     rospy.loginfo("Speaker is quiet")
            #     continue
            if self.count < 5:
                self.instruction_msg = self.commands[self.count]
                self.count += 1
            if self.mode == 'pattern':
                self.params = self.extract_object_info(self.instruction_msg)
            elif self.mode == 'nn':
                self.params = predict(model, self.instruction_msg)
                # NOTE(review): self.counter is never incremented in 'nn'
                # mode, so 'no' is always 0 here — confirm this is intended.
                self.params['no'] = self.counter - 1
            elif self.mode == 'both':
                self.params['pattern'] = self.extract_object_info(self.instruction_msg)
                self.params['nn'] = predict(model, self.instruction_msg)
                self.params['nn']['no'] = self.counter
                self.counter += 1
            if self.print:
                print(term.on_dodgerblue4(f'{term.springgreen}Verbal command: {term.deepskyblue}"{self.instruction_msg}" {term.normal}'))
                if self.mode == 'both':
                    print(self.params['pattern'])
                    print(self.params['nn'])
                else:
                    print(self.params)
            self.verbal_instruction_pub.publish(json.dumps(self.params))
            rate.sleep()

    def extract_object_info(self, instruction_msg, visualize=False):
        """
        Rule-based extraction of object, action, attributes and position.

        Parameters
        ----------
        instruction_msg : str
            The transcribed verbal instruction.
        visualize : bool
            If True, pretty-print the parse (or, when the message contains
            'instruction', the table of all parses collected so far).

        Returns
        -------
        dict
            Keys: 'no', 'object', 'action', 'attr', 'pos'.
        """
        # Register the patterns only once: Matcher.add() with an existing
        # key APPENDS patterns, so re-adding on every call would accumulate
        # duplicate patterns, duplicate matches, and duplicated attr entries.
        if "action" not in matcher:
            matcher.add("action", [[{"POS": "VERB"}, {"POS": "PRON", "OP": "*"}, {}, {"POS": "ADJ", "OP": "*"}, {"POS": "NOUN"}]])
            matcher.add("navigation", [[{"LEMMA": {"IN": ["go", "come", "move", "turn"]}}]])
            matcher.add("attr", [[{"TAG": "JJ", "OP": "+"}, {"POS": "NOUN"}]])
            matcher.add("pos", [[{"LEMMA": {"IN": ["right", "left", "front", "back"]}}]])

        doc = nlp(instruction_msg)
        matches = matcher(doc)

        object_info = {}
        object_name = "None"
        action = "None"
        attr = []
        pos = "None"
        navigation = "None"
        is_navigation = False
        for match_id, start, end in matches:
            string_id = nlp.vocab.strings[match_id]
            if string_id == "action":
                # Last token of the match is the target noun, first the verb.
                object_name = doc[end - 1].text
                action = doc[start].text
            if string_id == "navigation":
                is_navigation = True
                navigation = doc[start].text
            if string_id == "attr":
                attr.append(doc[start].text)
            if string_id == "pos":
                pos = doc[start].text

        object_info["no"] = self.counter
        object_info["object"] = object_name
        # Navigation verbs (go/come/move/turn) take precedence as the action.
        object_info["action"] = navigation if is_navigation else action
        object_info["attr"] = "None" if len(attr) == 0 else attr
        object_info["pos"] = pos

        if visualize:
            # BUG FIX: the original test `('exit' and 'instruction') not in
            # instruction_msg` evaluates the parenthesised expression to just
            # 'instruction', silently ignoring 'exit'.  Check both words.
            if 'exit' not in instruction_msg and 'instruction' not in instruction_msg:
                print(f'{term.cyan3}Object: {term.purple3}{object_info["object"]} {term.maroon2}| {term.cyan3}Action: {term.purple3}{object_info["action"]} {term.maroon2}| {term.cyan3}Attributes: {term.purple3}{object_info["attr"]} {term.maroon2}| {term.cyan3}Position: {term.purple3}{object_info["pos"]} \n')
                self.instruction_list.append(object_info)
            elif 'instruction' in instruction_msg:
                # Saying 'instruction' dumps everything parsed so far.
                table = Texttable()
                instructions_params = [list(item.values()) for item in self.instruction_list]
                table.set_cols_align(["l", "l", "l", "l", "l"])
                table.set_cols_valign(["m", "m", "m", "m", "m"])
                table.add_rows([["NO", "Object", "Action", "Attributes", "Position"], *instructions_params])
                print(f'{term.purple3}{table.draw()}')
                print()
        return object_info


if __name__ == "__main__":
    VerbalInstruction(device_index=1, mode='both')

# Example commands:
# Give me the plate
# Bring me that red cup
# Go left
# Grab the large green box on your right
# Put the jar on the table
import json
import logging
import os
import sys
import tarfile
from contextlib import contextmanager
from ipaddress import ip_address
from json import JSONDecodeError
from pathlib import Path
from random import randint
from socket import getaddrinfo
from tempfile import SpooledTemporaryFile, TemporaryDirectory
from time import sleep

import docker
from dateutil.parser import isoparse
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.files import File
from django.db import transaction
from django.utils._os import safe_join
from docker.api.container import ContainerApiMixin
from docker.errors import APIError, DockerException, ImageNotFound, NotFound
from docker.tls import TLSConfig
from docker.types import LogConfig
from panimg.image_builders import image_builder_mhd, image_builder_tiff
from requests import HTTPError

from grandchallenge.cases.tasks import import_images
from grandchallenge.components.backends.exceptions import ComponentException
from grandchallenge.components.backends.utils import LOGLINES, user_error
from grandchallenge.components.registry import _get_registry_auth_config

logger = logging.getLogger(__name__)

MAX_SPOOL_SIZE = 1_000_000_000  # 1GB


class DockerConnection:
    """
    Provides a client with a connection to a docker host, provisioned for
    running the container exec_image.
    """

    def __init__(
        self,
        *,
        job_id: str,
        exec_image_sha256: str,
        exec_image_repo_tag: str,
        memory_limit: int,
        time_limit: int,
        requires_gpu: bool,
    ):
        super().__init__()
        self._job_id = job_id
        self._exec_image_sha256 = exec_image_sha256
        self._exec_image_repo_tag = exec_image_repo_tag
        self._memory_limit = memory_limit
        self._requires_gpu = requires_gpu

        if time_limit != settings.CELERY_TASK_TIME_LIMIT:
            logger.warning("Time limits are not implemented in this backend")

        self.__client = None

    @property
    def _client(self):
        """Lazily construct (and cache) the docker client."""
        if self.__client is None:
            client_kwargs = {"base_url": settings.COMPONENTS_DOCKER_BASE_URL}

            if settings.COMPONENTS_DOCKER_TLS_VERIFY:
                tlsconfig = TLSConfig(
                    verify=True,
                    client_cert=(
                        settings.COMPONENTS_DOCKER_TLS_CERT,
                        settings.COMPONENTS_DOCKER_TLS_KEY,
                    ),
                    ca_cert=settings.COMPONENTS_DOCKER_CA_CERT,
                )
                client_kwargs.update({"tls": tlsconfig})

            self.__client = docker.DockerClient(**client_kwargs)

        return self.__client

    @property
    def _labels(self):
        """Labels used to find (and later prune) this job's containers."""
        return {"job": f"{self._job_id}", "traefik.enable": "false"}

    @property
    def _run_kwargs(self):
        """Common, locked-down kwargs for every container this job runs."""
        return {
            "init": True,
            "network_disabled": True,
            "mem_limit": f"{self._memory_limit}g",
            # Set to the same as mem_limit to avoid using swap
            "memswap_limit": f"{self._memory_limit}g",
            "shm_size": f"{settings.COMPONENTS_SHARED_MEMORY_SIZE}m",
            "cpu_period": settings.COMPONENTS_CPU_PERIOD,
            "cpu_quota": settings.COMPONENTS_CPU_QUOTA,
            "cpu_shares": settings.COMPONENTS_CPU_SHARES,
            "cpuset_cpus": self._cpuset_cpus,
            "runtime": settings.COMPONENTS_DOCKER_RUNTIME,
            "cap_drop": ["all"],
            "security_opt": ["no-new-privileges"],
            "pids_limit": settings.COMPONENTS_PIDS_LIMIT,
            "log_config": LogConfig(
                type=LogConfig.types.JSON, config={"max-size": "1g"}
            ),
        }

    @property
    def _cpuset_cpus(self):
        """
        The cpuset_cpus as a string.

        Returns
        -------
            The setting COMPONENTS_CPUSET_CPUS if this is set to a
            none-empty string. Otherwise, works out the available cpu
            from the os.
        """
        if settings.COMPONENTS_CPUSET_CPUS:
            return settings.COMPONENTS_CPUSET_CPUS
        else:
            # Get the cpu count, note that this is setting up the container
            # so that it can use all of the CPUs on the system. To limit
            # the containers execution set COMPONENTS_CPUSET_CPUS
            # externally.
            cpus = os.cpu_count()
            if cpus in [None, 1]:
                return "0"
            else:
                return f"0-{cpus - 1}"

    @staticmethod
    def __retry_docker_obj_prune(*, obj, filters: dict):
        # Retry and exponential backoff of the prune command as only 1 prune
        # operation can occur at a time on a docker host
        num_retries = 0
        e = Exception
        while num_retries < 10:
            try:
                obj.prune(filters=filters)
                break
            except (APIError, HTTPError) as _e:
                num_retries += 1
                e = _e
                sleep((2**num_retries) + (randint(0, 1000) / 1000))
        else:
            raise e

    def stop_and_cleanup(self, timeout: int = 10):
        """Stops and prunes all containers associated with this job."""
        flt = {"label": f"job={self._job_id}"}

        for c in self._client.containers.list(filters=flt):
            c.stop(timeout=timeout)

        self.__retry_docker_obj_prune(obj=self._client.containers, filters=flt)
        self.__retry_docker_obj_prune(obj=self._client.volumes, filters=flt)

    def _pull_images(self):
        """Ensure the execution image is present locally, pulling if needed."""
        try:
            self._client.images.get(name=self._exec_image_sha256)
        except ImageNotFound:
            # This can take a long time so increase the default timeout #1330
            old_timeout = self._client.api.timeout
            self._client.api.timeout = 600  # 10 minutes

            self._client.images.pull(
                repository=self._exec_image_repo_tag,
                auth_config=_get_registry_auth_config(),
            )

            self._client.api.timeout = old_timeout


class DockerExecutor(DockerConnection):
    """Runs a component job on a docker host via i/o volumes."""

    IS_EVENT_DRIVEN = False

    @staticmethod
    def get_job_params(*, event):
        raise NotImplementedError

    @classmethod
    def update_filesystem(cls):
        pass

    def provision(self, *, input_civs, input_prefixes):
        """Create the i/o volumes and copy the input files into them."""
        self._pull_images()
        self._create_io_volumes()
        self._provision_input_volume(
            input_civs=input_civs, input_prefixes=input_prefixes
        )
        self._chmod_volumes()

    def execute(self):
        """Run the execution container to completion."""
        self._pull_images()
        self._execute_container()

    def handle_event(self):
        # This backend is polled, not event driven.
        pass

    def get_outputs(self, *, output_interfaces):
        """Read the produced outputs back out of the output volume."""
        self._pull_images()
        return self._get_outputs(output_interfaces=output_interfaces)

    def deprovision(self):
        self.stop_and_cleanup()

    @property
    def stdout(self):
        """The tail of the execution container's stdout, or "" on error."""
        try:
            container = self._execution_container
            return (
                container.logs(
                    stdout=True, stderr=False, timestamps=True, tail=LOGLINES
                )
                .replace(b"\x00", b"")
                .decode("utf-8")
            )
        except DockerException as e:
            logger.warning(f"Could not fetch stdout: {e}")
            return ""

    @property
    def stderr(self):
        """The tail of the execution container's stderr, or "" on error."""
        try:
            container = self._execution_container
            return (
                container.logs(
                    stdout=False, stderr=True, timestamps=True, tail=LOGLINES
                )
                .replace(b"\x00", b"")
                .decode("utf-8")
            )
        except DockerException as e:
            logger.warning(f"Could not fetch stderr: {e}")
            return ""

    @property
    def duration(self):
        """Wall-clock duration of the exited container, else None."""
        try:
            container = self._execution_container
            if container.status == "exited":
                state = self._client.api.inspect_container(
                    container=container.id
                )
                started_at = state["State"]["StartedAt"]
                finished_at = state["State"]["FinishedAt"]
                return isoparse(finished_at) - isoparse(started_at)
            else:
                return None
        except DockerException as e:
            logger.warning(f"Could not inspect container: {e}")
            return None

    @property
    def _input_volume_name(self):
        return f"{self._job_id}-input"

    @property
    def _output_volume_name(self):
        return f"{self._job_id}-output"

    @property
    def _execution_container_name(self):
        return f"{self._job_id}-executor"

    @property
    def _execution_container(self):
        return self._client.containers.get(
            container_id=self._execution_container_name
        )

    def _pull_images(self):
        # Also need the helper i/o image for the reader/writer containers.
        try:
            self._client.images.get(name=settings.COMPONENTS_IO_IMAGE)
        except ImageNotFound:
            self._client.images.pull(repository=settings.COMPONENTS_IO_IMAGE)

        super()._pull_images()

    def _create_io_volumes(self):
        for volume in [self._input_volume_name, self._output_volume_name]:
            self._client.volumes.create(name=volume, labels=self._labels)

    def _provision_input_volume(self, *, input_civs, input_prefixes):
        """Start a writer container and copy all input files into it."""
        with stop(
            self._client.containers.run(
                image=settings.COMPONENTS_IO_IMAGE,
                volumes={
                    self._input_volume_name: {"bind": "/input/", "mode": "rw"}
                },
                name=f"{self._job_id}-writer",
                remove=True,
                detach=True,
                tty=True,
                labels=self._labels,
                **self._run_kwargs,
            )
        ) as writer:
            self._copy_input_files(
                writer=writer,
                input_civs=input_civs,
                input_prefixes=input_prefixes,
            )

    def _copy_input_files(self, *, writer, input_civs, input_prefixes):
        for civ in input_civs:
            prefix = "/input/"

            if str(civ.pk) in input_prefixes:
                prefix = safe_join(prefix, input_prefixes[str(civ.pk)])

            if civ.decompress:
                dest = Path(
                    safe_join("/tmp/", prefix.lstrip("/"), "submission-src")
                )
            else:
                dest = Path(safe_join(prefix, civ.relative_path))

            writer.exec_run(f"mkdir -p {dest.parent}")
            put_file(container=writer, src=civ.input_file, dest=dest)

            if civ.decompress:
                # Decompression is legacy for submission evaluations where
                # we offered to unzip prediction files for challenge admins
                if prefix[0] != "/" or prefix[-1] != "/":
                    raise RuntimeError(f"Prefix {prefix} is not a full path")

                writer.exec_run(f"unzip {dest} -d {prefix} -x '__MACOSX/*'")

                # Remove a duplicated directory
                input_files = (
                    writer.exec_run(f"ls -1 {prefix}")
                    .output.decode()
                    .splitlines()
                )

                if (
                    len(input_files) == 1
                    and not writer.exec_run(
                        f"ls -d {prefix}{input_files[0]}/"
                    ).exit_code
                ):
                    writer.exec_run(
                        f'/bin/sh -c "mv {prefix}{input_files[0]}/* {prefix} '
                        f'&& rm -r {prefix}{input_files[0]}/"'
                    )

    def _chmod_volumes(self):
        """Ensure that the i/o directories are writable."""
        self._client.containers.run(
            image=settings.COMPONENTS_IO_IMAGE,
            volumes={
                self._input_volume_name: {"bind": "/input/", "mode": "rw"},
                self._output_volume_name: {"bind": "/output/", "mode": "rw"},
            },
            name=f"{self._job_id}-chmod-volumes",
            command="chmod -R 0777 /input/ /output/",
            remove=True,
            labels=self._labels,
            **self._run_kwargs,
        )

    def _execute_container(self) -> None:
        """Run the algorithm container and raise on a non-zero exit."""
        with stop(
            self._client.containers.run(
                image=self._exec_image_sha256,
                volumes={
                    self._input_volume_name: {"bind": "/input/", "mode": "ro"},
                    self._output_volume_name: {
                        "bind": "/output/",
                        "mode": "rw",
                    },
                },
                name=self._execution_container_name,
                detach=True,
                labels=self._labels,
                environment={
                    "NVIDIA_VISIBLE_DEVICES": settings.COMPONENTS_NVIDIA_VISIBLE_DEVICES
                },
                **self._run_kwargs,
            )
        ) as c:
            container_state = c.wait()
            exit_code = int(container_state["StatusCode"])

        if exit_code == 137:
            # BUG FIX: the subscript quotes must differ from the f-string
            # quotes — reusing double quotes is a SyntaxError before
            # Python 3.12 (PEP 701).
            raise ComponentException(
                "The container was killed as it exceeded the memory limit "
                f"of {self._run_kwargs['mem_limit']}."
            )
        elif exit_code != 0:
            raise ComponentException(user_error(self.stderr))

    def _get_outputs(self, *, output_interfaces):
        """Create ComponentInterfaceValues from the output interfaces"""
        outputs = []

        with stop(
            self._client.containers.run(
                image=settings.COMPONENTS_IO_IMAGE,
                volumes={
                    self._output_volume_name: {
                        "bind": "/output/",
                        "mode": "ro",
                    }
                },
                name=f"{self._job_id}-reader",
                remove=True,
                detach=True,
                tty=True,
                labels=self._labels,
                **self._run_kwargs,
            )
        ) as reader:
            with transaction.atomic():
                # Atomic block required as create_instance needs to
                # create interfaces in order to store the files
                for interface in output_interfaces:
                    if interface.is_image_kind:
                        res = self._create_images_result(
                            interface=interface, reader=reader
                        )
                    elif interface.is_json_kind:
                        res = self._create_json_result(
                            interface=interface, reader=reader
                        )
                    else:
                        res = self._create_file_result(
                            interface=interface, reader=reader
                        )

                    outputs.append(res)

        return outputs

    def _create_images_result(self, *, interface, reader):
        """Import exactly one image produced under the interface's path."""
        base_dir = Path(safe_join("/output/", interface.relative_path))
        found_files = reader.exec_run(f"find {base_dir} -type f")

        if found_files.exit_code != 0:
            raise ComponentException(f"Error listing {base_dir}")

        output_files = [
            base_dir / Path(f)
            for f in found_files.output.decode().splitlines()
        ]

        if not output_files:
            raise ComponentException(f"{base_dir} is empty")

        with TemporaryDirectory() as tmpdir:
            for file in output_files:
                temp_file = Path(safe_join(tmpdir, file.relative_to(base_dir)))
                temp_file.parent.mkdir(parents=True, exist_ok=True)
                get_file(container=reader, src=file, dest=temp_file)

            importer_result = import_images(
                input_directory=tmpdir,
                builders=[image_builder_mhd, image_builder_tiff],
            )

        if len(importer_result.new_images) == 0:
            raise ComponentException(f"No images imported from {base_dir}")
        elif len(importer_result.new_images) > 1:
            raise ComponentException(
                f"Only 1 image should be produced in {base_dir}, "
                f"we found {len(importer_result.new_images)}"
            )

        try:
            civ = interface.create_instance(
                image=next(iter(importer_result.new_images))
            )
        except ValidationError:
            raise ComponentException(
                f"The image produced in {base_dir} is not valid"
            )

        return civ

    def _create_json_result(self, *, interface, reader):
        """Load and validate a JSON output file for the interface."""
        output_file = Path(safe_join("/output/", interface.relative_path))

        try:
            with TemporaryDirectory() as tmpdir:
                temp_file = Path(safe_join(tmpdir, "output.json"))
                get_file(container=reader, src=output_file, dest=temp_file)
                with open(temp_file, "rb") as file:
                    result = json.loads(
                        file.read().decode("utf-8"),
                        parse_constant=lambda x: None,  # Removes -inf, inf and NaN
                    )
        except NotFound:
            raise ComponentException(f"File {output_file} was not produced")
        except JSONDecodeError:
            raise ComponentException(
                f"The file produced at {output_file} is not valid json"
            )

        try:
            civ = interface.create_instance(value=result)
        except ValidationError:
            raise ComponentException(
                f"The file produced at {output_file} is not valid"
            )

        return civ

    def _create_file_result(self, *, interface, reader):
        """Store a raw output file for the interface."""
        output_file = Path(safe_join("/output/", interface.relative_path))

        try:
            with TemporaryDirectory() as tmpdir:
                temp_file = Path(safe_join(tmpdir, interface.relative_path))
                get_file(container=reader, src=output_file, dest=temp_file)
                with open(temp_file, "rb") as f:
                    civ = interface.create_instance(fileobj=f)
        except NotFound:
            raise ComponentException(f"File {output_file} was not produced")
        except ValidationError:
            raise ComponentException(
                f"The file produced at {output_file} is not valid"
            )

        return civ


class Service(DockerConnection):
    """A long-running service container (e.g. a workstation) for a job."""

    @property
    def _run_kwargs(self):
        kwargs = super()._run_kwargs
        kwargs.update(
            {
                # Allow networking for service containers
                "network_disabled": False,
                "network": settings.WORKSTATIONS_NETWORK_NAME,
            }
        )
        return kwargs

    @property
    def extra_hosts(self):
        if settings.DEBUG:
            # The workstation needs to communicate with the django api. In
            # production this happens automatically via the external DNS, but
            # when running in debug mode we need to pass through the developers
            # host via the workstations network gateway
            network = self._client.networks.list(
                names=[settings.WORKSTATIONS_NETWORK_NAME]
            )[0]
            return {
                "gc.localhost": network.attrs.get("IPAM")["Config"][0][
                    "Gateway"
                ]
            }
        else:
            return {}

    @property
    def container(self):
        return self._client.containers.get(f"{self._job_id}-service")

    def logs(self) -> str:
        """Get the container logs for this service."""
        try:
            logs = self.container.logs().decode()
        except APIError as e:
            logs = str(e)

        return logs

    def start(
        self,
        http_port: int,
        websocket_port: int,
        hostname: str,
        environment: dict = None,
    ):
        """Start the service container behind traefik routing rules."""
        self._pull_images()

        if "." in hostname:
            raise ValueError("Hostname cannot contain a '.'")

        traefik_labels = {
            "traefik.enable": "true",
            f"traefik.http.routers.{hostname}-http.rule": f"Host(`{hostname}`)",
            f"traefik.http.routers.{hostname}-http.service": f"{hostname}-http",
            f"traefik.http.routers.{hostname}-http.entrypoints": "workstation-http",
            f"traefik.http.services.{hostname}-http.loadbalancer.server.port": str(
                http_port
            ),
            f"traefik.http.routers.{hostname}-websocket.rule": f"Host(`{hostname}`)",
            f"traefik.http.routers.{hostname}-websocket.service": f"{hostname}-websocket",
            f"traefik.http.routers.{hostname}-websocket.entrypoints": "workstation-websocket",
            f"traefik.http.services.{hostname}-websocket.loadbalancer.server.port": str(
                websocket_port
            ),
        }

        if settings.COMPONENTS_PUBLISH_PORTS:
            bind_address = settings.COMPONENTS_PORT_ADDRESS
            try:
                ip_address(bind_address)
            except ValueError:
                # Not an IP address, lets look it up
                bind_address = getaddrinfo(bind_address, None)[0][4][0]
            ports = {
                http_port: (bind_address, None),
                websocket_port: (bind_address, None),
            }
        else:
            ports = {}

        self._client.containers.run(
            image=self._exec_image_sha256,
            name=f"{self._job_id}-service",
            remove=True,
            detach=True,
            labels={**self._labels, **traefik_labels},
            environment=environment or {},
            extra_hosts=self.extra_hosts,
            ports=ports,
            **self._run_kwargs,
        )


@contextmanager
def stop(container: ContainerApiMixin):
    """
    Stops a docker container which is running in detached mode

    :param container: An instance of a container
    :return:
    """
    try:
        yield container
    finally:
        container.stop()


def put_file(*, container: ContainerApiMixin, src: File, dest: Path) -> None:
    """
    Puts a file on the host into a container.

    This method will create an in memory tar archive, add the src file to this
    and upload it to the docker container where it will be unarchived at dest.

    :param container: The container to write to
    :param src: The path to the source file on the host
    :param dest: The path to the target file in the container
    :return:
    """
    # NOTE(review): sys.getsizeof is a fallback for objects without a
    # `size` attribute; it is a shallow object size, not a file size —
    # confirm all callers pass django File objects.
    with SpooledTemporaryFile(max_size=MAX_SPOOL_SIZE) as tar_b:
        tarinfo = tarfile.TarInfo(name=os.path.basename(dest))
        tarinfo.size = getattr(src, "size", sys.getsizeof(src))

        with tarfile.open(fileobj=tar_b, mode="w") as tar, src.open("rb") as f:
            tar.addfile(tarinfo, fileobj=f)

        tar_b.seek(0)
        container.put_archive(os.path.dirname(dest), tar_b)


def get_file(*, container: ContainerApiMixin, src: Path, dest: Path):
    """Gets a file from src in the container and writes it to dest"""
    tarstrm, info = container.get_archive(src)

    with SpooledTemporaryFile(max_size=MAX_SPOOL_SIZE) as ftmp, open(
        dest, "wb"
    ) as outfile:
        for t in tarstrm:
            ftmp.write(t)
        ftmp.seek(0)
        # Close the archive deterministically rather than leaking it.
        with tarfile.open(mode="r", fileobj=ftmp) as tar:
            infile = tar.extractfile(src.name)
            while buffer := infile.read(1024):
                outfile.write(buffer)
import json import logging import os import sys import tarfile from contextlib import contextmanager from ipaddress import ip_address from json import JSONDecodeError from pathlib import Path from random import randint from socket import getaddrinfo from tempfile import SpooledTemporaryFile, TemporaryDirectory from time import sleep import docker from dateutil.parser import isoparse from django.conf import settings from django.core.exceptions import ValidationError from django.core.files import File from django.db import transaction from django.utils._os import safe_join from docker.api.container import ContainerApiMixin from docker.errors import APIError, DockerException, ImageNotFound, NotFound from docker.tls import TLSConfig from docker.types import LogConfig from panimg.image_builders import image_builder_mhd, image_builder_tiff from requests import HTTPError from grandchallenge.cases.tasks import import_images from grandchallenge.components.backends.exceptions import ComponentException from grandchallenge.components.backends.utils import LOGLINES, user_error from grandchallenge.components.registry import _get_registry_auth_config logger = logging.getLogger(__name__) MAX_SPOOL_SIZE = 1_000_000_000 # 1GB class DockerConnection: """ Provides a client with a connection to a docker host, provisioned for running the container exec_image. 
""" def __init__( self, *, job_id: str, exec_image_sha256: str, exec_image_repo_tag: str, memory_limit: int, time_limit: int, requires_gpu: bool, ): super().__init__() self._job_id = job_id self._exec_image_sha256 = exec_image_sha256 self._exec_image_repo_tag = exec_image_repo_tag self._memory_limit = memory_limit self._requires_gpu = requires_gpu if time_limit != settings.CELERY_TASK_TIME_LIMIT: logger.warning("Time limits are not implemented in this backend") self.__client = None @property def _client(self): if self.__client is None: client_kwargs = {"base_url": settings.COMPONENTS_DOCKER_BASE_URL} if settings.COMPONENTS_DOCKER_TLS_VERIFY: tlsconfig = TLSConfig( verify=True, client_cert=( settings.COMPONENTS_DOCKER_TLS_CERT, settings.COMPONENTS_DOCKER_TLS_KEY, ), ca_cert=settings.COMPONENTS_DOCKER_CA_CERT, ) client_kwargs.update({"tls": tlsconfig}) self.__client = docker.DockerClient(**client_kwargs) return self.__client @property def _labels(self): return {"job": f"{self._job_id}", "traefik.enable": "false"} @property def _run_kwargs(self): return { "init": True, "network_disabled": True, "mem_limit": f"{self._memory_limit}g", # Set to the same as mem_limit to avoid using swap "memswap_limit": f"{self._memory_limit}g", "shm_size": f"{settings.COMPONENTS_SHARED_MEMORY_SIZE}m", "cpu_period": settings.COMPONENTS_CPU_PERIOD, "cpu_quota": settings.COMPONENTS_CPU_QUOTA, "cpu_shares": settings.COMPONENTS_CPU_SHARES, "cpuset_cpus": self._cpuset_cpus, "runtime": settings.COMPONENTS_DOCKER_RUNTIME, "cap_drop": ["all"], "security_opt": ["no-new-privileges"], "pids_limit": settings.COMPONENTS_PIDS_LIMIT, "log_config": LogConfig( type=LogConfig.types.JSON, config={"max-size": "1g"} ), } @property def _cpuset_cpus(self): """ The cpuset_cpus as a string. Returns ------- The setting COMPONENTS_CPUSET_CPUS if this is set to a none-empty string. Otherwise, works out the available cpu from the os. 
""" if settings.COMPONENTS_CPUSET_CPUS: return settings.COMPONENTS_CPUSET_CPUS else: # Get the cpu count, note that this is setting up the container # so that it can use all of the CPUs on the system. To limit # the containers execution set COMPONENTS_CPUSET_CPUS # externally. cpus = os.cpu_count() if cpus in [None, 1]: return "0" else: return f"0-{cpus - 1}" @staticmethod def __retry_docker_obj_prune(*, obj, filters: dict): # Retry and exponential backoff of the prune command as only 1 prune # operation can occur at a time on a docker host num_retries = 0 e = Exception while num_retries < 10: try: obj.prune(filters=filters) break except (APIError, HTTPError) as _e: num_retries += 1 e = _e sleep((2**num_retries) + (randint(0, 1000) / 1000)) else: raise e def stop_and_cleanup(self, timeout: int = 10): """Stops and prunes all containers associated with this job.""" flt = {"label": f"job={self._job_id}"} for c in self._client.containers.list(filters=flt): c.stop(timeout=timeout) self.__retry_docker_obj_prune(obj=self._client.containers, filters=flt) self.__retry_docker_obj_prune(obj=self._client.volumes, filters=flt) def _pull_images(self): try: self._client.images.get(name=self._exec_image_sha256) except ImageNotFound: # This can take a long time so increase the default timeout #1330 old_timeout = self._client.api.timeout self._client.api.timeout = 600 # 10 minutes self._client.images.pull( repository=self._exec_image_repo_tag, auth_config=_get_registry_auth_config(), ) self._client.api.timeout = old_timeout class DockerExecutor(DockerConnection): IS_EVENT_DRIVEN = False @staticmethod def get_job_params(*, event): raise NotImplementedError @classmethod def update_filesystem(cls): pass def provision(self, *, input_civs, input_prefixes): self._pull_images() self._create_io_volumes() self._provision_input_volume( input_civs=input_civs, input_prefixes=input_prefixes ) self._chmod_volumes() def execute(self): self._pull_images() self._execute_container() def 
handle_event(self): pass def get_outputs(self, *, output_interfaces): self._pull_images() return self._get_outputs(output_interfaces=output_interfaces) def deprovision(self): self.stop_and_cleanup() @property def stdout(self): try: container = self._execution_container return ( container.logs( stdout=True, stderr=False, timestamps=True, tail=LOGLINES ) .replace(b"\x00", b"") .decode("utf-8") ) except DockerException as e: logger.warning(f"Could not fetch stdout: {e}") return "" @property def stderr(self): try: container = self._execution_container return ( container.logs( stdout=False, stderr=True, timestamps=True, tail=LOGLINES ) .replace(b"\x00", b"") .decode("utf-8") ) except DockerException as e: logger.warning(f"Could not fetch stderr: {e}") return "" @property def duration(self): try: container = self._execution_container if container.status == "exited": state = self._client.api.inspect_container( container=container.id ) started_at = state["State"]["StartedAt"] finished_at = state["State"]["FinishedAt"] return isoparse(finished_at) - isoparse(started_at) else: return None except DockerException as e: logger.warning(f"Could not inspect container: {e}") return None @property def _input_volume_name(self): return f"{self._job_id}-input" @property def _output_volume_name(self): return f"{self._job_id}-output" @property def _execution_container_name(self): return f"{self._job_id}-executor" @property def _execution_container(self): return self._client.containers.get( container_id=self._execution_container_name ) def _pull_images(self): try: self._client.images.get(name=settings.COMPONENTS_IO_IMAGE) except ImageNotFound: self._client.images.pull(repository=settings.COMPONENTS_IO_IMAGE) super()._pull_images() def _create_io_volumes(self): for volume in [self._input_volume_name, self._output_volume_name]: self._client.volumes.create(name=volume, labels=self._labels) def _provision_input_volume(self, *, input_civs, input_prefixes): with stop( 
self._client.containers.run( image=settings.COMPONENTS_IO_IMAGE, volumes={ self._input_volume_name: {"bind": "/input/", "mode": "rw"} }, name=f"{self._job_id}-writer", remove=True, detach=True, tty=True, labels=self._labels, **self._run_kwargs, ) ) as writer: self._copy_input_files( writer=writer, input_civs=input_civs, input_prefixes=input_prefixes, ) def _copy_input_files(self, *, writer, input_civs, input_prefixes): for civ in input_civs: prefix = "/input/" if str(civ.pk) in input_prefixes: prefix = safe_join(prefix, input_prefixes[str(civ.pk)]) if civ.decompress: dest = Path( safe_join("/tmp/", prefix.lstrip("/"), "submission-src") ) else: dest = Path(safe_join(prefix, civ.relative_path)) writer.exec_run(f"mkdir -p {dest.parent}") put_file(container=writer, src=civ.input_file, dest=dest) if civ.decompress: # Decompression is legacy for submission evaluations where # we offered to unzip prediction files for challenge admins if prefix[0] != "/" or prefix[-1] != "/": raise RuntimeError(f"Prefix {prefix} is not a full path") writer.exec_run(f"unzip {dest} -d {prefix} -x '__MACOSX/*'") # Remove a duplicated directory input_files = ( writer.exec_run(f"ls -1 {prefix}") .output.decode() .splitlines() ) if ( len(input_files) == 1 and not writer.exec_run( f"ls -d {prefix}{input_files[0]}/" ).exit_code ): writer.exec_run( f'/bin/sh -c "mv {prefix}{input_files[0]}/* {prefix} ' f'&& rm -r {prefix}{input_files[0]}/"' ) def _chmod_volumes(self): """Ensure that the i/o directories are writable.""" self._client.containers.run( image=settings.COMPONENTS_IO_IMAGE, volumes={ self._input_volume_name: {"bind": "/input/", "mode": "rw"}, self._output_volume_name: {"bind": "/output/", "mode": "rw"}, }, name=f"{self._job_id}-chmod-volumes", command="chmod -R 0777 /input/ /output/", remove=True, labels=self._labels, **self._run_kwargs, ) def _execute_container(self) -> None: with stop( self._client.containers.run( image=self._exec_image_sha256, volumes={ self._input_volume_name: {"bind": 
"/input/", "mode": "ro"}, self._output_volume_name: { "bind": "/output/", "mode": "rw", }, }, name=self._execution_container_name, detach=True, labels=self._labels, environment={ "NVIDIA_VISIBLE_DEVICES": settings.COMPONENTS_NVIDIA_VISIBLE_DEVICES }, **self._run_kwargs, ) ) as c: container_state = c.wait() exit_code = int(container_state["StatusCode"]) if exit_code == 137: raise ComponentException( "The container was killed as it exceeded the memory limit " f"of {self._run_kwargs['mem_limit']}." ) elif exit_code != 0: raise ComponentException(user_error(self.stderr)) def _get_outputs(self, *, output_interfaces): """Create ComponentInterfaceValues from the output interfaces""" outputs = [] with stop( self._client.containers.run( image=settings.COMPONENTS_IO_IMAGE, volumes={ self._output_volume_name: { "bind": "/output/", "mode": "ro", } }, name=f"{self._job_id}-reader", remove=True, detach=True, tty=True, labels=self._labels, **self._run_kwargs, ) ) as reader: with transaction.atomic(): # Atomic block required as create_instance needs to # create interfaces in order to store the files for interface in output_interfaces: if interface.is_image_kind: res = self._create_images_result( interface=interface, reader=reader ) elif interface.is_json_kind: res = self._create_json_result( interface=interface, reader=reader ) else: res = self._create_file_result( interface=interface, reader=reader ) outputs.append(res) return outputs def _create_images_result(self, *, interface, reader): base_dir = Path(safe_join("/output/", interface.relative_path)) found_files = reader.exec_run(f"find {base_dir} -type f") if found_files.exit_code != 0: raise ComponentException(f"Error listing {base_dir}") output_files = [ base_dir / Path(f) for f in found_files.output.decode().splitlines() ] if not output_files: raise ComponentException(f"{base_dir} is empty") with TemporaryDirectory() as tmpdir: for file in output_files: temp_file = Path(safe_join(tmpdir, file.relative_to(base_dir))) 
temp_file.parent.mkdir(parents=True, exist_ok=True) get_file(container=reader, src=file, dest=temp_file) importer_result = import_images( input_directory=tmpdir, builders=[image_builder_mhd, image_builder_tiff], ) if len(importer_result.new_images) == 0: raise ComponentException(f"No images imported from {base_dir}") elif len(importer_result.new_images) > 1: raise ComponentException( f"Only 1 image should be produced in {base_dir}, " f"we found {len(importer_result.new_images)}" ) try: civ = interface.create_instance( image=next(iter(importer_result.new_images)) ) except ValidationError: raise ComponentException( f"The image produced in {base_dir} is not valid" ) return civ def _create_json_result(self, *, interface, reader): output_file = Path(safe_join("/output/", interface.relative_path)) try: with TemporaryDirectory() as tmpdir: temp_file = Path(safe_join(tmpdir, "output.json")) get_file(container=reader, src=output_file, dest=temp_file) with open(temp_file, "rb") as file: result = json.loads( file.read().decode("utf-8"), parse_constant=lambda x: None, # Removes -inf, inf and NaN ) except NotFound: raise ComponentException(f"File {output_file} was not produced") except JSONDecodeError: raise ComponentException( f"The file produced at {output_file} is not valid json" ) try: civ = interface.create_instance(value=result) except ValidationError: raise ComponentException( f"The file produced at {output_file} is not valid" ) return civ def _create_file_result(self, *, interface, reader): output_file = Path(safe_join("/output/", interface.relative_path)) try: with TemporaryDirectory() as tmpdir: temp_file = Path(safe_join(tmpdir, interface.relative_path)) get_file(container=reader, src=output_file, dest=temp_file) with open(temp_file, "rb") as f: civ = interface.create_instance(fileobj=f) except NotFound: raise ComponentException(f"File {output_file} was not produced") except ValidationError: raise ComponentException( f"The file produced at {output_file} is not valid" 
) return civ class Service(DockerConnection): @property def _run_kwargs(self): kwargs = super()._run_kwargs kwargs.update( { # Allow networking for service containers "network_disabled": False, "network": settings.WORKSTATIONS_NETWORK_NAME, } ) return kwargs @property def extra_hosts(self): if settings.DEBUG: # The workstation needs to communicate with the django api. In # production this happens automatically via the external DNS, but # when running in debug mode we need to pass through the developers # host via the workstations network gateway network = self._client.networks.list( names=[settings.WORKSTATIONS_NETWORK_NAME] )[0] return { "gc.localhost": network.attrs.get("IPAM")["Config"][0][ "Gateway" ] } else: return {} @property def container(self): return self._client.containers.get(f"{self._job_id}-service") def logs(self) -> str: """Get the container logs for this service.""" try: logs = self.container.logs().decode() except APIError as e: logs = str(e) return logs def start( self, http_port: int, websocket_port: int, hostname: str, environment: dict = None, ): self._pull_images() if "." 
in hostname: raise ValueError("Hostname cannot contain a '.'") traefik_labels = { "traefik.enable": "true", f"traefik.http.routers.{hostname}-http.rule": f"Host(`{hostname}`)", f"traefik.http.routers.{hostname}-http.service": f"{hostname}-http", f"traefik.http.routers.{hostname}-http.entrypoints": "workstation-http", f"traefik.http.services.{hostname}-http.loadbalancer.server.port": str( http_port ), f"traefik.http.routers.{hostname}-websocket.rule": f"Host(`{hostname}`)", f"traefik.http.routers.{hostname}-websocket.service": f"{hostname}-websocket", f"traefik.http.routers.{hostname}-websocket.entrypoints": "workstation-websocket", f"traefik.http.services.{hostname}-websocket.loadbalancer.server.port": str( websocket_port ), } if settings.COMPONENTS_PUBLISH_PORTS: bind_address = settings.COMPONENTS_PORT_ADDRESS try: ip_address(bind_address) except ValueError: # Not an IP address, lets look it up bind_address = getaddrinfo(bind_address, None)[0][4][0] ports = { http_port: (bind_address, None), websocket_port: (bind_address, None), } else: ports = {} self._client.containers.run( image=self._exec_image_sha256, name=f"{self._job_id}-service", remove=True, detach=True, labels={**self._labels, **traefik_labels}, environment=environment or {}, extra_hosts=self.extra_hosts, ports=ports, **self._run_kwargs, ) @contextmanager def stop(container: ContainerApiMixin): """ Stops a docker container which is running in detached mode :param container: An instance of a container :return: """ try: yield container finally: container.stop() def put_file(*, container: ContainerApiMixin, src: File, dest: Path) -> (): """ Puts a file on the host into a container. This method will create an in memory tar archive, add the src file to this and upload it to the docker container where it will be unarchived at dest. 
:param container: The container to write to :param src: The path to the source file on the host :param dest: The path to the target file in the container :return: """ with SpooledTemporaryFile(max_size=MAX_SPOOL_SIZE) as tar_b: tarinfo = tarfile.TarInfo(name=os.path.basename(dest)) tarinfo.size = getattr(src, "size", sys.getsizeof(src)) with tarfile.open(fileobj=tar_b, mode="w") as tar, src.open("rb") as f: tar.addfile(tarinfo, fileobj=f) tar_b.seek(0) container.put_archive(os.path.dirname(dest), tar_b) def get_file(*, container: ContainerApiMixin, src: Path, dest: Path): """Gets a file from src in the container and writes it to dest""" tarstrm, info = container.get_archive(src) with SpooledTemporaryFile(max_size=MAX_SPOOL_SIZE) as ftmp, open( dest, "wb" ) as outfile: for t in tarstrm: ftmp.write(t) ftmp.seek(0) tar = tarfile.open(mode="r", fileobj=ftmp) infile = tar.extractfile(src.name) buffer = True while buffer: buffer = infile.read(1024) outfile.write(buffer)
"""
TODO:
 - glaring unresolved ambiguity between selecting from mats and tbls
 - ** NOT NECESSARILY FORCED TO REFRESH ALL MATERIALIZATIONS TOGETHER... **
 - baked anns? materializations <-> rows?

  table -> invalidation -> rows -> refresh -> ...
    |
    V
  materializations
"""
import logging
import typing as ta

from omnibus import check
from omnibus import collections as ocol
from omnibus import dataclasses as dc
from omnibus import properties

from .. import connectors as ctrs
from .. import elements as els
from .. import ops
from .. import targets as tars
from ... import metadata as md
from ...trees import nodes as no
from ...trees import rendering as ren
from ...trees import transforms as ttfm
from ...trees.types import AstQuery
from ...types import QualifiedName
from ..utils import parse_simple_select_table
from ..utils import parse_simple_select_tables
from .elements import Materializer


log = logging.getLogger(__name__)


class PlanningElementProcessor(els.InstanceElementProcessor):
    """Plans Materializer elements for every Materialization target in the PLAN phase."""

    def __init__(
            self,
            ctors: ctrs.ConnectorSet,
    ) -> None:
        super().__init__()
        self._ctors = check.isinstance(ctors, ctrs.ConnectorSet)

    @classmethod
    def phases(cls) -> ta.Iterable[els.Phase]:
        return [els.Phases.PLAN]

    @property
    def ctors(self) -> ctrs.ConnectorSet:
        return self._ctors

    class Instance(els.InstanceElementProcessor.Instance['PlanningElementProcessor']):

        @properties.cached
        @property
        def matches(self) -> ta.AbstractSet[els.Element]:
            # Materializations that do not yet have a Materializer planned for them.
            matr_mat_ids = {matr.target.id for matr in self.input.get_type_set(Materializer)}
            return ocol.IdentitySet([
                mat
                for mat in self.input.get_type_set(tars.Materialization)
                if mat.id not in matr_mat_ids
            ])

        @properties.cached
        @property
        def rows_sets_by_table_id(self) -> ta.Mapping[els.Id, ta.AbstractSet[tars.Rows]]:
            # Index Rows targets by the id of the table they populate.
            return ocol.set_dict(self.input.get_type_set(tars.Rows), lambda r: r.table.id, identity_set=True)

        @properties.cached
        @property
        def mat_sets_by_table_id(self) -> ta.Mapping[els.Id, ta.AbstractSet[tars.Materialization]]:
            # Index Materialization targets by the id of the table they materialize.
            return ocol.set_dict(self.input.get_type_set(tars.Materialization), lambda m: m.table.id, identity_set=True)  # noqa

        def build_rows_op(self, rows: tars.Rows, dst: QualifiedName) -> ops.Op:
            """Build the op that loads ``rows`` into the destination table ``dst``.

            Tries three strategies in order, falling through on ValueError:
            simple ``select *`` passthrough, multi-table eval, and
            single-connector pushdown exec. Raises ValueError if none applies.
            """
            query = ren.render_query(rows.query)

            try:
                src_name = parse_simple_select_table(query, star=True)
            except ValueError:
                pass
            else:
                # FIX: inner join-string must use single quotes — reusing the
                # f-string's double quotes is a SyntaxError before Python 3.12.
                src_query = f"select * from {'.'.join(src_name[1:])}"
                return ops.InsertIntoSelect(dst, src_name[0], src_query)

            try:
                tbl_names = parse_simple_select_tables(query)
                if tbl_names:
                    raise ValueError
            except ValueError:
                pass
            else:
                return ops.InsertIntoEval(dst, query)

            try:
                # Pushdown is only possible when every source table is
                # materialized on exactly the destination connector.
                ctor_ids = set()
                for src_tbl in self.input.analyze(tars.StrictTableDependenciesAnalysis).by_rows[rows].name_sets_by_table:  # noqa
                    for src_mat in self.mat_sets_by_table_id.get(src_tbl.id):
                        ctor_ids.add(src_mat.connector.id)
                if len(ctor_ids) != 1 or check.single(ctor_ids) != dst[0]:
                    raise ValueError
            except ValueError:
                pass
            else:
                qb = self.input.analyze(els.queries.QueryBasicAnalysis)[rows][rows.query]
                tqns = {t.name.name for t in qb.get_node_type_set(no.Table)}
                check.state(all(n[0] == dst[0] for n in tqns))
                # Strip the connector prefix from every table name before rendering.
                reps = {n: QualifiedName(n[1:]) for n in tqns}
                rq = ttfm.ReplaceNamesTransformer(reps)(check.isinstance(rows.query, AstQuery).root)
                from ...trees import alchemy as alch
                arq = alch.transmute(rq)
                if log.isEnabledFor(logging.DEBUG):
                    log.debug(repr(arq))
                eq = f'insert into {QualifiedName(dst[1:]).dotted} {ren.render(rq)}'
                return ops.Exec(dst[0], eq)

            raise ValueError(rows)

        @properties.stateful_cached
        @property
        def output(self) -> ta.Iterable[els.Element]:
            """Return the input elements plus one Materializer per Materialization."""
            ret = list(self.input)
            for mat in self.input.get_type_set(tars.Materialization):
                ctr = self.owner._ctors[mat.connector.id]
                if not isinstance(ctr, ctrs.impls.sql.SqlConnector):
                    # FIXME: lol
                    matr = Materializer(mat, [], ops.List([]))
                    ret.append(matr)
                    continue

                dst = QualifiedName([mat.connector.id, *mat.name])
                tbl = self.input[mat.table]
                mdt = check.isinstance(tbl.md, md.Table)

                # Full refresh: drop and recreate the destination before loading.
                plan = [
                    ops.DropTable(dst),
                    ops.CreateTable(dc.replace(mdt, name=dst)),
                ]

                srcs: ta.Set[els.Id] = set()
                for rows in self.rows_sets_by_table_id.get(mat.table.id, []):
                    plan.append(self.build_rows_op(rows, dst))
                    for src_tbl in self.input.analyze(tars.StrictTableDependenciesAnalysis).by_rows[rows].name_sets_by_table:  # noqa
                        for src_mat in self.mat_sets_by_table_id.get(src_tbl.id):
                            srcs.add(src_mat.id)

                matr = Materializer(mat, srcs, ops.List(plan))
                ret.append(matr)

            return ret
"""
TODO:
 - glaring unresolved ambiguity between selecting from mats and tbls
 - ** NOT NECESSARILY FORCED TO REFRESH ALL MATERIALIZATIONS TOGETHER... **
 - baked anns? materializations <-> rows?

  table -> invalidation -> rows -> refresh -> ...
    |
    V
  materializations
"""
import logging
import typing as ta

from omnibus import check
from omnibus import collections as ocol
from omnibus import dataclasses as dc
from omnibus import properties

from .. import connectors as ctrs
from .. import elements as els
from .. import ops
from .. import targets as tars
from ... import metadata as md
from ...trees import nodes as no
from ...trees import rendering as ren
from ...trees import transforms as ttfm
from ...trees.types import AstQuery
from ...types import QualifiedName
from ..utils import parse_simple_select_table
from ..utils import parse_simple_select_tables
from .elements import Materializer


log = logging.getLogger(__name__)


class PlanningElementProcessor(els.InstanceElementProcessor):
    """Plans Materializer elements for Materialization targets during the PLAN phase."""

    def __init__(
            self,
            ctors: ctrs.ConnectorSet,
    ) -> None:
        super().__init__()
        self._ctors = check.isinstance(ctors, ctrs.ConnectorSet)

    @classmethod
    def phases(cls) -> ta.Iterable[els.Phase]:
        return [els.Phases.PLAN]

    @property
    def ctors(self) -> ctrs.ConnectorSet:
        return self._ctors

    class Instance(els.InstanceElementProcessor.Instance['PlanningElementProcessor']):

        @properties.cached
        @property
        def matches(self) -> ta.AbstractSet[els.Element]:
            # Materializations with no Materializer planned for them yet.
            matr_mat_ids = {matr.target.id for matr in self.input.get_type_set(Materializer)}
            return ocol.IdentitySet([
                mat
                for mat in self.input.get_type_set(tars.Materialization)
                if mat.id not in matr_mat_ids
            ])

        @properties.cached
        @property
        def rows_sets_by_table_id(self) -> ta.Mapping[els.Id, ta.AbstractSet[tars.Rows]]:
            # Index Rows targets by the id of the table they populate.
            return ocol.set_dict(self.input.get_type_set(tars.Rows), lambda r: r.table.id, identity_set=True)

        @properties.cached
        @property
        def mat_sets_by_table_id(self) -> ta.Mapping[els.Id, ta.AbstractSet[tars.Materialization]]:
            # Index Materialization targets by the id of the table they materialize.
            return ocol.set_dict(self.input.get_type_set(tars.Materialization), lambda m: m.table.id, identity_set=True)  # noqa

        def build_rows_op(self, rows: tars.Rows, dst: QualifiedName) -> ops.Op:
            """Build the op that loads ``rows`` into the destination ``dst``.

            Tries, in order: simple ``select *`` passthrough, multi-table eval,
            and single-connector pushdown exec; each strategy falls through on
            ValueError. Raises ValueError(rows) if no strategy applies.
            """
            query = ren.render_query(rows.query)

            try:
                src_name = parse_simple_select_table(query, star=True)
            except ValueError:
                pass
            else:
                src_query = f"select * from {'.'.join(src_name[1:])}"
                return ops.InsertIntoSelect(dst, src_name[0], src_query)

            try:
                tbl_names = parse_simple_select_tables(query)
                if tbl_names:
                    raise ValueError
            except ValueError:
                pass
            else:
                return ops.InsertIntoEval(dst, query)

            try:
                # Pushdown requires every source table to be materialized on
                # exactly the destination connector.
                ctor_ids = set()
                for src_tbl in self.input.analyze(tars.StrictTableDependenciesAnalysis).by_rows[rows].name_sets_by_table:  # noqa
                    for src_mat in self.mat_sets_by_table_id.get(src_tbl.id):
                        ctor_ids.add(src_mat.connector.id)
                if len(ctor_ids) != 1 or check.single(ctor_ids) != dst[0]:
                    raise ValueError
            except ValueError:
                pass
            else:
                qb = self.input.analyze(els.queries.QueryBasicAnalysis)[rows][rows.query]
                tqns = {t.name.name for t in qb.get_node_type_set(no.Table)}
                check.state(all(n[0] == dst[0] for n in tqns))
                # Strip the connector prefix from table names before rendering.
                reps = {n: QualifiedName(n[1:]) for n in tqns}
                rq = ttfm.ReplaceNamesTransformer(reps)(check.isinstance(rows.query, AstQuery).root)
                from ...trees import alchemy as alch
                arq = alch.transmute(rq)
                if log.isEnabledFor(logging.DEBUG):
                    log.debug(repr(arq))
                eq = f'insert into {QualifiedName(dst[1:]).dotted} {ren.render(rq)}'
                return ops.Exec(dst[0], eq)

            raise ValueError(rows)

        @properties.stateful_cached
        @property
        def output(self) -> ta.Iterable[els.Element]:
            """Return the input elements plus one Materializer per Materialization."""
            ret = list(self.input)
            for mat in self.input.get_type_set(tars.Materialization):
                ctr = self.owner._ctors[mat.connector.id]
                if not isinstance(ctr, ctrs.impls.sql.SqlConnector):
                    # FIXME: lol
                    matr = Materializer(mat, [], ops.List([]))
                    ret.append(matr)
                    continue

                dst = QualifiedName([mat.connector.id, *mat.name])
                tbl = self.input[mat.table]
                mdt = check.isinstance(tbl.md, md.Table)

                # Full refresh: drop and recreate the destination before loading.
                plan = [
                    ops.DropTable(dst),
                    ops.CreateTable(dc.replace(mdt, name=dst)),
                ]

                srcs: ta.Set[els.Id] = set()
                for rows in self.rows_sets_by_table_id.get(mat.table.id, []):
                    plan.append(self.build_rows_op(rows, dst))
                    for src_tbl in self.input.analyze(tars.StrictTableDependenciesAnalysis).by_rows[rows].name_sets_by_table:  # noqa
                        for src_mat in self.mat_sets_by_table_id.get(src_tbl.id):
                            srcs.add(src_mat.id)

                matr = Materializer(mat, srcs, ops.List(plan))
                ret.append(matr)

            return ret
# Standard library
import io
import logging
import math
import os
import time

# Third party
import azure.functions as func
import pandas as pd
import psycopg2
from shapely.geometry import Polygon
import xarray

# Local
from .utils import batches, human_readable, mean_step_size


class Processor:
    """Loads NetCDF prediction data and syncs it into a PostGIS database.

    NOTE: all f-string interpolations of ``self.tables`` use single-quoted
    keys — reusing the f-string's own double quotes inside ``{...}`` is a
    SyntaxError on Python < 3.12.
    """

    def __init__(self, batch_size):
        """Constructor.

        Args:
            batch_size: number of records committed per database batch.
        """
        self.batch_size = batch_size
        self.cnxn_ = None     # lazily-created psycopg2 connection
        self.cursor_ = None   # lazily-created cursor on that connection
        self.tables = {
            "geom": "cell",
            "predictions": "prediction",
            "latest": "prediction_latest",
        }
        self.xr = None        # xarray.Dataset once load() succeeds

    def __del__(self):
        """Destructor: close the database connection if one was opened."""
        if self.cnxn_:
            self.cnxn_.close()

    @property
    def cnxn(self):
        """Connect to the database or return an existing connection."""
        if not self.cnxn_:
            try:
                db_host = os.getenv("PSQL_HOST")
                db_name = os.getenv("PSQL_DB")
                db_user = os.getenv("PSQL_USER")
                db_pwd = os.getenv("PSQL_PWD")
                self.cnxn_ = psycopg2.connect(
                    dbname=db_name,
                    port="5432",
                    # Azure PostgreSQL expects "user@host" as the login name.
                    user=f"{db_user}@{db_host}",
                    password=db_pwd,
                    host=db_host,
                )
                logging.info(f"Connected to database {db_name} on {db_host}.")
            except psycopg2.OperationalError:
                logging.error(f"Failed to connect to database {db_name} on {db_host}!")
                raise
        return self.cnxn_

    @property
    def cursor(self):
        """Construct a database cursor or return an existing cursor."""
        if not self.cursor_:
            self.cursor_ = self.cnxn.cursor()
        return self.cursor_

    def load(self, inputBlob: func.InputStream) -> None:
        """Load data from a file into an xarray."""
        logging.info(f"Attempting to load {inputBlob.name}...")
        try:
            self.xr = xarray.open_dataset(io.BytesIO(inputBlob.read()))
            logging.info(
                f"Loaded NetCDF data into array with dimensions: {self.xr.dims}."
            )
        except ValueError as exc:
            # Failure is logged, not raised; callers must check self.xr.
            logging.error(f"Could not load NetCDF data from {inputBlob.name}!")
            logging.error(exc)

    def update_geometries(self) -> None:
        """Update the table of geometries, creating it if necessary."""
        # Ensure that geometry table exists
        logging.info(
            # FIX: single-quoted dict key inside a double-quoted f-string
            # (double quotes here were a SyntaxError before Python 3.12).
            f"Ensuring that geometries table '{self.tables['geom']}' exists..."
        )
        self.cursor.execute(
            f"""
            CREATE TABLE IF NOT EXISTS {self.tables['geom']} (
                cell_id SERIAL PRIMARY KEY,
                centroid_x int4,
                centroid_y int4,
                geom_6931 geometry,
                geom_4326 geometry,
                UNIQUE (centroid_x, centroid_y)
            );
            """
        )
        self.cnxn.commit()
        logging.info(f"Ensured that geometries table '{self.tables['geom']}' exists.")

        # Calculate the size of the grid cells
        logging.info("Identifying cell geometries from input data...")
        centroids_x_km, centroids_y_km = self.xr.xc.values, self.xr.yc.values
        x_delta_m = 1000 * int(0.5 * mean_step_size(centroids_x_km))
        y_delta_m = 1000 * int(0.5 * mean_step_size(centroids_y_km))

        # Construct list of geometry records
        records = []
        for centroid_x_km in centroids_x_km:
            centroid_x_m = int(1000 * centroid_x_km)
            for centroid_y_km in centroids_y_km:
                centroid_y_m = int(1000 * centroid_y_km)
                x_min_m, x_max_m = centroid_x_m - x_delta_m, centroid_x_m + x_delta_m
                y_min_m, y_max_m = centroid_y_m - y_delta_m, centroid_y_m + y_delta_m
                # Closed ring around the cell centroid (first point repeated).
                geometry = Polygon(
                    [
                        [x_min_m, y_max_m],
                        [x_max_m, y_max_m],
                        [x_max_m, y_min_m],
                        [x_min_m, y_min_m],
                        [x_min_m, y_max_m],
                    ]
                )
                records.append((centroid_x_m, centroid_y_m, geometry.wkt, geometry.wkt))
        logging.info(f"Identified {len(records)} cell geometries.")

        # Insert geometries into the database
        logging.info(
            f"Ensuring that '{self.tables['geom']}' contains all {len(records)} geometries..."
        )
        n_batches = int(math.ceil(len(records) / self.batch_size))
        start_time = time.monotonic()
        for idx, record_batch in enumerate(batches(records, self.batch_size), start=1):
            logging.info(
                f"Batch {idx}/{n_batches}. Preparing to insert/update {len(record_batch)} geometries..."
            )
            for record in record_batch:
                self.cursor.execute(
                    f"""
                    INSERT INTO {self.tables['geom']} (cell_id, centroid_x, centroid_y, geom_6931, geom_4326)
                        VALUES(DEFAULT, %s, %s, ST_GeomFromText(%s, 6931), ST_Transform(ST_GeomFromText(%s, 6931), 4326))
                        ON CONFLICT DO NOTHING;
                    """,
                    record,
                )
            self.cnxn.commit()
            remaining_time = (time.monotonic() - start_time) * (n_batches / idx - 1)
            logging.info(
                f"Batch {idx}/{n_batches}. Inserted/updated {len(record_batch)} geometries. Time remaining {human_readable(remaining_time)}."
            )
        logging.info(f"Ensured that '{self.tables['geom']}' contains all geometries.")

    def update_predictions(self) -> None:
        """Update the table of predictions, creating it if necessary"""
        # Ensure that prediction table exists
        logging.info(
            f"Ensuring that predictions table '{self.tables['predictions']}' exists..."
        )
        self.cursor.execute(
            f"""
            CREATE TABLE IF NOT EXISTS {self.tables['predictions']} (
                prediction_id SERIAL PRIMARY KEY,
                date date,
                leadtime int4,
                cell_id int4,
                mean float4,
                stddev float4,
                UNIQUE (date, leadtime, cell_id),
                CONSTRAINT fk_cell_id
                    FOREIGN KEY(cell_id)
                    REFERENCES {self.tables['geom']}(cell_id)
            );
            """
        )
        self.cnxn.commit()
        logging.info(
            f"Ensured that predictions table '{self.tables['predictions']}' exists."
        )

        # Construct a list of values
        logging.info("Loading predictions from input data...")
        df_predictions = (
            self.xr.where(self.xr["mean"] > 0).to_dataframe().dropna().reset_index()
        )
        df_predictions["xc_m"] = pd.to_numeric(
            1000 * df_predictions["xc"], downcast="integer"
        )
        df_predictions["yc_m"] = pd.to_numeric(
            1000 * df_predictions["yc"], downcast="integer"
        )
        logging.info(f"Loaded {df_predictions.shape[0]} predictions from input data.")

        # Get cell IDs by loading existing cells and merging onto list of predictions
        logging.info("Identifying cell IDs for all predictions...")
        df_cells = pd.io.sql.read_sql_query(
            f"SELECT cell_id, centroid_x, centroid_y FROM {self.tables['geom']};",
            self.cnxn,
        )
        df_merged = pd.merge(
            df_predictions,
            df_cells,
            how="left",
            left_on=["xc_m", "yc_m"],
            right_on=["centroid_x", "centroid_y"],
        )
        logging.info(f"Identified cell IDs for {df_merged.shape[0]} predictions.")

        # Insert predictions into the database
        logging.info(
            f"Ensuring that table '{self.tables['predictions']}' contains all {df_merged.shape[0]} predictions..."
        )
        n_batches = int(math.ceil(df_merged.shape[0] / self.batch_size))
        start_time = time.monotonic()
        for idx, record_batch in enumerate(
            batches(df_merged, self.batch_size), start=1
        ):
            logging.info(
                f"Batch {idx}/{n_batches}. Preparing to insert/update {len(record_batch)} predictions..."
            )
            for record in record_batch:
                self.cursor.execute(
                    f"""
                    INSERT INTO {self.tables['predictions']}
                        (prediction_id, date, leadtime, cell_id, mean, stddev)
                    VALUES(
                        DEFAULT, %s, %s, %s, %s, %s
                    )
                    ON CONFLICT DO NOTHING;
                    """,
                    [
                        record.time.date(),
                        record.leadtime,
                        record.cell_id,
                        record.mean,
                        record.stddev,
                    ],
                )
            self.cnxn.commit()
            remaining_time = (time.monotonic() - start_time) * (n_batches / idx - 1)
            logging.info(
                f"Batch {idx}/{n_batches}. Inserted/updated {len(record_batch)} predictions. Time remaining {human_readable(remaining_time)}."
            )
        logging.info(
            f"Ensured that table '{self.tables['predictions']}' contains all {df_merged.shape[0]} predictions."
        )

    def update_latest_prediction(self) -> None:
        """Update the 'latest prediction' view, creating it if necessary"""
        # Ensure that view table exists
        logging.info(f"Updating materialised view '{self.tables['latest']}'...")
        self.cursor.execute(
            f"""
            DROP MATERIALIZED VIEW IF EXISTS {self.tables['latest']};
            -- FIX: IF EXISTS added so the first run (no view yet) does not
            -- fail, matching the docstring's "creating it if necessary".
            CREATE MATERIALIZED VIEW {self.tables['latest']} AS
                SELECT
                    row_number() OVER (PARTITION BY true) as prediction_latest_id,
                    {self.tables['predictions']}.date,
                    {self.tables['predictions']}.leadtime,
                    {self.tables['predictions']}.mean,
                    {self.tables['predictions']}.stddev,
                    {self.tables['geom']}.cell_id,
                    {self.tables['geom']}.centroid_x,
                    {self.tables['geom']}.centroid_y,
                    {self.tables['geom']}.geom_6931,
                    {self.tables['geom']}.geom_4326
                FROM {self.tables['predictions']}
                FULL OUTER JOIN cell
                    ON {self.tables['predictions']}.cell_id = {self.tables['geom']}.cell_id
                WHERE date = (SELECT max(date) FROM {self.tables['predictions']})
                GROUP BY
                    {self.tables['geom']}.cell_id, date, leadtime, centroid_x,
                    centroid_y, mean, stddev, geom_6931, geom_4326;
            """
        )
        self.cnxn.commit()
        logging.info(f"Updated materialised view '{self.tables['latest']}'.")
# Standard library
import io
import logging
import math
import os
import time

# Third party
import azure.functions as func
import pandas as pd
import psycopg2
from shapely.geometry import Polygon
import xarray

# Local
from .utils import batches, human_readable, mean_step_size


class Processor:
    """Loads NetCDF prediction data and syncs it into a PostGIS database."""

    def __init__(self, batch_size):
        """Constructor."""
        # batch_size: number of records committed per database batch.
        self.batch_size = batch_size
        self.cnxn_ = None     # lazily-created psycopg2 connection
        self.cursor_ = None   # lazily-created cursor on that connection
        self.tables = {
            "geom": "cell",
            "predictions": "prediction",
            "latest": "prediction_latest",
        }
        self.xr = None        # xarray.Dataset once load() succeeds

    def __del__(self):
        """Destructor."""
        # Close the connection if one was ever opened.
        if self.cnxn_:
            self.cnxn_.close()

    @property
    def cnxn(self):
        """Connect to the database or return an existing connection."""
        if not self.cnxn_:
            try:
                db_host = os.getenv("PSQL_HOST")
                db_name = os.getenv("PSQL_DB")
                db_user = os.getenv("PSQL_USER")
                db_pwd = os.getenv("PSQL_PWD")
                self.cnxn_ = psycopg2.connect(
                    dbname=db_name,
                    port="5432",
                    # Azure PostgreSQL expects "user@host" as the login name.
                    user=f"{db_user}@{db_host}",
                    password=db_pwd,
                    host=db_host,
                )
                logging.info(f"Connected to database {db_name} on {db_host}.")
            except psycopg2.OperationalError:
                logging.error(f"Failed to connect to database {db_name} on {db_host}!")
                raise
        return self.cnxn_

    @property
    def cursor(self):
        """Construct a database cursor or return an existing cursor."""
        if not self.cursor_:
            self.cursor_ = self.cnxn.cursor()
        return self.cursor_

    def load(self, inputBlob: func.InputStream) -> None:
        """Load data from a file into an xarray."""
        logging.info(f"Attempting to load {inputBlob.name}...")
        try:
            self.xr = xarray.open_dataset(io.BytesIO(inputBlob.read()))
            logging.info(
                f"Loaded NetCDF data into array with dimensions: {self.xr.dims}."
            )
        except ValueError as exc:
            # Failure is logged, not raised; callers must check self.xr.
            logging.error(f"Could not load NetCDF data from {inputBlob.name}!")
            logging.error(exc)

    def update_geometries(self) -> None:
        """Update the table of geometries, creating it if necessary."""
        # Ensure that geometry table exists
        logging.info(
            f"Ensuring that geometries table '{self.tables['geom']}' exists..."
        )
        self.cursor.execute(
            f"""
            CREATE TABLE IF NOT EXISTS {self.tables['geom']} (
                cell_id SERIAL PRIMARY KEY,
                centroid_x int4,
                centroid_y int4,
                geom_6931 geometry,
                geom_4326 geometry,
                UNIQUE (centroid_x, centroid_y)
            );
            """
        )
        self.cnxn.commit()
        logging.info(f"Ensured that geometries table '{self.tables['geom']}' exists.")

        # Calculate the size of the grid cells
        logging.info("Identifying cell geometries from input data...")
        centroids_x_km, centroids_y_km = self.xr.xc.values, self.xr.yc.values
        x_delta_m = 1000 * int(0.5 * mean_step_size(centroids_x_km))
        y_delta_m = 1000 * int(0.5 * mean_step_size(centroids_y_km))

        # Construct list of geometry records
        records = []
        for centroid_x_km in centroids_x_km:
            centroid_x_m = int(1000 * centroid_x_km)
            for centroid_y_km in centroids_y_km:
                centroid_y_m = int(1000 * centroid_y_km)
                x_min_m, x_max_m = centroid_x_m - x_delta_m, centroid_x_m + x_delta_m
                y_min_m, y_max_m = centroid_y_m - y_delta_m, centroid_y_m + y_delta_m
                # Closed ring around the cell centroid (first point repeated).
                geometry = Polygon(
                    [
                        [x_min_m, y_max_m],
                        [x_max_m, y_max_m],
                        [x_max_m, y_min_m],
                        [x_min_m, y_min_m],
                        [x_min_m, y_max_m],
                    ]
                )
                records.append((centroid_x_m, centroid_y_m, geometry.wkt, geometry.wkt))
        logging.info(f"Identified {len(records)} cell geometries.")

        # Insert geometries into the database
        logging.info(
            f"Ensuring that '{self.tables['geom']}' contains all {len(records)} geometries..."
        )
        n_batches = int(math.ceil(len(records) / self.batch_size))
        start_time = time.monotonic()
        for idx, record_batch in enumerate(batches(records, self.batch_size), start=1):
            logging.info(
                f"Batch {idx}/{n_batches}. Preparing to insert/update {len(record_batch)} geometries..."
            )
            for record in record_batch:
                self.cursor.execute(
                    f"""
                    INSERT INTO {self.tables['geom']} (cell_id, centroid_x, centroid_y, geom_6931, geom_4326)
                        VALUES(DEFAULT, %s, %s, ST_GeomFromText(%s, 6931), ST_Transform(ST_GeomFromText(%s, 6931), 4326))
                        ON CONFLICT DO NOTHING;
                    """,
                    record,
                )
            self.cnxn.commit()
            remaining_time = (time.monotonic() - start_time) * (n_batches / idx - 1)
            logging.info(
                f"Batch {idx}/{n_batches}. Inserted/updated {len(record_batch)} geometries. Time remaining {human_readable(remaining_time)}."
            )
        logging.info(f"Ensured that '{self.tables['geom']}' contains all geometries.")

    def update_predictions(self) -> None:
        """Update the table of predictions, creating it if necessary"""
        # Ensure that prediction table exists
        logging.info(
            f"Ensuring that predictions table '{self.tables['predictions']}' exists..."
        )
        self.cursor.execute(
            f"""
            CREATE TABLE IF NOT EXISTS {self.tables['predictions']} (
                prediction_id SERIAL PRIMARY KEY,
                date date,
                leadtime int4,
                cell_id int4,
                mean float4,
                stddev float4,
                UNIQUE (date, leadtime, cell_id),
                CONSTRAINT fk_cell_id
                    FOREIGN KEY(cell_id)
                    REFERENCES {self.tables['geom']}(cell_id)
            );
            """
        )
        self.cnxn.commit()
        logging.info(
            f"Ensured that predictions table '{self.tables['predictions']}' exists."
        )

        # Construct a list of values
        logging.info("Loading predictions from input data...")
        df_predictions = (
            self.xr.where(self.xr["mean"] > 0).to_dataframe().dropna().reset_index()
        )
        df_predictions["xc_m"] = pd.to_numeric(
            1000 * df_predictions["xc"], downcast="integer"
        )
        df_predictions["yc_m"] = pd.to_numeric(
            1000 * df_predictions["yc"], downcast="integer"
        )
        logging.info(f"Loaded {df_predictions.shape[0]} predictions from input data.")

        # Get cell IDs by loading existing cells and merging onto list of predictions
        logging.info("Identifying cell IDs for all predictions...")
        df_cells = pd.io.sql.read_sql_query(
            f"SELECT cell_id, centroid_x, centroid_y FROM {self.tables['geom']};",
            self.cnxn,
        )
        df_merged = pd.merge(
            df_predictions,
            df_cells,
            how="left",
            left_on=["xc_m", "yc_m"],
            right_on=["centroid_x", "centroid_y"],
        )
        logging.info(f"Identified cell IDs for {df_merged.shape[0]} predictions.")

        # Insert predictions into the database
        logging.info(
            f"Ensuring that table '{self.tables['predictions']}' contains all {df_merged.shape[0]} predictions..."
        )
        n_batches = int(math.ceil(df_merged.shape[0] / self.batch_size))
        start_time = time.monotonic()
        for idx, record_batch in enumerate(
            batches(df_merged, self.batch_size), start=1
        ):
            logging.info(
                f"Batch {idx}/{n_batches}. Preparing to insert/update {len(record_batch)} predictions..."
            )
            for record in record_batch:
                self.cursor.execute(
                    f"""
                    INSERT INTO {self.tables['predictions']}
                        (prediction_id, date, leadtime, cell_id, mean, stddev)
                    VALUES(
                        DEFAULT, %s, %s, %s, %s, %s
                    )
                    ON CONFLICT DO NOTHING;
                    """,
                    [
                        record.time.date(),
                        record.leadtime,
                        record.cell_id,
                        record.mean,
                        record.stddev,
                    ],
                )
            self.cnxn.commit()
            remaining_time = (time.monotonic() - start_time) * (n_batches / idx - 1)
            logging.info(
                f"Batch {idx}/{n_batches}. Inserted/updated {len(record_batch)} predictions. Time remaining {human_readable(remaining_time)}."
            )
        logging.info(
            f"Ensured that table '{self.tables['predictions']}' contains all {df_merged.shape[0]} predictions."
        )

    def update_latest_prediction(self) -> None:
        """Update the 'latest prediction' view, creating it if necessary"""
        # Ensure that view table exists
        # NOTE(review): DROP without IF EXISTS raises if the view does not yet
        # exist, contradicting "creating it if necessary" — confirm first-run
        # behavior.
        logging.info(f"Updating materialised view '{self.tables['latest']}'...")
        self.cursor.execute(
            f"""
            DROP MATERIALIZED VIEW {self.tables['latest']};
            CREATE MATERIALIZED VIEW {self.tables['latest']} AS
                SELECT
                    row_number() OVER (PARTITION BY true) as prediction_latest_id,
                    {self.tables['predictions']}.date,
                    {self.tables['predictions']}.leadtime,
                    {self.tables['predictions']}.mean,
                    {self.tables['predictions']}.stddev,
                    {self.tables['geom']}.cell_id,
                    {self.tables['geom']}.centroid_x,
                    {self.tables['geom']}.centroid_y,
                    {self.tables['geom']}.geom_6931,
                    {self.tables['geom']}.geom_4326
                FROM {self.tables['predictions']}
                FULL OUTER JOIN cell
                    ON {self.tables['predictions']}.cell_id = {self.tables['geom']}.cell_id
                WHERE date = (SELECT max(date) FROM {self.tables['predictions']})
                GROUP BY
                    {self.tables['geom']}.cell_id, date, leadtime, centroid_x,
                    centroid_y, mean, stddev, geom_6931, geom_4326;
            """
        )
        self.cnxn.commit()
        logging.info(f"Updated materialised view '{self.tables['latest']}'.")
"""Partial derivatives for the torch.nn.RNN layer.""" from typing import List, Tuple from torch import Tensor, cat, einsum, zeros from torch.nn import RNN from backpack.core.derivatives.basederivatives import BaseParameterDerivatives from backpack.utils.subsampling import subsample class RNNDerivatives(BaseParameterDerivatives): """Partial derivatives for the torch.nn.RNN layer. a_t = W_ih x_t + b_ih + W_hh h_{t-1} + b_hh h_t = tanh(a_t) We assume that it is always batch axis first. Index conventions: ------------------ * t: Sequence dimension * v: Free dimension * n: Batch dimension * h: Output dimension * i: Input dimension """ @staticmethod def _check_parameters(module: RNN) -> None: """Check the parameters of module. Args: module: module which to check Raises: NotImplementedError: If any parameter of module does not match expectation """ if not module.batch_first: raise NotImplementedError("Batch axis must be first.") if module.num_layers > 1: raise NotImplementedError("only num_layers = 1 is supported") if not module.nonlinearity == "tanh": raise NotImplementedError("only nonlinearity = tanh is supported") if module.bias is not True: raise NotImplementedError("only bias = True is supported") if not module.dropout == 0: raise NotImplementedError("only dropout = 0 is supported") if module.bidirectional is not False: raise NotImplementedError("only bidirectional = False is supported") def hessian_is_zero(self, module: RNN) -> bool: # noqa: D102 return False @classmethod def _a_jac_t_mat_prod( cls, module: RNN, weight_hh_l0: Tensor, mat: Tensor, subsampling: List[int] = None, ) -> Tensor: """Calculates jacobian vector product wrt a. 
Args: module: RNN module weight_hh_l0: weight matrix hidden-to-hidden mat: matrix to multiply subsampling: subsampling Returns: jacobian vector product wrt a """ V, N, T, H = mat.shape output = subsample(module.output, dim=0, subsampling=subsampling) a_jac_t_mat_prod: Tensor = zeros(V, N, T, H, device=mat.device, dtype=mat.dtype) for t in reversed(range(T)): if t == (T - 1): a_jac_t_mat_prod[:, :, t] = einsum( "vnh,nh->vnh", mat[:, :, t], 1 - output[:, t] ** 2 ) else: a_jac_t_mat_prod[:, :, t] = einsum( "vnh,nh->vnh", mat[:, :, t] + einsum( "vng,gh->vnh", a_jac_t_mat_prod[:, :, t + 1], weight_hh_l0, ), 1 - output[:, t] ** 2, ) return a_jac_t_mat_prod def _jac_t_mat_prod( self, module: RNN, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor, subsampling: List[int] = None, ) -> Tensor: self._check_parameters(module) return einsum( f"vnth,hk->v{"nt" if module.batch_first else "tn"}k", self._a_jac_t_mat_prod( module, module.weight_hh_l0, mat, subsampling, ), module.weight_ih_l0, ) def _jac_mat_prod( self, module: RNN, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor ) -> Tensor: self._check_parameters(module) H: int = module.hidden_size V, N, T, _ = mat.shape _jac_mat_prod: Tensor = zeros(V, N, T, H, device=mat.device, dtype=mat.dtype) for t in range(T): if t == 0: _jac_mat_prod[:, :, t] = einsum( "nh,hi,vni->vnh", 1 - module.output[:, t] ** 2, module.weight_ih_l0, mat[:, :, t], ) else: _jac_mat_prod[:, :, t] = einsum( "nh,vnh->vnh", 1 - module.output[:, t] ** 2, einsum( "hi,vni->vnh", module.weight_ih_l0, mat[:, :, t], ) + einsum( "hk,vnk->vnh", module.weight_hh_l0, _jac_mat_prod[:, :, t - 1], ), ) return _jac_mat_prod def _bias_ih_l0_jac_t_mat_prod( self, module: RNN, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor, sum_batch: bool = True, subsampling: List[int] = None, ) -> Tensor: """Apply transposed Jacobian of the output w.r.t. bias_ih_l0. 
Args: module: extended module g_inp: input gradient g_out: output gradient mat: matrix to multiply sum_batch: Whether to sum along batch axis. Defaults to True. subsampling: Indices of active samples. Defaults to ``None`` (all samples). Returns: product """ self._check_parameters(module) if sum_batch: dim: List[int] = [1, 2] else: dim: int = 2 return self._a_jac_t_mat_prod( module, module.weight_hh_l0, mat, subsampling, ).sum(dim=dim) def _bias_hh_l0_jac_t_mat_prod( self, module: RNN, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor, sum_batch: bool = True, subsampling: List[int] = None, ) -> Tensor: """Apply transposed Jacobian of the output w.r.t. bias_hh_l0. Args: module: extended module g_inp: input gradient g_out: output gradient mat: matrix to multiply sum_batch: Whether to sum along batch axis. Defaults to True. subsampling: Indices of active samples. Defaults to ``None`` (all samples). Returns: product """ return self._bias_ih_l0_jac_t_mat_prod( module, g_inp, g_out, mat, sum_batch=sum_batch, subsampling=subsampling ) def _weight_ih_l0_jac_t_mat_prod( self, module: RNN, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor, sum_batch: bool = True, subsampling: List[int] = None, ) -> Tensor: """Apply transposed Jacobian of the output w.r.t. weight_ih_l0. Args: module: extended module g_inp: input gradient g_out: output gradient mat: matrix to multiply sum_batch: Whether to sum along batch axis. Defaults to True. subsampling: Indices of active samples. Defaults to ``None`` (all samples). Returns: product """ self._check_parameters(module) return einsum( f"vnth,ntj->v{"" if sum_batch else "n"}hj", self._a_jac_t_mat_prod(module, module.weight_hh_l0, mat, subsampling), subsample(module.input0, dim=0, subsampling=subsampling), ) def _weight_hh_l0_jac_t_mat_prod( self, module: RNN, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor, sum_batch: bool = True, subsampling: List[int] = None, ) -> Tensor: """Apply transposed Jacobian of the output w.r.t. 
weight_hh_l0. Args: module: extended module g_inp: input gradient g_out: output gradient mat: matrix to multiply sum_batch: Whether to sum along batch axis. Defaults to True. subsampling: Indices of active samples. Defaults to ``None`` (all samples). Returns: product """ self._check_parameters(module) _, N, _, H = mat.shape output = subsample(module.output, dim=0, subsampling=subsampling) single_step = zeros(N, 1, H, device=mat.device, dtype=mat.dtype) output_shifted = cat([single_step, output[:, :-1]], dim=1) return einsum( f"vnth,ntk->v{"" if sum_batch else "n"}hk", self._a_jac_t_mat_prod(module, module.weight_hh_l0, mat, subsampling), output_shifted, )
"""Partial derivatives for the torch.nn.RNN layer.""" from typing import List, Tuple from torch import Tensor, cat, einsum, zeros from torch.nn import RNN from backpack.core.derivatives.basederivatives import BaseParameterDerivatives from backpack.utils.subsampling import subsample class RNNDerivatives(BaseParameterDerivatives): """Partial derivatives for the torch.nn.RNN layer. a_t = W_ih x_t + b_ih + W_hh h_{t-1} + b_hh h_t = tanh(a_t) We assume that it is always batch axis first. Index conventions: ------------------ * t: Sequence dimension * v: Free dimension * n: Batch dimension * h: Output dimension * i: Input dimension """ @staticmethod def _check_parameters(module: RNN) -> None: """Check the parameters of module. Args: module: module which to check Raises: NotImplementedError: If any parameter of module does not match expectation """ if not module.batch_first: raise NotImplementedError("Batch axis must be first.") if module.num_layers > 1: raise NotImplementedError("only num_layers = 1 is supported") if not module.nonlinearity == "tanh": raise NotImplementedError("only nonlinearity = tanh is supported") if module.bias is not True: raise NotImplementedError("only bias = True is supported") if not module.dropout == 0: raise NotImplementedError("only dropout = 0 is supported") if module.bidirectional is not False: raise NotImplementedError("only bidirectional = False is supported") def hessian_is_zero(self, module: RNN) -> bool: # noqa: D102 return False @classmethod def _a_jac_t_mat_prod( cls, module: RNN, weight_hh_l0: Tensor, mat: Tensor, subsampling: List[int] = None, ) -> Tensor: """Calculates jacobian vector product wrt a. 
Args: module: RNN module weight_hh_l0: weight matrix hidden-to-hidden mat: matrix to multiply subsampling: subsampling Returns: jacobian vector product wrt a """ V, N, T, H = mat.shape output = subsample(module.output, dim=0, subsampling=subsampling) a_jac_t_mat_prod: Tensor = zeros(V, N, T, H, device=mat.device, dtype=mat.dtype) for t in reversed(range(T)): if t == (T - 1): a_jac_t_mat_prod[:, :, t] = einsum( "vnh,nh->vnh", mat[:, :, t], 1 - output[:, t] ** 2 ) else: a_jac_t_mat_prod[:, :, t] = einsum( "vnh,nh->vnh", mat[:, :, t] + einsum( "vng,gh->vnh", a_jac_t_mat_prod[:, :, t + 1], weight_hh_l0, ), 1 - output[:, t] ** 2, ) return a_jac_t_mat_prod def _jac_t_mat_prod( self, module: RNN, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor, subsampling: List[int] = None, ) -> Tensor: self._check_parameters(module) return einsum( f"vnth,hk->v{'nt' if module.batch_first else 'tn'}k", self._a_jac_t_mat_prod( module, module.weight_hh_l0, mat, subsampling, ), module.weight_ih_l0, ) def _jac_mat_prod( self, module: RNN, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor ) -> Tensor: self._check_parameters(module) H: int = module.hidden_size V, N, T, _ = mat.shape _jac_mat_prod: Tensor = zeros(V, N, T, H, device=mat.device, dtype=mat.dtype) for t in range(T): if t == 0: _jac_mat_prod[:, :, t] = einsum( "nh,hi,vni->vnh", 1 - module.output[:, t] ** 2, module.weight_ih_l0, mat[:, :, t], ) else: _jac_mat_prod[:, :, t] = einsum( "nh,vnh->vnh", 1 - module.output[:, t] ** 2, einsum( "hi,vni->vnh", module.weight_ih_l0, mat[:, :, t], ) + einsum( "hk,vnk->vnh", module.weight_hh_l0, _jac_mat_prod[:, :, t - 1], ), ) return _jac_mat_prod def _bias_ih_l0_jac_t_mat_prod( self, module: RNN, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor, sum_batch: bool = True, subsampling: List[int] = None, ) -> Tensor: """Apply transposed Jacobian of the output w.r.t. bias_ih_l0. 
Args: module: extended module g_inp: input gradient g_out: output gradient mat: matrix to multiply sum_batch: Whether to sum along batch axis. Defaults to True. subsampling: Indices of active samples. Defaults to ``None`` (all samples). Returns: product """ self._check_parameters(module) if sum_batch: dim: List[int] = [1, 2] else: dim: int = 2 return self._a_jac_t_mat_prod( module, module.weight_hh_l0, mat, subsampling, ).sum(dim=dim) def _bias_hh_l0_jac_t_mat_prod( self, module: RNN, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor, sum_batch: bool = True, subsampling: List[int] = None, ) -> Tensor: """Apply transposed Jacobian of the output w.r.t. bias_hh_l0. Args: module: extended module g_inp: input gradient g_out: output gradient mat: matrix to multiply sum_batch: Whether to sum along batch axis. Defaults to True. subsampling: Indices of active samples. Defaults to ``None`` (all samples). Returns: product """ return self._bias_ih_l0_jac_t_mat_prod( module, g_inp, g_out, mat, sum_batch=sum_batch, subsampling=subsampling ) def _weight_ih_l0_jac_t_mat_prod( self, module: RNN, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor, sum_batch: bool = True, subsampling: List[int] = None, ) -> Tensor: """Apply transposed Jacobian of the output w.r.t. weight_ih_l0. Args: module: extended module g_inp: input gradient g_out: output gradient mat: matrix to multiply sum_batch: Whether to sum along batch axis. Defaults to True. subsampling: Indices of active samples. Defaults to ``None`` (all samples). Returns: product """ self._check_parameters(module) return einsum( f"vnth,ntj->v{'' if sum_batch else 'n'}hj", self._a_jac_t_mat_prod(module, module.weight_hh_l0, mat, subsampling), subsample(module.input0, dim=0, subsampling=subsampling), ) def _weight_hh_l0_jac_t_mat_prod( self, module: RNN, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor, sum_batch: bool = True, subsampling: List[int] = None, ) -> Tensor: """Apply transposed Jacobian of the output w.r.t. 
weight_hh_l0. Args: module: extended module g_inp: input gradient g_out: output gradient mat: matrix to multiply sum_batch: Whether to sum along batch axis. Defaults to True. subsampling: Indices of active samples. Defaults to ``None`` (all samples). Returns: product """ self._check_parameters(module) _, N, _, H = mat.shape output = subsample(module.output, dim=0, subsampling=subsampling) single_step = zeros(N, 1, H, device=mat.device, dtype=mat.dtype) output_shifted = cat([single_step, output[:, :-1]], dim=1) return einsum( f"vnth,ntk->v{'' if sum_batch else 'n'}hk", self._a_jac_t_mat_prod(module, module.weight_hh_l0, mat, subsampling), output_shifted, )
# https://docs.scrapy.org/en/latest/topics/settings.html
import logging
# TimedRotatingFileHandler lives in the logging.handlers submodule, which a
# plain "import logging" does not load — import it explicitly.
import logging.handlers
from pathlib import Path
from datetime import datetime

BOT_NAME = "ebk"

SPIDER_MODULES = ["ebk.spiders"]
NEWSPIDER_MODULE = "ebk.spiders"

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36"
# USER_AGENT = "scrapy ebk by mo"
# USER_AGENT = "not the Goolgebot"
USER_AGENT = "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"

ROBOTSTXT_OBEY = True

DOWNLOAD_DELAY = 0.1
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
COOKIES_ENABLED = True

# DEFAULT_REQUEST_HEADERS = {
#     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#     'Accept-Language': 'en',
# }

# SPIDER_MIDDLEWARES = {
#     'ebk.middlewares.EbkSpiderMiddleware': 543,
# }

# DOWNLOADER_MIDDLEWARES = {
#     "scrapy.downloadermiddlewares.useragent.UserAgentMiddleware": None,
#     "ebk.middlewares.RotatingUserAgentsMiddleware": 500,
# }
# ROTATING_USER_AGENTS = Path(__file__).parent / "useragents.json"
# ROTATING_USER_AGENTS_SHUFFLE = False

# EXTENSIONS = {
#     'scrapy.extensions.telnet.TelnetConsole': None,
# }

ITEM_PIPELINES = {
    "ebk.pipelines.DatabaseWriterPipe": 400,
}

# One database file per day; NOTE: single quotes inside the f-string — reusing
# the enclosing quote type is a SyntaxError before Python 3.12 (PEP 701).
database_path = (
    Path(__file__).parent.parent.parent
    / "data"
    / f"ebk_data__{datetime.now().strftime('%Y_%m_%d')}.db"
)
DATABASE_URL = f"sqlite:///{database_path}"
DATABASE_COMMIT_DELTA = 1000

CRAWLING_META_PATH = Path(__file__).parent / "log" / "crawling_statistics.csv"

LOGSTATS_INTERVAL = 10

# these settings only affect the output to stdout, we keep them enabled so we can
# use command line flags etc (this wouldn't be possible if we set a streamlogger manually)
LOG_ENABLED = True  # keep this enabled to still have the "default" stdout output
LOG_LEVEL = logging.INFO
# this is the actual formatting string which is used in the logging.Formatter of scrapy's logging handler
LOG_FORMAT = "%(asctime)s [%(name)s] %(levelname)s: %(message)s"
# this only defines how the different message types (crawl, item found, ...) look like and which level they have
LOG_FORMATTER = "ebk.logging.CustomLogFormatter"

# for other handlers than the one to stdout simply add the handlers to the root
# logger. It seems like all scrapy loggers use the handlers from the root logger.
# The settings here do not affect the general logging settings at all.
# configure_logging(install_root_handler=False)
root_logger = logging.getLogger()
# root_logger.setLevel(logging.DEBUG)
rotating_handler = logging.handlers.TimedRotatingFileHandler(
    Path(__file__).parent / "log" / "EbkScraperLog",
    when="midnight",
    backupCount=30,
)
rotating_handler.setLevel(logging.INFO)
rotating_handler.setFormatter(logging.Formatter(LOG_FORMAT))  # use the same log_format
root_logger.addHandler(rotating_handler)
# https://docs.scrapy.org/en/latest/topics/settings.html
import logging
# TimedRotatingFileHandler lives in the logging.handlers submodule, which a
# plain "import logging" does not load — import it explicitly.
import logging.handlers
from pathlib import Path
from datetime import datetime

BOT_NAME = "ebk"

SPIDER_MODULES = ["ebk.spiders"]
NEWSPIDER_MODULE = "ebk.spiders"

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36"
# USER_AGENT = "scrapy ebk by mo"
# USER_AGENT = "not the Goolgebot"
USER_AGENT = "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"

ROBOTSTXT_OBEY = True

DOWNLOAD_DELAY = 0.1
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
COOKIES_ENABLED = True

# DEFAULT_REQUEST_HEADERS = {
#     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#     'Accept-Language': 'en',
# }

# SPIDER_MIDDLEWARES = {
#     'ebk.middlewares.EbkSpiderMiddleware': 543,
# }

# DOWNLOADER_MIDDLEWARES = {
#     "scrapy.downloadermiddlewares.useragent.UserAgentMiddleware": None,
#     "ebk.middlewares.RotatingUserAgentsMiddleware": 500,
# }
# ROTATING_USER_AGENTS = Path(__file__).parent / "useragents.json"
# ROTATING_USER_AGENTS_SHUFFLE = False

# EXTENSIONS = {
#     'scrapy.extensions.telnet.TelnetConsole': None,
# }

ITEM_PIPELINES = {
    "ebk.pipelines.DatabaseWriterPipe": 400,
}

# One database file per day.
database_path = (
    Path(__file__).parent.parent.parent
    / "data"
    / f"ebk_data__{datetime.now().strftime('%Y_%m_%d')}.db"
)
DATABASE_URL = f"sqlite:///{database_path}"
DATABASE_COMMIT_DELTA = 1000

CRAWLING_META_PATH = Path(__file__).parent / "log" / "crawling_statistics.csv"

LOGSTATS_INTERVAL = 10

# these settings only affect the output to stdout, we keep them enabled so we can
# use command line flags etc (this wouldn't be possible if we set a streamlogger manually)
LOG_ENABLED = True  # keep this enabled to still have the "default" stdout output
LOG_LEVEL = logging.INFO
# this is the actual formatting string which is used in the logging.Formatter of scrapy's logging handler
LOG_FORMAT = "%(asctime)s [%(name)s] %(levelname)s: %(message)s"
# this only defines how the different message types (crawl, item found, ...) look like and which level they have
LOG_FORMATTER = "ebk.logging.CustomLogFormatter"

# for other handlers than the one to stdout simply add the handlers to the root
# logger. It seems like all scrapy loggers use the handlers from the root logger.
# The settings here do not affect the general logging settings at all.
# configure_logging(install_root_handler=False)
root_logger = logging.getLogger()
# root_logger.setLevel(logging.DEBUG)
rotating_handler = logging.handlers.TimedRotatingFileHandler(
    Path(__file__).parent / "log" / "EbkScraperLog",
    when="midnight",
    backupCount=30,
)
rotating_handler.setLevel(logging.INFO)
rotating_handler.setFormatter(logging.Formatter(LOG_FORMAT))  # use the same log_format
root_logger.addHandler(rotating_handler)
import random, pickle, pyglet
from tkinter import *
from PIL import Image, ImageTk

# ||||||||||||||||||||||| FUNCTIONS ||||||||||||||||||||||| #


class Timer:
    """One-second tkinter timer that writes the elapsed seconds into a label."""

    def __init__(self, label):
        self.label = label
        self.seconds = 0
        self.isRunning = False

    def counter_label(self):
        """Start the self-rescheduling tick that updates the label every second."""
        def count():
            if self.isRunning:
                self.label['text'] = self.seconds
                self.label.after(1000, count)  # reschedule in 1000 ms
                self.seconds += 1
        count()

    def start(self):
        self.isRunning = True
        self.counter_label()

    def reset(self):
        self.isRunning = False
        self.seconds = 0


def menuMaker():
    """Build the File menu (new game, options, exit) on the root window."""
    mnu = Menu(root)
    root.config(menu = mnu)
    file_menu = Menu(mnu, tearoff = False)
    mnu.add_cascade(label = "File", menu = file_menu)
    file_menu.add_command(label = "New Game (F2)", command = restart)
    file_menu.add_separator()
    file_menu.add_command(label = "Options (F5)", command = firstScreen)
    file_menu.add_separator()
    file_menu.add_command(label = "Exit", command = root.quit)


def minesAdder(x_dim, y_dim, count):
    """Place `count` mines (value 9) at distinct random cells of main_list."""
    global main_list, mines
    for i in range(count):
        # Rejection sampling: redraw until the cell is not already mined.
        while True:
            x_geese = random.randint(0, (x_dim - 1))
            y_geese = random.randint(0, y_dim - 1)
            if (x_geese, y_geese) not in mines:
                break
        main_list[x_geese][y_geese] = 9
        mines.append((x_geese, y_geese))


def show(x_coord, y_coord, event):
    """Handle a left-click: reveal the tile (flood-filling zeros) and check win/lose."""
    global main_list, printable_list, revealPerTurn, revealed, timer
    if not timer.isRunning:
        timer.start()
    if ((x_coord, y_coord) not in marked) and ((x_coord, y_coord) not in revealed):
        buttonUpdater(event.widget, main_list[x_coord][y_coord])
        revealPerTurn.append((x_coord, y_coord))
        if main_list[x_coord][y_coord] == 0:
            zeroReveal(x_coord, y_coord)
        # Flagged tiles are never auto-revealed by the flood fill.
        revealPerTurn = [(x, y) for x, y in revealPerTurn if (x, y) not in marked]
        for x, y in revealPerTurn:
            normalReveal(x, y)
        revealed.extend(revealPerTurn)
        revealed = list(set(revealed))  # de-duplicate flood-fill repeats
        revealPerTurn = []
        markAll()
        winCondition()


def normalReveal(x_coord, y_coord):
    """Show the sprite for a single already-computed cell value."""
    global main_list, printable_list, revealPerTurn
    buttonUpdater(tiles[x_coord][y_coord], main_list[x_coord][y_coord])


def zeroReveal(x_coord, y_coord):
    """Recursive flood fill: queue all neighbours of a zero cell for reveal."""
    global zeroes, revealPerTurn
    neighbors = neighborCalc(x_coord, y_coord)
    for x, y in neighbors:
        if (x, y) not in zeroes:  # `zeroes` guards against infinite recursion
            revealPerTurn.append((x, y))
            if main_list[x][y] == 0:
                zeroes.append((x, y))
                zeroReveal(x, y)


def mark(x, y, event):
    """Handle a right/middle-click: toggle the flag on an unrevealed tile."""
    global printable_list, marked
    if ((x, y) not in marked) and ((x, y) not in revealed):
        buttonUpdater(event.widget, -1)  # -1 = flag sprite
        marked.append((x, y))
        markAll()
    elif ((x, y) in marked) and ((x, y) not in revealed):
        buttonUpdater(event.widget, -2)  # -2 = unrevealed sprite
        marked.remove((x, y))
        printable_list[x][y] = '*'
    mine_label["text"] = count - len(marked)  # remaining-mines counter
    winCondition()


def markAll():
    """Mirror every flagged coordinate into printable_list as 'M'."""
    global marked, printable_list
    for x, y in marked:
        printable_list[x][y] = 'M'


def tileList():
    """Rebuild the 2-D `tiles` grid of labels from the game frame's children."""
    global tiles
    tiles = [x for x in game.winfo_children()]
    tiles = list(tiles[i:i + y_dim] for i in range(0, len(tiles), y_dim))


def restart():
    """Tear down the window and start a fresh game."""
    global root
    root.destroy()
    main()


def neighborCalc(x_coord, y_coord):
    """Return the coordinates of all in-bounds neighbours of (x_coord, y_coord).

    Corners and edges are enumerated explicitly; the interior case takes the
    3x3 block around the cell, excluding the cell itself (the edge/corner
    cases never include it either).
    """
    global x_dim, y_dim, main_list
    if (x_coord, y_coord) == (0, 0):
        return [(0, 1), (1, 0), (1, 1)]
    elif (x_coord, y_coord) == (x_dim - 1, y_dim - 1):
        return [(x_dim - 1, y_dim - 2), (x_dim - 2, y_dim - 1), (x_dim - 2, y_dim - 2)]
    elif (x_coord, y_coord) == (0, y_dim - 1):
        return [(0, y_dim - 2), (1, y_dim - 1), (1, y_dim - 2)]
    elif (x_coord, y_coord) == (x_dim - 1, 0):
        return [(x_dim - 1, 1), (x_dim - 2, 0), (x_dim - 2, 1)]
    elif x_coord == 0:
        return [(0, y_coord - 1), (0, y_coord + 1), (1, y_coord - 1),
                (1, y_coord + 1), (1, y_coord)]
    elif x_coord == (x_dim - 1):
        return ([(x_dim - 1, y_coord - 1), (x_dim - 1, y_coord + 1),
                 (x_dim - 2, y_coord - 1), (x_dim - 2, y_coord + 1),
                 (x_dim - 2, y_coord)])
    elif y_coord == 0:
        return [(x_coord - 1, 0), (x_coord + 1, 0), (x_coord - 1, 1),
                (x_coord + 1, 1), (x_coord, 1)]
    elif y_coord == (y_dim - 1):
        return ([(x_coord, y_dim - 2), (x_coord - 1, y_dim - 2),
                 (x_coord + 1, y_dim - 2), (x_coord - 1, y_dim - 1),
                 (x_coord + 1, y_dim - 1)])
    else:
        # Interior: 3x3 block minus the centre cell. (The original included
        # the centre; harmless downstream — countAdd re-stamps mines with 9
        # and zeroReveal guards via `zeroes` — but excluding it matches the
        # other branches and avoids redundant work.)
        return [(x, y)
                for x in range(x_coord - 1, x_coord + 2)
                for y in range(y_coord - 1, y_coord + 2)
                if (x, y) != (x_coord, y_coord)]


def countAdd():
    """Fill main_list with adjacent-mine counts, then re-stamp mines as 9."""
    global main_list, printable_list, mines
    for i, j in mines:
        neighbors = neighborCalc(i, j)
        for x, y in neighbors:
            main_list[x][y] += 1
    # Mine cells may have been incremented by neighbouring mines; restore 9.
    for i, j in mines:
        main_list[i][j] = 9


def isGameWon():
    """Return 0 if a mine was revealed (loss), 2 if all safe cells are revealed (win), else 1."""
    for i in mines:
        if i in revealed:
            return 0
    if count + len(revealed) == x_dim * y_dim:
        return 2
    return 1


def winCondition():
    """If the game just ended, stop the timer and pop up the win/lose dialog."""
    global timer
    winVar = isGameWon()
    if winVar in (0, 2):
        timer.reset()
        window = Toplevel()
        window.grab_set()
        window.configure(bg = '#515151')
        window.geometry("330x250")
        window.iconbitmap('assets/Sprites/logo.ico')
        stats = Frame(window, bg = '#515151')
        stats.grid(row = 0, column = 0, columnspan = 2, padx = 20, pady = 20)
        tot, win, perc = statChanger(winVar)
        Label(stats, text = f"Games Played: {tot}", bg = '#515151', fg = '#fff',
              font = ('Calibri 12')).grid(row = 1, column = 0, padx = 5, pady = 5)
        Label(stats, text = f"Games Won: {win}", bg = '#515151', fg = '#fff',
              font = ('Calibri 12')).grid(row = 2, column = 0, padx = 5, pady = 5)
        Label(stats, text = f"Percentage: {perc}", bg = '#515151', fg = '#fff',
              font = ('Calibri 12')).grid(row = 1, column = 1, padx = 5, pady = 5)
        exitBtn = Label(window, text = " Exit ", font = ('Calibri 26 bold'),
                        borderwidth = 3, relief = 'raised', bg = '#A10000', fg = '#fff')
        exitBtn.grid(row = 1, column = 0, padx = 7, pady = 7)
        exitBtn.bind('<Button-1>', lambda x: root.destroy())
        restBtn = Label(window, text = "Play Again", font = ('Calibri 26 bold'),
                        borderwidth = 3, relief = 'raised', bg = '#A10000', fg = '#fff')
        restBtn.grid(row = 1, column = 1, padx = 7, pady = 7)
        restBtn.bind('<Button-1>', lambda x: restart())
        if winVar == 2:
            window.title("You Win!")
            Label(stats, text = "Congratulations! You Won the Game!!",
                  bg = '#515151', fg = '#fff',
                  font = ('Calibri 12 bold')).grid(row = 0, column = 0,
                                                   columnspan = 2, padx = 10, pady = 10)
        elif winVar == 0:
            window.title("You Lose!")
            Label(stats, text = "Sorry you Lost, Better Luck Next Time!",
                  bg = '#515151', fg = '#fff',
                  font = ('Calibri 12 bold')).grid(row = 0, column = 0,
                                                   columnspan = 2, padx = 10, pady = 10)


def buttonFunc(i, j):
    """Create the unrevealed tile label at grid cell (i, j) and bind its clicks."""
    height, width = (572 // y_dim), (572 // x_dim)
    image = ImageTk.PhotoImage(Image.open(
        'assets/Sprites/unrevealed_tile.png').resize((height, width)))
    lbl = Label(game, image = image)
    lbl.image = image  # keep a reference so tkinter does not garbage-collect it
    lbl.grid(row = i, column = j, padx = 0.5, pady = 0.5)
    lbl.bind("<Button-1>", lambda x: show(i, j, x))
    lbl.bind("<Button-2>", lambda x: mark(i, j, x))
    lbl.bind("<Button-3>", lambda x: mark(i, j, x))


def buttonUpdater(lbl, number):
    """Swap the sprite shown on `lbl` to the one matching `number`.

    0-8 are adjacency counts, 9 is a mine, -1 is the flag and -2 restores
    the unrevealed tile. (Replaces the original 12-branch if/elif chain
    with a lookup table — same sprites, same resize.)
    """
    sprites = {
        0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five',
        6: 'six', 7: 'seven', 8: 'eight', 9: 'bomb',
        -1: 'flag', -2: 'unrevealed_tile',
    }
    image = ImageTk.PhotoImage(Image.open(
        f'assets/Sprites/{sprites[number]}.png').resize(((572 // y_dim), (572 // x_dim))))
    lbl['image'] = image
    lbl.image = image  # keep a reference so tkinter does not garbage-collect it


def statChanger(x):
    """Update the persisted win statistics (x truthy = win) and return (tot, win, perc)."""
    with open('assets/data', 'rb') as f:
        data = pickle.load(f)
    tot, win, perc = data['Stats']['Total'], data['Stats']['Win'], data['Stats']['Percentage']
    if x:
        win += 1
        perc = f"{(win * 100) // tot}%"
    data['Stats']['Total'], data['Stats']['Win'], data['Stats']['Percentage'] = tot, win, perc
    with open('assets/data', 'wb') as f:
        pickle.dump(data, f)  # original bound the (None) result; binding removed
    return tot, win, perc


def dimSet(height, width, mines, diff):
    """Persist the difficulty chosen in the options dialog and restart if valid."""
    global x_dim, y_dim, count
    value = diff.get()
    toRestart = False
    if value == 1:
        x_dim = 9
        y_dim = 9
        count = 10
        toRestart = True
    elif value == 2:
        x_dim = 16
        y_dim = 16
        count = 40
        toRestart = True
    elif value == 3:
        x_dim = 24
        y_dim = 24
        count = 99
        toRestart = True
    elif value == 0:
        # Custom difficulty: read the entry widgets and validate the ranges.
        x_dim = int(height.get())
        y_dim = int(width.get())
        count = int(mines.get())
        if (x_dim >= 9) and (y_dim >= 9) and (10 <= count <= x_dim * y_dim):
            toRestart = True
        else:
            print('\a')  # audible bell on invalid input
    if toRestart:
        with open('assets/data', 'rb') as f:
            data = pickle.load(f)
        data['Dimensions']['xdim'], data['Dimensions']['ydim'], data['Dimensions']['mines'] = x_dim, y_dim, count
        with open('assets/data', 'wb') as f:
            pickle.dump(data, f)
        restart()


def firstScreen():
    """Show the modal options dialog (preset difficulties + custom entries)."""
    config = Toplevel()
    config.grab_set()
    config.iconbitmap('assets/Sprites/logo.ico')
    difficulty = LabelFrame(config, text = 'Difficulty')
    difficulty.pack(padx = 20, pady = 20)
    Button(config, text = "OK",
           command = lambda: dimSet(height, width, mines, diff)).pack()
    firstThreeDifficulty = Frame(difficulty)
    firstThreeDifficulty.grid(column = 0, row = 0, padx = 10)
    customDifficulty = Frame(difficulty)
    customDifficulty.grid(column = 1, row = 0, padx = 10)
    customDifficultyButton = Frame(customDifficulty)
    customDifficultyButton.grid(row = 0, column = 0, columnspan = 2)
    diff = IntVar()
    diff.set(1)
    bbtn = Radiobutton(firstThreeDifficulty, text = "Beginner    \n10 Mines\n9x9 Grid  ",
                       variable = diff, value = 1)
    bbtn.pack(anchor = W)
    ibtn = Radiobutton(firstThreeDifficulty, text = "Intermediate\n40 Mines\n16x16 Grid",
                       variable = diff, value = 2)
    ibtn.pack(anchor = W)
    abtn = Radiobutton(firstThreeDifficulty, text = "Advanced  \n99 Mines\n24x24 Grid",
                       variable = diff, value = 3)
    abtn.pack(anchor = W)
    customBtn = Radiobutton(customDifficultyButton, text = "Custom",
                            variable = diff, value = 0)
    customBtn.pack(anchor = W)

    def validate_entry(text, minimum, maximum, widget):
        """Key-validation callback: accept empty/in-range ints, clamp overshoot."""
        if text == "":
            return True
        try:
            value = int(text)
        except ValueError:  # oops, couldn't convert to int
            print('\a')
            return False
        if 0 <= value <= maximum:
            return True
        else:
            print('\a')
            widget.delete(0, END)
            widget.insert(0, maximum)
            # NOTE(review): tk disables a validatecommand once the callback
            # edits the widget — re-registering restores validation.
            height.config(validate = "key", validatecommand = hcmd)
            width.config(validate = "key", validatecommand = wcmd)
            mines.config(validate = "key", validatecommand = mcmd)
            return False

    hcmd = (root.register(lambda x: validate_entry(x, 9, 24, height)), "%P")
    wcmd = (root.register(lambda x: validate_entry(x, 9, 24, width)), "%P")
    mcmd = (root.register(lambda x: validate_entry(
        x, 10, min(int(height.get()) * int(width.get()), 400), mines)), "%P")
    hlbl = Label(customDifficulty, state = DISABLED, text = "Height (9-24)")
    hlbl.grid(row = 1, column = 0)
    height = Entry(customDifficulty, state = DISABLED)
    height.grid(row = 1, column = 1)
    wlbl = Label(customDifficulty, state = DISABLED, text = "Width (9-24)")
    wlbl.grid(row = 2, column = 0)
    width = Entry(customDifficulty, state = DISABLED)
    width.grid(row = 2, column = 1)
    mlbl = Label(customDifficulty, state = DISABLED, text = "Mines (10-400)")
    mlbl.grid(row = 3, column = 0)
    mines = Entry(customDifficulty, state = DISABLED)
    mines.grid(row = 3, column = 1)

    def enabler(event):
        """Enable the custom entries when the Custom radio button is picked."""
        hlbl['state'] = NORMAL
        height['state'] = NORMAL
        height.config(validate = "key", validatecommand = hcmd)
        wlbl['state'] = NORMAL
        width['state'] = NORMAL
        width.config(validate = "key", validatecommand = wcmd)
        mlbl['state'] = NORMAL
        mines['state'] = NORMAL
        mines.config(validate = "key", validatecommand = mcmd)

    def disabler(event):
        """Grey out the custom entries when a preset difficulty is picked."""
        hlbl['state'] = DISABLED
        height['state'] = DISABLED
        wlbl['state'] = DISABLED
        width['state'] = DISABLED
        mlbl['state'] = DISABLED
        mines['state'] = DISABLED

    bbtn.bind("<Button-1>", lambda x: disabler(x))
    ibtn.bind("<Button-1>", lambda x: disabler(x))
    abtn.bind("<Button-1>", lambda x: disabler(x))
    customBtn.bind("<Button-1>", lambda x: enabler(x))


def main():
    """Load (or create) persisted settings/stats, build the board, run the game."""
    global x_dim, y_dim, count, F, root, main_list, printable_list, mines, zeroes, revealPerTurn, marked, revealed, game, L, x_var, y_var, tiles, timer, timer_label, stuff, mine_label
    try:
        with open('assets/data', 'rb') as f:
            data = pickle.load(f)
        x_dim, y_dim, count = data['Dimensions']['xdim'], data['Dimensions']['ydim'], data['Dimensions']['mines']
        root = Tk()
        root.title("Minesweeper")
        root.minsize(684, 700)
        root.configure(bg = '#a1a1a1')
        root.iconbitmap('assets/Sprites/logo.ico')
        main_list = [[0 for y in range(y_dim)] for x in range(x_dim)]
        printable_list = [["*" for y in range(y_dim)] for x in range(x_dim)]
        mines, zeroes, revealPerTurn, marked, revealed = [], [], [], [], []
        game = Frame(root, bg = '#000', height = 572, width = 572)
        game.pack(pady = 5)
        stuff = Frame(root, bg = '#a1a1a1', width = 572)
        stuff.pack()
        pyglet.font.add_file("assets/Font/DS-DIGI.TTF")
        timer_label = Label(stuff, text = "0", fg = "white", bg = "#515151",
                            font = "DS-Digital 40", width = 5)
        timer_label.grid(row = 0, column = 0, padx = 30)
        timer = Timer(timer_label)
        mine_label = Label(stuff, text = count, fg = "white", bg = "#515151",
                           font = "DS-Digital 40", width = 5)
        mine_label.grid(row = 0, column = 1, padx = 30)
        with open('assets/data', 'rb') as f:
            data = pickle.load(f)
        data['Stats']['Total'] += 1
        # Single quotes inside the f-string: reusing the enclosing quote type
        # is a SyntaxError before Python 3.12 (PEP 701).
        data['Stats']['Percentage'] = f"{(data['Stats']['Win'] * 100) // data['Stats']['Total']}%"
        with open('assets/data', 'wb') as f:
            pickle.dump(data, f)
        menuMaker()
        minesAdder(x_dim, y_dim, count)
        countAdd()
        for x_var in range(x_dim):
            for y_var in range(y_dim):
                buttonFunc(x_var, y_var)
        tileList()  # builds the global 2-D `tiles` grid (was inlined here)
        if [x_dim, y_dim, count] == [5, 5, 0]:
            firstScreen()  # sentinel dimensions = first run, ask for difficulty
        root.bind("<F5>", lambda x: firstScreen())
        root.bind("<F2>", lambda x: restart())
        root.mainloop()
    except (FileExistsError, FileNotFoundError):
        # First launch: create the persisted data file with sentinel dimensions.
        with open("assets/data", "wb") as f:
            data = {
                "Dimensions": {
                    "xdim": 5,
                    "ydim": 5,
                    "mines": 0
                },
                "Stats": {
                    "Total": 0,
                    "Percentage": "0%",
                    "Win": 0
                }
            }
            pickle.dump(data, f)
        main()
    except ModuleNotFoundError:
        root.destroy()
        main()


if __name__ == '__main__':
    main()
import random, pickle, pyglet
from tkinter import *
from PIL import Image, ImageTk

# ------------------------------------------------------------------ #
# Minesweeper (tkinter).  Game state lives in module-level globals:
#   main_list       - grid of mine counts (9 == mine)
#   printable_list  - textual mirror of the board ('*' hidden, 'M' marked)
#   mines/zeroes/revealPerTurn/marked/revealed - coordinate bookkeeping
# Persistent dimensions and win statistics are pickled in 'assets/data'.
# NOTE(review): indentation was reconstructed from a whitespace-collapsed
# source; placements flagged below should be confirmed against the original.
# ------------------------------------------------------------------ #

class Timer:
    """Elapsed-seconds counter rendered into a tkinter Label via .after()."""

    def __init__(self, label):
        self.label = label        # Label widget that displays the seconds
        self.seconds = 0          # elapsed seconds since start()
        self.isRunning = False    # gate checked by the after() callback chain

    def counter_label(self):
        """Start the once-per-second update loop on the label."""
        def count():
            # Re-schedules itself every 1000 ms while the timer runs.
            if self.isRunning:
                self.label['text'] = self.seconds
                self.label.after(1000, count)
                self.seconds += 1
        count()

    def start(self):
        """Begin counting (first tick fires immediately)."""
        self.isRunning = True
        self.counter_label()

    def reset(self):
        """Stop counting and zero the elapsed time."""
        self.isRunning = False
        self.seconds = 0

def menuMaker():
    """Build the File menu (new game, options, exit) on the root window."""
    mnu = Menu(root)
    root.config(menu = mnu)
    file_menu = Menu(mnu, tearoff = False)
    mnu.add_cascade(label = "File", menu = file_menu)
    file_menu.add_command(label = "New Game (F2)", command = restart)
    file_menu.add_separator()
    file_menu.add_command(label = "Options (F5)", command = firstScreen)
    file_menu.add_separator()
    file_menu.add_command(label = "Exit", command = root.quit)

def minesAdder(x_dim, y_dim, count):
    """Place `count` mines at distinct random cells (9 marks a mine)."""
    global main_list, mines
    for i in range(count):
        # Retry until an unused cell is drawn.
        while True:
            x_geese = random.randint(0, (x_dim - 1))
            y_geese = random.randint(0, y_dim - 1)
            if (x_geese, y_geese) not in mines:
                break
        main_list[x_geese][y_geese] = 9
        mines.append((x_geese, y_geese))

def show(x_coord, y_coord, event):
    """Left-click handler: reveal a tile (flood-filling through zeros)."""
    global main_list, printable_list, revealPerTurn, revealed, timer
    # First click of the game starts the clock.
    if not timer.isRunning:
        timer.start()
    if ((x_coord, y_coord) not in marked) and ((x_coord, y_coord) not in revealed):
        buttonUpdater(event.widget, main_list[x_coord][y_coord])
        revealPerTurn.append((x_coord, y_coord))
        # A zero tile cascades to its whole zero-connected region.
        if main_list[x_coord][y_coord] == 0:
            zeroReveal(x_coord, y_coord)
        # Flagged tiles are never auto-revealed by the cascade.
        revealPerTurn = [(x, y) for x, y in revealPerTurn if (x, y) not in marked]
        for x, y in revealPerTurn:
            normalReveal(x, y)
        revealed.extend(revealPerTurn)
        revealed = list(set(revealed))     # de-duplicate cascade overlaps
        revealPerTurn = []
        markAll()
        winCondition()

def normalReveal(x_coord, y_coord):
    """Repaint one tile with its mine-count sprite."""
    global main_list, printable_list, revealPerTurn
    buttonUpdater(tiles[x_coord][y_coord], main_list[x_coord][y_coord])

def zeroReveal(x_coord, y_coord):
    """Recursively collect the zero-connected region plus its border."""
    global zeroes, revealPerTurn
    neighbors = neighborCalc(x_coord, y_coord)
    for x, y in neighbors:
        # `zeroes` doubles as the visited set, preventing infinite recursion.
        if (x, y) not in zeroes:
            revealPerTurn.append((x, y))
            if main_list[x][y] == 0:
                zeroes.append((x, y))
                zeroReveal(x, y)

def mark(x, y, event):
    """Right/middle-click handler: toggle a flag on an unrevealed tile."""
    global printable_list, marked
    if ((x, y) not in marked) and ((x, y) not in revealed):
        buttonUpdater(event.widget, -1)          # -1 = flag sprite
        marked.append((x, y))
        markAll()
    elif ((x, y) in marked) and ((x, y) not in revealed):
        buttonUpdater(event.widget, -2)          # -2 = blank tile sprite
        marked.remove((x, y))
        printable_list[x][y] = '*'
    # NOTE(review): reconstructed indentation — counter refresh and win check
    # assumed to run for both flag and unflag; confirm against original layout.
    mine_label["text"] = count - len(marked)
    winCondition()

def markAll():
    """Mirror all flags into the printable board as 'M'."""
    global marked, printable_list
    for x, y in marked:
        printable_list[x][y] = 'M'

def tileList():
    """Rebuild `tiles` as a 2-D list of the board's Label widgets."""
    global tiles
    tiles = [x for x in game.winfo_children()]
    tiles = list(tiles[i:i + y_dim] for i in range(0, len(tiles), y_dim))

def restart():
    """Tear down the window and start a fresh game."""
    global root
    root.destroy()
    main()

def neighborCalc(x_coord, y_coord):
    """Return the neighbour coordinates of a cell, honouring board edges.

    Corners get 3 neighbours, edges 5, interior cells the full 3x3 block
    (which, in the interior case, includes the cell itself).
    """
    global x_dim, y_dim, main_list
    if (x_coord, y_coord) == (0, 0):
        return [(0, 1), (1, 0), (1, 1)]
    elif (x_coord, y_coord) == (x_dim - 1, y_dim - 1):
        return [(x_dim - 1, y_dim - 2), (x_dim - 2, y_dim - 1), (x_dim - 2, y_dim - 2)]
    elif (x_coord, y_coord) == (0, y_dim - 1):
        return [(0, y_dim - 2), (1, y_dim - 1), (1, y_dim - 2)]
    elif (x_coord, y_coord) == (x_dim - 1, 0):
        return [(x_dim - 1, 1), (x_dim - 2, 0), (x_dim - 2, 1)]
    elif x_coord == 0:
        return [(0, y_coord - 1), (0, y_coord + 1), (1, y_coord - 1),
                (1, y_coord + 1), (1, y_coord)]
    elif x_coord == (x_dim - 1):
        return ([(x_dim - 1, y_coord - 1), (x_dim - 1, y_coord + 1),
                 (x_dim - 2, y_coord - 1), (x_dim - 2, y_coord + 1),
                 (x_dim - 2, y_coord)])
    elif y_coord == 0:
        return [(x_coord - 1, 0), (x_coord + 1, 0), (x_coord - 1, 1),
                (x_coord + 1, 1), (x_coord, 1)]
    elif y_coord == (y_dim - 1):
        return ([(x_coord, y_dim - 2), (x_coord - 1, y_dim - 2),
                 (x_coord + 1, y_dim - 2), (x_coord - 1, y_dim - 1),
                 (x_coord + 1, y_dim - 1)])
    else:
        return [(x, y) for x in range(x_coord - 1, x_coord + 2)
                for y in range(y_coord - 1, y_coord + 2)]

def countAdd():
    """Fill every cell with its adjacent-mine count, then re-stamp mines as 9."""
    global main_list, printable_list, mines
    for i, j in mines:
        neighbors = neighborCalc(i, j)
        for x, y in neighbors:
            main_list[x][y] += 1
    # Mines adjacent to other mines got incremented too; restore the marker.
    for i, j in mines:
        main_list[i][j] = 9

def isGameWon():
    """Return game state: 0 = lost (mine revealed), 2 = won, 1 = in progress."""
    for i in mines:
        if i in revealed:
            return 0
    if count + len(revealed) == x_dim * y_dim:
        return 2
    return 1

def winCondition():
    """If the game just ended, show the win/lose dialog with statistics."""
    global timer
    winVar = isGameWon()
    if winVar in (0, 2):
        timer.reset()
        window = Toplevel()
        window.grab_set()
        window.configure(bg = '#515151')
        window.geometry("330x250")
        window.iconbitmap('assets/Sprites/logo.ico')
        stats = Frame(window, bg = '#515151')
        stats.grid(row = 0, column = 0, columnspan = 2, padx = 20, pady = 20)
        # statChanger also persists the updated stats to disk.
        tot, win, perc = statChanger(winVar)
        Label(stats, text = f"Games Played: {tot}", bg = '#515151', fg = '#fff',
              font = ('Calibri 12')).grid(row = 1, column = 0, padx = 5, pady = 5)
        Label(stats, text = f"Games Won: {win}", bg = '#515151', fg = '#fff',
              font = ('Calibri 12')).grid(row = 2, column = 0, padx = 5, pady = 5)
        Label(stats, text = f"Percentage: {perc}", bg = '#515151', fg = '#fff',
              font = ('Calibri 12')).grid(row = 1, column = 1, padx = 5, pady = 5)
        exitBtn = Label(window, text = " Exit ", font = ('Calibri 26 bold'),
                        borderwidth = 3, relief = 'raised', bg = '#A10000', fg = '#fff')
        exitBtn.grid(row = 1, column = 0, padx = 7, pady = 7)
        exitBtn.bind('<Button-1>', lambda x: root.destroy())
        restBtn = Label(window, text = "Play Again", font = ('Calibri 26 bold'),
                        borderwidth = 3, relief = 'raised', bg = '#A10000', fg = '#fff')
        restBtn.grid(row = 1, column = 1, padx = 7, pady = 7)
        restBtn.bind('<Button-1>', lambda x: restart())
        if winVar == 2:
            window.title("You Win!")
            Label(stats, text = "Congratulations! You Won the Game!!",
                  bg = '#515151', fg = '#fff', font = ('Calibri 12 bold')).grid(
                  row = 0, column = 0, columnspan = 2, padx = 10, pady = 10)
        elif winVar == 0:
            window.title("You Lose!")
            Label(stats, text = "Sorry you Lost, Better Luck Next Time!",
                  bg = '#515151', fg = '#fff', font = ('Calibri 12 bold')).grid(
                  row = 0, column = 0, columnspan = 2, padx = 10, pady = 10)

def buttonFunc(i, j):
    """Create the tile widget at grid cell (i, j) and wire its click handlers."""
    # Tile size scales so the board always fills a 572x572 frame.
    height, width = (572 // y_dim), (572 // x_dim)
    image = ImageTk.PhotoImage(Image.open(
        'assets/Sprites/unrevealed_tile.png').resize((height, width)))
    lbl = Label(game, image = image)
    lbl.image = image     # keep a reference so tkinter doesn't GC the image
    lbl.grid(row = i, column = j, padx = 0.5, pady = 0.5)
    lbl.bind("<Button-1>", lambda x: show(i, j, x))
    lbl.bind("<Button-2>", lambda x: mark(i, j, x))
    lbl.bind("<Button-3>", lambda x: mark(i, j, x))

def buttonUpdater(lbl, number):
    """Swap a tile's sprite: 0-8 counts, 9 bomb, -1 flag, -2 hidden tile."""
    if number == 0:
        image = ImageTk.PhotoImage(Image.open(
            'assets/Sprites/zero.png').resize(((572 // y_dim), (572 // x_dim))))
    elif number == 1:
        image = ImageTk.PhotoImage(Image.open(
            'assets/Sprites/one.png').resize(((572 // y_dim), (572 // x_dim))))
    elif number == 2:
        image = ImageTk.PhotoImage(Image.open(
            'assets/Sprites/two.png').resize(((572 // y_dim), (572 // x_dim))))
    elif number == 3:
        image = ImageTk.PhotoImage(Image.open(
            'assets/Sprites/three.png').resize(((572 // y_dim), (572 // x_dim))))
    elif number == 4:
        image = ImageTk.PhotoImage(Image.open(
            'assets/Sprites/four.png').resize(((572 // y_dim), (572 // x_dim))))
    elif number == 5:
        image = ImageTk.PhotoImage(Image.open(
            'assets/Sprites/five.png').resize(((572 // y_dim), (572 // x_dim))))
    elif number == 6:
        image = ImageTk.PhotoImage(Image.open(
            'assets/Sprites/six.png').resize(((572 // y_dim), (572 // x_dim))))
    elif number == 7:
        image = ImageTk.PhotoImage(Image.open(
            'assets/Sprites/seven.png').resize(((572 // y_dim), (572 // x_dim))))
    elif number == 8:
        image = ImageTk.PhotoImage(Image.open(
            'assets/Sprites/eight.png').resize(((572 // y_dim), (572 // x_dim))))
    elif number == 9:
        image = ImageTk.PhotoImage(Image.open(
            'assets/Sprites/bomb.png').resize(((572 // y_dim), (572 // x_dim))))
    elif number == -1:
        image = ImageTk.PhotoImage(Image.open(
            'assets/Sprites/flag.png').resize(((572 // y_dim), (572 // x_dim))))
    elif number == -2:
        image = ImageTk.PhotoImage(Image.open(
            'assets/Sprites/unrevealed_tile.png').resize(((572 // y_dim), (572 // x_dim))))
    lbl['image'] = image
    lbl.image = image     # keep a reference so tkinter doesn't GC the image

def statChanger(x):
    """Update persisted win statistics; x is truthy only on a win.

    Returns (total, wins, percentage) after the update.
    """
    with open('assets/data', 'rb') as f:
        data = pickle.load(f)
    tot, win, perc = data['Stats']['Total'], data['Stats']['Win'], data['Stats']['Percentage']
    if x:
        win += 1
        perc = f"{(win * 100) // tot}%"
    data['Stats']['Total'], data['Stats']['Win'], data['Stats']['Percentage'] = tot, win, perc
    with open('assets/data', 'wb') as f:
        writeObject = pickle.dump(data, f)    # pickle.dump returns None
    return tot, win, perc

def dimSet(height, width, mines, diff):
    """Apply the difficulty chosen in the options dialog and restart.

    diff value: 1 beginner, 2 intermediate, 3 advanced, 0 custom entries.
    Custom dimensions are validated (>=9 each, 10 <= mines <= cells).
    """
    global x_dim, y_dim, count
    value = diff.get()
    toRestart = False
    if value == 1:
        x_dim = 9
        y_dim = 9
        count = 10
        toRestart = True
    elif value == 2:
        x_dim = 16
        y_dim = 16
        count = 40
        toRestart = True
    elif value == 3:
        x_dim = 24
        y_dim = 24
        count = 99
        toRestart = True
    elif value == 0:
        x_dim = height.get()
        y_dim = width.get()
        count = mines.get()
        x_dim = int(x_dim)
        y_dim = int(y_dim)
        count = int(count)
        if (x_dim >= 9) and (y_dim >= 9) and (10 <= count <= x_dim * y_dim):
            toRestart = True
        else:
            print('\a')    # terminal bell on invalid custom input
    if toRestart:
        # Persist the new dimensions before tearing the window down.
        with open('assets/data', 'rb') as f:
            data = pickle.load(f)
        data['Dimensions']['xdim'], data['Dimensions']['ydim'], data['Dimensions']['mines'] = x_dim, y_dim, count
        with open('assets/data', 'wb') as f:
            pickle.dump(data, f)
        restart()

def firstScreen():
    """Show the difficulty-selection dialog (presets plus custom entries)."""
    config = Toplevel()
    config.grab_set()
    config.iconbitmap('assets/Sprites/logo.ico')
    difficulty = LabelFrame(config, text = 'Difficulty')
    difficulty.pack(padx = 20, pady = 20)
    Button(config, text = "OK", command = lambda: dimSet(height, width, mines, diff)).pack()
    firstThreeDifficulty = Frame(difficulty)
    firstThreeDifficulty.grid(column = 0, row = 0, padx = 10)
    customDifficulty = Frame(difficulty)
    customDifficulty.grid(column = 1, row = 0, padx = 10)
    customDifficultyButton = Frame(customDifficulty)
    customDifficultyButton.grid(row = 0, column = 0, columnspan = 2)
    diff = IntVar()
    diff.set(1)    # beginner preselected
    bbtn = Radiobutton(firstThreeDifficulty, text = "Beginner \n10 Mines\n9x9 Grid ",
                       variable = diff, value = 1)
    bbtn.pack(anchor = W)
    ibtn = Radiobutton(firstThreeDifficulty, text = "Intermediate\n40 Mines\n16x16 Grid",
                       variable = diff, value = 2)
    ibtn.pack(anchor = W)
    abtn = Radiobutton(firstThreeDifficulty, text = "Advanced \n99 Mines\n24x24 Grid",
                       variable = diff, value = 3)
    abtn.pack(anchor = W)
    customBtn = Radiobutton(customDifficultyButton, text = "Custom", variable = diff, value = 0)
    customBtn.pack(anchor = W)

    def validate_entry(text, min, max, widget):
        # Key-validation callback: allow empty/partial input, clamp overflow.
        # NOTE(review): the `min` parameter is unused (shadows the builtin);
        # lower bounds are enforced later in dimSet — confirm intended.
        if text == "":
            return True
        try:
            value = int(text)
        except ValueError:
            # oops, couldn't convert to int
            print('\a')
            return False
        if 0 <= value <= max:
            return True
        else:
            print('\a')
            widget.delete(0, END)
            widget.insert(0, max)
            # Re-arm validation: tkinter disables it after a programmatic edit.
            height.config(validate = "key", validatecommand = hcmd)
            width.config(validate = "key", validatecommand = wcmd)
            mines.config(validate = "key", validatecommand = mcmd)
            return False

    hcmd = (root.register(lambda x: validate_entry(x, 9, 24, height)), "%P")
    wcmd = (root.register(lambda x: validate_entry(x, 9, 24, width)), "%P")
    mcmd = (root.register(lambda x: validate_entry(x, 10, min(int(height.get()) * int(width.get()), 400), mines)), "%P")
    hlbl = Label(customDifficulty, state = DISABLED, text = "Height (9-24)")
    hlbl.grid(row = 1, column = 0)
    height = Entry(customDifficulty, state = DISABLED)
    height.grid(row = 1, column = 1)
    wlbl = Label(customDifficulty, state = DISABLED, text = "Width (9-24)")
    wlbl.grid(row = 2, column = 0)
    width = Entry(customDifficulty, state = DISABLED)
    width.grid(row = 2, column = 1)
    mlbl = Label(customDifficulty, state = DISABLED, text = "Mines (10-400)")
    mlbl.grid(row = 3, column = 0)
    mines = Entry(customDifficulty, state = DISABLED)
    mines.grid(row = 3, column = 1)

    def enabler(event):
        # Selecting "Custom" enables the entry widgets and their validators.
        hlbl['state'] = NORMAL
        height['state'] = NORMAL
        height.config(validate = "key", validatecommand = hcmd)
        wlbl['state'] = NORMAL
        width['state'] = NORMAL
        width.config(validate = "key", validatecommand = wcmd)
        mlbl['state'] = NORMAL
        mines['state'] = NORMAL
        mines.config(validate = "key", validatecommand = mcmd)

    def disabler(event):
        # Selecting a preset greys the custom entries back out.
        hlbl['state'] = DISABLED
        height['state'] = DISABLED
        wlbl['state'] = DISABLED
        width['state'] = DISABLED
        mlbl['state'] = DISABLED
        mines['state'] = DISABLED

    bbtn.bind("<Button-1>", lambda x: disabler(x))
    ibtn.bind("<Button-1>", lambda x: disabler(x))
    abtn.bind("<Button-1>", lambda x: disabler(x))
    customBtn.bind("<Button-1>", lambda x: enabler(x))

# ------------------------------------------------------------------ #
# Entry point
# ------------------------------------------------------------------ #

def main():
    """Load saved settings, build the board UI and run the event loop.

    On a missing/new save file, writes a default 'assets/data' and recurses.
    """
    global x_dim, y_dim, count, F, root, main_list, printable_list, mines, zeroes, revealPerTurn, marked, revealed, game, L, x_var, y_var, tiles, timer, timer_label, stuff, mine_label
    try:
        with open('assets/data', 'rb') as f:
            data = pickle.load(f)
        x_dim, y_dim, count = data['Dimensions']['xdim'], data['Dimensions']['ydim'], data['Dimensions']['mines']
        root = Tk()
        root.title("Minesweeper")
        root.minsize(684, 700)
        root.configure(bg = '#a1a1a1')
        root.iconbitmap('assets/Sprites/logo.ico')
        main_list = [[0 for y in range(y_dim)] for x in range(x_dim)]
        printable_list = [["*" for y in range(y_dim)] for x in range(x_dim)]
        mines, zeroes, revealPerTurn, marked, revealed = [], [], [], [], []
        game = Frame(root, bg = '#000', height = 572, width = 572)
        game.pack(pady = 5)
        stuff = Frame(root, bg = '#a1a1a1', width = 572)
        stuff.pack()
        # Custom seven-segment style font for the timer/mine counters.
        pyglet.font.add_file("assets/Font/DS-DIGI.TTF")
        timer_label = Label(stuff, text = "0", fg = "white", bg = "#515151",
                            font = "DS-Digital 40", width = 5)
        timer_label.grid(row = 0, column = 0, padx = 30)
        timer = Timer(timer_label)
        mine_label = Label(stuff, text = count, fg = "white", bg = "#515151",
                           font = "DS-Digital 40", width = 5)
        mine_label.grid(row = 0, column = 1, padx = 30)
        # Count this launch as a played game and refresh the win percentage.
        with open('assets/data', 'rb') as f:
            data = pickle.load(f)
        data['Stats']['Total'] += 1
        data['Stats']['Percentage'] = f"{(data['Stats']['Win'] * 100) // data['Stats']['Total']}%"
        with open('assets/data', 'wb') as f:
            pickle.dump(data, f)
        menuMaker()
        minesAdder(x_dim, y_dim, count)
        countAdd()
        for x_var in range(x_dim):
            for y_var in range(y_dim):
                buttonFunc(x_var, y_var)
        tiles = [x for x in game.winfo_children()]
        tiles = list(tiles[i:i + y_dim] for i in range(0, len(tiles), y_dim))
        # A 5x5/0-mine board is the sentinel "first run" config: ask for one.
        if [x_dim, y_dim, count] == [5, 5, 0]:
            firstScreen()
        root.bind("<F5>", lambda x: firstScreen())
        root.bind("<F2>", lambda x: restart())
        root.mainloop()
    except (FileExistsError, FileNotFoundError):
        # First launch: create the default save file, then retry.
        with open("assets/data", "wb") as f:
            data = {
                "Dimensions": {
                    "xdim" : 5,
                    "ydim" : 5,
                    "mines": 0
                },
                "Stats" : {
                    "Total": 0,
                    "Percentage": "0%",
                    "Win": 0
                }
            }
            pickle.dump(data, f)
        main()
    except ModuleNotFoundError:
        root.destroy()
        main()

if __name__ == '__main__':
    main()
"""Solvent accessible surface area code.""" import functools import typing from typing import Any, Dict, Iterable, List, Optional, Union import numpy as np import scipy.spatial from morfeus.data import atomic_symbols, jmol_colors from morfeus.geometry import Atom, Sphere from morfeus.io import read_geometry from morfeus.typing import ArrayLike1D, ArrayLike2D from morfeus.utils import convert_elements, get_radii, Import, requires_dependency if typing.TYPE_CHECKING: from matplotlib.colors import hex2color import pyvista as pv from pyvistaqt import BackgroundPlotter class SASA: """Performs and stores results of solvent accessible surface area calculations. Args: elements: Elements as atomic symbols or numbers coordinates: Coordinates (Å) radii: VdW radii (Å) radii_type: Choice of vdW radii: 'bondi' or 'crc' (default) probe_radius: Radius of probe atom (Å) density: Area per point (Ų) on the vdW surface Attributes: area: Area of the solvent accessible surface. atom_areas: Atom areas (starting from 1) atom_volumes: Atom volumes (starting from 1) volume: Volume of the solvent accessible surface """ area: float atom_areas: Dict[int, float] atom_volumes: Dict[int, float] volume: float _atoms: List[Atom] _density: float _probe_radius: float def __init__( self, elements: Union[Iterable[int], Iterable[str]], coordinates: ArrayLike2D, radii: Optional[ArrayLike1D] = None, radii_type: str = "crc", probe_radius: float = 1.4, density: float = 0.01, ) -> None: # Converting elements to atomic numbers if the are symbols elements = convert_elements(elements, output="numbers") coordinates = np.array(coordinates) # Getting radii if they are not supplied if radii is None: radii = get_radii(elements, radii_type=radii_type) # Increment the radii with the probe radius radii = np.array(radii) radii = radii + probe_radius # Construct list of atoms atoms = [] for i, (coordinate, radius, element) in enumerate( zip(coordinates, radii, elements), start=1 ): atom = Atom(element, coordinate, radius, 
i) atoms.append(atom) # Set up attributes self._atoms = atoms self._density = density self._probe_radius = probe_radius # Determine accessible and occluded points for each atom self._determine_accessible_points() # Calculate atom areas and volumes self._calculate() def _calculate(self) -> None: """Calculate solvent accessible surface area and volume.""" for atom in self._atoms: # Get number of points of eache type n_accessible = len(atom.accessible_points) n_occluded = len(atom.occluded_points) n_points = len(atom.accessible_points) + len(atom.occluded_points) # Calculate part occluded and accessible ratio_occluded = n_occluded / n_points ratio_accessible = 1 - ratio_occluded # Calculate area area = 4 * np.pi * atom.radius**2 * ratio_accessible atom.area = area atom.point_areas = np.zeros(n_points) if n_accessible > 0: atom.point_areas[atom.accessible_mask] = atom.area / n_accessible # Center accessible points and normalize centered_points = np.array(atom.accessible_points) - atom.coordinates centered_points /= np.linalg.norm(centered_points, axis=1).reshape(-1, 1) # Add accessible points accessible_summed = np.sum(centered_points, axis=0) # Calculate volume volume = (4 * np.pi / 3 / n_points) * ( atom.radius**2 * np.dot(atom.coordinates, accessible_summed) + atom.radius**3 * n_accessible ) atom.volume = volume atom.point_volumes = np.zeros(n_points) if n_accessible > 0: atom.point_volumes[atom.accessible_mask] = atom.volume / n_accessible # Set up attributes self.atom_areas = {atom.index: atom.area for atom in self._atoms} self.atom_volumes = {atom.index: atom.volume for atom in self._atoms} self.area = sum([atom.area for atom in self._atoms]) self.volume = sum([atom.volume for atom in self._atoms]) def _determine_accessible_points(self) -> None: """Determine occluded and accessible points of each atom.""" # Based on distances to all other atoms (brute force). 
for atom in self._atoms: # Construct sphere for atom sphere = Sphere(atom.coordinates, atom.radius, density=self._density) atom.points = sphere.points # Select atoms that are at a distance less than the sum of radii # !TODO can be vectorized test_atoms = [] for test_atom in self._atoms: if test_atom is not atom: distance = scipy.spatial.distance.euclidean( atom.coordinates, test_atom.coordinates ) radii_sum = atom.radius + test_atom.radius if distance < radii_sum: test_atoms.append(test_atom) # Select coordinates and radii for other atoms test_coordinates = [test_atom.coordinates for test_atom in test_atoms] test_radii = [test_atom.radius for test_atom in test_atoms] test_radii = np.array(test_radii).reshape(-1, 1) # Get distances to other atoms and subtract radii if test_coordinates: distances = scipy.spatial.distance.cdist( test_coordinates, sphere.points ) distances -= test_radii # Take smallest distance and perform check min_distances = np.min(distances, axis=0) atom.occluded_mask = min_distances < 0 atom.accessible_mask = ~atom.occluded_mask else: atom.occluded_mask = np.zeros(len(atom.points), dtype=bool) atom.accessible_mask = np.ones(len(atom.points), dtype=bool) atom.occluded_points = sphere.points[atom.occluded_mask] atom.accessible_points = sphere.points[atom.accessible_mask] def print_report(self, verbose: bool = False) -> None: """Print report of results. 
Args: verbose: Whether to print atom areas """ print(f"Probe radius (Å): {self._probe_radius}") print(f"Solvent accessible surface area (Ų): {self.area:.1f}") print("Volume inside solvent accessible surface (ų): " f"{self.volume:.1f}") if verbose: print(f"{"Symbol":<10s}{"Index":<10s}{"Area (Ų)":<10s}") for atom, (i, area) in zip(self._atoms, self.atom_areas.items()): symbol = atomic_symbols[atom.element] print(f"{symbol:<10s}{i:<10d}{area:<10.1f}") @requires_dependency( [ Import(module="matplotlib.colors", item="hex2color"), Import(module="pyvista", alias="pv"), Import(module="pyvistaqt", item="BackgroundPlotter"), ], globals(), ) def draw_3D( self, atom_scale: float = 1, background_color: str = "white", point_color: str = "steelblue", opacity: float = 0.25, size: float = 1, ) -> None: """Draw a 3D representation. Draws the molecule with the solvent accessible surface area. Args: atom_scale: Scaling factor for atom size background_color: Background color for plot point_color: Color of surface points opacity: Point opacity size: Point size """ # Set up plotter p = BackgroundPlotter() p.set_background(background_color) # Draw molecule for atom in self._atoms: color = hex2color(jmol_colors[atom.element]) radius = atom.radius * atom_scale - self._probe_radius sphere = pv.Sphere(center=list(atom.coordinates), radius=radius) p.add_mesh(sphere, color=color, opacity=1, name=str(atom.index)) # Draw surface points surface_points = np.vstack([atom.accessible_points for atom in self._atoms]) p.add_points( surface_points, color=point_color, opacity=opacity, point_size=size ) def __repr__(self) -> str: return f"{self.__class__.__name__}({len(self._atoms)!r} atoms)" def cli(file: str) -> Any: """CLI for solvent accessible surface area. Args: file: Geometry file Returns: Partially instantiated class """ elements, coordinates = read_geometry(file) return functools.partial(SASA, elements, coordinates)
"""Solvent accessible surface area code.""" import functools import typing from typing import Any, Dict, Iterable, List, Optional, Union import numpy as np import scipy.spatial from morfeus.data import atomic_symbols, jmol_colors from morfeus.geometry import Atom, Sphere from morfeus.io import read_geometry from morfeus.typing import ArrayLike1D, ArrayLike2D from morfeus.utils import convert_elements, get_radii, Import, requires_dependency if typing.TYPE_CHECKING: from matplotlib.colors import hex2color import pyvista as pv from pyvistaqt import BackgroundPlotter class SASA: """Performs and stores results of solvent accessible surface area calculations. Args: elements: Elements as atomic symbols or numbers coordinates: Coordinates (Å) radii: VdW radii (Å) radii_type: Choice of vdW radii: 'bondi' or 'crc' (default) probe_radius: Radius of probe atom (Å) density: Area per point (Ų) on the vdW surface Attributes: area: Area of the solvent accessible surface. atom_areas: Atom areas (starting from 1) atom_volumes: Atom volumes (starting from 1) volume: Volume of the solvent accessible surface """ area: float atom_areas: Dict[int, float] atom_volumes: Dict[int, float] volume: float _atoms: List[Atom] _density: float _probe_radius: float def __init__( self, elements: Union[Iterable[int], Iterable[str]], coordinates: ArrayLike2D, radii: Optional[ArrayLike1D] = None, radii_type: str = "crc", probe_radius: float = 1.4, density: float = 0.01, ) -> None: # Converting elements to atomic numbers if the are symbols elements = convert_elements(elements, output="numbers") coordinates = np.array(coordinates) # Getting radii if they are not supplied if radii is None: radii = get_radii(elements, radii_type=radii_type) # Increment the radii with the probe radius radii = np.array(radii) radii = radii + probe_radius # Construct list of atoms atoms = [] for i, (coordinate, radius, element) in enumerate( zip(coordinates, radii, elements), start=1 ): atom = Atom(element, coordinate, radius, 
i) atoms.append(atom) # Set up attributes self._atoms = atoms self._density = density self._probe_radius = probe_radius # Determine accessible and occluded points for each atom self._determine_accessible_points() # Calculate atom areas and volumes self._calculate() def _calculate(self) -> None: """Calculate solvent accessible surface area and volume.""" for atom in self._atoms: # Get number of points of eache type n_accessible = len(atom.accessible_points) n_occluded = len(atom.occluded_points) n_points = len(atom.accessible_points) + len(atom.occluded_points) # Calculate part occluded and accessible ratio_occluded = n_occluded / n_points ratio_accessible = 1 - ratio_occluded # Calculate area area = 4 * np.pi * atom.radius**2 * ratio_accessible atom.area = area atom.point_areas = np.zeros(n_points) if n_accessible > 0: atom.point_areas[atom.accessible_mask] = atom.area / n_accessible # Center accessible points and normalize centered_points = np.array(atom.accessible_points) - atom.coordinates centered_points /= np.linalg.norm(centered_points, axis=1).reshape(-1, 1) # Add accessible points accessible_summed = np.sum(centered_points, axis=0) # Calculate volume volume = (4 * np.pi / 3 / n_points) * ( atom.radius**2 * np.dot(atom.coordinates, accessible_summed) + atom.radius**3 * n_accessible ) atom.volume = volume atom.point_volumes = np.zeros(n_points) if n_accessible > 0: atom.point_volumes[atom.accessible_mask] = atom.volume / n_accessible # Set up attributes self.atom_areas = {atom.index: atom.area for atom in self._atoms} self.atom_volumes = {atom.index: atom.volume for atom in self._atoms} self.area = sum([atom.area for atom in self._atoms]) self.volume = sum([atom.volume for atom in self._atoms]) def _determine_accessible_points(self) -> None: """Determine occluded and accessible points of each atom.""" # Based on distances to all other atoms (brute force). 
for atom in self._atoms: # Construct sphere for atom sphere = Sphere(atom.coordinates, atom.radius, density=self._density) atom.points = sphere.points # Select atoms that are at a distance less than the sum of radii # !TODO can be vectorized test_atoms = [] for test_atom in self._atoms: if test_atom is not atom: distance = scipy.spatial.distance.euclidean( atom.coordinates, test_atom.coordinates ) radii_sum = atom.radius + test_atom.radius if distance < radii_sum: test_atoms.append(test_atom) # Select coordinates and radii for other atoms test_coordinates = [test_atom.coordinates for test_atom in test_atoms] test_radii = [test_atom.radius for test_atom in test_atoms] test_radii = np.array(test_radii).reshape(-1, 1) # Get distances to other atoms and subtract radii if test_coordinates: distances = scipy.spatial.distance.cdist( test_coordinates, sphere.points ) distances -= test_radii # Take smallest distance and perform check min_distances = np.min(distances, axis=0) atom.occluded_mask = min_distances < 0 atom.accessible_mask = ~atom.occluded_mask else: atom.occluded_mask = np.zeros(len(atom.points), dtype=bool) atom.accessible_mask = np.ones(len(atom.points), dtype=bool) atom.occluded_points = sphere.points[atom.occluded_mask] atom.accessible_points = sphere.points[atom.accessible_mask] def print_report(self, verbose: bool = False) -> None: """Print report of results. 
Args: verbose: Whether to print atom areas """ print(f"Probe radius (Å): {self._probe_radius}") print(f"Solvent accessible surface area (Ų): {self.area:.1f}") print("Volume inside solvent accessible surface (ų): " f"{self.volume:.1f}") if verbose: print(f"{'Symbol':<10s}{'Index':<10s}{'Area (Ų)':<10s}") for atom, (i, area) in zip(self._atoms, self.atom_areas.items()): symbol = atomic_symbols[atom.element] print(f"{symbol:<10s}{i:<10d}{area:<10.1f}") @requires_dependency( [ Import(module="matplotlib.colors", item="hex2color"), Import(module="pyvista", alias="pv"), Import(module="pyvistaqt", item="BackgroundPlotter"), ], globals(), ) def draw_3D( self, atom_scale: float = 1, background_color: str = "white", point_color: str = "steelblue", opacity: float = 0.25, size: float = 1, ) -> None: """Draw a 3D representation. Draws the molecule with the solvent accessible surface area. Args: atom_scale: Scaling factor for atom size background_color: Background color for plot point_color: Color of surface points opacity: Point opacity size: Point size """ # Set up plotter p = BackgroundPlotter() p.set_background(background_color) # Draw molecule for atom in self._atoms: color = hex2color(jmol_colors[atom.element]) radius = atom.radius * atom_scale - self._probe_radius sphere = pv.Sphere(center=list(atom.coordinates), radius=radius) p.add_mesh(sphere, color=color, opacity=1, name=str(atom.index)) # Draw surface points surface_points = np.vstack([atom.accessible_points for atom in self._atoms]) p.add_points( surface_points, color=point_color, opacity=opacity, point_size=size ) def __repr__(self) -> str: return f"{self.__class__.__name__}({len(self._atoms)!r} atoms)" def cli(file: str) -> Any: """CLI for solvent accessible surface area. Args: file: Geometry file Returns: Partially instantiated class """ elements, coordinates = read_geometry(file) return functools.partial(SASA, elements, coordinates)
# ====================================================
# Library
# ====================================================
import os
import gc
import sys
import math
import time
import random
import shutil
from requests import get
from pathlib import Path
from contextlib import contextmanager
from collections import defaultdict, Counter
from functools import partial

import scipy as sp
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import roc_auc_score
from sklearn import model_selection
from tqdm.auto import tqdm

import cv2
from PIL import Image

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import torchvision.models as models
from torch.nn.parameter import Parameter
from torch.utils.data import DataLoader, Dataset
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, CosineAnnealingLR, ReduceLROnPlateau
from torch.cuda.amp import autocast, GradScaler

import albumentations as A
from albumentations import (
    Compose, OneOf, Normalize, Resize, RandomResizedCrop, RandomCrop,
    HorizontalFlip, VerticalFlip, RandomBrightness, RandomContrast,
    RandomBrightnessContrast, Rotate, ShiftScaleRotate, Cutout,
    IAAAdditiveGaussianNoise, Transpose)
from albumentations.pytorch import ToTensorV2
from albumentations import ImageOnlyTransform

# Resolve the project root depending on whether we run inside a Kaggle kernel.
if 'kaggle_web_client' in sys.modules:
    ROOT_DIR = '/kaggle/'
else:
    ROOT_DIR = '/home/yuki/Kaggle-SIIM-FISABIO-RSNA'

# timm and SAM are shipped as Kaggle datasets, not pip packages.
sys.path.append(os.path.join(ROOT_DIR, 'input/timm-pytorch-image-models/pytorch-image-models-master'))
import timm

sys.path.append(os.path.join(ROOT_DIR, 'input/pytorch-sam'))
from sam import SAM

import warnings
warnings.filterwarnings('ignore')

# ====================================================
# Directory settings
# ====================================================
if 'kaggle_web_client' in sys.modules:
    OUTPUT_DIR = os.path.join(ROOT_DIR, 'working/')
else:
    # Derive output sub-directories from the script name,
    # e.g. "exp-foo-001.py" -> output/foo/001.
    name_code = os.path.splitext(os.path.basename(__file__))[0].split('-')
    OUTPUT_DIR = os.path.join(ROOT_DIR, 'output/', name_code[1], name_code[-1])
if not os.path.exists(OUTPUT_DIR):
    os.makedirs(OUTPUT_DIR)


# ====================================================
# Config
# ====================================================
class CFG:
    """All experiment hyper-parameters in one place."""

    ######################
    # Globals #
    ######################
    debug = False
    use_amp = False
    print_freq = 100
    size = 384                      # input image side length
    epochs = 6
    gradient_accumulation_steps = 1
    max_grad_norm = 10000
    seed = 42
    target_col = 'detection_label'  # 1 = has boxes, 0 = "none" image
    n_fold = 5
    trn_fold = [0]
    train = True

    ######################
    # Loaders #
    ######################
    loader_params = {
        "train": {
            "batch_size": 32,
            "num_workers": 4,
            "shuffle": True,
            "pin_memory": True,
            "drop_last": True
        },
        "valid": {
            "batch_size": 64,
            "num_workers": 4,
            "shuffle": False,
            "pin_memory": True,
            "drop_last": False
        },
        "test": {
            "batch_size": 64,
            "num_workers": 4,
            "shuffle": False,
            "pin_memory": True,
            "drop_last": False
        }
    }

    ######################
    # Split #
    ######################
    split_name = "StratifiedKFold"
    split_params = {
        "n_splits": 5,
        "shuffle": True,
        "random_state": 42
    }

    ######################
    # Criterion #
    ######################
    loss_name = "BCEWithLogitsLoss"
    loss_params: dict = {}

    ######################
    # Optimizer #
    ######################
    optimizer_name = "AdamW"
    optimizer_params = {
        "lr": 1e-4,
        "weight_decay": 1e-6,
        "amsgrad": False
    }
    # For SAM optimizer: name of the wrapped torch.optim class.
    base_optimizer = "AdamW"

    ######################
    # Scheduler #
    ######################
    scheduler_name = 'CosineAnnealingLR'
    scheduler_params = {
        "T_max": 6,
        "eta_min": 1e-6,
        "last_epoch": -1
    }

    ######################
    # Model #
    ######################
    model_name = "tf_efficientnet_b3_ns"
    pretrained = True
    target_size = 1


# ====================================================
# Data Loading
# ====================================================
def get_train_file_path(image_id):
    """Return the absolute path of a resized train JPEG for *image_id*."""
    return os.path.join(ROOT_DIR,
                        f"input/siim-covid19-resized-to-256px-jpg/train/{image_id}.jpg")


def get_test_file_path(image_id):
    """Return the absolute path of a resized test JPEG for *image_id*."""
    # BUG FIX: the second component must be relative. With a leading "/",
    # os.path.join discards ROOT_DIR entirely and the path never exists.
    return os.path.join(ROOT_DIR,
                        f"input/siim-covid19-resized-to-256px-jpg/test/{image_id}.jpg")


train = pd.read_csv(os.path.join(ROOT_DIR, 'input/siim-covid19-updated-train-labels/updated_train_labels.csv'))
# A full-image [0, 0, 1, 1] box encodes a "none" (no opacity) study -> label 0.
train['detection_label'] = train.apply(lambda row: 0 if row[[
    'xmin', 'ymin', 'xmax', 'ymax']].values.tolist() == [0, 0, 1, 1] else 1, axis=1)

# Skipped: this bbox conversion is slow and has no effect on the mAP metric.
# cols = ['xmin', 'ymin', 'xmax', 'ymax']
# for idx, (xmin, ymin, xmax, ymax, label) in enumerate(zip(train['frac_xmin'].to_numpy(),
#                                                           train['frac_ymin'].to_numpy(),
#                                                           train['frac_xmax'].to_numpy(),
#                                                           train['frac_ymax'].to_numpy(),
#                                                           train['detection_label'].to_numpy())):
#     if label == 0:
#         train.loc[idx, cols] = [0, 0, 1, 1]
#     else:
#         bbox = [xmin, ymin, xmax, ymax]
#         train.loc[idx, cols] = A.convert_bbox_from_albumentations(
#             bbox, 'pascal_voc', CFG.size, CFG.size)

test = pd.read_csv(os.path.join(ROOT_DIR, 'input/siim-covid19-updated-train-labels/updated_sample_submission.csv'))

train['filepath'] = train['id'].apply(get_train_file_path)
test['filepath'] = test['id'].apply(get_test_file_path)

if CFG.debug:
    CFG.epochs = 1
    # train = train.sample(n=1000, random_state=CFG.seed).reset_index(drop=True)


# ====================================================
# Utils
# ====================================================
def get_score(y_true, y_pred):
    """ROC-AUC of predictions against binary ground truth."""
    score = roc_auc_score(y_true, y_pred)
    return score


def get_result(result_df):
    """Log the ROC-AUC of the 'preds' column of an OOF dataframe."""
    preds = result_df['preds'].values
    labels = result_df[CFG.target_col].values
    score = get_score(labels, preds)
    LOGGER.info(f'Score: {score:<.5f}')


def get_annotations(df):
    """Columns expected by mean_average_precision_for_boxes() as annotations."""
    return df[['id', 'detection_label', 'xmin', 'ymin', 'xmax', 'ymax']]


def get_predictions(df, col):
    """Turn classifier scores in *col* into full-image 'none' detections.

    conf = 1 - p(opacity), i.e. the confidence that the image is negative;
    every prediction gets the fixed [0, 0, 1, 1] "none" box.
    """
    df_ = df.copy()
    df_ = df_[['id', col]]
    df_ = df_.rename(columns={col: 'conf'})
    df_['conf'] = df_['conf'].apply(lambda x: 1 - x)
    # df_['detection_label'] = df_['conf'].apply(lambda x: '0' if x > 0.5 else '1')
    # NOTE(review): axis=1 concat aligns on the index — callers are expected to
    # pass a frame with a default RangeIndex (train_loop resets it); verify.
    df_bbox = pd.DataFrame({
        'detection_label': ['0'] * len(df_),
        'xmin': [0] * len(df_),
        'ymin': [0] * len(df_),
        'xmax': [1] * len(df_),
        'ymax': [1] * len(df_),
    })
    df_ = pd.concat([df_, df_bbox], axis=1)
    return df_


def compute_overlap(boxes, query_boxes):
    """
    Args
        boxes: (N, 4) ndarray of float
        query_boxes: (4) ndarray of float
    Returns
        overlaps: (N) ndarray of IoU between each of *boxes* and *query_boxes*
    """
    N = boxes.shape[0]
    overlaps = np.zeros((N), dtype=np.float64)
    box_area = (
        (query_boxes[2] - query_boxes[0]) *
        (query_boxes[3] - query_boxes[1])
    )
    for n in range(N):
        iw = (
            min(boxes[n, 2], query_boxes[2]) -
            max(boxes[n, 0], query_boxes[0])
        )
        if iw > 0:
            ih = (
                min(boxes[n, 3], query_boxes[3]) -
                max(boxes[n, 1], query_boxes[1])
            )
            if ih > 0:
                # union area = area(box) + area(query) - intersection
                ua = np.float64(
                    (boxes[n, 2] - boxes[n, 0]) *
                    (boxes[n, 3] - boxes[n, 1]) +
                    box_area - iw * ih
                )
                overlaps[n] = iw * ih / ua
    return overlaps


def check_if_true_or_false_positive(annotations, detections, iou_threshold):
    """Classify each detection of one image/class as TP or FP.

    Returns (scores, false_positives, true_positives) as parallel lists,
    one entry per detection.
    """
    annotations = np.array(annotations, dtype=np.float64)
    scores = []
    false_positives = []
    true_positives = []
    # a GT box should be mapped to one predicted box at most.
    detected_annotations = []
    for d in detections:
        scores.append(d[4])
        if len(annotations) == 0:
            false_positives.append(1)
            true_positives.append(0)
            continue
        overlaps = compute_overlap(annotations, d[:4])
        assigned_annotation = np.argmax(overlaps)
        max_overlap = overlaps[assigned_annotation]
        if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
            false_positives.append(0)
            true_positives.append(1)
            detected_annotations.append(assigned_annotation)
        else:
            false_positives.append(1)
            true_positives.append(0)
    return scores, false_positives, true_positives


def _compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves.
    Code originally from https://github.com/rbgirshick/py-faster-rcnn.
    # Arguments
        recall:    The recall curve (list).
        precision: The precision curve (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """
    # correct AP calculation
    # first append sentinel values at the end
    mrec = np.concatenate(([0.], recall, [1.]))
    mpre = np.concatenate(([0.], precision, [0.]))

    # compute the precision envelope
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

    # to calculate area under PR curve, look for points
    # where X axis (recall) changes value
    i = np.where(mrec[1:] != mrec[:-1])[0]

    # and sum (\Delta recall) * prec
    ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap


def get_real_annotations(table):
    """Group ground-truth boxes as {image_id: {label: [[x1,y1,x2,y2], ...]}}."""
    res = dict()
    # BUG FIX: np.str was removed in NumPy 1.24; the builtin str is the
    # documented replacement.
    ids = table['id'].values.astype(str)
    labels = table['detection_label'].values.astype(str)
    xmin = table['xmin'].values.astype(np.float32)
    xmax = table['xmax'].values.astype(np.float32)
    ymin = table['ymin'].values.astype(np.float32)
    ymax = table['ymax'].values.astype(np.float32)
    for i in range(len(ids)):
        id = ids[i]
        label = labels[i]
        if id not in res:
            res[id] = dict()
        if label not in res[id]:
            res[id][label] = []
        box = [xmin[i], ymin[i], xmax[i], ymax[i]]
        res[id][label].append(box)
    return res


def get_detections(table):
    """Group detections as {image_id: {label: [[x1,y1,x2,y2,conf], ...]}}."""
    res = dict()
    # BUG FIX: np.str was removed in NumPy 1.24; use the builtin str.
    ids = table['id'].values.astype(str)
    labels = table['detection_label'].values.astype(str)
    scores = table['conf'].values.astype(np.float32)
    xmin = table['xmin'].values.astype(np.float32)
    xmax = table['xmax'].values.astype(np.float32)
    ymin = table['ymin'].values.astype(np.float32)
    ymax = table['ymax'].values.astype(np.float32)
    for i in range(len(ids)):
        id = ids[i]
        label = labels[i]
        if id not in res:
            res[id] = dict()
        if label not in res[id]:
            res[id][label] = []
        box = [xmin[i], ymin[i], xmax[i], ymax[i], scores[i]]
        res[id][label].append(box)
    return res


def mean_average_precision_for_boxes(ann, pred, iou_threshold=0.5, exclude_not_in_annotations=False, verbose=True):
    """
    :param ann: path to CSV-file with annotations or numpy array of shape (N, 6)
    :param pred: path to CSV-file with predictions (detections) or numpy array of shape (N, 7)
    :param iou_threshold: IoU between boxes which count as 'match'. Default: 0.5
    :param exclude_not_in_annotations: exclude image IDs which are not exist in annotations. Default: False
    :param verbose: print detailed run info. Default: True
    :return: tuple, where first value is mAP and second values is dict with AP for each class.
    """
    valid = pd.DataFrame(
        ann, columns=['id', 'detection_label', 'xmin', 'ymin', 'xmax', 'ymax'])
    preds = pd.DataFrame(
        pred, columns=['id', 'detection_label', 'conf', 'xmin', 'ymin', 'xmax', 'ymax'])
    ann_unique = valid['id'].unique()
    preds_unique = preds['id'].unique()

    if verbose:
        print('Number of files in annotations: {}'.format(len(ann_unique)))
        print('Number of files in predictions: {}'.format(len(preds_unique)))

    # Exclude files not in annotations!
    if exclude_not_in_annotations:
        preds = preds[preds['id'].isin(ann_unique)]
        preds_unique = preds['id'].unique()
        if verbose:
            print('Number of files in detection after reduction: {}'.format(
                len(preds_unique)))

    # BUG FIX: np.str was removed in NumPy 1.24; use the builtin str.
    unique_classes = valid['detection_label'].unique().astype(str)
    if verbose:
        print('Unique classes: {}'.format(len(unique_classes)))

    all_detections = get_detections(preds)
    all_annotations = get_real_annotations(valid)
    if verbose:
        print('Detections length: {}'.format(len(all_detections)))
        print('Annotations length: {}'.format(len(all_annotations)))

    average_precisions = {}
    for zz, label in enumerate(sorted(unique_classes)):

        # Negative class
        if str(label) == 'nan':
            continue

        false_positives = []
        true_positives = []
        scores = []
        num_annotations = 0.0

        for i in range(len(ann_unique)):
            detections = []
            annotations = []
            id = ann_unique[i]
            if id in all_detections:
                if label in all_detections[id]:
                    detections = all_detections[id][label]
            if id in all_annotations:
                if label in all_annotations[id]:
                    annotations = all_annotations[id][label]

            if len(detections) == 0 and len(annotations) == 0:
                continue

            num_annotations += len(annotations)

            scr, fp, tp = check_if_true_or_false_positive(
                annotations, detections, iou_threshold)
            scores += scr
            false_positives += fp
            true_positives += tp

        if num_annotations == 0:
            # BUG FIX: store a 4-tuple like the normal path — the summary loop
            # below unpacks (ap, num_annotations, precision, recall) and a
            # 2-tuple raised ValueError for classes without annotations.
            average_precisions[label] = 0, 0, [], []
            continue

        false_positives = np.array(false_positives)
        true_positives = np.array(true_positives)
        scores = np.array(scores)

        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]

        # compute cumulative false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)

        # compute recall and precision
        recall = true_positives / num_annotations
        precision = true_positives / \
            np.maximum(true_positives + false_positives,
                       np.finfo(np.float64).eps)

        # compute average precision
        average_precision = _compute_ap(recall, precision)
        average_precisions[label] = average_precision, num_annotations, precision, recall

        if verbose:
            s1 = "{:30s} | {:.6f} | {:7d}".format(
                label, average_precision, int(num_annotations))
            print(s1)

    present_classes = 0
    precision = 0
    for label, (average_precision, num_annotations, _, _) in average_precisions.items():
        if num_annotations > 0:
            present_classes += 1
            precision += average_precision
    mean_ap = precision / present_classes
    if verbose:
        print('mAP: {:.6f}'.format(mean_ap))
    return mean_ap, average_precisions


def init_logger(log_file=os.path.join(OUTPUT_DIR, 'train.log')):
    """Create a logger that mirrors messages to stdout and *log_file*."""
    from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler
    logger = getLogger(__name__)
    logger.setLevel(INFO)
    handler1 = StreamHandler()
    handler1.setFormatter(Formatter("%(message)s"))
    handler2 = FileHandler(filename=log_file)
    handler2.setFormatter(Formatter("%(message)s"))
    logger.addHandler(handler1)
    logger.addHandler(handler2)
    return logger


LOGGER = init_logger()


def seed_torch(seed=42):
    """Seed python, numpy and torch RNGs for (mostly) deterministic runs."""
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
seed_torch(seed=CFG.seed)


def get_device() -> torch.device:
    """Return the CUDA device if one is available, else the CPU."""
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')


device = get_device()

# =================================================
# CV Split
# =================================================
folds = train.copy()
Fold = getattr(model_selection, CFG.split_name)(**CFG.split_params)
for n, (train_index, valid_index) in enumerate(Fold.split(folds, folds[CFG.target_col])):
    folds.loc[valid_index, 'fold'] = int(n)
folds['fold'] = folds['fold'].astype(int)
print(folds.groupby(['fold', CFG.target_col]).size())


# ====================================================
# Transform
# ====================================================
def get_transforms(*, data):
    """Return the albumentations pipeline for 'train' or 'valid' data."""
    if data == 'train':
        return Compose([
            Resize(CFG.size, CFG.size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            ToTensorV2(),
        ])
    elif data == 'valid':
        return Compose([
            Resize(CFG.size, CFG.size),
            ToTensorV2(),
        ])


# ====================================================
# Dataset
# ====================================================
class SiimDataset(Dataset):
    """Reads an image from df['filepath'] and its binary target column."""

    def __init__(self, df=None, transform=None):
        self.df = df
        self.transform = transform

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        filepath = self.df.loc[idx, 'filepath']
        image = cv2.imread(filepath)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if self.transform:
            image = self.transform(image=image)['image']
        label = torch.tensor(self.df.loc[idx, CFG.target_col])
        return image.float(), label.float()


# ====================================================
# Scheduler
# ====================================================
def get_scheduler(optimizer=None):
    """Build the LR scheduler named in CFG, or None if disabled."""
    if CFG.scheduler_name is None:
        return
    return getattr(optim.lr_scheduler, CFG.scheduler_name)(optimizer, **CFG.scheduler_params)


def scheduler_step(scheduler=None, avg_val_loss=None):
    """Advance the scheduler; ReduceLROnPlateau needs the validation loss."""
    if isinstance(scheduler, ReduceLROnPlateau):
        scheduler.step(avg_val_loss)
    elif isinstance(scheduler, CosineAnnealingLR):
        scheduler.step()
    elif isinstance(scheduler, CosineAnnealingWarmRestarts):
        scheduler.step()


# ====================================================
# Criterion
# ====================================================
def get_criterion():
    """Build the loss named in CFG from torch.nn."""
    if hasattr(nn, CFG.loss_name):
        return getattr(nn, CFG.loss_name)(**CFG.loss_params)
    else:
        raise NotImplementedError


# ====================================================
# Optimizer
# ====================================================
def get_optimizer(model: nn.Module):
    """Build the optimizer named in CFG; supports the SAM wrapper."""
    if CFG.optimizer_name == 'SAM':
        # BUG FIX: the config attribute is `base_optimizer`;
        # `CFG.base_optimizer_name` does not exist and raised AttributeError.
        base_optimizer = getattr(optim, CFG.base_optimizer)
        return SAM(model.parameters(), base_optimizer, **CFG.optimizer_params)
    else:
        if hasattr(optim, CFG.optimizer_name):
            return getattr(optim, CFG.optimizer_name)(model.parameters(), **CFG.optimizer_params)
        else:
            raise NotImplementedError


# ====================================================
# Model
# ====================================================
class CustomEfficientNet(nn.Module):
    """timm backbone with the classifier head replaced for CFG.target_size."""

    def __init__(self, model_name=CFG.model_name, pretrained=False):
        super().__init__()
        self.model = timm.create_model(model_name, pretrained=pretrained)
        n_features = self.model.classifier.in_features
        self.model.classifier = nn.Linear(n_features, CFG.target_size)

    def forward(self, x):
        x = self.model(x)
        # BUG FIX: squeeze only the class dimension — a bare torch.squeeze(x)
        # also collapsed the batch axis when the last batch had size 1.
        return torch.squeeze(x, dim=1)


# ====================================================
# Helper functions
# ====================================================
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def asMinutes(s):
    """Format *s* seconds as 'Xm Ys'."""
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)


def timeSince(since, percent):
    """Format elapsed time and the projected remaining time at *percent* done."""
    now = time.time()
    s = now - since
    es = s / (percent)
    rs = es - s
    return '%s (remain %s)' % (asMinutes(s), asMinutes(rs))


def train_fn(train_loader, model, criterion, optimizer, epoch, scheduler, device):
    """Run one training epoch; returns the average training loss."""
    scaler = GradScaler(enabled=CFG.use_amp)
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    # switch to train mode
    model.train()
    start = end = time.time()
    global_step = 0
    for step, (images, labels) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        images = images.to(device)
        labels = labels.to(device)
        batch_size = labels.size(0)
        with autocast(enabled=CFG.use_amp):
            y_preds = model(images)
            loss = criterion(y_preds, labels)
        # record loss
        losses.update(loss.item(), batch_size)
        if CFG.gradient_accumulation_steps > 1:
            loss = loss / CFG.gradient_accumulation_steps
        scaler.scale(loss).backward()
        # NOTE(review): with use_amp=True the gradients are still scaled here;
        # clipping should be preceded by scaler.unscale_(optimizer) — confirm
        # before enabling AMP (harmless while use_amp=False).
        grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), CFG.max_grad_norm)
        if (step + 1) % CFG.gradient_accumulation_steps == 0:
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()
            global_step += 1
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if step % CFG.print_freq == 0 or step == (len(train_loader) - 1):
            print('Epoch: [{0}][{1}/{2}] '
                  'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                  'Elapsed {remain:s} '
                  'Loss: {loss.val:.4f}({loss.avg:.4f}) '
                  'Grad: {grad_norm:.4f} '
                  'LR: {lr:.6f} '
                  .format(epoch + 1, step, len(train_loader),
                          data_time=data_time, loss=losses,
                          remain=timeSince(start, float(step + 1) / len(train_loader)),
                          grad_norm=grad_norm,
                          # get_lr() is deprecated; get_last_lr() is the
                          # supported accessor for the current LR.
                          lr=scheduler.get_last_lr()[0],
                          )
                  )
    return losses.avg


def valid_fn(valid_loader, model, criterion, device):
    """Run validation; returns (average loss, sigmoid predictions)."""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    # switch to evaluation mode
    model.eval()
    preds = []
    start = end = time.time()
    for step, (images, labels) in enumerate(valid_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        images = images.to(device)
        labels = labels.to(device)
        batch_size = labels.size(0)
        # compute loss
        with torch.no_grad():
            y_preds = model(images)
            loss = criterion(y_preds, labels)
        losses.update(loss.item(), batch_size)
        # record predictions as probabilities
        preds.append(y_preds.sigmoid().to('cpu').numpy())
        if CFG.gradient_accumulation_steps > 1:
            loss = loss / CFG.gradient_accumulation_steps
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if step % CFG.print_freq == 0 or step == (len(valid_loader) - 1):
            print('EVAL: [{0}/{1}] '
                  'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                  'Elapsed {remain:s} '
                  'Loss: {loss.val:.4f}({loss.avg:.4f}) '
                  .format(step, len(valid_loader),
                          data_time=data_time, loss=losses,
                          remain=timeSince(start, float(step + 1) / len(valid_loader)),
                          )
                  )
    predictions = np.concatenate(preds)
    return losses.avg, predictions


# ====================================================
# Train loop
# ====================================================
def train_loop(folds, fold):
    """Train one CV fold and return its OOF dataframe with 'preds' filled."""

    LOGGER.info(f'========== fold: {fold} training ==========')

    # ====================================================
    # loader
    # ====================================================
    train_index = folds[folds['fold'] != fold].index
    valid_index = folds[folds['fold'] == fold].index

    train_folds = folds.loc[train_index].reset_index(drop=True)
    valid_folds = folds.loc[valid_index].reset_index(drop=True)

    train_dataset = SiimDataset(train_folds, transform=get_transforms(data='train'))
    valid_dataset = SiimDataset(valid_folds, transform=get_transforms(data='valid'))

    train_loader = DataLoader(train_dataset, **CFG.loader_params['train'])
    valid_loader = DataLoader(valid_dataset, **CFG.loader_params['valid'])

    # ====================================================
    # model
    # ====================================================
    model = CustomEfficientNet(CFG.model_name, pretrained=CFG.pretrained)
    model.to(device)

    criterion = get_criterion()
    optimizer = get_optimizer(model)
    scheduler = get_scheduler(optimizer)

    # ====================================================
    # loop
    # ====================================================
    best_score = -np.inf

    for epoch in range(CFG.epochs):

        start_time = time.time()

        # train
        avg_loss = train_fn(train_loader, model, criterion, optimizer, epoch, scheduler, device)

        # eval
        avg_val_loss, preds = valid_fn(valid_loader, model, criterion, device)

        # BUG FIX: pass the validation loss through so a ReduceLROnPlateau
        # scheduler receives the metric it steps on.
        scheduler_step(scheduler, avg_val_loss)

        # scoring: AP of the "none" class at IoU 0.5
        valid_folds['preds'] = preds
        annotations = get_annotations(valid_folds)
        predictions = get_predictions(valid_folds, col='preds')
        mAP, AP = mean_average_precision_for_boxes(
            annotations, predictions, iou_threshold=0.5, exclude_not_in_annotations=False, verbose=False)
        score = AP['0'][0]

        elapsed = time.time() - start_time

        LOGGER.info(f'Epoch {epoch+1} - avg_train_loss: {avg_loss:.4f} avg_val_loss: {avg_val_loss:.4f} time: {elapsed:.0f}s')
        LOGGER.info(f'Epoch {epoch+1} - mAP: {score}')

        if score > best_score:
            best_score = score
            LOGGER.info(f'Epoch {epoch+1} - Save Best Score: {best_score:.4f} Model')
            torch.save({'model': model.state_dict(),
                        'preds': preds},
                       os.path.join(OUTPUT_DIR, f'{CFG.model_name}_fold{fold}_best.pth')
                       )

    check_point = torch.load(os.path.join(OUTPUT_DIR, f'{CFG.model_name}_fold{fold}_best.pth'))
    valid_folds['preds'] = check_point['preds']

    del model, optimizer, scheduler
    gc.collect()
    torch.cuda.empty_cache()

    return valid_folds


# ====================================================
# main
# ====================================================
def main():
    """
    Prepare: 1.train 2.test 3.submission 4.folds
    """
    if CFG.train:
        # train
        oof_df = pd.DataFrame()
        for fold in range(CFG.n_fold):
            if fold in CFG.trn_fold:
                _oof_df = train_loop(folds, fold)
                oof_df = pd.concat([oof_df, _oof_df])
                LOGGER.info(f'========== fold: {fold} result ==========')
                annotations = get_annotations(_oof_df)
                predictions = get_predictions(_oof_df, col='preds')
                mAP, AP = mean_average_precision_for_boxes(
                    annotations, predictions, iou_threshold=0.5, exclude_not_in_annotations=False, verbose=False)
                # BUG FIX: single quotes inside the f-string — nested double
                # quotes are a SyntaxError on Python < 3.12.
                LOGGER.info(f"Class none: {AP['0'][0]:.4f}")
        # CV result
        if len(CFG.trn_fold) != 1:
            LOGGER.info('========== CV ==========')
            # BUG FIX: score the concatenated OOF frame, not the last fold.
            annotations = get_annotations(oof_df)
            predictions = get_predictions(oof_df, col='preds')
            mAP, AP = mean_average_precision_for_boxes(annotations, predictions,
                                                       iou_threshold=0.5, exclude_not_in_annotations=False, verbose=False)
            LOGGER.info(f"Class none: {AP['0'][0]:.4f}")
        # save result
        oof_df.to_csv(os.path.join(OUTPUT_DIR, 'oof_df.csv'), index=False)


if __name__ == '__main__':
    main()
# ====================================================
# Library
# ====================================================
import os
import gc
import sys
import math
import time
import random
import shutil
from requests import get
from pathlib import Path
from contextlib import contextmanager
from collections import defaultdict, Counter
from functools import partial

import scipy as sp
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import roc_auc_score
from sklearn import model_selection
from tqdm.auto import tqdm

import cv2
from PIL import Image

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import torchvision.models as models
from torch.nn.parameter import Parameter
from torch.utils.data import DataLoader, Dataset
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, CosineAnnealingLR, ReduceLROnPlateau
from torch.cuda.amp import autocast, GradScaler

import albumentations as A
from albumentations import (
    Compose, OneOf, Normalize, Resize, RandomResizedCrop, RandomCrop,
    HorizontalFlip, VerticalFlip, RandomBrightness, RandomContrast,
    RandomBrightnessContrast, Rotate, ShiftScaleRotate, Cutout,
    IAAAdditiveGaussianNoise, Transpose)
from albumentations.pytorch import ToTensorV2
from albumentations import ImageOnlyTransform

# Resolve the project root depending on whether we run inside a Kaggle kernel.
if 'kaggle_web_client' in sys.modules:
    ROOT_DIR = '/kaggle/'
else:
    ROOT_DIR = '/home/yuki/Kaggle-SIIM-FISABIO-RSNA'

# timm and SAM are shipped as Kaggle datasets, not pip packages.
sys.path.append(os.path.join(ROOT_DIR, 'input/timm-pytorch-image-models/pytorch-image-models-master'))
import timm

sys.path.append(os.path.join(ROOT_DIR, 'input/pytorch-sam'))
from sam import SAM

import warnings
warnings.filterwarnings('ignore')

# ====================================================
# Directory settings
# ====================================================
if 'kaggle_web_client' in sys.modules:
    OUTPUT_DIR = os.path.join(ROOT_DIR, 'working/')
else:
    # Derive output sub-directories from the script name,
    # e.g. "exp-foo-001.py" -> output/foo/001.
    name_code = os.path.splitext(os.path.basename(__file__))[0].split('-')
    OUTPUT_DIR = os.path.join(ROOT_DIR, 'output/', name_code[1], name_code[-1])
if not os.path.exists(OUTPUT_DIR):
    os.makedirs(OUTPUT_DIR)


# ====================================================
# Config
# ====================================================
class CFG:
    """All experiment hyper-parameters in one place."""

    ######################
    # Globals #
    ######################
    debug = False
    use_amp = False
    print_freq = 100
    size = 384                      # input image side length
    epochs = 6
    gradient_accumulation_steps = 1
    max_grad_norm = 10000
    seed = 42
    target_col = 'detection_label'  # 1 = has boxes, 0 = "none" image
    n_fold = 5
    trn_fold = [0]
    train = True

    ######################
    # Loaders #
    ######################
    loader_params = {
        "train": {
            "batch_size": 32,
            "num_workers": 4,
            "shuffle": True,
            "pin_memory": True,
            "drop_last": True
        },
        "valid": {
            "batch_size": 64,
            "num_workers": 4,
            "shuffle": False,
            "pin_memory": True,
            "drop_last": False
        },
        "test": {
            "batch_size": 64,
            "num_workers": 4,
            "shuffle": False,
            "pin_memory": True,
            "drop_last": False
        }
    }

    ######################
    # Split #
    ######################
    split_name = "StratifiedKFold"
    split_params = {
        "n_splits": 5,
        "shuffle": True,
        "random_state": 42
    }

    ######################
    # Criterion #
    ######################
    loss_name = "BCEWithLogitsLoss"
    loss_params: dict = {}

    ######################
    # Optimizer #
    ######################
    optimizer_name = "AdamW"
    optimizer_params = {
        "lr": 1e-4,
        "weight_decay": 1e-6,
        "amsgrad": False
    }
    # For SAM optimizer: name of the wrapped torch.optim class.
    base_optimizer = "AdamW"

    ######################
    # Scheduler #
    ######################
    scheduler_name = 'CosineAnnealingLR'
    scheduler_params = {
        "T_max": 6,
        "eta_min": 1e-6,
        "last_epoch": -1
    }

    ######################
    # Model #
    ######################
    model_name = "tf_efficientnet_b3_ns"
    pretrained = True
    target_size = 1


# ====================================================
# Data Loading
# ====================================================
def get_train_file_path(image_id):
    """Return the absolute path of a resized train JPEG for *image_id*."""
    return os.path.join(ROOT_DIR,
                        f"input/siim-covid19-resized-to-256px-jpg/train/{image_id}.jpg")


def get_test_file_path(image_id):
    """Return the absolute path of a resized test JPEG for *image_id*."""
    # BUG FIX: the second component must be relative. With a leading "/",
    # os.path.join discards ROOT_DIR entirely and the path never exists.
    return os.path.join(ROOT_DIR,
                        f"input/siim-covid19-resized-to-256px-jpg/test/{image_id}.jpg")


train = pd.read_csv(os.path.join(ROOT_DIR, 'input/siim-covid19-updated-train-labels/updated_train_labels.csv'))
# A full-image [0, 0, 1, 1] box encodes a "none" (no opacity) study -> label 0.
train['detection_label'] = train.apply(lambda row: 0 if row[[
    'xmin', 'ymin', 'xmax', 'ymax']].values.tolist() == [0, 0, 1, 1] else 1, axis=1)

# Skipped: this bbox conversion is slow and has no effect on the mAP metric.
# cols = ['xmin', 'ymin', 'xmax', 'ymax']
# for idx, (xmin, ymin, xmax, ymax, label) in enumerate(zip(train['frac_xmin'].to_numpy(),
#                                                           train['frac_ymin'].to_numpy(),
#                                                           train['frac_xmax'].to_numpy(),
#                                                           train['frac_ymax'].to_numpy(),
#                                                           train['detection_label'].to_numpy())):
#     if label == 0:
#         train.loc[idx, cols] = [0, 0, 1, 1]
#     else:
#         bbox = [xmin, ymin, xmax, ymax]
#         train.loc[idx, cols] = A.convert_bbox_from_albumentations(
#             bbox, 'pascal_voc', CFG.size, CFG.size)

test = pd.read_csv(os.path.join(ROOT_DIR, 'input/siim-covid19-updated-train-labels/updated_sample_submission.csv'))

train['filepath'] = train['id'].apply(get_train_file_path)
test['filepath'] = test['id'].apply(get_test_file_path)

if CFG.debug:
    CFG.epochs = 1
    # train = train.sample(n=1000, random_state=CFG.seed).reset_index(drop=True)


# ====================================================
# Utils
# ====================================================
def get_score(y_true, y_pred):
    """ROC-AUC of predictions against binary ground truth."""
    score = roc_auc_score(y_true, y_pred)
    return score


def get_result(result_df):
    """Log the ROC-AUC of the 'preds' column of an OOF dataframe."""
    preds = result_df['preds'].values
    labels = result_df[CFG.target_col].values
    score = get_score(labels, preds)
    LOGGER.info(f'Score: {score:<.5f}')


def get_annotations(df):
    """Columns expected by mean_average_precision_for_boxes() as annotations."""
    return df[['id', 'detection_label', 'xmin', 'ymin', 'xmax', 'ymax']]


def get_predictions(df, col):
    """Turn classifier scores in *col* into full-image 'none' detections.

    conf = 1 - p(opacity), i.e. the confidence that the image is negative;
    every prediction gets the fixed [0, 0, 1, 1] "none" box.
    """
    df_ = df.copy()
    df_ = df_[['id', col]]
    df_ = df_.rename(columns={col: 'conf'})
    df_['conf'] = df_['conf'].apply(lambda x: 1 - x)
    # df_['detection_label'] = df_['conf'].apply(lambda x: '0' if x > 0.5 else '1')
    # NOTE(review): axis=1 concat aligns on the index — callers are expected to
    # pass a frame with a default RangeIndex (train_loop resets it); verify.
    df_bbox = pd.DataFrame({
        'detection_label': ['0'] * len(df_),
        'xmin': [0] * len(df_),
        'ymin': [0] * len(df_),
        'xmax': [1] * len(df_),
        'ymax': [1] * len(df_),
    })
    df_ = pd.concat([df_, df_bbox], axis=1)
    return df_


def compute_overlap(boxes, query_boxes):
    """
    Args
        boxes: (N, 4) ndarray of float
        query_boxes: (4) ndarray of float
    Returns
        overlaps: (N) ndarray of IoU between each of *boxes* and *query_boxes*
    """
    N = boxes.shape[0]
    overlaps = np.zeros((N), dtype=np.float64)
    box_area = (
        (query_boxes[2] - query_boxes[0]) *
        (query_boxes[3] - query_boxes[1])
    )
    for n in range(N):
        iw = (
            min(boxes[n, 2], query_boxes[2]) -
            max(boxes[n, 0], query_boxes[0])
        )
        if iw > 0:
            ih = (
                min(boxes[n, 3], query_boxes[3]) -
                max(boxes[n, 1], query_boxes[1])
            )
            if ih > 0:
                # union area = area(box) + area(query) - intersection
                ua = np.float64(
                    (boxes[n, 2] - boxes[n, 0]) *
                    (boxes[n, 3] - boxes[n, 1]) +
                    box_area - iw * ih
                )
                overlaps[n] = iw * ih / ua
    return overlaps


def check_if_true_or_false_positive(annotations, detections, iou_threshold):
    """Classify each detection of one image/class as TP or FP.

    Returns (scores, false_positives, true_positives) as parallel lists,
    one entry per detection.
    """
    annotations = np.array(annotations, dtype=np.float64)
    scores = []
    false_positives = []
    true_positives = []
    # a GT box should be mapped to one predicted box at most.
    detected_annotations = []
    for d in detections:
        scores.append(d[4])
        if len(annotations) == 0:
            false_positives.append(1)
            true_positives.append(0)
            continue
        overlaps = compute_overlap(annotations, d[:4])
        assigned_annotation = np.argmax(overlaps)
        max_overlap = overlaps[assigned_annotation]
        if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
            false_positives.append(0)
            true_positives.append(1)
            detected_annotations.append(assigned_annotation)
        else:
            false_positives.append(1)
            true_positives.append(0)
    return scores, false_positives, true_positives


def _compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves.
    Code originally from https://github.com/rbgirshick/py-faster-rcnn.
    # Arguments
        recall:    The recall curve (list).
        precision: The precision curve (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """
    # correct AP calculation
    # first append sentinel values at the end
    mrec = np.concatenate(([0.], recall, [1.]))
    mpre = np.concatenate(([0.], precision, [0.]))

    # compute the precision envelope
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

    # to calculate area under PR curve, look for points
    # where X axis (recall) changes value
    i = np.where(mrec[1:] != mrec[:-1])[0]

    # and sum (\Delta recall) * prec
    ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap


def get_real_annotations(table):
    """Group ground-truth boxes as {image_id: {label: [[x1,y1,x2,y2], ...]}}."""
    res = dict()
    # BUG FIX: np.str was removed in NumPy 1.24; the builtin str is the
    # documented replacement.
    ids = table['id'].values.astype(str)
    labels = table['detection_label'].values.astype(str)
    xmin = table['xmin'].values.astype(np.float32)
    xmax = table['xmax'].values.astype(np.float32)
    ymin = table['ymin'].values.astype(np.float32)
    ymax = table['ymax'].values.astype(np.float32)
    for i in range(len(ids)):
        id = ids[i]
        label = labels[i]
        if id not in res:
            res[id] = dict()
        if label not in res[id]:
            res[id][label] = []
        box = [xmin[i], ymin[i], xmax[i], ymax[i]]
        res[id][label].append(box)
    return res


def get_detections(table):
    """Group detections as {image_id: {label: [[x1,y1,x2,y2,conf], ...]}}."""
    res = dict()
    # BUG FIX: np.str was removed in NumPy 1.24; use the builtin str.
    ids = table['id'].values.astype(str)
    labels = table['detection_label'].values.astype(str)
    scores = table['conf'].values.astype(np.float32)
    xmin = table['xmin'].values.astype(np.float32)
    xmax = table['xmax'].values.astype(np.float32)
    ymin = table['ymin'].values.astype(np.float32)
    ymax = table['ymax'].values.astype(np.float32)
    for i in range(len(ids)):
        id = ids[i]
        label = labels[i]
        if id not in res:
            res[id] = dict()
        if label not in res[id]:
            res[id][label] = []
        box = [xmin[i], ymin[i], xmax[i], ymax[i], scores[i]]
        res[id][label].append(box)
    return res


def mean_average_precision_for_boxes(ann, pred, iou_threshold=0.5, exclude_not_in_annotations=False, verbose=True):
    """
    :param ann: path to CSV-file with annotations or numpy array of shape (N, 6)
    :param pred: path to CSV-file with predictions (detections) or numpy array of shape (N, 7)
    :param iou_threshold: IoU between boxes which count as 'match'. Default: 0.5
    :param exclude_not_in_annotations: exclude image IDs which are not exist in annotations. Default: False
    :param verbose: print detailed run info. Default: True
    :return: tuple, where first value is mAP and second values is dict with AP for each class.
    """
    valid = pd.DataFrame(
        ann, columns=['id', 'detection_label', 'xmin', 'ymin', 'xmax', 'ymax'])
    preds = pd.DataFrame(
        pred, columns=['id', 'detection_label', 'conf', 'xmin', 'ymin', 'xmax', 'ymax'])
    ann_unique = valid['id'].unique()
    preds_unique = preds['id'].unique()

    if verbose:
        print('Number of files in annotations: {}'.format(len(ann_unique)))
        print('Number of files in predictions: {}'.format(len(preds_unique)))

    # Exclude files not in annotations!
    if exclude_not_in_annotations:
        preds = preds[preds['id'].isin(ann_unique)]
        preds_unique = preds['id'].unique()
        if verbose:
            print('Number of files in detection after reduction: {}'.format(
                len(preds_unique)))

    # BUG FIX: np.str was removed in NumPy 1.24; use the builtin str.
    unique_classes = valid['detection_label'].unique().astype(str)
    if verbose:
        print('Unique classes: {}'.format(len(unique_classes)))

    all_detections = get_detections(preds)
    all_annotations = get_real_annotations(valid)
    if verbose:
        print('Detections length: {}'.format(len(all_detections)))
        print('Annotations length: {}'.format(len(all_annotations)))

    average_precisions = {}
    for zz, label in enumerate(sorted(unique_classes)):

        # Negative class
        if str(label) == 'nan':
            continue

        false_positives = []
        true_positives = []
        scores = []
        num_annotations = 0.0

        for i in range(len(ann_unique)):
            detections = []
            annotations = []
            id = ann_unique[i]
            if id in all_detections:
                if label in all_detections[id]:
                    detections = all_detections[id][label]
            if id in all_annotations:
                if label in all_annotations[id]:
                    annotations = all_annotations[id][label]

            if len(detections) == 0 and len(annotations) == 0:
                continue

            num_annotations += len(annotations)

            scr, fp, tp = check_if_true_or_false_positive(
                annotations, detections, iou_threshold)
            scores += scr
            false_positives += fp
            true_positives += tp

        if num_annotations == 0:
            # BUG FIX: store a 4-tuple like the normal path — the summary loop
            # below unpacks (ap, num_annotations, precision, recall) and a
            # 2-tuple raised ValueError for classes without annotations.
            average_precisions[label] = 0, 0, [], []
            continue

        false_positives = np.array(false_positives)
        true_positives = np.array(true_positives)
        scores = np.array(scores)

        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]

        # compute cumulative false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)

        # compute recall and precision
        recall = true_positives / num_annotations
        precision = true_positives / \
            np.maximum(true_positives + false_positives,
                       np.finfo(np.float64).eps)

        # compute average precision
        average_precision = _compute_ap(recall, precision)
        average_precisions[label] = average_precision, num_annotations, precision, recall

        if verbose:
            s1 = "{:30s} | {:.6f} | {:7d}".format(
                label, average_precision, int(num_annotations))
            print(s1)

    present_classes = 0
    precision = 0
    for label, (average_precision, num_annotations, _, _) in average_precisions.items():
        if num_annotations > 0:
            present_classes += 1
            precision += average_precision
    mean_ap = precision / present_classes
    if verbose:
        print('mAP: {:.6f}'.format(mean_ap))
    return mean_ap, average_precisions


def init_logger(log_file=os.path.join(OUTPUT_DIR, 'train.log')):
    """Create a logger that mirrors messages to stdout and *log_file*."""
    from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler
    logger = getLogger(__name__)
    logger.setLevel(INFO)
    handler1 = StreamHandler()
    handler1.setFormatter(Formatter("%(message)s"))
    handler2 = FileHandler(filename=log_file)
    handler2.setFormatter(Formatter("%(message)s"))
    logger.addHandler(handler1)
    logger.addHandler(handler2)
    return logger


LOGGER = init_logger()


def seed_torch(seed=42):
    """Seed python, numpy and torch RNGs for (mostly) deterministic runs."""
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
seed_torch(seed=CFG.seed)


def get_device() -> torch.device:
    """Return the CUDA device when available, else CPU."""
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')


device = get_device()

# ====================================================
# CV Split
# ====================================================
folds = train.copy()
# look up the splitter class (e.g. StratifiedKFold) on sklearn.model_selection by name
Fold = getattr(model_selection, CFG.split_name)(**CFG.split_params)
for n, (train_index, valid_index) in enumerate(Fold.split(folds, folds[CFG.target_col])):
    folds.loc[valid_index, 'fold'] = int(n)
folds['fold'] = folds['fold'].astype(int)
print(folds.groupby(['fold', CFG.target_col]).size())


# ====================================================
# Transform
# ====================================================
def get_transforms(*, data):
    """Return the augmentation pipeline for ``data`` in {'train', 'valid'}."""
    if data == 'train':
        return Compose([
            Resize(CFG.size, CFG.size),
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            ToTensorV2(),
        ])
    elif data == 'valid':
        return Compose([
            Resize(CFG.size, CFG.size),
            ToTensorV2(),
        ])


# ====================================================
# Dataset
# ====================================================
class SiimDataset(Dataset):
    """Reads an image from the dataframe's 'filepath' column plus its target."""

    def __init__(self, df=None, transform=None):
        self.df = df
        self.transform = transform

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        filepath = self.df.loc[idx, 'filepath']
        image = cv2.imread(filepath)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if self.transform:
            image = self.transform(image=image)['image']
        label = torch.tensor(self.df.loc[idx, CFG.target_col])
        return image.float(), label.float()


# ====================================================
# Scheduler
# ====================================================
def get_scheduler(optimizer=None):
    """Instantiate the LR scheduler named in CFG, or None when disabled."""
    if CFG.scheduler_name is None:
        return
    return getattr(optim.lr_scheduler, CFG.scheduler_name)(optimizer, **CFG.scheduler_params)


def scheduler_step(scheduler=None, avg_val_loss=None):
    # ReduceLROnPlateau needs the validation metric; the cosine schedulers don't.
    if isinstance(scheduler, ReduceLROnPlateau):
        scheduler.step(avg_val_loss)
    elif isinstance(scheduler, CosineAnnealingLR):
        scheduler.step()
    elif isinstance(scheduler, CosineAnnealingWarmRestarts):
        scheduler.step()


# ====================================================
# Criterion
# ====================================================
def get_criterion():
    """Instantiate the loss named in CFG from torch.nn."""
    if hasattr(nn, CFG.loss_name):
        return getattr(nn, CFG.loss_name)(**CFG.loss_params)
    raise NotImplementedError


# ====================================================
# Optimizer
# ====================================================
def get_optimizer(model: nn.Module):
    """Instantiate the optimizer named in CFG (SAM wraps a base optimizer)."""
    if CFG.optimizer_name == 'SAM':
        base_optimizer = getattr(optim, CFG.base_optimizer_name)
        return SAM(model.parameters(), base_optimizer, **CFG.optimizer_params)
    if hasattr(optim, CFG.optimizer_name):
        return getattr(optim, CFG.optimizer_name)(model.parameters(), **CFG.optimizer_params)
    raise NotImplementedError


# ====================================================
# Model
# ====================================================
class CustomEfficientNet(nn.Module):
    """timm backbone with the classifier head replaced for CFG.target_size outputs."""

    def __init__(self, model_name=CFG.model_name, pretrained=False):
        super().__init__()
        self.model = timm.create_model(model_name, pretrained=pretrained)
        n_features = self.model.classifier.in_features
        self.model.classifier = nn.Linear(n_features, CFG.target_size)

    def forward(self, x):
        x = self.model(x)
        # BUGFIX: squeeze only the class dimension; a bare torch.squeeze()
        # also collapses a batch dimension of size 1 down to a 0-d tensor,
        # which breaks np.concatenate over per-batch predictions.
        return x.squeeze(-1)


# ====================================================
# Helper functions
# ====================================================
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def asMinutes(s):
    """Format a duration in seconds as 'Xm Ys'."""
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)


def timeSince(since, percent):
    """Elapsed time since ``since`` plus estimated remaining time at ``percent`` progress."""
    now = time.time()
    s = now - since
    es = s / (percent)
    rs = es - s
    return '%s (remain %s)' % (asMinutes(s), asMinutes(rs))


def train_fn(train_loader, model, criterion, optimizer, epoch, scheduler, device):
    """One training epoch; returns the average loss."""
    scaler = GradScaler(enabled=CFG.use_amp)
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    # switch to train mode
    model.train()
    start = end = time.time()
    global_step = 0
    for step, (images, labels) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        images = images.to(device)
        labels = labels.to(device)
        batch_size = labels.size(0)
        with autocast(enabled=CFG.use_amp):
            y_preds = model(images)
            loss = criterion(y_preds, labels)
        # record loss
        losses.update(loss.item(), batch_size)
        if CFG.gradient_accumulation_steps > 1:
            loss = loss / CFG.gradient_accumulation_steps
        scaler.scale(loss).backward()
        # NOTE(review): with AMP enabled the gradients are still *scaled*
        # here, so the clip threshold applies to scaled values — consider
        # scaler.unscale_(optimizer) before clipping. Kept as-is to preserve
        # existing training behavior.
        grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), CFG.max_grad_norm)
        if (step + 1) % CFG.gradient_accumulation_steps == 0:
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()
            global_step += 1
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if step % CFG.print_freq == 0 or step == (len(train_loader) - 1):
            print('Epoch: [{0}][{1}/{2}] '
                  'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                  'Elapsed {remain:s} '
                  'Loss: {loss.val:.4f}({loss.avg:.4f}) '
                  'Grad: {grad_norm:.4f} '
                  'LR: {lr:.6f} '
                  .format(epoch + 1, step, len(train_loader),
                          data_time=data_time, loss=losses,
                          remain=timeSince(start, float(step + 1) / len(train_loader)),
                          grad_norm=grad_norm,
                          # get_lr() is deprecated and misleading outside
                          # step(); get_last_lr() is the public API.
                          lr=scheduler.get_last_lr()[0],
                          ))
    return losses.avg


def valid_fn(valid_loader, model, criterion, device):
    """One validation pass; returns (average loss, sigmoid predictions)."""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    # switch to evaluation mode
    model.eval()
    preds = []
    start = end = time.time()
    for step, (images, labels) in enumerate(valid_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        images = images.to(device)
        labels = labels.to(device)
        batch_size = labels.size(0)
        # compute loss
        with torch.no_grad():
            y_preds = model(images)
            loss = criterion(y_preds, labels)
        losses.update(loss.item(), batch_size)
        # record predictions
        preds.append(y_preds.sigmoid().to('cpu').numpy())
        # (the original divided `loss` by gradient_accumulation_steps here,
        # but the value was never used afterwards — dead code removed)
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if step % CFG.print_freq == 0 or step == (len(valid_loader) - 1):
            print('EVAL: [{0}/{1}] '
                  'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                  'Elapsed {remain:s} '
                  'Loss: {loss.val:.4f}({loss.avg:.4f}) '
                  .format(step, len(valid_loader),
                          data_time=data_time, loss=losses,
                          remain=timeSince(start, float(step + 1) / len(valid_loader)),
                          ))
    predictions = np.concatenate(preds)
    return losses.avg, predictions


# ====================================================
# Train loop
# ====================================================
def train_loop(folds, fold):
    """Train one CV fold end-to-end; returns the fold's dataframe with OOF preds."""
    LOGGER.info(f'========== fold: {fold} training ==========')

    # ====================================================
    # loader
    # ====================================================
    train_index = folds[folds['fold'] != fold].index
    valid_index = folds[folds['fold'] == fold].index

    train_folds = folds.loc[train_index].reset_index(drop=True)
    valid_folds = folds.loc[valid_index].reset_index(drop=True)

    train_dataset = SiimDataset(train_folds, transform=get_transforms(data='train'))
    valid_dataset = SiimDataset(valid_folds, transform=get_transforms(data='valid'))

    train_loader = DataLoader(train_dataset, **CFG.loader_params['train'])
    valid_loader = DataLoader(valid_dataset, **CFG.loader_params['valid'])

    # ====================================================
    # model
    # ====================================================
    model = CustomEfficientNet(CFG.model_name, pretrained=CFG.pretrained)
    model.to(device)

    criterion = get_criterion()
    optimizer = get_optimizer(model)
    scheduler = get_scheduler(optimizer)

    # ====================================================
    # loop
    # ====================================================
    best_score = -np.inf

    for epoch in range(CFG.epochs):
        start_time = time.time()

        # train
        avg_loss = train_fn(train_loader, model, criterion, optimizer, epoch, scheduler, device)

        # eval
        avg_val_loss, preds = valid_fn(valid_loader, model, criterion, device)
        # BUGFIX: pass the validation loss through — ReduceLROnPlateau.step()
        # requires the metric and the original call dropped it (passing None).
        scheduler_step(scheduler, avg_val_loss)

        # scoring
        valid_folds['preds'] = preds
        annotations = get_annotations(valid_folds)
        predictions = get_predictions(valid_folds, col='preds')
        mAP, AP = mean_average_precision_for_boxes(
            annotations, predictions, iou_threshold=0.5, exclude_not_in_annotations=False, verbose=False)
        score = AP['0'][0]

        elapsed = time.time() - start_time

        LOGGER.info(f'Epoch {epoch+1} - avg_train_loss: {avg_loss:.4f} avg_val_loss: {avg_val_loss:.4f} time: {elapsed:.0f}s')
        LOGGER.info(f'Epoch {epoch+1} - mAP: {score}')

        if score > best_score:
            best_score = score
            LOGGER.info(f'Epoch {epoch+1} - Save Best Score: {best_score:.4f} Model')
            torch.save({'model': model.state_dict(), 'preds': preds},
                       os.path.join(OUTPUT_DIR, f'{CFG.model_name}_fold{fold}_best.pth')
                       )

    check_point = torch.load(os.path.join(OUTPUT_DIR, f'{CFG.model_name}_fold{fold}_best.pth'))
    valid_folds['preds'] = check_point['preds']

    del model, optimizer, scheduler
    gc.collect()
    torch.cuda.empty_cache()

    return valid_folds


# ====================================================
# main
# ====================================================
def main():
    """
    Prepare: 1.train 2.test 3.submission 4.folds
    """
    if CFG.train:
        # train
        oof_df = pd.DataFrame()
        for fold in range(CFG.n_fold):
            if fold in CFG.trn_fold:
                _oof_df = train_loop(folds, fold)
                oof_df = pd.concat([oof_df, _oof_df])
                LOGGER.info(f'========== fold: {fold} result ==========')
                annotations = get_annotations(_oof_df)
                predictions = get_predictions(_oof_df, col='preds')
                mAP, AP = mean_average_precision_for_boxes(
                    annotations, predictions, iou_threshold=0.5, exclude_not_in_annotations=False, verbose=False)
                LOGGER.info(f"Class none: {AP['0'][0]:.4f}")
        # CV result
        if len(CFG.trn_fold) != 1:
            LOGGER.info('========== CV ==========')
            # BUGFIX: score the concatenated out-of-fold predictions, not
            # `_oof_df` (which only holds the last fold).
            annotations = get_annotations(oof_df)
            predictions = get_predictions(oof_df, col='preds')
            mAP, AP = mean_average_precision_for_boxes(annotations, predictions, iou_threshold=0.5,
                                                       exclude_not_in_annotations=False, verbose=False)
            LOGGER.info(f"Class none: {AP['0'][0]:.4f}")
        # save result
        oof_df.to_csv(os.path.join(OUTPUT_DIR, 'oof_df.csv'), index=False)


if __name__ == '__main__':
    main()
from tap_slack.streams.base import BaseStream

import singer
import datetime
import time

LOGGER = singer.get_logger()  # noqa


class AccessLogsStream(BaseStream):
    """Replicates Slack team access logs, paging backwards in time."""

    API_METHOD = 'team_accessLogs'
    TABLE = 'access_logs'
    KEY_PROPERTIES = ['user_id', 'ip', 'user_agent']
    TIMEOUT = 3

    def response_key(self):
        return 'logins'

    def get_params(self):
        return {
            "count": 100,
        }

    def transform_record(self, record):
        transformed = super().transform_record(record)
        # Anonymize IPs by dropping the last octet
        if self.config.get('anonymize_ips', True):
            ip = transformed.get('ip', '')
            parts = ip.split('.')
            # BUGFIX: guard against empty/malformed ips — the original turned
            # an empty string into the literal value 'x'.
            if len(parts) > 1:
                parts[-1] = 'x'
                transformed['ip'] = '.'.join(parts)
        return transformed

    def sync_paginated(self, params):
        table = self.TABLE
        params = {
            "count": 1000,
            "page": 1,
            "before": "now",
        }
        oldest, latest = self.get_lookback()
        stop_at_timestamp = oldest

        # This endpoint unfortunately doesn't allow us to query _forward_ in
        # time, only backwards. So, we must just start at today and work our
        # way backwards until we reach some stopping point. This endpoint
        # orders access logs by date_first, which is the _first_ time that a
        # user was seen at a given ip/user-agent. Since we're looking
        # backwards in time, there might be some very old records whose
        # date_last is continually updated, but we unfortunately are not able
        # to capture this information with the current capabilities of the
        # Slack API. When using the data replicated by this stream, be sure
        # to remember that the date_first will be accurate but the date_last
        # will be misleading and incorrect. For perfect information, set the
        # "lookback_days" value to some very large number of days to capture
        # all historical access log information.
        while True:
            response = self.client.make_request(self.API_METHOD, params, self.TIMEOUT)
            transformed = self.get_stream_data(response)

            if len(transformed) == 0:
                # BUGFIX: the f-string reused double quotes inside a
                # double-quoted literal — a SyntaxError before Python 3.12.
                LOGGER.info(f"No more data available - stopping at {params['before']}")
                break
            else:
                min_date_first = transformed[-1]['date_first']

                with singer.metrics.record_counter(endpoint=table) as counter:
                    singer.write_records(table, transformed)
                    counter.increment(len(transformed))

                params['before'] = min_date_first

                remaining = response.get('paging', {}).get('pages')
                LOGGER.info(f"{remaining} pages remaining")

                if min_date_first < stop_at_timestamp:
                    LOGGER.info(f"Exceeded stop_at_timestamp - stopping at {params['before']}")
                    break
                else:
                    self.log_progress(oldest, min_date_first)
from tap_slack.streams.base import BaseStream

import singer
import datetime
import time

LOGGER = singer.get_logger()  # noqa


class AccessLogsStream(BaseStream):
    """Replicates Slack team access logs, paging backwards in time."""

    API_METHOD = 'team_accessLogs'
    TABLE = 'access_logs'
    KEY_PROPERTIES = ['user_id', 'ip', 'user_agent']
    TIMEOUT = 3

    def response_key(self):
        return 'logins'

    def get_params(self):
        return {
            "count": 100,
        }

    def transform_record(self, record):
        transformed = super().transform_record(record)
        # Anonymize IPs by dropping the last octet
        if self.config.get('anonymize_ips', True):
            ip = transformed.get('ip', '')
            parts = ip.split('.')
            # BUGFIX: guard against empty/malformed ips — the original turned
            # an empty string into the literal value 'x'.
            if len(parts) > 1:
                parts[-1] = 'x'
                transformed['ip'] = '.'.join(parts)
        return transformed

    def sync_paginated(self, params):
        table = self.TABLE
        params = {
            "count": 1000,
            "page": 1,
            "before": "now",
        }
        oldest, latest = self.get_lookback()
        stop_at_timestamp = oldest

        # This endpoint unfortunately doesn't allow us to query _forward_ in
        # time, only backwards. So, we must just start at today and work our
        # way backwards until we reach some stopping point. This endpoint
        # orders access logs by date_first, which is the _first_ time that a
        # user was seen at a given ip/user-agent. Since we're looking
        # backwards in time, there might be some very old records whose
        # date_last is continually updated, but we unfortunately are not able
        # to capture this information with the current capabilities of the
        # Slack API. When using the data replicated by this stream, be sure
        # to remember that the date_first will be accurate but the date_last
        # will be misleading and incorrect. For perfect information, set the
        # "lookback_days" value to some very large number of days to capture
        # all historical access log information.
        while True:
            response = self.client.make_request(self.API_METHOD, params, self.TIMEOUT)
            transformed = self.get_stream_data(response)

            if len(transformed) == 0:
                LOGGER.info(f"No more data available - stopping at {params['before']}")
                break
            else:
                min_date_first = transformed[-1]['date_first']

                with singer.metrics.record_counter(endpoint=table) as counter:
                    singer.write_records(table, transformed)
                    counter.increment(len(transformed))

                params['before'] = min_date_first

                remaining = response.get('paging', {}).get('pages')
                LOGGER.info(f"{remaining} pages remaining")

                if min_date_first < stop_at_timestamp:
                    LOGGER.info(f"Exceeded stop_at_timestamp - stopping at {params['before']}")
                    break
                else:
                    self.log_progress(oldest, min_date_first)
__version__ = '1.0.0' import csv import datetime import importlib from abc import abstractmethod from collections import defaultdict from typing import Dict, List, Set import numpy as np import pandas as pd from dateutil import relativedelta as rdelta import logging from functools import partial import calendar from openpyxl import Workbook from scipy.interpolate import interp1d import json from typing import Callable __author__ = 'schien' # import pkg_resources # part of setuptools # version = pkg_resources.require("excel-modelling-helper")[0].version param_name_map_v1 = {'variable': 'name', 'scenario': 'source_scenarios_string', 'module': 'module_name', 'distribution': 'distribution_name', 'param 1': 'param_a', 'param 2': 'param_b', 'param 3': 'param_c', 'unit': '', 'CAGR': 'cagr', 'ref date': 'ref_date', 'label': '', 'tags': '', 'comment': '', 'source': ''} param_name_map_v2 = {'CAGR': 'cagr', 'comment': '', 'label': '', 'mean growth': 'growth_factor', 'param': '', 'ref date': 'ref_date', 'ref value': '', 'scenario': 'source_scenarios_string', 'source': '', 'tags': '', 'type': '', 'unit': '', 'variability growth': 'ef_growth_factor', 'initial_value_proportional_variation': '', 'variable': 'name'} param_name_maps = {1: param_name_map_v1, 2: param_name_map_v2} # logger.basicConfig(level=logger.DEBUG) logger = logging.getLogger(__name__) class DistributionFunctionGenerator(object): module: str distribution: str param_a: str param_b: str param_c: str def __init__(self, module_name=None, distribution_name=None, param_a: float = None, param_b: float = None, param_c: float = None, size=None, **kwargs): """ Instantiate a new object. 
:param module_name: :param distribution_name: :param param_a: :param param_b: :param param_c: :param size: :param kwargs: can contain key "sample_mean_value" with bool value """ self.kwargs = kwargs self.size = size self.module_name = module_name self.distribution_name = distribution_name self.sample_mean_value = kwargs.get('sample_mean_value', False) # prepare function arguments if distribution_name == 'choice': if type(param_a) == str: tokens = param_a.split(',') params = [float(token.strip()) for token in tokens] self.random_function_params = [np.array(params, dtype=np.float)] else: self.random_function_params = [np.array([i for i in [param_a, param_b, param_c] if i], dtype=np.float)] logger.debug(f'setting function params for choice distribution {self.random_function_params}') else: self.random_function_params = [i for i in [param_a, param_b, param_c] if i not in [None, ""]] def get_mean(self, distribution_function): """Get the mean value for a distribution. If the distribution function is [normal, uniform,choice,triangular] the analytic value is being calculted. Else, the distribution is instantiated and then the mean is being calculated. :param distribution_function: :return: the mean as a scalar """ name = self.distribution_name params = self.random_function_params if name == 'normal': return params[0] if name == 'uniform': return (params[0] + params[1]) / 2. if name == 'choice': return params[0].mean() if name == 'triangular': return (params[0] + params[1] + params[2]) / 3. return distribution_function().mean() def generate_values(self, *args, **kwargs): """ Generate a sample of values by sampling from a distribution. The size of the sample can be overriden with the 'size' kwarg. If `self.sample_mean_value == True` the sample will contain "size" times the mean value. 
:param args: :param kwargs: :return: sample as vector of given size """ sample_size = kwargs.get('size', self.size) f = self.instantiate_distribution_function(self.module_name, self.distribution_name) distribution_function = partial(f, *self.random_function_params, size=sample_size) if self.sample_mean_value: sample = np.full(sample_size, self.get_mean(distribution_function)) else: sample = distribution_function() return sample @staticmethod def instantiate_distribution_function(module_name, distribution_name): module = importlib.import_module(module_name) func = getattr(module, distribution_name) return func class Parameter(object): """ A single parameter """ version: int name: str unit: str comment: str source: str scenario: str processes: Dict[str, List] "optional comma-separated list of tags" tags: str def __init__(self, name, tags=None, source_scenarios_string: str = None, unit: str = None, comment: str = None, source: str = None, version=None, **kwargs): self.version = version # The source definition of scenarios. A comma-separated list self.source = source self.comment = comment self.unit = unit self.source_scenarios_string = source_scenarios_string self.tags = tags self.name = name self.scenario = None self.cache = None # track the usages of this parameter per process as a list of process-specific variable names that are backed by this parameter self.processes = defaultdict(list) self.kwargs = kwargs def __call__(self, settings=None, *args, **kwargs): """ Samples from a parameter. Values are cached and returns the same value every time called. @todo confusing interface that accepts 'settings' and kwargs at the same time. 
worse- 'use_time_series' must be present in the settings dict :param args: :param kwargs: :return: """ if self.cache is None: kwargs['name'] = self.name kwargs['unit'] = self.unit kwargs['tags'] = self.tags kwargs['scenario'] = self.scenario if not settings: settings = {} common_args = {'size': settings.get('sample_size', 1), 'sample_mean_value': settings.get('sample_mean_value', False), 'with_pint_units': settings.get('with_pint_units', False) } common_args.update(**self.kwargs) if settings.get('use_time_series', False): if self.version == 2: generator = GrowthTimeSeriesGenerator(**common_args, times=settings['times']) else: generator = ConstantUncertaintyExponentialGrowthTimeSeriesGenerator(**common_args, times=settings['times']) else: generator = DistributionFunctionGenerator(**common_args) self.cache = generator.generate_values(*args, **kwargs) return self.cache def add_usage(self, process_name, variable_name): # add the name of a variable of a process model that is backed by this parameter self.processes[process_name].append(variable_name) class GrowthTimeSeriesGenerator(DistributionFunctionGenerator): ref_date: str # of the mean values # the type of growth ['exp'] # growth_function_type: str # of the error function variance: str # error function growth rate ef_growth_factor: str def __init__(self, times=None, size=None, index_names=None, ref_date=None, with_pint_units=False, *args, **kwargs): super().__init__(*args, **kwargs) self.ref_date = ref_date if ref_date else None self.with_pint_units = with_pint_units if self.with_pint_units: import pint self.times = times self.size = size iterables = [times, range(0, size)] self._multi_index = pd.MultiIndex.from_product(iterables, names=index_names) assert type(times.freq) == pd.tseries.offsets.MonthBegin, 'Time index must have monthly frequency' def generate_values(self, *args, **kwargs): """ Instantiate a random variable and apply annual growth factors. :return: """ assert 'ref value' in self.kwargs # 1. 
Generate $\mu$ start_date = self.times[0].to_pydatetime() end_date = self.times[-1].to_pydatetime() ref_date = self.ref_date if not ref_date: raise Exception(f"Ref date not set for variable {kwargs["name"]}") mu = self.generate_mu(end_date, ref_date, start_date) # 3. Generate $\sigma$ # Prepare array with growth values $\sigma$ if self.sample_mean_value: sigma = np.zeros((len(self.times), self.size)) else: if self.kwargs['type'] == 'interp': def get_date(record): return datetime.datetime.strptime(record[0], "%Y-%m-%d") ref_value_ = sorted(json.loads(self.kwargs['ref value'].strip()).items(), key=get_date) intial_value = ref_value_[0][1] else: intial_value = float(self.kwargs['ref value']) variability_ = intial_value * self.kwargs['initial_value_proportional_variation'] logger.debug(f'sampling random distribution with parameters -{variability_}, 0, {variability_}') sigma = np.random.triangular(-1 * variability_, 0, variability_, (len(self.times), self.size)) # logger.debug(ref_date.strftime("%b %d %Y")) # 4. Prepare growth array for $\alpha_{sigma}$ alpha_sigma = growth_coefficients(start_date, end_date, ref_date, self.kwargs['ef_growth_factor'], 1) # 5. 
Prepare DataFrame iterables = [self.times, range(self.size)] index_names = ['time', 'samples'] _multi_index = pd.MultiIndex.from_product(iterables, names=index_names) # logger.debug(start_date) # logger.debug(end_date) from dateutil import relativedelta r = relativedelta.relativedelta(end_date, start_date) months = r.years * 12 + r.months + 1 name = kwargs['name'] # Apply growth to $\sigma$ and add $\sigma$ to $\mu$ # logger.debug(sigma.size) # logger.debug(alpha_sigma.shape) # logger.debug(months) if self.with_pint_units: unit_ = kwargs["unit"] if not unit_: unit_ = 'dimensionless' dtype = f'pint[{unit_}]' else: dtype = 'float64' series = pd.Series(((sigma * alpha_sigma) + mu.reshape(months, 1)).ravel(), index=_multi_index, dtype=dtype) # test if df has sub-zero values df_sigma__dropna = series.where(series < 0).dropna() if self.with_pint_units: _values = df_sigma__dropna.pint.m else: _values = df_sigma__dropna if not _values.empty: logger.warning(f"Negative values for parameter {name} from {df_sigma__dropna.index[0][0]}") return series def generate_mu(self, end_date, ref_date, start_date): if self.kwargs['type'] == 'exp': mu_bar = np.full(len(self.times), float(self.kwargs['ref value'])) # 2. 
Apply Growth to Mean Values $\alpha_{mu}$ alpha_mu = growth_coefficients(start_date, end_date, ref_date, self.kwargs['growth_factor'], 1) mu = mu_bar * alpha_mu.ravel() mu = mu.reshape(len(self.times), 1) return mu if self.kwargs['type'] == 'interp': def toTimestamp(d): return calendar.timegm(d.timetuple()) def interpolate(growth_config: Dict[str, float], date_range, kind='linear'): arr1 = np.array([toTimestamp(datetime.datetime.strptime(date_val, '%Y-%m-%d')) for date_val in growth_config.keys()]) arr2 = np.array([val for val in growth_config.values()]) f = interp1d(arr1, arr2, kind=kind, fill_value='extrapolate') return f([toTimestamp(date_val) for date_val in date_range]) ref_value_ = json.loads(self.kwargs['ref value'].strip()) return interpolate(ref_value_, self.times, self.kwargs['param']) class ConstantUncertaintyExponentialGrowthTimeSeriesGenerator(DistributionFunctionGenerator): cagr: str ref_date: str def __init__(self, cagr=None, times=None, size=None, index_names=None, ref_date=None, with_pint_units=False, *args, **kwargs): super().__init__(*args, **kwargs) self.cagr = cagr if cagr else 0 self.ref_date = ref_date if ref_date else None self.with_pint_units = with_pint_units if self.with_pint_units: import pint self.times = times self.size = size iterables = [times, range(0, size)] self._multi_index = pd.MultiIndex.from_product(iterables, names=index_names) assert type(times.freq) == pd.tseries.offsets.MonthBegin, 'Time index must have monthly frequency' def generate_values(self, *args, **kwargs): """ Instantiate a random variable and apply annual growth factors. :return: """ values = super().generate_values(*args, **kwargs, size=(len(self.times) * self.size,)) alpha = self.cagr # @todo - fill to cover the entire time: define rules for filling first ref_date = self.ref_date if self.ref_date else self.times[0].to_pydatetime() # assert ref_date >= self.times[0].to_pydatetime(), 'Ref date must be within variable time span.' 
# assert ref_date <= self.times[-1].to_pydatetime(), 'Ref date must be within variable time span.' start_date = self.times[0].to_pydatetime() end_date = self.times[-1].to_pydatetime() a = growth_coefficients(start_date, end_date, ref_date, alpha, self.size) x = a.ravel() values = np.multiply(values, x) # df = pd.DataFrame(values) # df.columns = [kwargs['name']] # df.set_index(self._multi_index, inplace=True) # # @todo this is a hack to return a series with index as I don't know how to set an index and rename a series # data_series = df.iloc[:, 0] # data_series._metadata = kwargs # data_series.index.rename(['time', 'samples'], inplace=True) # if self.with_pint_units: if not kwargs["unit"]: dtype = 'pint[dimensionless]' else: dtype = f'pint[{kwargs['unit']}]' else: dtype = 'float64' series = pd.Series(values, index=self._multi_index, dtype=dtype) return series def growth_coefficients(start_date, end_date, ref_date, alpha, samples): """ Build a matrix of growth factors according to the CAGR formula y'=y0 (1+a)^(t'-t0). 
a growth rate alpha t0 start date t' end date y' output y0 start value """ start_offset = 0 if ref_date < start_date: offset_delta = rdelta.relativedelta(start_date, ref_date) start_offset = offset_delta.months + 12 * offset_delta.years start_date = ref_date end_offset = 0 if ref_date > end_date: offset_delta = rdelta.relativedelta(ref_date, end_date) end_offset = offset_delta.months + 12 * offset_delta.years end_date = ref_date delta_ar = rdelta.relativedelta(ref_date, start_date) ar = delta_ar.months + 12 * delta_ar.years delta_br = rdelta.relativedelta(end_date, ref_date) br = delta_br.months + 12 * delta_br.years # we place the ref point on the lower interval (delta_ar + 1) but let it start from 0 # in turn we let the upper interval start from 1 g = np.fromfunction(lambda i, j: np.power(1 - alpha, np.abs(i) / 12), (ar + 1, samples), dtype=float) h = np.fromfunction(lambda i, j: np.power(1 + alpha, np.abs(i + 1) / 12), (br, samples), dtype=float) g = np.flipud(g) # now join the two arrays a = np.vstack((g, h)) if start_offset > 0: a = a[start_offset:] if end_offset > 0: a = a[:-end_offset] return a class ParameterScenarioSet(object): """ The set of all version of a parameter for all the scenarios. """ default_scenario = 'default' "the name of the parameters in this set" parameter_name: str scenarios: Dict[str, Parameter] def __init__(self): self.scenarios = {} def add_scenario(self, parameter: 'Parameter', scenario_name: str = default_scenario): """ Add a scenario for this parameter. :param scenario_name: :param parameter: :return: """ self.scenarios[scenario_name] = parameter def __getitem__(self, item): return self.scenarios.__getitem__(item) def __setitem__(self, key, value): return self.scenarios.__setitem__(key, value) class ParameterRepository(object): """ Contains all known parameter definitions (so that it is not necessary to re-read the excel file for repeat param accesses). 
The param definitions are independent from the sampling (the Param.__call__ method). Repeat access to __call__ will create new samples. Internally, parameters are organised together with all the scenario variants in a single ParameterScenarioSet. """ parameter_sets: Dict[str, ParameterScenarioSet] tags: Dict[str, Dict[str, Set[Parameter]]] def __init__(self): self.parameter_sets = defaultdict(ParameterScenarioSet) self.tags = defaultdict(lambda: defaultdict(set)) def add_all(self, parameters: List[Parameter]): for p in parameters: self.add_parameter(p) def clear_cache(self): for p_sets in self.parameter_sets.values(): for param_name, param in p_sets.scenarios.items(): param.cache = None def add_parameter(self, parameter: Parameter): """ A parameter can have several scenarios. They are specified as a comma-separated list in a string. :param parameter: :return: """ # try reading the scenarios from the function arg or from the parameter attribute scenario_string = parameter.source_scenarios_string if scenario_string: _scenarios = [i.strip() for i in scenario_string.split(',')] self.fill_missing_attributes_from_default_parameter(parameter) else: _scenarios = [ParameterScenarioSet.default_scenario] for scenario in _scenarios: parameter.scenario = scenario self.parameter_sets[parameter.name][scenario] = parameter # record all tags for this parameter if parameter.tags: _tags = [i.strip() for i in parameter.tags.split(',')] for tag in _tags: self.tags[tag][parameter.name].add(parameter) def fill_missing_attributes_from_default_parameter(self, param): """ Empty fields in Parameter definitions in scenarios are populated with default values. E.g. in the example below, the source for the Power_TV variable in the 8K scenario would also be EnergyStar. 
+----------+----------+-----+--------+------------+ | name | scenario | val | tags | source | +----------+----------+-----+--------+------------+ | Power_TV | | 60 | UD, TV | EnergyStar | | Power_TV | 8K | 85 | new_tag| | +----------+----------+-----+--------+------------+ **Note** tags must not differ. In the example above, the 8K scenario variable the tags value would be overwritten with the default value. :param param: :return: """ if not self.exists(param.name) or ParameterScenarioSet.default_scenario not in self.parameter_sets[ param.name].scenarios.keys(): logger.warning( f'No default value for param {param.name} found.') return default = self.parameter_sets[param.name][ParameterScenarioSet.default_scenario] for att_name, att_value in default.__dict__.items(): if att_name in ['unit', 'label', 'comment', 'source', 'tags']: if att_name == 'tags' and default.tags != param.tags: logger.warning( f'For param {param.name} for scenarios {param.source_scenarios_string}, ' f'tags is different from default parameter tags. 
Overwriting with default values.') setattr(param, att_name, att_value) if not getattr(param, att_name): logger.debug( f'For param {param.name} for scenarios {param.source_scenarios_string}, ' f'populating attribute {att_name} with value {att_value} from default parameter.') setattr(param, att_name, att_value) def __getitem__(self, item) -> Parameter: """ Return the default scenario parameter for a given variable name :param item: the name of the variable :return: """ return self.get_parameter(item, scenario_name=ParameterScenarioSet.default_scenario) def get_parameter(self, param_name, scenario_name=ParameterScenarioSet.default_scenario) -> Parameter: if self.exists(param_name, scenario=scenario_name): return self.parameter_sets[param_name][scenario_name] try: return self.parameter_sets[param_name][ParameterScenarioSet.default_scenario] except KeyError: raise KeyError(f"{param_name} not found") def find_by_tag(self, tag) -> Dict[str, Set[Parameter]]: """ Get all registered dicts that are registered for a tag :param tag: str - single tag :return: a dict of {param name: set[Parameter]} that contains all ParameterScenarioSets for all parameter names with a given tag """ return self.tags[tag] def exists(self, param, scenario=None) -> bool: # if scenario is not None: # return present = param in self.parameter_sets.keys() if not present: return False scenario = scenario if scenario else ParameterScenarioSet.default_scenario return scenario in self.parameter_sets[param].scenarios.keys() def list_scenarios(self, param): if param in self.parameter_sets.keys(): return self.parameter_sets[param].scenarios.keys() class TableHandler(object): version: int def __init__(self, version=2): self.version = version @abstractmethod def load_definitions(self, sheet_name, filename=None, id_flag=False): raise NotImplementedError() class Xlsx2CsvHandler(TableHandler): def load_definitions(self, sheet_name, filename=None, id_flag=False): from xlsx2csv import Xlsx2csv data = 
Xlsx2csv(filename, inmemory=True).convert(None, sheetid=0) definitions = [] _sheet_names = [sheet_name] if sheet_name else [data.keys()] for _sheet_name in _sheet_names: sheet = data[_sheet_name] header = sheet.header if header[0] != 'variable': continue for row in sheet.rows: values = {} for key, cell in zip(header, row): values[key] = cell definitions.append(values) return definitions class DictReaderStrip(csv.DictReader): @property def fieldnames(self): if self._fieldnames is None: # Initialize self._fieldnames # Note: DictReader is an old-style class, so can't use super() csv.DictReader.fieldnames.fget(self) if self._fieldnames is not None: self._fieldnames = [name.strip() for name in self._fieldnames] return self._fieldnames class CSVHandler(TableHandler): def load_definitions(self, sheet_name, filename=None, id_flag=False): reader = DictReaderStrip(open(filename, encoding='utf-8-sig'), delimiter=',') definitions = [] _definition_tracking = defaultdict(dict) for i, row in enumerate(reader): values = {k: v.strip() for k, v in row.items()} if not values['variable']: logger.debug(f'ignoring row {i}: {row[0].value}') continue number_columns = [] if self.version == 2: number_columns = ['ref value', 'initial_value_proportional_variation', 'mean growth', 'variability growth'] if self.version == 1: number_columns = ['param 1', 'param 2', 'param 3', 'CAGR'] for key in number_columns: try: if key in values: # guard against empty strings new_val = float(values.get(key, 0) or 0) values[key] = new_val except: if 'type' in values and values['type'] == 'interp': # this is a json array ... @todo can we have more validation on these strings? 
continue else: raise Exception( f'Could not convert value <{values[key]}> for key {key} to number in row {i} for variable {values['variable']}') if 'ref date' in values and values['ref date']: if isinstance(values['ref date'], str): values['ref date'] = datetime.datetime.strptime(values['ref date'], '%d/%m/%Y') if values['ref date'].day != 1: logger.warning( f'ref date truncated to first of month for variable {values['variable']}') values['ref date'] = values['ref date'].replace(day=1) else: raise Exception( f"{values["ref date"]} for variable {values["variable"]} is not a date - " f"check spreadsheet value is a valid day of a month") logger.debug(f'values for {values['variable']}: {values}') definitions.append(values) scenario = values['scenario'] if values['scenario'] else "n/a" if scenario in _definition_tracking[values['variable']]: logger.error( f"Duplicate entry for parameter " f"with name <{values["variable"]}> and <{scenario}> scenario in file") raise ValueError( f"Duplicate entry for parameter " f"with name <{values["variable"]}> and <{scenario}> scenario in file") else: _definition_tracking[values['variable']][scenario] = 1 return definitions class PandasCSVHandler(TableHandler): def strip(self, text): try: return text.strip() except AttributeError: return text def load_definitions(self, sheet_name, filename=None, id_flag=False): self.version = 2 import pandas as pd df = pd.read_csv(filename, usecols=range(15), index_col=False, parse_dates=['ref date'], dtype={'initial_value_proportional_variation': 'float64'}, dayfirst=True, # date_parser=l0ambda x: pd.datetime.strptime(x, '%d-%m-%Y') ) df = df.dropna(subset=['variable', 'ref value']) df.fillna("", inplace=True) return df.to_dict(orient='records') class OpenpyxlTableHandler(TableHandler): version: int def __init__(self, version=2): super().__init__(version=version) self.highest_id = -1 self.id_map = defaultdict(lambda: defaultdict(dict)) self.id_column = None @staticmethod def 
get_sheet_range_bounds(filename, sheet_name): import openpyxl wb = openpyxl.load_workbook(filename) sheet = wb[sheet_name] rows = list(sheet.iter_rows()) return len(rows) def add_ids(self, ws=None, values=None, definitions=None, row_idx=None, id_flag=False, sheet_name=None, **kwargs): """ using the id map, assign ids to those variables that have not got an id yet :return: :rtype: """ name = values["variable"] scenario = values['scenario'] if values['scenario'] else "default" if name not in self.id_map.keys() or scenario not in self.id_map[name].keys(): # If this is the first process and it has no ID, set it to 0 pid = self.highest_id + 1 # else set it to the highest existing ID plus 1 self.highest_id += 1 self.id_map[name][scenario] = pid values["id"] = pid logger.debug(f'{name} {scenario}: {values}') definitions[name][scenario]["id"] = pid c = ws.cell(row=row_idx + 2, column=self.id_column) c.value = pid logger.info("ID " + str(pid) + " given to process " + values['variable']) def ref_date_handling(self, values: Dict = None, definitions=None, sheet_name=None, id_flag=None, **kwargs): if 'ref date' in values and values['ref date']: if isinstance(values['ref date'], datetime.datetime): # values['ref date'] = datetime.datetime(*xldate_as_tuple(values['ref date'], wb.datemode)) if values['ref date'].day != 1: logger.warning(f'ref date truncated to first of month for variable {values['variable']}') values['ref date'] = values['ref date'].replace(day=1) else: raise Exception( f"{values["ref date"]} for variable {values["variable"]} is not a date - " f"check spreadsheet value is a valid day of a month") logger.debug(f'values for {values['variable']}: {values}') name = values['variable'] scenario = values['scenario'] if values['scenario'] else "default" # store id's in a map to identify largest existing id if id_flag: if 'id' in values.keys() and (values["id"] or values["id"] == 0): pid = values['id'] if name not in self.id_map.keys() or scenario not in 
self.id_map[name].keys(): # raises exception if the ID already exists if (any(pid in d.values() for d in self.id_map.values())): raise Exception("Duplicate ID variable " + name) else: self.id_map[name][scenario] = pid if pid > self.highest_id: self.highest_id = pid if scenario in definitions[name].keys(): logger.error( f"Duplicate entry for parameter " f"with name <{values["variable"]}> and <{scenario}> scenario in sheet {sheet_name}") raise ValueError( f"Duplicate entry for parameter " f"with name <{values["variable"]}> and <{scenario}> scenario in sheet {sheet_name}") else: definitions[name][scenario] = values def table_visitor(self, wb: Workbook = None, sheet_names: List[str] = None, visitor_function: Callable = None, definitions=None, id_flag=False): """ stub for id management :param definitions: :param wb: :type wb: :param sheet_names: :type sheet_names: :param visitor_function: :type visitor_function: :return: :rtype: """ if not sheet_names: sheet_names = wb.sheetnames for _sheet_name in sheet_names: if _sheet_name == 'metadata': continue sheet = wb[_sheet_name] rows = list(sheet.iter_rows()) header = [cell.value for cell in rows[0]] if header[0] != 'variable': continue if id_flag: # get the id column number self.id_column = header.index('id') + 1 for i, row in enumerate(rows[1:]): values = {} for key, cell in zip(header, row): values[key] = cell.value if not values['variable']: logger.debug(f'ignoring row {i}: {row[0].value}') continue visitor_function(ws=sheet, values=values, definitions=definitions, row_idx=i, sheet_name=_sheet_name, id_flag=id_flag, row=row, header=header) return definitions def load_definitions(self, sheet_name, filename: str = None, id_flag=False): """ @todo - document that this not only loads definitions, but also writes the data file, if 'id' flag is True :param sheet_name: :param filename: :param id_flag: :return: """ import openpyxl wb = openpyxl.load_workbook(filename, data_only=True) definitions = defaultdict(lambda: 
defaultdict(dict)) _sheet_names = [sheet_name] if sheet_name else wb.sheetnames version = 1 try: sheet = wb['metadata'] rows = list(sheet.iter_rows()) for row in rows: if row[0].value == 'version': version = row[1].value self.version = version except: logger.info(f'could not find a sheet with name "metadata" in workbook. defaulting to v2') table_visitor_partial = partial(self.table_visitor, wb=wb, sheet_names=_sheet_names, definitions=definitions, id_flag=id_flag) table_visitor_partial(visitor_function=self.ref_date_handling) if id_flag: table_visitor_partial(visitor_function=self.add_ids) wb.save(filename) res = [] for var_set in definitions.values(): for scenario_var in var_set.values(): res.append(scenario_var) return res # return [definitions_ .values()] # return definitions class XLWingsTableHandler(TableHandler): def load_definitions(self, sheet_name, filename=None, id_flag=False): import xlwings as xw definitions = [] wb = xw.Book(fullname=filename) _sheet_names = [sheet_name] if sheet_name else wb.sheets for _sheet_name in _sheet_names: sheet = wb.sheets[_sheet_name] range = sheet.range('A1').expand() rows = range.rows header = [cell.value for cell in rows[0]] # check if this sheet contains parameters or if it documentation if header[0] != 'variable': continue total_rows = OpenpyxlTableHandler.get_sheet_range_bounds(filename, _sheet_name) range = sheet.range((1, 1), (total_rows, len(header))) rows = range.rows for row in rows[1:]: values = {} for key, cell in zip(header, row): values[key] = cell.value definitions.append(values) return definitions class TableParameterLoader(object): definition_version: int """Utility to populate ParameterRepository from spreadsheets. The structure of the spreadsheets is: | variable | ... | |----------|-----| | ... | ... | If the first row in a spreadsheet does not contain they keyword 'variable' the sheet is ignored. 
""" def __init__(self, filename, table_handler='openpyxl', version=2, **kwargs): self.filename = filename self.definition_version = version # default - will be overwritten by handler logger.info(f'Using {table_handler} excel handler') table_handler_instance = None if table_handler == 'csv': table_handler_instance = CSVHandler(version) if table_handler == 'pandas': table_handler_instance = PandasCSVHandler(version) if table_handler == 'openpyxl': table_handler_instance = OpenpyxlTableHandler() if table_handler == 'xlsx2csv': table_handler_instance = Xlsx2CsvHandler() if table_handler == 'xlwings': table_handler_instance = XLWingsTableHandler() self.table_handler: TableHandler = table_handler_instance def load_parameter_definitions(self, sheet_name: str = None, id_flag=False): """ Load variable text from rows in excel file. If no spreadsheet arg is given, all spreadsheets are loaded. The first cell in the first row in a spreadsheet must contain the keyword 'variable' or the sheet is ignored. Any cells used as titles (with no associated value) are also added to the returned dictionary. However, the values associated with each header will be None. For example, given the speadsheet: | variable | A | B | |----------|---|---| | Title | | | | Entry | 1 | 2 | The following list of definitions would be returned: [ { variable: 'Title', A: None, B: None } , { variable: 'Entry', A: 1 , B: 2 } ] :param sheet_name: :return: list of dicts with {header col name : cell value} pairs """ definitions = self.table_handler.load_definitions(sheet_name, filename=self.filename, id_flag=id_flag) self.definition_version = self.table_handler.version return definitions def load_into_repo(self, repository: ParameterRepository = None, sheet_name: str = None, id_flag=False): """ Create a Repo from an excel file. 
:param repository: the repository to load into :param sheet_name: :return: """ repository.add_all(self.load_parameters(sheet_name, id_flag=id_flag)) def load_parameters(self, sheet_name, id_flag=False): parameter_definitions = self.load_parameter_definitions(sheet_name=sheet_name, id_flag=id_flag) params = [] param_name_map = param_name_maps[int(self.definition_version)] for _def in parameter_definitions: # substitute names from the headers with the kwargs names in the Parameter and Distributions classes # e.g. 'variable' -> 'name', 'module' -> 'module_name', etc parameter_kwargs_def = {} for k, v in _def.items(): if k in param_name_map: if param_name_map[k]: parameter_kwargs_def[param_name_map[k]] = v else: parameter_kwargs_def[k] = v name_ = parameter_kwargs_def['name'] del parameter_kwargs_def['name'] p = Parameter(name_, version=self.definition_version, **parameter_kwargs_def) params.append(p) return params
__version__ = '1.0.0' import csv import datetime import importlib from abc import abstractmethod from collections import defaultdict from typing import Dict, List, Set import numpy as np import pandas as pd from dateutil import relativedelta as rdelta import logging from functools import partial import calendar from openpyxl import Workbook from scipy.interpolate import interp1d import json from typing import Callable __author__ = 'schien' # import pkg_resources # part of setuptools # version = pkg_resources.require("excel-modelling-helper")[0].version param_name_map_v1 = {'variable': 'name', 'scenario': 'source_scenarios_string', 'module': 'module_name', 'distribution': 'distribution_name', 'param 1': 'param_a', 'param 2': 'param_b', 'param 3': 'param_c', 'unit': '', 'CAGR': 'cagr', 'ref date': 'ref_date', 'label': '', 'tags': '', 'comment': '', 'source': ''} param_name_map_v2 = {'CAGR': 'cagr', 'comment': '', 'label': '', 'mean growth': 'growth_factor', 'param': '', 'ref date': 'ref_date', 'ref value': '', 'scenario': 'source_scenarios_string', 'source': '', 'tags': '', 'type': '', 'unit': '', 'variability growth': 'ef_growth_factor', 'initial_value_proportional_variation': '', 'variable': 'name'} param_name_maps = {1: param_name_map_v1, 2: param_name_map_v2} # logger.basicConfig(level=logger.DEBUG) logger = logging.getLogger(__name__) class DistributionFunctionGenerator(object): module: str distribution: str param_a: str param_b: str param_c: str def __init__(self, module_name=None, distribution_name=None, param_a: float = None, param_b: float = None, param_c: float = None, size=None, **kwargs): """ Instantiate a new object. 
:param module_name: :param distribution_name: :param param_a: :param param_b: :param param_c: :param size: :param kwargs: can contain key "sample_mean_value" with bool value """ self.kwargs = kwargs self.size = size self.module_name = module_name self.distribution_name = distribution_name self.sample_mean_value = kwargs.get('sample_mean_value', False) # prepare function arguments if distribution_name == 'choice': if type(param_a) == str: tokens = param_a.split(',') params = [float(token.strip()) for token in tokens] self.random_function_params = [np.array(params, dtype=np.float)] else: self.random_function_params = [np.array([i for i in [param_a, param_b, param_c] if i], dtype=np.float)] logger.debug(f'setting function params for choice distribution {self.random_function_params}') else: self.random_function_params = [i for i in [param_a, param_b, param_c] if i not in [None, ""]] def get_mean(self, distribution_function): """Get the mean value for a distribution. If the distribution function is [normal, uniform,choice,triangular] the analytic value is being calculted. Else, the distribution is instantiated and then the mean is being calculated. :param distribution_function: :return: the mean as a scalar """ name = self.distribution_name params = self.random_function_params if name == 'normal': return params[0] if name == 'uniform': return (params[0] + params[1]) / 2. if name == 'choice': return params[0].mean() if name == 'triangular': return (params[0] + params[1] + params[2]) / 3. return distribution_function().mean() def generate_values(self, *args, **kwargs): """ Generate a sample of values by sampling from a distribution. The size of the sample can be overriden with the 'size' kwarg. If `self.sample_mean_value == True` the sample will contain "size" times the mean value. 
:param args: :param kwargs: :return: sample as vector of given size """ sample_size = kwargs.get('size', self.size) f = self.instantiate_distribution_function(self.module_name, self.distribution_name) distribution_function = partial(f, *self.random_function_params, size=sample_size) if self.sample_mean_value: sample = np.full(sample_size, self.get_mean(distribution_function)) else: sample = distribution_function() return sample @staticmethod def instantiate_distribution_function(module_name, distribution_name): module = importlib.import_module(module_name) func = getattr(module, distribution_name) return func class Parameter(object): """ A single parameter """ version: int name: str unit: str comment: str source: str scenario: str processes: Dict[str, List] "optional comma-separated list of tags" tags: str def __init__(self, name, tags=None, source_scenarios_string: str = None, unit: str = None, comment: str = None, source: str = None, version=None, **kwargs): self.version = version # The source definition of scenarios. A comma-separated list self.source = source self.comment = comment self.unit = unit self.source_scenarios_string = source_scenarios_string self.tags = tags self.name = name self.scenario = None self.cache = None # track the usages of this parameter per process as a list of process-specific variable names that are backed by this parameter self.processes = defaultdict(list) self.kwargs = kwargs def __call__(self, settings=None, *args, **kwargs): """ Samples from a parameter. Values are cached and returns the same value every time called. @todo confusing interface that accepts 'settings' and kwargs at the same time. 
worse- 'use_time_series' must be present in the settings dict :param args: :param kwargs: :return: """ if self.cache is None: kwargs['name'] = self.name kwargs['unit'] = self.unit kwargs['tags'] = self.tags kwargs['scenario'] = self.scenario if not settings: settings = {} common_args = {'size': settings.get('sample_size', 1), 'sample_mean_value': settings.get('sample_mean_value', False), 'with_pint_units': settings.get('with_pint_units', False) } common_args.update(**self.kwargs) if settings.get('use_time_series', False): if self.version == 2: generator = GrowthTimeSeriesGenerator(**common_args, times=settings['times']) else: generator = ConstantUncertaintyExponentialGrowthTimeSeriesGenerator(**common_args, times=settings['times']) else: generator = DistributionFunctionGenerator(**common_args) self.cache = generator.generate_values(*args, **kwargs) return self.cache def add_usage(self, process_name, variable_name): # add the name of a variable of a process model that is backed by this parameter self.processes[process_name].append(variable_name) class GrowthTimeSeriesGenerator(DistributionFunctionGenerator): ref_date: str # of the mean values # the type of growth ['exp'] # growth_function_type: str # of the error function variance: str # error function growth rate ef_growth_factor: str def __init__(self, times=None, size=None, index_names=None, ref_date=None, with_pint_units=False, *args, **kwargs): super().__init__(*args, **kwargs) self.ref_date = ref_date if ref_date else None self.with_pint_units = with_pint_units if self.with_pint_units: import pint self.times = times self.size = size iterables = [times, range(0, size)] self._multi_index = pd.MultiIndex.from_product(iterables, names=index_names) assert type(times.freq) == pd.tseries.offsets.MonthBegin, 'Time index must have monthly frequency' def generate_values(self, *args, **kwargs): """ Instantiate a random variable and apply annual growth factors. :return: """ assert 'ref value' in self.kwargs # 1. 
Generate $\mu$ start_date = self.times[0].to_pydatetime() end_date = self.times[-1].to_pydatetime() ref_date = self.ref_date if not ref_date: raise Exception(f"Ref date not set for variable {kwargs['name']}") mu = self.generate_mu(end_date, ref_date, start_date) # 3. Generate $\sigma$ # Prepare array with growth values $\sigma$ if self.sample_mean_value: sigma = np.zeros((len(self.times), self.size)) else: if self.kwargs['type'] == 'interp': def get_date(record): return datetime.datetime.strptime(record[0], "%Y-%m-%d") ref_value_ = sorted(json.loads(self.kwargs['ref value'].strip()).items(), key=get_date) intial_value = ref_value_[0][1] else: intial_value = float(self.kwargs['ref value']) variability_ = intial_value * self.kwargs['initial_value_proportional_variation'] logger.debug(f'sampling random distribution with parameters -{variability_}, 0, {variability_}') sigma = np.random.triangular(-1 * variability_, 0, variability_, (len(self.times), self.size)) # logger.debug(ref_date.strftime("%b %d %Y")) # 4. Prepare growth array for $\alpha_{sigma}$ alpha_sigma = growth_coefficients(start_date, end_date, ref_date, self.kwargs['ef_growth_factor'], 1) # 5. 
Prepare DataFrame iterables = [self.times, range(self.size)] index_names = ['time', 'samples'] _multi_index = pd.MultiIndex.from_product(iterables, names=index_names) # logger.debug(start_date) # logger.debug(end_date) from dateutil import relativedelta r = relativedelta.relativedelta(end_date, start_date) months = r.years * 12 + r.months + 1 name = kwargs['name'] # Apply growth to $\sigma$ and add $\sigma$ to $\mu$ # logger.debug(sigma.size) # logger.debug(alpha_sigma.shape) # logger.debug(months) if self.with_pint_units: unit_ = kwargs["unit"] if not unit_: unit_ = 'dimensionless' dtype = f'pint[{unit_}]' else: dtype = 'float64' series = pd.Series(((sigma * alpha_sigma) + mu.reshape(months, 1)).ravel(), index=_multi_index, dtype=dtype) # test if df has sub-zero values df_sigma__dropna = series.where(series < 0).dropna() if self.with_pint_units: _values = df_sigma__dropna.pint.m else: _values = df_sigma__dropna if not _values.empty: logger.warning(f"Negative values for parameter {name} from {df_sigma__dropna.index[0][0]}") return series def generate_mu(self, end_date, ref_date, start_date): if self.kwargs['type'] == 'exp': mu_bar = np.full(len(self.times), float(self.kwargs['ref value'])) # 2. 
Apply Growth to Mean Values $\alpha_{mu}$ alpha_mu = growth_coefficients(start_date, end_date, ref_date, self.kwargs['growth_factor'], 1) mu = mu_bar * alpha_mu.ravel() mu = mu.reshape(len(self.times), 1) return mu if self.kwargs['type'] == 'interp': def toTimestamp(d): return calendar.timegm(d.timetuple()) def interpolate(growth_config: Dict[str, float], date_range, kind='linear'): arr1 = np.array([toTimestamp(datetime.datetime.strptime(date_val, '%Y-%m-%d')) for date_val in growth_config.keys()]) arr2 = np.array([val for val in growth_config.values()]) f = interp1d(arr1, arr2, kind=kind, fill_value='extrapolate') return f([toTimestamp(date_val) for date_val in date_range]) ref_value_ = json.loads(self.kwargs['ref value'].strip()) return interpolate(ref_value_, self.times, self.kwargs['param']) class ConstantUncertaintyExponentialGrowthTimeSeriesGenerator(DistributionFunctionGenerator): cagr: str ref_date: str def __init__(self, cagr=None, times=None, size=None, index_names=None, ref_date=None, with_pint_units=False, *args, **kwargs): super().__init__(*args, **kwargs) self.cagr = cagr if cagr else 0 self.ref_date = ref_date if ref_date else None self.with_pint_units = with_pint_units if self.with_pint_units: import pint self.times = times self.size = size iterables = [times, range(0, size)] self._multi_index = pd.MultiIndex.from_product(iterables, names=index_names) assert type(times.freq) == pd.tseries.offsets.MonthBegin, 'Time index must have monthly frequency' def generate_values(self, *args, **kwargs): """ Instantiate a random variable and apply annual growth factors. :return: """ values = super().generate_values(*args, **kwargs, size=(len(self.times) * self.size,)) alpha = self.cagr # @todo - fill to cover the entire time: define rules for filling first ref_date = self.ref_date if self.ref_date else self.times[0].to_pydatetime() # assert ref_date >= self.times[0].to_pydatetime(), 'Ref date must be within variable time span.' 
# assert ref_date <= self.times[-1].to_pydatetime(), 'Ref date must be within variable time span.' start_date = self.times[0].to_pydatetime() end_date = self.times[-1].to_pydatetime() a = growth_coefficients(start_date, end_date, ref_date, alpha, self.size) x = a.ravel() values = np.multiply(values, x) # df = pd.DataFrame(values) # df.columns = [kwargs['name']] # df.set_index(self._multi_index, inplace=True) # # @todo this is a hack to return a series with index as I don't know how to set an index and rename a series # data_series = df.iloc[:, 0] # data_series._metadata = kwargs # data_series.index.rename(['time', 'samples'], inplace=True) # if self.with_pint_units: if not kwargs["unit"]: dtype = 'pint[dimensionless]' else: dtype = f'pint[{kwargs["unit"]}]' else: dtype = 'float64' series = pd.Series(values, index=self._multi_index, dtype=dtype) return series def growth_coefficients(start_date, end_date, ref_date, alpha, samples): """ Build a matrix of growth factors according to the CAGR formula y'=y0 (1+a)^(t'-t0). 
a growth rate alpha t0 start date t' end date y' output y0 start value """ start_offset = 0 if ref_date < start_date: offset_delta = rdelta.relativedelta(start_date, ref_date) start_offset = offset_delta.months + 12 * offset_delta.years start_date = ref_date end_offset = 0 if ref_date > end_date: offset_delta = rdelta.relativedelta(ref_date, end_date) end_offset = offset_delta.months + 12 * offset_delta.years end_date = ref_date delta_ar = rdelta.relativedelta(ref_date, start_date) ar = delta_ar.months + 12 * delta_ar.years delta_br = rdelta.relativedelta(end_date, ref_date) br = delta_br.months + 12 * delta_br.years # we place the ref point on the lower interval (delta_ar + 1) but let it start from 0 # in turn we let the upper interval start from 1 g = np.fromfunction(lambda i, j: np.power(1 - alpha, np.abs(i) / 12), (ar + 1, samples), dtype=float) h = np.fromfunction(lambda i, j: np.power(1 + alpha, np.abs(i + 1) / 12), (br, samples), dtype=float) g = np.flipud(g) # now join the two arrays a = np.vstack((g, h)) if start_offset > 0: a = a[start_offset:] if end_offset > 0: a = a[:-end_offset] return a class ParameterScenarioSet(object): """ The set of all version of a parameter for all the scenarios. """ default_scenario = 'default' "the name of the parameters in this set" parameter_name: str scenarios: Dict[str, Parameter] def __init__(self): self.scenarios = {} def add_scenario(self, parameter: 'Parameter', scenario_name: str = default_scenario): """ Add a scenario for this parameter. :param scenario_name: :param parameter: :return: """ self.scenarios[scenario_name] = parameter def __getitem__(self, item): return self.scenarios.__getitem__(item) def __setitem__(self, key, value): return self.scenarios.__setitem__(key, value) class ParameterRepository(object): """ Contains all known parameter definitions (so that it is not necessary to re-read the excel file for repeat param accesses). 
The param definitions are independent from the sampling (the Param.__call__ method). Repeat access to __call__ will create new samples. Internally, parameters are organised together with all the scenario variants in a single ParameterScenarioSet. """ parameter_sets: Dict[str, ParameterScenarioSet] tags: Dict[str, Dict[str, Set[Parameter]]] def __init__(self): self.parameter_sets = defaultdict(ParameterScenarioSet) self.tags = defaultdict(lambda: defaultdict(set)) def add_all(self, parameters: List[Parameter]): for p in parameters: self.add_parameter(p) def clear_cache(self): for p_sets in self.parameter_sets.values(): for param_name, param in p_sets.scenarios.items(): param.cache = None def add_parameter(self, parameter: Parameter): """ A parameter can have several scenarios. They are specified as a comma-separated list in a string. :param parameter: :return: """ # try reading the scenarios from the function arg or from the parameter attribute scenario_string = parameter.source_scenarios_string if scenario_string: _scenarios = [i.strip() for i in scenario_string.split(',')] self.fill_missing_attributes_from_default_parameter(parameter) else: _scenarios = [ParameterScenarioSet.default_scenario] for scenario in _scenarios: parameter.scenario = scenario self.parameter_sets[parameter.name][scenario] = parameter # record all tags for this parameter if parameter.tags: _tags = [i.strip() for i in parameter.tags.split(',')] for tag in _tags: self.tags[tag][parameter.name].add(parameter) def fill_missing_attributes_from_default_parameter(self, param): """ Empty fields in Parameter definitions in scenarios are populated with default values. E.g. in the example below, the source for the Power_TV variable in the 8K scenario would also be EnergyStar. 
+----------+----------+-----+--------+------------+ | name | scenario | val | tags | source | +----------+----------+-----+--------+------------+ | Power_TV | | 60 | UD, TV | EnergyStar | | Power_TV | 8K | 85 | new_tag| | +----------+----------+-----+--------+------------+ **Note** tags must not differ. In the example above, the 8K scenario variable the tags value would be overwritten with the default value. :param param: :return: """ if not self.exists(param.name) or ParameterScenarioSet.default_scenario not in self.parameter_sets[ param.name].scenarios.keys(): logger.warning( f'No default value for param {param.name} found.') return default = self.parameter_sets[param.name][ParameterScenarioSet.default_scenario] for att_name, att_value in default.__dict__.items(): if att_name in ['unit', 'label', 'comment', 'source', 'tags']: if att_name == 'tags' and default.tags != param.tags: logger.warning( f'For param {param.name} for scenarios {param.source_scenarios_string}, ' f'tags is different from default parameter tags. 
Overwriting with default values.') setattr(param, att_name, att_value) if not getattr(param, att_name): logger.debug( f'For param {param.name} for scenarios {param.source_scenarios_string}, ' f'populating attribute {att_name} with value {att_value} from default parameter.') setattr(param, att_name, att_value) def __getitem__(self, item) -> Parameter: """ Return the default scenario parameter for a given variable name :param item: the name of the variable :return: """ return self.get_parameter(item, scenario_name=ParameterScenarioSet.default_scenario) def get_parameter(self, param_name, scenario_name=ParameterScenarioSet.default_scenario) -> Parameter: if self.exists(param_name, scenario=scenario_name): return self.parameter_sets[param_name][scenario_name] try: return self.parameter_sets[param_name][ParameterScenarioSet.default_scenario] except KeyError: raise KeyError(f"{param_name} not found") def find_by_tag(self, tag) -> Dict[str, Set[Parameter]]: """ Get all registered dicts that are registered for a tag :param tag: str - single tag :return: a dict of {param name: set[Parameter]} that contains all ParameterScenarioSets for all parameter names with a given tag """ return self.tags[tag] def exists(self, param, scenario=None) -> bool: # if scenario is not None: # return present = param in self.parameter_sets.keys() if not present: return False scenario = scenario if scenario else ParameterScenarioSet.default_scenario return scenario in self.parameter_sets[param].scenarios.keys() def list_scenarios(self, param): if param in self.parameter_sets.keys(): return self.parameter_sets[param].scenarios.keys() class TableHandler(object): version: int def __init__(self, version=2): self.version = version @abstractmethod def load_definitions(self, sheet_name, filename=None, id_flag=False): raise NotImplementedError() class Xlsx2CsvHandler(TableHandler): def load_definitions(self, sheet_name, filename=None, id_flag=False): from xlsx2csv import Xlsx2csv data = 
Xlsx2csv(filename, inmemory=True).convert(None, sheetid=0) definitions = [] _sheet_names = [sheet_name] if sheet_name else [data.keys()] for _sheet_name in _sheet_names: sheet = data[_sheet_name] header = sheet.header if header[0] != 'variable': continue for row in sheet.rows: values = {} for key, cell in zip(header, row): values[key] = cell definitions.append(values) return definitions class DictReaderStrip(csv.DictReader): @property def fieldnames(self): if self._fieldnames is None: # Initialize self._fieldnames # Note: DictReader is an old-style class, so can't use super() csv.DictReader.fieldnames.fget(self) if self._fieldnames is not None: self._fieldnames = [name.strip() for name in self._fieldnames] return self._fieldnames class CSVHandler(TableHandler): def load_definitions(self, sheet_name, filename=None, id_flag=False): reader = DictReaderStrip(open(filename, encoding='utf-8-sig'), delimiter=',') definitions = [] _definition_tracking = defaultdict(dict) for i, row in enumerate(reader): values = {k: v.strip() for k, v in row.items()} if not values['variable']: logger.debug(f'ignoring row {i}: {row[0].value}') continue number_columns = [] if self.version == 2: number_columns = ['ref value', 'initial_value_proportional_variation', 'mean growth', 'variability growth'] if self.version == 1: number_columns = ['param 1', 'param 2', 'param 3', 'CAGR'] for key in number_columns: try: if key in values: # guard against empty strings new_val = float(values.get(key, 0) or 0) values[key] = new_val except: if 'type' in values and values['type'] == 'interp': # this is a json array ... @todo can we have more validation on these strings? 
continue else: raise Exception( f'Could not convert value <{values[key]}> for key {key} to number in row {i} for variable {values["variable"]}') if 'ref date' in values and values['ref date']: if isinstance(values['ref date'], str): values['ref date'] = datetime.datetime.strptime(values['ref date'], '%d/%m/%Y') if values['ref date'].day != 1: logger.warning( f'ref date truncated to first of month for variable {values["variable"]}') values['ref date'] = values['ref date'].replace(day=1) else: raise Exception( f"{values['ref date']} for variable {values['variable']} is not a date - " f"check spreadsheet value is a valid day of a month") logger.debug(f'values for {values["variable"]}: {values}') definitions.append(values) scenario = values['scenario'] if values['scenario'] else "n/a" if scenario in _definition_tracking[values['variable']]: logger.error( f"Duplicate entry for parameter " f"with name <{values['variable']}> and <{scenario}> scenario in file") raise ValueError( f"Duplicate entry for parameter " f"with name <{values['variable']}> and <{scenario}> scenario in file") else: _definition_tracking[values['variable']][scenario] = 1 return definitions class PandasCSVHandler(TableHandler): def strip(self, text): try: return text.strip() except AttributeError: return text def load_definitions(self, sheet_name, filename=None, id_flag=False): self.version = 2 import pandas as pd df = pd.read_csv(filename, usecols=range(15), index_col=False, parse_dates=['ref date'], dtype={'initial_value_proportional_variation': 'float64'}, dayfirst=True, # date_parser=l0ambda x: pd.datetime.strptime(x, '%d-%m-%Y') ) df = df.dropna(subset=['variable', 'ref value']) df.fillna("", inplace=True) return df.to_dict(orient='records') class OpenpyxlTableHandler(TableHandler): version: int def __init__(self, version=2): super().__init__(version=version) self.highest_id = -1 self.id_map = defaultdict(lambda: defaultdict(dict)) self.id_column = None @staticmethod def 
get_sheet_range_bounds(filename, sheet_name): import openpyxl wb = openpyxl.load_workbook(filename) sheet = wb[sheet_name] rows = list(sheet.iter_rows()) return len(rows) def add_ids(self, ws=None, values=None, definitions=None, row_idx=None, id_flag=False, sheet_name=None, **kwargs): """ using the id map, assign ids to those variables that have not got an id yet :return: :rtype: """ name = values["variable"] scenario = values['scenario'] if values['scenario'] else "default" if name not in self.id_map.keys() or scenario not in self.id_map[name].keys(): # If this is the first process and it has no ID, set it to 0 pid = self.highest_id + 1 # else set it to the highest existing ID plus 1 self.highest_id += 1 self.id_map[name][scenario] = pid values["id"] = pid logger.debug(f'{name} {scenario}: {values}') definitions[name][scenario]["id"] = pid c = ws.cell(row=row_idx + 2, column=self.id_column) c.value = pid logger.info("ID " + str(pid) + " given to process " + values['variable']) def ref_date_handling(self, values: Dict = None, definitions=None, sheet_name=None, id_flag=None, **kwargs): if 'ref date' in values and values['ref date']: if isinstance(values['ref date'], datetime.datetime): # values['ref date'] = datetime.datetime(*xldate_as_tuple(values['ref date'], wb.datemode)) if values['ref date'].day != 1: logger.warning(f'ref date truncated to first of month for variable {values["variable"]}') values['ref date'] = values['ref date'].replace(day=1) else: raise Exception( f"{values['ref date']} for variable {values['variable']} is not a date - " f"check spreadsheet value is a valid day of a month") logger.debug(f'values for {values["variable"]}: {values}') name = values['variable'] scenario = values['scenario'] if values['scenario'] else "default" # store id's in a map to identify largest existing id if id_flag: if 'id' in values.keys() and (values["id"] or values["id"] == 0): pid = values['id'] if name not in self.id_map.keys() or scenario not in 
self.id_map[name].keys(): # raises exception if the ID already exists if (any(pid in d.values() for d in self.id_map.values())): raise Exception("Duplicate ID variable " + name) else: self.id_map[name][scenario] = pid if pid > self.highest_id: self.highest_id = pid if scenario in definitions[name].keys(): logger.error( f"Duplicate entry for parameter " f"with name <{values['variable']}> and <{scenario}> scenario in sheet {sheet_name}") raise ValueError( f"Duplicate entry for parameter " f"with name <{values['variable']}> and <{scenario}> scenario in sheet {sheet_name}") else: definitions[name][scenario] = values def table_visitor(self, wb: Workbook = None, sheet_names: List[str] = None, visitor_function: Callable = None, definitions=None, id_flag=False): """ stub for id management :param definitions: :param wb: :type wb: :param sheet_names: :type sheet_names: :param visitor_function: :type visitor_function: :return: :rtype: """ if not sheet_names: sheet_names = wb.sheetnames for _sheet_name in sheet_names: if _sheet_name == 'metadata': continue sheet = wb[_sheet_name] rows = list(sheet.iter_rows()) header = [cell.value for cell in rows[0]] if header[0] != 'variable': continue if id_flag: # get the id column number self.id_column = header.index('id') + 1 for i, row in enumerate(rows[1:]): values = {} for key, cell in zip(header, row): values[key] = cell.value if not values['variable']: logger.debug(f'ignoring row {i}: {row[0].value}') continue visitor_function(ws=sheet, values=values, definitions=definitions, row_idx=i, sheet_name=_sheet_name, id_flag=id_flag, row=row, header=header) return definitions def load_definitions(self, sheet_name, filename: str = None, id_flag=False): """ @todo - document that this not only loads definitions, but also writes the data file, if 'id' flag is True :param sheet_name: :param filename: :param id_flag: :return: """ import openpyxl wb = openpyxl.load_workbook(filename, data_only=True) definitions = defaultdict(lambda: 
defaultdict(dict)) _sheet_names = [sheet_name] if sheet_name else wb.sheetnames version = 1 try: sheet = wb['metadata'] rows = list(sheet.iter_rows()) for row in rows: if row[0].value == 'version': version = row[1].value self.version = version except: logger.info(f'could not find a sheet with name "metadata" in workbook. defaulting to v2') table_visitor_partial = partial(self.table_visitor, wb=wb, sheet_names=_sheet_names, definitions=definitions, id_flag=id_flag) table_visitor_partial(visitor_function=self.ref_date_handling) if id_flag: table_visitor_partial(visitor_function=self.add_ids) wb.save(filename) res = [] for var_set in definitions.values(): for scenario_var in var_set.values(): res.append(scenario_var) return res # return [definitions_ .values()] # return definitions class XLWingsTableHandler(TableHandler): def load_definitions(self, sheet_name, filename=None, id_flag=False): import xlwings as xw definitions = [] wb = xw.Book(fullname=filename) _sheet_names = [sheet_name] if sheet_name else wb.sheets for _sheet_name in _sheet_names: sheet = wb.sheets[_sheet_name] range = sheet.range('A1').expand() rows = range.rows header = [cell.value for cell in rows[0]] # check if this sheet contains parameters or if it documentation if header[0] != 'variable': continue total_rows = OpenpyxlTableHandler.get_sheet_range_bounds(filename, _sheet_name) range = sheet.range((1, 1), (total_rows, len(header))) rows = range.rows for row in rows[1:]: values = {} for key, cell in zip(header, row): values[key] = cell.value definitions.append(values) return definitions class TableParameterLoader(object): definition_version: int """Utility to populate ParameterRepository from spreadsheets. The structure of the spreadsheets is: | variable | ... | |----------|-----| | ... | ... | If the first row in a spreadsheet does not contain they keyword 'variable' the sheet is ignored. 
""" def __init__(self, filename, table_handler='openpyxl', version=2, **kwargs): self.filename = filename self.definition_version = version # default - will be overwritten by handler logger.info(f'Using {table_handler} excel handler') table_handler_instance = None if table_handler == 'csv': table_handler_instance = CSVHandler(version) if table_handler == 'pandas': table_handler_instance = PandasCSVHandler(version) if table_handler == 'openpyxl': table_handler_instance = OpenpyxlTableHandler() if table_handler == 'xlsx2csv': table_handler_instance = Xlsx2CsvHandler() if table_handler == 'xlwings': table_handler_instance = XLWingsTableHandler() self.table_handler: TableHandler = table_handler_instance def load_parameter_definitions(self, sheet_name: str = None, id_flag=False): """ Load variable text from rows in excel file. If no spreadsheet arg is given, all spreadsheets are loaded. The first cell in the first row in a spreadsheet must contain the keyword 'variable' or the sheet is ignored. Any cells used as titles (with no associated value) are also added to the returned dictionary. However, the values associated with each header will be None. For example, given the speadsheet: | variable | A | B | |----------|---|---| | Title | | | | Entry | 1 | 2 | The following list of definitions would be returned: [ { variable: 'Title', A: None, B: None } , { variable: 'Entry', A: 1 , B: 2 } ] :param sheet_name: :return: list of dicts with {header col name : cell value} pairs """ definitions = self.table_handler.load_definitions(sheet_name, filename=self.filename, id_flag=id_flag) self.definition_version = self.table_handler.version return definitions def load_into_repo(self, repository: ParameterRepository = None, sheet_name: str = None, id_flag=False): """ Create a Repo from an excel file. 
:param repository: the repository to load into :param sheet_name: :return: """ repository.add_all(self.load_parameters(sheet_name, id_flag=id_flag)) def load_parameters(self, sheet_name, id_flag=False): parameter_definitions = self.load_parameter_definitions(sheet_name=sheet_name, id_flag=id_flag) params = [] param_name_map = param_name_maps[int(self.definition_version)] for _def in parameter_definitions: # substitute names from the headers with the kwargs names in the Parameter and Distributions classes # e.g. 'variable' -> 'name', 'module' -> 'module_name', etc parameter_kwargs_def = {} for k, v in _def.items(): if k in param_name_map: if param_name_map[k]: parameter_kwargs_def[param_name_map[k]] = v else: parameter_kwargs_def[k] = v name_ = parameter_kwargs_def['name'] del parameter_kwargs_def['name'] p = Parameter(name_, version=self.definition_version, **parameter_kwargs_def) params.append(p) return params
# -*- coding: utf-8 -*- # :Project: pglast -- Wrap PG nodes into a Python AST # :Created: sab 27 feb 2021, 19:47:11 # :Author: Lele Gaifax <lele@metapensiero.it> # :License: GNU General Public License version 3 or later # :Copyright: © 2021 Lele Gaifax # from datetime import date import json from keyword import iskeyword from pathlib import Path import subprocess from re import match, sub CYEARS = '' CYEARS += str(date.today().year) AST_PY_HEADER = f"""\ # -*- coding: utf-8 -*- # :Project: pglast -- DO NOT EDIT: automatically extracted from struct_defs.json @ %s # :Author: Lele Gaifax <lele@metapensiero.it> # :License: GNU General Public License version 3 or later # :Copyright: © {CYEARS} Lele Gaifax # from collections import namedtuple from decimal import Decimal from enum import Enum SlotTypeInfo = namedtuple('SlotTypeInfo', ['c_type', 'py_type', 'adaptor']) class Node: "Base class for all AST nodes." __slots__ = () def __init__(self, data): if not isinstance(data, dict): raise ValueError(f'Bad argument, expected a dictionary, got {{type(data)!r}}') if '@' not in data: raise ValueError('Bad argument, expected a dictionary with a "@" key') if data['@'] != self.__class__.__name__: raise ValueError(f'Bad argument, wrong "@" value, expected {{self.__class__.__name__!r}}, got {{data['@']!r}}') G = globals() for a in self: v = data.get(a) if v is not None: if isinstance(v, dict) and '@' in v: v = G[v['@']](v) elif isinstance(v, (tuple, list)): v = tuple(G[i['@']](i) if isinstance(i, dict) and '@' in i else i for i in v) setattr(self, a, v) def __iter__(self): "Iterate over all attribute names of this node." return iter(self.__slots__) def __repr__(self): "Build a representation of the whole node and its subtree, for debug." 
attrs = [] for a in self: if a != 'location': v = getattr(self, a) if v is not None: attrs.append(f'{{a}}={{v!r}}') if attrs: attrs = ' ' + ' '.join(attrs) else: attrs = '' return '<' + self.__class__.__name__ + attrs + '>' def __eq__(self, other): ''' Compare two nodes, returning ``True`` if they are considered equivalent. This is mainly an helper method used by tests: for this reason, two nodes are considered equal when all their attributes match, ignoring *positional* ones such as ``location``, ``stmt_len`` and ``stmt_location``. ''' if not isinstance(other, type(self)): return False for a in self: if ((a not in ('location', 'stmt_len', 'stmt_location') and getattr(self, a) != getattr(other, a))): return False return True def __call__(self, depth=None, ellipsis=..., skip_none=False): '''Serialize the node as a structure made of simple Python data-types. :type depth: ``None`` or ``int`` :param depth: if not ``None``, the maximum depth to reach :param ellipsis: the marker value that will be used to replace cut-off branch :param bool skip_none: whether ``None``-valued attributes should be elided :param bool enum_name: whether Enums will be rendered as their name only :return: a :class:`dict` instance This performs a top-down recursive visit to the whole AST tree: each ``Node`` instance becomes a dictionary with a special ``@`` key carrying the node type, lists becomes tuples and ``Enum`` instances become dictionaries with a special ``#`` key carrying the enum name.''' from enum import Enum d = {{'@': self.__class__.__name__}} for a in self: v = getattr(self, a) if isinstance(v, Node): if depth is None or depth > 0: v = v(None if depth is None else depth - 1, ellipsis, skip_none) else: v = ellipsis elif isinstance(v, tuple): if depth is None or depth > 0: if v and isinstance(v[0], Node): v = tuple(i(None if depth is None else depth - 1, ellipsis, skip_none) for i in v) else: v = ellipsis elif isinstance(v, Enum): v = {{'#': v.__class__.__name__, 'name': v.name, 
'value': v.value}} if not skip_none or v is not None: d[a] = v return d def __setattr__(self, name, value): '''Validate the given `value` and if acceptable assign it to the `name` attribute. This tries to coerce the given `value` accordingly with the *ctype* of the attribute, raising opportune exception when that is not possible. ''' if value is not None: ctype, ptype, adaptor = self.__slots__[name] if not isinstance(ptype, tuple): ptype = (ptype,) if not isinstance(value, ptype): raise ValueError(f'Bad value for attribute {{self.__class__.__name__}}.{{name}}, expected {{ptype}}, got {{type(value)}}: {{value!r}}') if adaptor is not None: value = adaptor(value) elif ctype == 'Node*': if isinstance(value, dict) and '@' in value: value = globals()[value['@']](value) elif ctype == 'char': if isinstance(value, int): value = chr(value) elif len(value) != 1: raise ValueError(f'Bad value for attribute {{self.__class__.__name__}}.{{name}}, expected a single char, got {{value!r}}') elif ctype == 'bool': value = bool(value) elif ctype == 'Bitmapset*': if isinstance(value, (list, tuple)): value = set(value) elif ctype == 'List*': G = globals() value = tuple(G[i['@']](i) if isinstance(i, dict) and '@' in i else i for i in value) elif ctype != 'char*': from pglast import enums if hasattr(enums, ctype): enum = getattr(enums, ctype) if not isinstance(value, enum): if isinstance(value, dict) and '#' in value: if value['#'] != ctype: raise ValueError(f'Bad value for attribute {{self.__class__.__name__}}.{{name}}, expected a {{ptype}}, got {{value!r}}') from None if 'name' in value: value = value['name'] elif 'value' in value: value = value['value'] else: raise ValueError(f'Bad value for attribute {{self.__class__.__name__}}.{{name}}, expected a {{ptype}}, got {{value!r}}') from None try: if isinstance(value, str) and len(value) > 1: value = enum[value] else: value = enum(value) except (KeyError, ValueError): raise ValueError(f'Bad value for attribute 
{{self.__class__.__name__}}.{{name}}, expected a {{ptype}}, got {{value!r}}') from None else: if ctype.endswith('*'): cls = globals().get(ctype[:-1]) if cls is None: raise NotImplementedError(f'Unhandled {{ctype!r}} for attribute {{self.__class__.__name__}}.{{name}}') if isinstance(value, dict) and '@' in value: value = cls(value) super().__setattr__(name, value) class Expr(Node): '''Abstract super class of several *expression* classes.''' __slots__ = () class Value(Node): '''Abstract super class, representing PG's `Value`__ union type. __ https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/include/nodes/value.h ''' __slots__ = () def __init__(self, value=None): if ((value is not None and isinstance(value, dict) and '@' in value)): super().__init__(value) else: self.val = value class BitString(Value): '''Implement the ``T_BitString`` variant of the :class:`Value` union.''' __slots__ = {{'val': SlotTypeInfo('char*', str, None)}} class Float(Value): '''Implement the ``T_Float`` variant of the :class:`Value` union.''' __slots__ = {{'val': SlotTypeInfo('char*', (str, Decimal), Decimal)}} class Integer(Value): '''Implement the ``T_Integer`` variant of the :class:`Value` union.''' __slots__ = {{'val': SlotTypeInfo('char*', int, None)}} class Null(Value): '''Implement the ``T_Null`` variant of the :class:`Value` union.''' __slots__ = {{'val': SlotTypeInfo('char*', type(None), None)}} class String(Value): '''Implement the ``T_String`` variant of the :class:`Value` union.''' __slots__ = {{'val': SlotTypeInfo('char*', str, None)}} """ AST_PYX_HEADER = f"""\ # -*- coding: utf-8 -*- # :Project: pglast -- DO NOT EDIT: automatically extracted from struct_defs.json @ %s # :Author: Lele Gaifax <lele@metapensiero.it> # :License: GNU General Public License version 3 or later # :Copyright: © {CYEARS} Lele Gaifax # #cython: language_level=3 from cpython.ref cimport Py_INCREF from cpython.tuple cimport PyTuple_New, PyTuple_SET_ITEM from decimal import Decimal from pglast 
import ast, enums from pglast cimport structs """ STRUCTS_PXD_HEADER = f"""\ # -*- coding: utf-8 -*- # :Project: pglast -- DO NOT EDIT: automatically extracted from struct_defs.json @ %s # :Author: Lele Gaifax <lele@metapensiero.it> # :License: GNU General Public License version 3 or later # :Copyright: © {CYEARS} Lele Gaifax # #cython: language_level=3 from libc.stdint cimport int16_t, int32_t, uint32_t, uint64_t cdef extern from "postgres.h": ctypedef unsigned char bool ctypedef struct Node: int type ctypedef union ValueUnion: int ival char *str ctypedef struct Value: int type ValueUnion val ctypedef struct Bitmapset: int nwords unsigned long *words cdef extern from "nodes/bitmapset.h": ctypedef struct Bitmapset: pass int bms_next_member(const Bitmapset *a, int prevbit) cdef extern from "nodes/pg_list.h": ctypedef struct List: int length void* list_nth(List* list, int n) cdef extern from "nodes/value.h": ctypedef struct ValUnion: int ival char *str ctypedef struct Value: NodeTag type ValUnion val int intVal(Value* v) char* strVal(Value* v) """ AST_RST_HEADER = f"""\ .. -*- coding: utf-8 -*- .. :Project: pglast -- DO NOT EDIT: generated automatically .. :Author: Lele Gaifax <lele@metapensiero.it> .. :License: GNU General Public License version 3 or later .. :Copyright: © {CYEARS} Lele Gaifax .. .. _pglast.ast: =================================================================== :mod:`pglast.ast` --- Python classes representing PG parser nodes =================================================================== The module implements a set of *data* classes, one for each ``C`` structure defined in the PostgreSQL headers ``include/nodes/primnodes.h`` and ``include/nodes/parsenodes.h``. .. module:: pglast.parser.ast .. autoclass:: Node :special-members: __repr__, __eq__, __call__, __setattr__ .. autoclass:: Value .. autoclass:: BitString .. autoclass:: Float .. autoclass:: Integer .. autoclass:: Null .. 
autoclass:: String """ def get_libpg_query_info(): "Return a tuple with (version, baseurl) of the libpg_query library." version = subprocess.check_output(['git', 'describe', '--all', '--long'], cwd='libpg_query') version = version.decode('utf-8').strip().split('/')[-1] remote = subprocess.check_output(['git', 'remote', 'get-url', 'origin'], cwd='libpg_query') remote = remote.decode('utf-8') baseurl = '%s/blob/%s/' % (remote[:-5], version[-7:]) return version, baseurl def extract_toc(header): "Extract the enums and defines with their position in the header." toc = {} content = header.read_text(encoding='utf-8') in_typedef_enum = 0 for lineno, line in enumerate(content.splitlines(), 1): if line.startswith(('struct ', 'typedef struct ')): m = match(r'(typedef )?struct\s+([\w_]+)', line) if m is not None: toc[m.group(2)] = lineno return toc def emit_struct_def(name, fields, output): output.write(f'\n\n ctypedef struct {name}:\n') empty = True for field in fields: if 'name' not in field or 'c_type' not in field: continue ctype = field['c_type'] if ctype in ('Expr', 'Oid'): continue if ctype == 'int16': ctype = 'int16_t' elif ctype in ('bits32', 'int32'): ctype = 'int32_t' elif ctype == 'uint32': ctype = 'uint32_t' elif ctype == 'uint64': ctype = 'uint64_t' if ctype == 'AttrNumber': ctype = 'int' elif ctype in ('AclMode', 'Index', 'SubTransactionId'): ctype = 'unsigned int' elif ctype == 'Cost': ctype = 'float' elif ctype.endswith('*'): ctype = f'const {ctype}' fname = field['name'] if iskeyword(fname): fname = f'{fname}_ "{fname}"' output.write(f' {ctype} {fname}\n') empty = False if empty: output.write(f' pass\n') def emit_generic_attr(name, ctype, output): output.write(f'''\ cdef object v_{name} = data.{name} ''') def emit_bool_attr(name, ctype, output): output.write(f'''\ cdef object v_{name} = bool(data.{name}) ''') def emit_value_attr(name, ctype, output): output.write(f'''\ cdef object v_{name} if data.{name}.type == structs.T_Null: v_{name} = ast.Null(None) elif 
data.{name}.type == structs.T_Integer: v_{name} = ast.Integer(data.{name}.val.ival) elif data.{name}.type == structs.T_Float: v_{name} = ast.Float(Decimal(data.{name}.val.str.decode("utf-8"))) elif data.{name}.type == structs.T_BitString: v_{name} = ast.BitString(data.{name}.val.str.decode("utf-8")) else: v_{name} = ast.String(data.{name}.val.str.decode("utf-8")) ''') def emit_str_attr(name, ctype, output): output.write(f'''\ cdef object v_{name} if data.{name} is not NULL: v_{name} = data.{name}.decode("utf-8") else: v_{name} = None ''') def emit_char_attr(name, ctype, output): output.write(f'''\ cdef object v_{name} = chr(data.{name}) ''') def emit_list_attr(name, ctype, output): output.write(f'''\ cdef tuple v_{name} cdef int {name}_i if data.{name} is not NULL: v_{name} = PyTuple_New(data.{name}.length) for i in range(data.{name}.length): item = create(structs.list_nth(data.{name}, i), offset_to_index) Py_INCREF(item) PyTuple_SET_ITEM(v_{name}, i, item) else: v_{name} = None ''') def emit_node_attr(name, ctype, output): output.write(f'''\ cdef v_{name} = create(&data.{name}, offset_to_index) ''') def emit_create_stmt_attr(name, ctype, output): output.write(f''' cdef object v_{name} = create_CreateStmt(<structs.CreateStmt*> data, offset_to_index) ''') def emit_nodeptr_attr(name, ctype, output): output.write(f'''\ cdef object v_{name} if data.{name} is not NULL: v_{name} = create(data.{name}, offset_to_index) else: v_{name} = None ''') def emit_no_attr(name, ctype, output): # output.write(f'# cdef object v_{name} = data.{name}\n') pass def emit_int_enum_attr(name, ctype, output): output.write(f'''\ cdef object v_{name} = getattr(enums, {ctype!r})(data.{name}) ''') def emit_str_enum_attr(name, ctype, output): output.write(f'''\ cdef object v_{name} = getattr(enums, {ctype!r})(chr(data.{name})) ''') def emit_bitmapset_attr(name, ctype, output): output.write(f'''\ cdef set v_{name} cdef int {name}_member if data.{name} is not NULL: v_{name} = set() {name}_member = 
structs.bms_next_member(data.{name}, -1) while {name}_member >= 0: v_{name}.add({name}_member) {name}_member = structs.bms_next_member(data.{name}, {name}_member) else: v_{name} = None ''') def emit_location_attr(name, ctype, output): output.write(f'''\ cdef object v_{name} = offset_to_index(data.{name}) ''') def emit_stmt_len_attr(name, ctype, output): output.write(f'''\ cdef object v_{name} = offset_to_index(data.stmt_location + data.{name}) - offset_to_index(data.stmt_location) ''') def emitter_for(fname, ctype, enums): from pglast import enums as eimpl if fname == 'location' or fname == 'stmt_location': emitter = emit_location_attr elif fname == 'stmt_len': emitter = emit_stmt_len_attr elif ctype == 'List*': emitter = emit_list_attr elif ctype == 'CreateStmt': emitter = emit_create_stmt_attr elif ctype == 'Expr': emitter = emit_no_attr superclass = 'Expr' elif ctype == 'NodeTag': emitter = emit_no_attr elif ctype == 'Value': emitter = emit_value_attr elif ctype == 'char*': emitter = emit_str_attr elif ctype == 'char': emitter = emit_char_attr elif ctype == 'bool': emitter = emit_bool_attr elif ctype == 'Bitmapset*': emitter = emit_bitmapset_attr elif ctype.endswith('*'): emitter = emit_nodeptr_attr elif ctype in enums: if issubclass(getattr(eimpl, ctype), eimpl.IntEnum): emitter = emit_int_enum_attr else: emitter = emit_str_enum_attr else: emitter = emit_generic_attr return emitter def emit_node_def(name, fields, enums, url, output, doc): attrs = [] superclass = 'Node' for field in fields: if 'name' not in field or 'c_type' not in field: continue ctype = field['c_type'] if ctype == 'Oid': continue fname = field['name'] if iskeyword(fname): fname = f'{fname}_' emitter = emitter_for(fname, ctype, enums) if ctype == 'Expr': superclass = 'Expr' comment = field['comment'] if comment: comment = comment.strip() if comment.startswith('/*'): comment = comment[2:-2].strip() comment = sub(r'\t+', ' ', comment) comment = sub(r'\*-+\s*', '', comment) comment = sub(r'-+\n', 
'', comment) comment = sub(r'\n +', '\n ', comment) comment = sub(r'\*\)', '\\*)', comment) comment = comment.strip() comment = comment[0].upper() + comment[1:] if comment.lower() == 'see above': comment = '' attrs.append((fname, ctype, comment, emitter)) real_attrs = [] if attrs: for attr, ctype, comment, emitter in attrs: if emitter is emit_no_attr: continue real_attrs.append((attr, ctype)) output.write(f'''\ class {name}({superclass}): __slots__ = {{{', '.join(repr(a)+': '+repr(t) for a, t in real_attrs)}}} ''') if real_attrs: output.write(f'''\ def __init__(self, {', '.join(f'{attr}=None' for attr, __ in real_attrs)}): # pragma: no cover ''') if len(real_attrs) > 1: output.write(f'''\ if (({real_attrs[0][0]} is not None and {' is '.join(attr for attr, __ in real_attrs[1:])} is None and isinstance({real_attrs[0][0]}, dict) and '@' in {real_attrs[0][0]})): super().__init__({real_attrs[0][0]}) else: ''') for a, v in real_attrs: output.write(f' self.{a} = {a}\n') else: for a, v in real_attrs: output.write(f' self.{a} = {a}\n') else: output.write('''\ def __init__(self): # pragma: no cover pass ''') doc.write(f''' .. class:: {name}({', '.join(f'{attr}=None' for attr, __ in real_attrs)}) Wrapper for the `homonymous <{url}>`__ parser node. ''') for attr, ctype, comment, emitter in attrs: if emitter is emit_no_attr: continue if ctype == 'List*': type = 'tuple' elif ctype in ('char', 'char*'): type = 'str' elif ctype == 'Node*': type = 'Node' elif ctype in enums: type = f'{ctype}' else: type = ctype doc.write(f' .. 
attribute:: {attr}\n') doc.write(f' :type: {type}\n\n') if comment: doc.write(f' {comment}\n\n') def emit_node_create_function(nodes, enums, output): from pglast import enums as eimpl nnames = set(n[0] for n in nodes) for name, fields in nodes: attrs = [] real_attrs = [] for field in fields: if 'name' not in field or 'c_type' not in field: continue ctype = field['c_type'] if ctype in ('Expr', 'Oid'): continue fname = field['name'] if iskeyword(fname): fname = f'{fname}_' emitter = emitter_for(fname, ctype, enums) attrs.append((fname, ctype, emitter)) if emitter is not emit_no_attr: real_attrs.append((fname, ctype)) output.write(f'''\ cdef create_{name}(structs.{name}* data, offset_to_index): ''') for attr, ctype, emitter in attrs: emitter(attr, ctype, output) output.write(f'''\ return ast.{name}({', '.join(f'v_{attr}' for attr, __ in real_attrs)}) ''') output.write('''\ cdef create(void* data, offset_to_index): if data is NULL: return None cdef tuple t cdef int i cdef int tag = structs.nodeTag(data) ''') tags = sorted(eimpl.NodeTag) first = True for tag in eimpl.NodeTag: name = tag.name[2:] if name in nnames: output.write(' ') output.write('if' if first else 'elif') output.write(f' tag == structs.{tag.name}:\n') output.write(f' return create_{name}(<structs.{name}*> data, offset_to_index)\n') first = False elif name == 'Null': output.write('''\ elif tag == structs.T_Null: return ast.Null(None) ''') elif name == 'Integer': output.write('''\ elif tag == structs.T_Integer: return ast.Integer(structs.intVal(<structs.Value *> data)) ''') elif name == 'Float': output.write('''\ elif tag == structs.T_Float: return ast.Float(Decimal(structs.strVal(<structs.Value *> data).decode("utf-8"))) ''') elif name == 'BitString': output.write('''\ elif tag == structs.T_BitString: return ast.BitString(structs.strVal(<structs.Value *> data).decode("utf-8")) ''') elif name == 'String': output.write('''\ elif tag == structs.T_String: return ast.String(structs.strVal(<structs.Value *> 
data).decode("utf-8")) ''') elif name == 'List': output.write('''\ elif tag == structs.T_List: t = PyTuple_New((<structs.List *> data).length) for i in range((<structs.List *> data).length): item = create(structs.list_nth(<structs.List *> data, i), offset_to_index) Py_INCREF(item) PyTuple_SET_ITEM(t, i, item) return t ''') output.write('''\ raise ValueError("Unhandled tag: %s" % tag) ''') def workhorse(args): libpg_query_version, libpg_query_baseurl = get_libpg_query_info() pgq_dir = Path('libpg_query') pg_inc_dir = pgq_dir / 'src' / 'postgres' / 'include' with (pgq_dir / 'srcdata' / 'struct_defs.json').open(encoding='utf-8') as f: structs = json.load(f) ctypes = set() for header in ('nodes/parsenodes', 'nodes/primnodes'): for name in structs[header]: fields = structs[header][name]['fields'] for field in fields: if 'c_type' in field: ctypes.add(field['c_type']) with (pgq_dir / 'srcdata' / 'all_known_enums.json').open(encoding='utf-8') as f: enums = sorted(json.load(f)) with (pgq_dir / 'srcdata' / 'enum_defs.json').open(encoding='utf-8') as f: node_tags = [e['name'] for e in json.load(f)['nodes/nodes']['NodeTag']['values'] if 'name' in e] linenos = {} structs_pxd = args.output_dir / 'structs.pxd' with structs_pxd.open('w', encoding='utf-8') as output: output.write(STRUCTS_PXD_HEADER % libpg_query_version) output.write('\n\ncdef extern from *:') for name in enums: output.write(f'\n ctypedef enum {name}:\n') if name == 'NodeTag': for tag in node_tags: output.write(f' {tag}\n') else: output.write(' pass\n') output.write('\n int nodeTag(void* data)\n') nodes = [] for header in ('nodes/parsenodes', 'nodes/primnodes'): output.write(f'\n\ncdef extern from "{header}.h":\n') toc = extract_toc(pg_inc_dir / (header + '.h')) for name in toc: linenos[name] = (header, toc[name]) defs = structs[header] for name in defs: fields = defs[name]['fields'] if name not in ('Const', 'NextValueExpr', 'Value'): nodes.append((name, fields)) emit_struct_def(name, fields, output) ast_py = 
args.output_dir / 'ast.py' with ast_py.open('w', encoding='utf-8') as output, \ args.rstdoc.open('w', encoding='utf-8') as doc: output.write(AST_PY_HEADER % libpg_query_version) doc.write(AST_RST_HEADER) for name, fields in sorted(nodes): header, lineno = linenos[name] url = f'{libpg_query_baseurl}src/postgres/include/{header}.h#L{lineno}' emit_node_def(name, fields, enums, url, output, doc) output.write(''' def _fixup_attribute_types_in_slots(): G = globals() def traverse_sub_classes(cls): for subc in cls.__subclasses__(): yield subc yield from traverse_sub_classes(subc) for cls in traverse_sub_classes(Node): slots = cls.__slots__ if not (slots and isinstance(slots, dict) and isinstance(next(iter(slots.values())), str)): continue for attr in slots: adaptor = None ctype = slots[attr] if ctype == 'List*': ptype = (list, tuple) adaptor = lambda value: tuple(G[i['@']](i) if isinstance(i, dict) and '@' in i else i for i in value) elif ctype == 'bool': ptype = (bool, int) adaptor = bool elif ctype == 'char': ptype = (str, int) def adaptor(value): if isinstance(value, int): value = chr(value) elif len(value) != 1: raise ValueError(f'Bad value for attribute {{cls.__name__}}.{{attr}}, expected a single char, got {{value!r}}') return value elif ctype == 'char*': ptype = str elif ctype in ('Expr*', 'Node*'): ptype = (dict, list, tuple, Node) def adaptor(value): if isinstance(value, dict): if '@' in value: value = G[value['@']](value) elif isinstance(value, (list, tuple)): value = tuple(G[i['@']](i) if isinstance(i, dict) and '@' in i else i for i in value) return value elif ctype in ('Value', 'Value*'): ptype = (int, str, float, Decimal, Value) elif ctype in ('int', 'int16', 'bits32', 'int32', 'uint32', 'uint64', 'AttrNumber', 'AclMode', 'Index', 'SubTransactionId'): ptype = int elif ctype == 'Cost': ptype = float elif ctype == 'CreateStmt': ptype = (dict, CreateStmt) def adaptor(value): if isinstance(value, dict): if '@' in value: value = G[value['@']](value) return value 
elif ctype == 'Bitmapset*': ptype = (list, set, tuple) adaptor = lambda value: set(value) if isinstance(value, (list, tuple)) else value else: from pglast import enums if hasattr(enums, ctype): ptype = (int, str, dict, getattr(enums, ctype)) else: if ctype.endswith('*'): ptype = G.get(ctype[:-1]) if ptype is None: raise NotImplementedError(f'unknown {{ctype!r}}') from None else: ptype = (dict, ptype) slots[attr] = SlotTypeInfo(ctype, ptype, adaptor) _fixup_attribute_types_in_slots() del _fixup_attribute_types_in_slots ''') ast_pyx = args.output_dir / 'ast.pyx' with ast_pyx.open('w', encoding='utf-8') as output: output.write(AST_PYX_HEADER % libpg_query_version) emit_node_create_function(nodes, enums, output) def main(): from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter parser = ArgumentParser(description="PG structs extractor", formatter_class=ArgumentDefaultsHelpFormatter) parser.add_argument('output_dir', type=Path, help="where Cython sources will be created") parser.add_argument('rstdoc', type=Path, help="reST documentation to be created") args = parser.parse_args() workhorse(args) if __name__ == '__main__': main()
# -*- coding: utf-8 -*-
# :Project:   pglast -- Wrap PG nodes into a Python AST
# :Created:   sab 27 feb 2021, 19:47:11
# :Author:    Lele Gaifax <lele@metapensiero.it>
# :License:   GNU General Public License version 3 or later
# :Copyright: © 2021 Lele Gaifax
#

# Code generator: reads libpg_query's srcdata/*.json metadata and emits
# three artifacts: structs.pxd (Cython declarations), ast.py (pure Python
# node classes) and ast.pyx (Cython factory functions), plus a reST page
# documenting the node classes.

from datetime import date
import json
from keyword import iskeyword
from pathlib import Path
import subprocess
from re import match, sub

CYEARS = ''
CYEARS += str(date.today().year)

# Template for the head of the generated ast.py; an f-string, so literal
# braces destined for the generated file are doubled, while {CYEARS} is
# interpolated now.  The single %s placeholder is filled later with the
# libpg_query version.
AST_PY_HEADER = f"""\
# -*- coding: utf-8 -*-
# :Project:   pglast -- DO NOT EDIT: automatically extracted from struct_defs.json @ %s
# :Author:    Lele Gaifax <lele@metapensiero.it>
# :License:   GNU General Public License version 3 or later
# :Copyright: © {CYEARS} Lele Gaifax
#

from collections import namedtuple
from decimal import Decimal
from enum import Enum


SlotTypeInfo = namedtuple('SlotTypeInfo', ['c_type', 'py_type', 'adaptor'])


class Node:
    "Base class for all AST nodes."

    __slots__ = ()

    def __init__(self, data):
        if not isinstance(data, dict):
            raise ValueError(f'Bad argument, expected a dictionary, got {{type(data)!r}}')
        if '@' not in data:
            raise ValueError('Bad argument, expected a dictionary with a "@" key')
        if data['@'] != self.__class__.__name__:
            raise ValueError(f'Bad argument, wrong "@" value, expected {{self.__class__.__name__!r}}, got {{data["@"]!r}}')
        G = globals()
        for a in self:
            v = data.get(a)
            if v is not None:
                if isinstance(v, dict) and '@' in v:
                    v = G[v['@']](v)
                elif isinstance(v, (tuple, list)):
                    v = tuple(G[i['@']](i) if isinstance(i, dict) and '@' in i else i
                              for i in v)
                setattr(self, a, v)

    def __iter__(self):
        "Iterate over all attribute names of this node."

        return iter(self.__slots__)

    def __repr__(self):
        "Build a representation of the whole node and its subtree, for debug."

        attrs = []
        for a in self:
            if a != 'location':
                v = getattr(self, a)
                if v is not None:
                    attrs.append(f'{{a}}={{v!r}}')
        if attrs:
            attrs = ' ' + ' '.join(attrs)
        else:
            attrs = ''
        return '<' + self.__class__.__name__ + attrs + '>'

    def __eq__(self, other):
        '''
        Compare two nodes, returning ``True`` if they are considered equivalent.

        This is mainly an helper method used by tests: for this reason, two
        nodes are considered equal when all their attributes match, ignoring
        *positional* ones such as ``location``, ``stmt_len`` and
        ``stmt_location``.
        '''

        if not isinstance(other, type(self)):
            return False
        for a in self:
            if ((a not in ('location', 'stmt_len', 'stmt_location')
                 and getattr(self, a) != getattr(other, a))):
                return False
        return True

    def __call__(self, depth=None, ellipsis=..., skip_none=False):
        '''Serialize the node as a structure made of simple Python data-types.

        :type depth: ``None`` or ``int``
        :param depth: if not ``None``, the maximum depth to reach
        :param ellipsis: the marker value that will be used to replace cut-off branch
        :param bool skip_none: whether ``None``-valued attributes should be elided
        :param bool enum_name: whether Enums will be rendered as their name only
        :return: a :class:`dict` instance

        This performs a top-down recursive visit to the whole AST tree: each
        ``Node`` instance becomes a dictionary with a special ``@`` key
        carrying the node type, lists becomes tuples and ``Enum`` instances
        become dictionaries with a special ``#`` key carrying the enum name.'''

        from enum import Enum

        d = {{'@': self.__class__.__name__}}
        for a in self:
            v = getattr(self, a)
            if isinstance(v, Node):
                if depth is None or depth > 0:
                    v = v(None if depth is None else depth - 1, ellipsis, skip_none)
                else:
                    v = ellipsis
            elif isinstance(v, tuple):
                if depth is None or depth > 0:
                    if v and isinstance(v[0], Node):
                        v = tuple(i(None if depth is None else depth - 1,
                                    ellipsis, skip_none)
                                  for i in v)
                else:
                    v = ellipsis
            elif isinstance(v, Enum):
                v = {{'#': v.__class__.__name__, 'name': v.name, 'value': v.value}}
            if not skip_none or v is not None:
                d[a] = v
        return d

    def __setattr__(self, name, value):
        '''Validate the given `value` and if acceptable assign it to the `name` attribute.

        This tries to coerce the given `value` accordingly with the *ctype* of
        the attribute, raising opportune exception when that is not possible.
        '''

        if value is not None:
            ctype, ptype, adaptor = self.__slots__[name]
            if not isinstance(ptype, tuple):
                ptype = (ptype,)
            if not isinstance(value, ptype):
                raise ValueError(f'Bad value for attribute {{self.__class__.__name__}}.{{name}}, expected {{ptype}}, got {{type(value)}}: {{value!r}}')
            if adaptor is not None:
                value = adaptor(value)
            elif ctype == 'Node*':
                if isinstance(value, dict) and '@' in value:
                    value = globals()[value['@']](value)
            elif ctype == 'char':
                if isinstance(value, int):
                    value = chr(value)
                elif len(value) != 1:
                    raise ValueError(f'Bad value for attribute {{self.__class__.__name__}}.{{name}}, expected a single char, got {{value!r}}')
            elif ctype == 'bool':
                value = bool(value)
            elif ctype == 'Bitmapset*':
                if isinstance(value, (list, tuple)):
                    value = set(value)
            elif ctype == 'List*':
                G = globals()
                value = tuple(G[i['@']](i) if isinstance(i, dict) and '@' in i else i
                              for i in value)
            elif ctype != 'char*':
                from pglast import enums
                if hasattr(enums, ctype):
                    enum = getattr(enums, ctype)
                    if not isinstance(value, enum):
                        if isinstance(value, dict) and '#' in value:
                            if value['#'] != ctype:
                                raise ValueError(f'Bad value for attribute {{self.__class__.__name__}}.{{name}}, expected a {{ptype}}, got {{value!r}}') from None
                            if 'name' in value:
                                value = value['name']
                            elif 'value' in value:
                                value = value['value']
                            else:
                                raise ValueError(f'Bad value for attribute {{self.__class__.__name__}}.{{name}}, expected a {{ptype}}, got {{value!r}}') from None
                        try:
                            if isinstance(value, str) and len(value) > 1:
                                value = enum[value]
                            else:
                                value = enum(value)
                        except (KeyError, ValueError):
                            raise ValueError(f'Bad value for attribute {{self.__class__.__name__}}.{{name}}, expected a {{ptype}}, got {{value!r}}') from None
                else:
                    if ctype.endswith('*'):
                        cls = globals().get(ctype[:-1])
                        if cls is None:
                            raise NotImplementedError(f'Unhandled {{ctype!r}} for attribute {{self.__class__.__name__}}.{{name}}')
                        if isinstance(value, dict) and '@' in value:
                            value = cls(value)
        super().__setattr__(name, value)


class Expr(Node):
    '''Abstract super class of several *expression* classes.'''

    __slots__ = ()


class Value(Node):
    '''Abstract super class, representing PG's `Value`__ union type.

    __ https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/include/nodes/value.h
    '''

    __slots__ = ()

    def __init__(self, value=None):
        if ((value is not None
             and isinstance(value, dict)
             and '@' in value)):
            super().__init__(value)
        else:
            self.val = value


class BitString(Value):
    '''Implement the ``T_BitString`` variant of the :class:`Value` union.'''

    __slots__ = {{'val': SlotTypeInfo('char*', str, None)}}


class Float(Value):
    '''Implement the ``T_Float`` variant of the :class:`Value` union.'''

    __slots__ = {{'val': SlotTypeInfo('char*', (str, Decimal), Decimal)}}


class Integer(Value):
    '''Implement the ``T_Integer`` variant of the :class:`Value` union.'''

    __slots__ = {{'val': SlotTypeInfo('char*', int, None)}}


class Null(Value):
    '''Implement the ``T_Null`` variant of the :class:`Value` union.'''

    __slots__ = {{'val': SlotTypeInfo('char*', type(None), None)}}


class String(Value):
    '''Implement the ``T_String`` variant of the :class:`Value` union.'''

    __slots__ = {{'val': SlotTypeInfo('char*', str, None)}}
"""

# Head of the generated ast.pyx (Cython factory module).
AST_PYX_HEADER = f"""\
# -*- coding: utf-8 -*-
# :Project:   pglast -- DO NOT EDIT: automatically extracted from struct_defs.json @ %s
# :Author:    Lele Gaifax <lele@metapensiero.it>
# :License:   GNU General Public License version 3 or later
# :Copyright: © {CYEARS} Lele Gaifax
#

#cython: language_level=3

from cpython.ref cimport Py_INCREF
from cpython.tuple cimport PyTuple_New, PyTuple_SET_ITEM

from decimal import Decimal

from pglast import ast, enums
from pglast cimport structs
"""

# Head of the generated structs.pxd (Cython declarations of the PG structs).
STRUCTS_PXD_HEADER = f"""\
# -*- coding: utf-8 -*-
# :Project:   pglast -- DO NOT EDIT: automatically extracted from struct_defs.json @ %s
# :Author:    Lele Gaifax <lele@metapensiero.it>
# :License:   GNU General Public License version 3 or later
# :Copyright: © {CYEARS} Lele Gaifax
#

#cython: language_level=3

from libc.stdint cimport int16_t, int32_t, uint32_t, uint64_t


cdef extern from "postgres.h":
    ctypedef unsigned char bool

    ctypedef struct Node:
        int type

    ctypedef union ValueUnion:
        int ival
        char *str

    ctypedef struct Value:
        int type
        ValueUnion val

    ctypedef struct Bitmapset:
        int nwords
        unsigned long *words


cdef extern from "nodes/bitmapset.h":
    ctypedef struct Bitmapset:
        pass

    int bms_next_member(const Bitmapset *a, int prevbit)


cdef extern from "nodes/pg_list.h":
    ctypedef struct List:
        int length

    void* list_nth(List* list, int n)


cdef extern from "nodes/value.h":
    ctypedef struct ValUnion:
        int ival
        char *str

    ctypedef struct Value:
        NodeTag type
        ValUnion val

    int intVal(Value* v)
    char* strVal(Value* v)
"""

# Head of the generated reST documentation page.
AST_RST_HEADER = f"""\
.. -*- coding: utf-8 -*-
.. :Project:   pglast -- DO NOT EDIT: generated automatically
.. :Author:    Lele Gaifax <lele@metapensiero.it>
.. :License:   GNU General Public License version 3 or later
.. :Copyright: © {CYEARS} Lele Gaifax
..

.. _pglast.ast:

===================================================================
 :mod:`pglast.ast` --- Python classes representing PG parser nodes
===================================================================

The module implements a set of *data* classes, one for each ``C`` structure
defined in the PostgreSQL headers ``include/nodes/primnodes.h`` and
``include/nodes/parsenodes.h``.

.. module:: pglast.parser.ast

.. autoclass:: Node
   :special-members: __repr__, __eq__, __call__, __setattr__

.. autoclass:: Value

.. autoclass:: BitString

.. autoclass:: Float

.. autoclass:: Integer

.. autoclass:: Null

.. autoclass:: String
"""


def get_libpg_query_info():
    "Return a tuple with (version, baseurl) of the libpg_query library."

    version = subprocess.check_output(['git', 'describe', '--all', '--long'],
                                      cwd='libpg_query')
    version = version.decode('utf-8').strip().split('/')[-1]
    remote = subprocess.check_output(['git', 'remote', 'get-url', 'origin'],
                                     cwd='libpg_query')
    remote = remote.decode('utf-8')
    # remote ends with ".git\n": drop those 5 chars; the last 7 chars of
    # `version` are the abbreviated commit hash from git-describe
    baseurl = '%s/blob/%s/' % (remote[:-5], version[-7:])
    return version, baseurl


def extract_toc(header):
    "Extract the enums and defines with their position in the header."

    toc = {}
    content = header.read_text(encoding='utf-8')
    in_typedef_enum = 0  # NOTE(review): unused local, kept as in original
    for lineno, line in enumerate(content.splitlines(), 1):
        if line.startswith(('struct ', 'typedef struct ')):
            m = match(r'(typedef )?struct\s+([\w_]+)', line)
            if m is not None:
                toc[m.group(2)] = lineno
    return toc


def emit_struct_def(name, fields, output):
    "Emit the Cython ``ctypedef struct`` declaration for `name` into `output`."

    output.write(f'\n\n    ctypedef struct {name}:\n')
    empty = True
    for field in fields:
        if 'name' not in field or 'c_type' not in field:
            continue
        ctype = field['c_type']
        if ctype in ('Expr', 'Oid'):
            continue
        # map PG typedefs to plain C / stdint types understood by Cython
        if ctype == 'int16':
            ctype = 'int16_t'
        elif ctype in ('bits32', 'int32'):
            ctype = 'int32_t'
        elif ctype == 'uint32':
            ctype = 'uint32_t'
        elif ctype == 'uint64':
            ctype = 'uint64_t'
        if ctype == 'AttrNumber':
            ctype = 'int'
        elif ctype in ('AclMode', 'Index', 'SubTransactionId'):
            ctype = 'unsigned int'
        elif ctype == 'Cost':
            ctype = 'float'
        elif ctype.endswith('*'):
            ctype = f'const {ctype}'
        fname = field['name']
        if iskeyword(fname):
            # rename Python keywords, keeping the original C spelling
            fname = f'{fname}_ "{fname}"'
        output.write(f'        {ctype} {fname}\n')
        empty = False
    if empty:
        output.write(f'        pass\n')


# The emit_*_attr functions below each write the Cython snippet that converts
# one struct attribute into the value ``v_<name>`` used by the node factory.

def emit_generic_attr(name, ctype, output):
    "Emit a plain scalar attribute conversion."

    output.write(f'''\
    cdef object v_{name} = data.{name}
''')


def emit_bool_attr(name, ctype, output):
    "Emit a boolean attribute conversion."

    output.write(f'''\
    cdef object v_{name} = bool(data.{name})
''')


def emit_value_attr(name, ctype, output):
    "Emit a PG ``Value`` union attribute conversion."

    output.write(f'''\
    cdef object v_{name}
    if data.{name}.type == structs.T_Null:
        v_{name} = ast.Null(None)
    elif data.{name}.type == structs.T_Integer:
        v_{name} = ast.Integer(data.{name}.val.ival)
    elif data.{name}.type == structs.T_Float:
        v_{name} = ast.Float(Decimal(data.{name}.val.str.decode("utf-8")))
    elif data.{name}.type == structs.T_BitString:
        v_{name} = ast.BitString(data.{name}.val.str.decode("utf-8"))
    else:
        v_{name} = ast.String(data.{name}.val.str.decode("utf-8"))
''')


def emit_str_attr(name, ctype, output):
    "Emit a ``char*`` attribute conversion (NULL becomes None)."

    output.write(f'''\
    cdef object v_{name}
    if data.{name} is not NULL:
        v_{name} = data.{name}.decode("utf-8")
    else:
        v_{name} = None
''')


def emit_char_attr(name, ctype, output):
    "Emit a single ``char`` attribute conversion."

    output.write(f'''\
    cdef object v_{name} = chr(data.{name})
''')


def emit_list_attr(name, ctype, output):
    "Emit a PG ``List*`` attribute conversion into a Python tuple."

    output.write(f'''\
    cdef tuple v_{name}
    cdef int {name}_i
    if data.{name} is not NULL:
        v_{name} = PyTuple_New(data.{name}.length)
        for i in range(data.{name}.length):
            item = create(structs.list_nth(data.{name}, i), offset_to_index)
            Py_INCREF(item)
            PyTuple_SET_ITEM(v_{name}, i, item)
    else:
        v_{name} = None
''')


def emit_node_attr(name, ctype, output):
    "Emit an embedded (by-value) node attribute conversion."

    output.write(f'''\
    cdef v_{name} = create(&data.{name}, offset_to_index)
''')


def emit_create_stmt_attr(name, ctype, output):
    "Emit an embedded ``CreateStmt`` attribute conversion."

    output.write(f'''\
    cdef object v_{name} = create_CreateStmt(<structs.CreateStmt*> data, offset_to_index)
''')


def emit_nodeptr_attr(name, ctype, output):
    "Emit a ``Node*`` (or other node pointer) attribute conversion."

    output.write(f'''\
    cdef object v_{name}
    if data.{name} is not NULL:
        v_{name} = create(data.{name}, offset_to_index)
    else:
        v_{name} = None
''')


def emit_no_attr(name, ctype, output):
    "Placeholder emitter for attributes that are skipped altogether."

    # output.write(f'# cdef object v_{name} = data.{name}\n')
    pass


def emit_int_enum_attr(name, ctype, output):
    "Emit an integer-valued enum attribute conversion."

    output.write(f'''\
    cdef object v_{name} = getattr(enums, {ctype!r})(data.{name})
''')


def emit_str_enum_attr(name, ctype, output):
    "Emit a char-valued enum attribute conversion."

    output.write(f'''\
    cdef object v_{name} = getattr(enums, {ctype!r})(chr(data.{name}))
''')


def emit_bitmapset_attr(name, ctype, output):
    "Emit a ``Bitmapset*`` attribute conversion into a Python set."

    output.write(f'''\
    cdef set v_{name}
    cdef int {name}_member
    if data.{name} is not NULL:
        v_{name} = set()
        {name}_member = structs.bms_next_member(data.{name}, -1)
        while {name}_member >= 0:
            v_{name}.add({name}_member)
            {name}_member = structs.bms_next_member(data.{name}, {name}_member)
    else:
        v_{name} = None
''')


def emit_location_attr(name, ctype, output):
    "Emit a location attribute, translated from byte offset to char index."

    output.write(f'''\
    cdef object v_{name} = offset_to_index(data.{name})
''')


def emit_stmt_len_attr(name, ctype, output):
    "Emit the statement length, recomputed in char indexes."

    output.write(f'''\
    cdef object v_{name} = offset_to_index(data.stmt_location + data.{name}) - offset_to_index(data.stmt_location)
''')


def emitter_for(fname, ctype, enums):
    "Select the proper emit_*_attr function for field `fname` of type `ctype`."

    from pglast import enums as eimpl

    if fname == 'location' or fname == 'stmt_location':
        emitter = emit_location_attr
    elif fname == 'stmt_len':
        emitter = emit_stmt_len_attr
    elif ctype == 'List*':
        emitter = emit_list_attr
    elif ctype == 'CreateStmt':
        emitter = emit_create_stmt_attr
    elif ctype == 'Expr':
        emitter = emit_no_attr
        superclass = 'Expr'  # NOTE(review): dead local, kept as in original
    elif ctype == 'NodeTag':
        emitter = emit_no_attr
    elif ctype == 'Value':
        emitter = emit_value_attr
    elif ctype == 'char*':
        emitter = emit_str_attr
    elif ctype == 'char':
        emitter = emit_char_attr
    elif ctype == 'bool':
        emitter = emit_bool_attr
    elif ctype == 'Bitmapset*':
        emitter = emit_bitmapset_attr
    elif ctype.endswith('*'):
        emitter = emit_nodeptr_attr
    elif ctype in enums:
        if issubclass(getattr(eimpl, ctype), eimpl.IntEnum):
            emitter = emit_int_enum_attr
        else:
            emitter = emit_str_enum_attr
    else:
        emitter = emit_generic_attr
    return emitter


def emit_node_def(name, fields, enums, url, output, doc):
    "Emit the Python class for node `name` into `output` and its reST doc into `doc`."

    attrs = []
    superclass = 'Node'
    for field in fields:
        if 'name' not in field or 'c_type' not in field:
            continue
        ctype = field['c_type']
        if ctype == 'Oid':
            continue
        fname = field['name']
        if iskeyword(fname):
            fname = f'{fname}_'
        emitter = emitter_for(fname, ctype, enums)
        if ctype == 'Expr':
            superclass = 'Expr'
        comment = field['comment']
        if comment:
            # normalize the C comment into a one-paragraph reST snippet
            comment = comment.strip()
            if comment.startswith('/*'):
                comment = comment[2:-2].strip()
            comment = sub(r'\t+', ' ', comment)
            comment = sub(r'\*-+\s*', '', comment)
            comment = sub(r'-+\n', '', comment)
            comment = sub(r'\n +', '\n ', comment)
            comment = sub(r'\*\)', '\\*)', comment)
            comment = comment.strip()
            comment = comment[0].upper() + comment[1:]
            if comment.lower() == 'see above':
                comment = ''
        attrs.append((fname, ctype, comment, emitter))
    real_attrs = []
    if attrs:
        for attr, ctype, comment, emitter in attrs:
            if emitter is emit_no_attr:
                continue
            real_attrs.append((attr, ctype))
    output.write(f'''\


class {name}({superclass}):
    __slots__ = {{{', '.join(repr(a)+': '+repr(t) for a, t in real_attrs)}}}

''')
    if real_attrs:
        output.write(f'''\
    def __init__(self, {', '.join(f'{attr}=None' for attr, __ in real_attrs)}):  # pragma: no cover
''')
        if len(real_attrs) > 1:
            # single-dict positional call delegates to Node.__init__
            output.write(f'''\
        if (({real_attrs[0][0]} is not None
             and {' is '.join(attr for attr, __ in real_attrs[1:])} is None
             and isinstance({real_attrs[0][0]}, dict)
             and '@' in {real_attrs[0][0]})):
            super().__init__({real_attrs[0][0]})
        else:
''')
            for a, v in real_attrs:
                output.write(f'            self.{a} = {a}\n')
        else:
            for a, v in real_attrs:
                output.write(f'        self.{a} = {a}\n')
    else:
        output.write('''\
    def __init__(self):  # pragma: no cover
        pass
''')
    doc.write(f'''
.. class:: {name}({', '.join(f'{attr}=None' for attr, __ in real_attrs)})

   Wrapper for the `homonymous <{url}>`__ parser node.

''')
    for attr, ctype, comment, emitter in attrs:
        if emitter is emit_no_attr:
            continue
        if ctype == 'List*':
            type = 'tuple'
        elif ctype in ('char', 'char*'):
            type = 'str'
        elif ctype == 'Node*':
            type = 'Node'
        elif ctype in enums:
            type = f'{ctype}'
        else:
            type = ctype
        doc.write(f'   .. attribute:: {attr}\n')
        doc.write(f'      :type: {type}\n\n')
        if comment:
            doc.write(f'      {comment}\n\n')


def emit_node_create_function(nodes, enums, output):
    "Emit the Cython ``create_*`` factories and the ``create`` dispatcher."

    from pglast import enums as eimpl

    nnames = set(n[0] for n in nodes)
    for name, fields in nodes:
        attrs = []
        real_attrs = []
        for field in fields:
            if 'name' not in field or 'c_type' not in field:
                continue
            ctype = field['c_type']
            if ctype in ('Expr', 'Oid'):
                continue
            fname = field['name']
            if iskeyword(fname):
                fname = f'{fname}_'
            emitter = emitter_for(fname, ctype, enums)
            attrs.append((fname, ctype, emitter))
            if emitter is not emit_no_attr:
                real_attrs.append((fname, ctype))
        output.write(f'''\


cdef create_{name}(structs.{name}* data, offset_to_index):
''')
        for attr, ctype, emitter in attrs:
            emitter(attr, ctype, output)
        output.write(f'''\
    return ast.{name}({', '.join(f'v_{attr}' for attr, __ in real_attrs)})
''')
    output.write('''\


cdef create(void* data, offset_to_index):
    if data is NULL:
        return None

    cdef tuple t
    cdef int i
    cdef int tag = structs.nodeTag(data)

''')
    tags = sorted(eimpl.NodeTag)  # NOTE(review): unused local, kept as in original
    first = True
    for tag in eimpl.NodeTag:
        name = tag.name[2:]
        if name in nnames:
            output.write('    ')
            output.write('if' if first else 'elif')
            output.write(f' tag == structs.{tag.name}:\n')
            output.write(f'        return create_{name}(<structs.{name}*> data, offset_to_index)\n')
            first = False
        elif name == 'Null':
            output.write('''\
    elif tag == structs.T_Null:
        return ast.Null(None)
''')
        elif name == 'Integer':
            output.write('''\
    elif tag == structs.T_Integer:
        return ast.Integer(structs.intVal(<structs.Value *> data))
''')
        elif name == 'Float':
            output.write('''\
    elif tag == structs.T_Float:
        return ast.Float(Decimal(structs.strVal(<structs.Value *> data).decode("utf-8")))
''')
        elif name == 'BitString':
            output.write('''\
    elif tag == structs.T_BitString:
        return ast.BitString(structs.strVal(<structs.Value *> data).decode("utf-8"))
''')
        elif name == 'String':
            output.write('''\
    elif tag == structs.T_String:
        return ast.String(structs.strVal(<structs.Value *> data).decode("utf-8"))
''')
        elif name == 'List':
            output.write('''\
    elif tag == structs.T_List:
        t = PyTuple_New((<structs.List *> data).length)
        for i in range((<structs.List *> data).length):
            item = create(structs.list_nth(<structs.List *> data, i), offset_to_index)
            Py_INCREF(item)
            PyTuple_SET_ITEM(t, i, item)
        return t
''')
    output.write('''\
    raise ValueError("Unhandled tag: %s" % tag)
''')


def workhorse(args):
    "Generate structs.pxd, ast.py, ast.pyx and the reST doc page."

    libpg_query_version, libpg_query_baseurl = get_libpg_query_info()
    pgq_dir = Path('libpg_query')
    pg_inc_dir = pgq_dir / 'src' / 'postgres' / 'include'
    with (pgq_dir / 'srcdata' / 'struct_defs.json').open(encoding='utf-8') as f:
        structs = json.load(f)

    ctypes = set()
    for header in ('nodes/parsenodes', 'nodes/primnodes'):
        for name in structs[header]:
            fields = structs[header][name]['fields']
            for field in fields:
                if 'c_type' in field:
                    ctypes.add(field['c_type'])

    with (pgq_dir / 'srcdata' / 'all_known_enums.json').open(encoding='utf-8') as f:
        enums = sorted(json.load(f))

    with (pgq_dir / 'srcdata' / 'enum_defs.json').open(encoding='utf-8') as f:
        node_tags = [e['name']
                     for e in json.load(f)['nodes/nodes']['NodeTag']['values']
                     if 'name' in e]

    linenos = {}

    structs_pxd = args.output_dir / 'structs.pxd'
    with structs_pxd.open('w', encoding='utf-8') as output:
        output.write(STRUCTS_PXD_HEADER % libpg_query_version)
        output.write('\n\ncdef extern from *:')
        for name in enums:
            output.write(f'\n    ctypedef enum {name}:\n')
            if name == 'NodeTag':
                for tag in node_tags:
                    output.write(f'        {tag}\n')
            else:
                output.write('        pass\n')
        output.write('\n    int nodeTag(void* data)\n')
        nodes = []
        for header in ('nodes/parsenodes', 'nodes/primnodes'):
            output.write(f'\n\ncdef extern from "{header}.h":\n')
            toc = extract_toc(pg_inc_dir / (header + '.h'))
            for name in toc:
                linenos[name] = (header, toc[name])
            defs = structs[header]
            for name in defs:
                fields = defs[name]['fields']
                if name not in ('Const', 'NextValueExpr', 'Value'):
                    nodes.append((name, fields))
                emit_struct_def(name, fields, output)

    ast_py = args.output_dir / 'ast.py'
    with ast_py.open('w', encoding='utf-8') as output, \
         args.rstdoc.open('w', encoding='utf-8') as doc:
        output.write(AST_PY_HEADER % libpg_query_version)
        doc.write(AST_RST_HEADER)
        for name, fields in sorted(nodes):
            header, lineno = linenos[name]
            url = f'{libpg_query_baseurl}src/postgres/include/{header}.h#L{lineno}'
            emit_node_def(name, fields, enums, url, output, doc)
        # f-string so the doubled braces below land as single braces in the
        # generated ast.py
        output.write(f'''

def _fixup_attribute_types_in_slots():
    G = globals()

    def traverse_sub_classes(cls):
        for subc in cls.__subclasses__():
            yield subc
            yield from traverse_sub_classes(subc)

    for cls in traverse_sub_classes(Node):
        slots = cls.__slots__
        if not (slots
                and isinstance(slots, dict)
                and isinstance(next(iter(slots.values())), str)):
            continue
        for attr in slots:
            adaptor = None
            ctype = slots[attr]
            if ctype == 'List*':
                ptype = (list, tuple)
                adaptor = lambda value: tuple(G[i['@']](i)
                                              if isinstance(i, dict) and '@' in i
                                              else i
                                              for i in value)
            elif ctype == 'bool':
                ptype = (bool, int)
                adaptor = bool
            elif ctype == 'char':
                ptype = (str, int)

                def adaptor(value):
                    if isinstance(value, int):
                        value = chr(value)
                    elif len(value) != 1:
                        raise ValueError(f'Bad value for attribute {{cls.__name__}}.{{attr}}, expected a single char, got {{value!r}}')
                    return value
            elif ctype == 'char*':
                ptype = str
            elif ctype in ('Expr*', 'Node*'):
                ptype = (dict, list, tuple, Node)

                def adaptor(value):
                    if isinstance(value, dict):
                        if '@' in value:
                            value = G[value['@']](value)
                    elif isinstance(value, (list, tuple)):
                        value = tuple(G[i['@']](i)
                                      if isinstance(i, dict) and '@' in i
                                      else i
                                      for i in value)
                    return value
            elif ctype in ('Value', 'Value*'):
                ptype = (int, str, float, Decimal, Value)
            elif ctype in ('int', 'int16', 'bits32', 'int32', 'uint32', 'uint64',
                           'AttrNumber', 'AclMode', 'Index', 'SubTransactionId'):
                ptype = int
            elif ctype == 'Cost':
                ptype = float
            elif ctype == 'CreateStmt':
                ptype = (dict, CreateStmt)

                def adaptor(value):
                    if isinstance(value, dict):
                        if '@' in value:
                            value = G[value['@']](value)
                    return value
            elif ctype == 'Bitmapset*':
                ptype = (list, set, tuple)
                adaptor = lambda value: set(value) if isinstance(value, (list, tuple)) else value
            else:
                from pglast import enums
                if hasattr(enums, ctype):
                    ptype = (int, str, dict, getattr(enums, ctype))
                else:
                    if ctype.endswith('*'):
                        ptype = G.get(ctype[:-1])
                        if ptype is None:
                            raise NotImplementedError(f'unknown {{ctype!r}}') from None
                        else:
                            ptype = (dict, ptype)
            slots[attr] = SlotTypeInfo(ctype, ptype, adaptor)


_fixup_attribute_types_in_slots()
del _fixup_attribute_types_in_slots
''')

    ast_pyx = args.output_dir / 'ast.pyx'
    with ast_pyx.open('w', encoding='utf-8') as output:
        output.write(AST_PYX_HEADER % libpg_query_version)
        emit_node_create_function(nodes, enums, output)


def main():
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter

    parser = ArgumentParser(description="PG structs extractor",
                            formatter_class=ArgumentDefaultsHelpFormatter)

    parser.add_argument('output_dir', type=Path,
                        help="where Cython sources will be created")
    parser.add_argument('rstdoc', type=Path,
                        help="reST documentation to be created")

    args = parser.parse_args()

    workhorse(args)


if __name__ == '__main__':
    main()
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
from datetime import datetime, timezone
from typing import Union, Any, Dict
from dateparser import parse
import urllib3
import traceback

# Disable insecure warnings
urllib3.disable_warnings()

''' GLOBAL VARIABLES '''

# Maps the API's malicious_confidence values onto DBot verdict scores (1/2/3).
MALICIOUS_DICTIONARY: Dict[Any, int] = {
    'low': Common.DBotScore.GOOD,
    'medium': Common.DBotScore.SUSPICIOUS,
    'high': Common.DBotScore.BAD
}
# Verdict threshold configured on the integration instance (defaults to 'high').
MALICIOUS_THRESHOLD = MALICIOUS_DICTIONARY.get(demisto.params().get('threshold', 'high'))

''' CLIENT '''


class Client:
    """
    The integration's client
    """

    def __init__(self, params: Dict[str, str]):
        self._cs_client: CrowdStrikeClient = CrowdStrikeClient(params=params)
        # XSOAR argument name -> API query-string parameter name.
        self.query_params: Dict[str, str] = {'offset': 'offset', 'limit': 'limit', 'sort': 'sort',
                                             'free_search': 'q'}
        # Date arguments need an FQL comparison operator and a different API field name.
        self.date_params: Dict[str, Dict[str, str]] = {
            'created_date': {'operator': '', 'api_key': 'created_date'},
            'last_updated_date': {'operator': '', 'api_key': 'last_updated'},
            'max_last_modified_date': {'operator': '<=', 'api_key': 'last_modified_date'},
            'min_last_activity_date': {'operator': '>=', 'api_key': 'first_activity_date'},
            'max_last_activity_date': {'operator': '<=', 'api_key': 'last_activity_date'},
        }

    def build_request_params(self, args: Dict[str, Any]) -> Dict[str, Any]:
        """
        Build the params dict for the request
        :param args: Cortex XSOAR args
        :return: The params dict
        """
        params: Dict[str, Any] = {key: args.get(arg) for arg, key in self.query_params.items()}
        query = args.get('query')
        # An explicit 'query' argument overrides any FQL filter built from the other args.
        params['filter'] = query if query else self.build_filter_query(args)
        return assign_params(**params)

    def build_filter_query(self, args: Dict[str, str]) -> str:
        """
        Builds the filter query in Falcon Query Language (FQL)
        :param args: Cortex XSOAR args
        :return: The query
        """
        filter_query: str = str()
        for key in args:
            if key not in self.query_params:
                if key not in self.date_params:
                    values: List[str] = argToList(args[key], ',')
                    for value in values:
                        filter_query += f"{key}:'{value}'+"
                else:
                    operator: Optional[str] = self.date_params.get(key, {}).get('operator')
                    api_key: Optional[str] = self.date_params.get(key, {}).get('api_key')
                    # Parsing date argument of ISO format or free language into datetime object,
                    # replacing TZ with UTC, taking its timestamp format and rounding it up.
                    filter_query += f"{api_key}:" \
                                    f"{operator}{int(parse(args[key]).replace(tzinfo=timezone.utc).timestamp())}+"
        if filter_query.endswith('+'):
            filter_query = filter_query[:-1]
        return filter_query

    def check_quota_status(self) -> Dict[str, Any]:
        return self._cs_client.check_quota_status()

    def get_indicator(self, indicator_value: str, indicator_type: str) -> Dict[str, Any]:
        args: Dict[str, Any] = {
            'indicator': indicator_value,
            'limit': 1
        }
        # Translate generic XSOAR indicator types to the API's type names.
        if indicator_type == 'hash':
            args['type'] = get_indicator_hash_type(indicator_value)
        elif indicator_type == 'ip':
            args['type'] = 'ip_address'
        else:
            args['type'] = indicator_type
        params: Dict[str, Any] = self.build_request_params(args)
        return self._cs_client.http_request(method='GET', url_suffix='intel/combined/indicators/v1', params=params)

    def cs_actors(self, args: Dict[str, str]) -> Dict[str, Any]:
        params: Dict[str, Any] = self.build_request_params(args)
        return self._cs_client.http_request(method='GET', url_suffix='intel/combined/actors/v1', params=params)

    def cs_indicators(self, args: Dict[str, str]) -> Dict[str, Any]:
        params: Dict[str, Any] = self.build_request_params(args)
        return self._cs_client.http_request(method='GET', url_suffix='intel/combined/indicators/v1', params=params)

    def cs_reports(self, args: Dict[str, str]) -> Dict[str, Any]:
        params: Dict[str, Any] = self.build_request_params(args)
        return self._cs_client.http_request(method='GET', url_suffix='intel/combined/reports/v1', params=params)


''' HELPER FUNCTIONS '''


def get_dbot_score_type(indicator_type: str) -> Union[Exception, DBotScoreType, str]:
    """
    Returns the dbot score type
    :param indicator_type: The indicator type
    :return: The dbot score type
    """
    if indicator_type == 'ip':
        return DBotScoreType.IP
    elif indicator_type == 'domain':
        return DBotScoreType.DOMAIN
    elif indicator_type == 'file' or indicator_type == 'hash':
        return DBotScoreType.FILE
    elif indicator_type == 'url':
        return DBotScoreType.URL
    else:
        raise DemistoException('Indicator type is not supported.')


def get_score_from_resource(r: Dict[str, Any]) -> int:
    """
    Calculates the DBotScore for the resource
    :param r: The resource
    :return: The DBotScore
    """
    # Unknown/missing confidence maps to 0 and falls through to GOOD (1)
    # unless the instance threshold itself forces a higher verdict.
    malicious_confidence: int = MALICIOUS_DICTIONARY.get(r.get('malicious_confidence'), 0)
    if malicious_confidence == 3 or MALICIOUS_THRESHOLD == 1:
        score = 3
    elif malicious_confidence == 2 or MALICIOUS_THRESHOLD == 2:
        score = 2
    else:
        score = 1
    return score


def get_indicator_hash_type(indicator_value: str) -> Union[str, Exception]:
    """
    Calculates the type of the hash
    :param indicator_value: The hash value
    :return: The hash type
    """
    length: int = len(indicator_value)
    if length == 32:
        return 'hash_md5'
    elif length == 40:
        return 'hash_sha1'
    elif length == 64:
        return 'hash_sha256'
    else:
        raise DemistoException(f'Invalid hash. Hash length is: {length}. Please provide either MD5 (32 length)'
                               f', SHA1 (40 length) or SHA256 (64 length) hash.')


def get_indicator_object(indicator_value: Any, indicator_type: str, dbot_score: Common.DBotScore) \
        -> Union[Common.IP, Common.URL, Common.File, Common.Domain, None]:
    """
    Returns the corresponding indicator common object
    :param indicator_value: The indicator value
    :param indicator_type: The indicator value
    :param dbot_score: The indicator DBotScore
    :return: The indicator common object
    """
    if indicator_type == 'ip':
        return Common.IP(
            ip=indicator_value,
            dbot_score=dbot_score
        )
    elif indicator_type == 'url':
        return Common.URL(
            url=indicator_value,
            dbot_score=dbot_score
        )
    elif indicator_type == 'hash':
        hash_type: Union[str, Exception] = get_indicator_hash_type(indicator_value)
        if hash_type == 'hash_md5':
            return Common.File(
                md5=indicator_value,
                dbot_score=dbot_score
            )
        elif hash_type == 'hash_sha1':
            return Common.File(
                sha1=indicator_value,
                dbot_score=dbot_score
            )
        else:
            return Common.File(
                sha256=indicator_value,
                dbot_score=dbot_score
            )
    elif indicator_type == 'domain':
        return Common.Domain(
            domain=indicator_value,
            dbot_score=dbot_score
        )
    else:
        return None


def build_indicator(indicator_value: str, indicator_type: str, title: str, client: Client) -> CommandResults:
    """
    Builds an indicator entry
    :param indicator_value: The indicator value
    :param indicator_type: The indicator type
    :param title: The title to show to the user
    :param client: The integration's client
    :return: The indicator entry
    """
    res: Dict[str, Any] = client.get_indicator(indicator_value, indicator_type)
    resources: List[Any] = res.get('resources', [])
    indicators: List[Union[Common.IP, Common.URL, Common.File, Common.Domain, None]] = list()
    outputs: List[Dict[str, Any]] = list()
    md: str = str()

    if resources:
        for r in resources:
            output = get_indicator_outputs(r)
            outputs.append(output)
            score = get_score_from_resource(r)
            dbot_score = Common.DBotScore(
                indicator=indicator_value,
                indicator_type=get_dbot_score_type(indicator_type),
                integration_name='CrowdStrike Falcon Intel v2',
                malicious_description='High confidence',
                score=score
            )
            indicator = get_indicator_object(indicator_value, indicator_type, dbot_score)
            indicators.append(indicator)
    else:
        md = 'No indicator found.'

    results: CommandResults = CommandResults(
        outputs=outputs,
        outputs_prefix='FalconIntel.Indicator',
        outputs_key_field='ID',
        indicators=indicators,
        readable_output=md if md else tableToMarkdown(name=title, t=outputs, headerTransform=pascalToSpace),
        raw_response=res
    )

    return results


def get_values(items_list: List[Any], return_type: str = 'str', keys: Union[str, List[Any]] = 'value') \
        -> Union[str, List[Union[str, Dict]]]:
    """
    Returns the values of list's items
    :param items_list: The items list
    :param return_type: Whether to return string or list
    :param keys: The key to get the data
    :return: The values list
    """
    new_list: List[Any] = list()
    if isinstance(keys, str):
        new_list = [item.get(keys) for item in items_list]
    elif isinstance(keys, list):
        # Keep only the requested keys, camel-cased for context output.
        new_list = [{underscoreToCamelCase(f): item.get(f) for f in item if f in keys} for item in items_list]
    if return_type == 'list':
        return new_list
    return ', '.join(str(item) for item in new_list)


def get_indicator_outputs(resource: Dict[str, Any]) -> Dict[str, Any]:
    """
    Build the output and extra context of an indicator
    :param resource: The indicator's object
    :return: The indicator's human readable
    """
    output: Dict[str, Any] = dict()
    if resource:
        indicator_id = resource.get('id')
        indicator_value = resource.get('indicator')
        indicator_type = resource.get('type')
        last_update = resource.get('last_update')
        publish_date = resource.get('publish_date')
        malicious_confidence = resource.get('malicious_confidence')
        reports = resource.get('reports')
        actors = resource.get('actors')
        malware_families = resource.get('malware_families')
        kill_chains = resource.get('kill_chains')
        domain_types = resource.get('domain_types')
        ip_address_types = resource.get('ip_address_types')
        # Cap noisy list fields at 10 entries to keep the war-room output readable.
        relations: List[Any] = resource.get('relations', [])[:10]
        labels: List[Any] = resource.get('labels', [])[:10]

        output = assign_params(**{
            'ID': indicator_id,
            'Type': indicator_type,
            'Value': indicator_value,
            'LastUpdate': datetime.fromtimestamp(last_update, timezone.utc).isoformat() if last_update else None,
            'PublishDate': datetime.fromtimestamp(publish_date, timezone.utc).isoformat() if publish_date else None,
            'MaliciousConfidence': malicious_confidence,
            'Reports': reports,
            'Actors': actors,
            'MalwareFamilies': malware_families,
            'KillChains': kill_chains,
            'DomainTypes': domain_types,
            'IPAddressTypes': ip_address_types,
            # Double quotes inside the f-string are required on Python < 3.12;
            # nested single quotes here are a SyntaxError.
            'Relations': [f'{item.get("Type")}: {item.get("Indicator")}' for item in  # type: ignore
                          get_values(relations, return_type='list', keys=['indicator', 'type'])],
            'Labels': get_values(labels, return_type='list', keys='name')
        })

    return output


''' COMMANDS '''


def run_test_module(client: Client) -> Union[str, Exception]:
    """
    If a client is successfully constructed then an access token was successfully created,
    therefore the username and password are valid and a connection was made.
    On top of the above, this function checks for allocated quota and validates the http request
    to actors & indicators.
    :param client: the client object with an access token
    :return: ok if got a valid access token and not all the quota is used at the moment
    """
    output: Dict[str, Any] = client.check_quota_status()

    error = output.get('errors')
    if error and isinstance(error, list):
        return error[0]

    meta = output.get('meta')
    if meta is not None and isinstance(meta, dict):
        quota = meta.get('quota')
        if quota is not None:
            total = quota.get('total')
            used = quota.get('used')
            if total <= used:
                raise Exception(f'Quota limitation has been reached: {used}')
            else:
                # Smoke-test both endpoints the integration relies on.
                client._cs_client.http_request('GET', 'intel/combined/indicators/v1', params={'limit': 1})
                client._cs_client.http_request('GET', 'intel/combined/actors/v1', params={'limit': 1})
                return 'ok'
    raise Exception('Quota limitation is unreachable')


def file_command(file: str, client: Client) -> CommandResults:
    return build_indicator(file, 'hash', 'Falcon Intel file reputation:\n', client)


def ip_command(ip: str, client: Client) -> CommandResults:
    return build_indicator(ip, 'ip', 'Falcon Intel IP reputation:\n', client)


def url_command(url: str, client: Client) -> CommandResults:
    return build_indicator(url, 'url', 'Falcon Intel URL reputation:\n', client)


def domain_command(domain: str, client: Client) -> CommandResults:
    return build_indicator(domain, 'domain', 'Falcon Intel domain reputation:\n', client)


def cs_actors_command(client: Client, args: Dict[str, str]) -> CommandResults:
    res: Dict[str, Any] = client.cs_actors(args)
    resources: List[Any] = res.get('resources', [])
    outputs: List[Dict[str, Any]] = list()
    md_outputs: List[Dict[str, Any]] = list()
    md: str = str()
    title: str = 'Falcon Intel Actor search:'

    if resources:
        for r in resources:
            image_url = r.get('image', {}).get('url')
            name = r.get('name')
            actor_id = r.get('id')
            url = r.get('url')
            slug = r.get('slug')
            short_description = r.get('short_description')
            first_activity_date = r.get('first_activity_date')
            last_activity_date = r.get('last_activity_date')
            active = r.get('active')
            known_as = r.get('known_as')
            target_industries = r.get('target_industries', [])
            target_countries = r.get('target_countries', [])
            origins = r.get('origins', [])
            motivations = r.get('motivations', [])
            capability = r.get('capability', {}).get('value')
            group = r.get('group')
            region = r.get('region', {}).get('value')
            kill_chain = r.get('kill_chain')

            output: Dict[str, Any] = assign_params(**{
                'ImageURL': image_url,
                'Name': name,
                'ID': actor_id,
                'URL': url,
                'Slug': slug,
                'ShortDescription': short_description,
                'FirstActivityDate': datetime.fromtimestamp(first_activity_date, timezone.utc).isoformat()
                if first_activity_date else None,
                'LastActivityDate': datetime.fromtimestamp(last_activity_date, timezone.utc).isoformat()
                if last_activity_date else None,
                'Active': active,
                'KnownAs': known_as,
                'TargetIndustries': get_values(target_industries, return_type='list'),
                'TargetCountries': get_values(target_countries, return_type='list'),
                'Origins': get_values(origins, return_type='list'),
                'Motivations': get_values(motivations, return_type='list'),
                'Capability': capability,
                'Group': group,
                'Region': region,
                'KillChains': kill_chain
            })
            outputs.append(output)

            # Render URL fields as clickable markdown links in the readable output.
            md_output: Dict[str, Any] = output
            for key in ('URL', 'ImageURL'):
                if key in md_output:
                    value = md_output[key]
                    md_output[key] = f'[{value}]({value})'
            md_outputs.append(md_output)
    else:
        md = 'No actors found.'

    results: CommandResults = CommandResults(
        outputs=outputs,
        outputs_key_field='ID',
        outputs_prefix='FalconIntel.Actor',
        readable_output=md if md else tableToMarkdown(name=title, t=md_outputs, headerTransform=pascalToSpace),
        raw_response=res
    )

    return results


def cs_indicators_command(client: Client, args: Dict[str, str]) -> CommandResults:
    res: Dict[str, Any] = client.cs_indicators(args)
    resources: List[Any] = res.get('resources', [])
    indicators: List[Union[Common.IP, Common.URL, Common.File, Common.Domain, None]] = list()
    outputs: List[Dict[str, Any]] = list()
    md: str = str()
    title: str = 'Falcon Intel Indicator search:'

    if resources:
        for r in resources:
            output = get_indicator_outputs(r)
            indicator_value = output.get('Value')
            outputs.append(output)
            indicator_type = output.get('Type')
            # Only indicator types with a DBot equivalent get a reputation object.
            if indicator_type in ('hash_md5', 'hash_sha256', 'hash_sha1', 'ip_address', 'url', 'domain'):
                if indicator_type in ('hash_md5', 'hash_sha1', 'hash_sha256'):
                    indicator_type = 'hash'
                elif indicator_type == 'ip_address':
                    indicator_type = 'ip'
                score = get_score_from_resource(r)
                dbot_score = Common.DBotScore(
                    indicator=indicator_value,
                    indicator_type=get_dbot_score_type(indicator_type),
                    integration_name='CrowdStrike Falcon Intel v2',
                    malicious_description='High confidence',
                    score=score
                )
                indicator = get_indicator_object(indicator_value, indicator_type, dbot_score)
                indicators.append(indicator)
    else:
        md = 'No indicators found.'

    results: CommandResults = CommandResults(
        outputs=outputs,
        outputs_prefix='FalconIntel.Indicator',
        outputs_key_field='ID',
        readable_output=md if md else tableToMarkdown(name=title, t=outputs, headerTransform=pascalToSpace),
        raw_response=res,
        indicators=indicators
    )

    return results


def cs_reports_command(client: Client, args: Dict[str, str]) -> CommandResults:
    res: Dict[str, Any] = client.cs_reports(args)
    resources: List[Any] = res.get('resources', [])
    outputs: List[Dict[str, Any]] = list()
    md_outputs: List[Dict[str, Any]] = list()
    md: str = str()
    title: str = 'Falcon Intel Report search:'

    if resources:
        for r in resources:
            report_id: int = r.get('id')
            url: str = r.get('url')
            name: str = r.get('name')
            report_type: str = r.get('type', {}).get('name')
            sub_type: str = r.get('sub_type', {}).get('name')
            slug: str = r.get('slug')
            created_date: int = r.get('created_date')
            last_modified_date: int = r.get('last_modified_date')
            short_description: str = r.get('short_description')
            target_industries: List[Any] = r.get('target_industries', [])
            target_countries: List[Any] = r.get('target_countries', [])
            motivations: List[Any] = r.get('motivations', [])
            tags: List[Any] = r.get('tags', [])
            actors: List[Any] = r.get('actors', [])

            output: Dict[str, Any] = assign_params(**{
                'ID': report_id,
                'URL': url,
                'Name': name,
                'Type': report_type,
                'SubType': sub_type,
                'Slug': slug,
                'CreatedDate': datetime.fromtimestamp(created_date, timezone.utc).isoformat()
                if created_date else None,
                # NOTE(review): 'LastModifiedSate' looks like a typo for 'LastModifiedDate',
                # but it is an established context output path — renaming would break playbooks.
                'LastModifiedSate': datetime.fromtimestamp(last_modified_date, timezone.utc).isoformat()
                if last_modified_date else None,
                'ShortDescription': short_description,
                'TargetIndustries': get_values(target_industries, return_type='list'),
                'TargetCountries': get_values(target_countries, return_type='list'),
                'Motivations': get_values(motivations, return_type='list'),
                'Tags': get_values(tags, return_type='list'),
                'Actors': get_values(actors, return_type='list', keys='name')
            })
            outputs.append(output)

            # Render the report URL as a clickable markdown link.
            md_output: Dict[str, Any] = output
            if 'URL' in md_output:
                value = md_output['URL']
                md_output['URL'] = f'[{value}]({value})'
            md_outputs.append(md_output)
    else:
        md = 'No reports found.'

    results: CommandResults = CommandResults(
        outputs_prefix='FalconIntel.Report',
        outputs=outputs,
        outputs_key_field='ID',
        readable_output=md if md else tableToMarkdown(name=title, t=outputs, headerTransform=pascalToSpace),
        raw_response=res
    )

    return results


def main():
    params: Dict[str, str] = demisto.params()
    args: Dict[str, str] = demisto.args()

    try:
        command: str = demisto.command()
        LOG(f'Command being called in CrowdStrike Falcon Intel v2 is: {command}')
        client: Client = Client(params=params)
        if command == 'test-module':
            result: Union[str, Exception] = run_test_module(client)
            return_results(result)
        elif command == 'file':
            results: CommandResults = file_command(args['file'], client)
            return_results(results)
        elif command == 'ip':
            results = ip_command(args['ip'], client)
            return_results(results)
        elif command == 'url':
            results = url_command(args['url'], client)
            return_results(results)
        elif command == 'domain':
            results = domain_command(args['domain'], client)
            return_results(results)
        elif command == 'cs-actors':
            results = cs_actors_command(client, args)
            return_results(results)
        elif command == 'cs-indicators':
            results = cs_indicators_command(client, args)
            return_results(results)
        elif command == 'cs-reports':
            results = cs_reports_command(client, args)
            return_results(results)
        else:
            raise NotImplementedError(f'{command} command is not an existing CrowdStrike Falcon Intel v2 integration')
    except Exception as err:
        return_error(f'Unexpected error:\n{str(err)}', error=traceback.format_exc())


from CrowdStrikeApiModule import *  # noqa: E402

if __name__ in ('__main__', 'builtin', 'builtins'):
    main()
import demistomock as demisto from CommonServerPython import * from CommonServerUserPython import * from datetime import datetime, timezone from typing import Union, Any, Dict from dateparser import parse import urllib3 import traceback # Disable insecure warnings urllib3.disable_warnings() ''' GLOBAL VARIABLES ''' MALICIOUS_DICTIONARY: Dict[Any, int] = { 'low': Common.DBotScore.GOOD, 'medium': Common.DBotScore.SUSPICIOUS, 'high': Common.DBotScore.BAD } MALICIOUS_THRESHOLD = MALICIOUS_DICTIONARY.get(demisto.params().get('threshold', 'high')) ''' CLIENT ''' class Client: """ The integration's client """ def __init__(self, params: Dict[str, str]): self._cs_client: CrowdStrikeClient = CrowdStrikeClient(params=params) self.query_params: Dict[str, str] = {'offset': 'offset', 'limit': 'limit', 'sort': 'sort', 'free_search': 'q'} self.date_params: Dict[str, Dict[str, str]] = { 'created_date': {'operator': '', 'api_key': 'created_date'}, 'last_updated_date': {'operator': '', 'api_key': 'last_updated'}, 'max_last_modified_date': {'operator': '<=', 'api_key': 'last_modified_date'}, 'min_last_activity_date': {'operator': '>=', 'api_key': 'first_activity_date'}, 'max_last_activity_date': {'operator': '<=', 'api_key': 'last_activity_date'}, } def build_request_params(self, args: Dict[str, Any]) -> Dict[str, Any]: """ Build the params dict for the request :param args: Cortex XSOAR args :return: The params dict """ params: Dict[str, Any] = {key: args.get(arg) for arg, key in self.query_params.items()} query = args.get('query') params['filter'] = query if query else self.build_filter_query(args) return assign_params(**params) def build_filter_query(self, args: Dict[str, str]) -> str: """ Builds the filter query in Falcon Query Language (FQL) :param args: Cortex XSOAR args :return: The query """ filter_query: str = str() for key in args: if key not in self.query_params: if key not in self.date_params: values: List[str] = argToList(args[key], ',') for value in values: filter_query 
+= f"{key}:'{value}'+" else: operator: Optional[str] = self.date_params.get(key, {}).get('operator') api_key: Optional[str] = self.date_params.get(key, {}).get('api_key') # Parsing date argument of ISO format or free language into datetime object, # replacing TZ with UTC, taking its timestamp format and rounding it up. filter_query += f"{api_key}:" \ f"{operator}{int(parse(args[key]).replace(tzinfo=timezone.utc).timestamp())}+" if filter_query.endswith('+'): filter_query = filter_query[:-1] return filter_query def check_quota_status(self) -> Dict[str, Any]: return self._cs_client.check_quota_status() def get_indicator(self, indicator_value: str, indicator_type: str) -> Dict[str, Any]: args: Dict[str, Any] = { 'indicator': indicator_value, 'limit': 1 } if indicator_type == 'hash': args['type'] = get_indicator_hash_type(indicator_value) elif indicator_type == 'ip': args['type'] = 'ip_address' else: args['type'] = indicator_type params: Dict[str, Any] = self.build_request_params(args) return self._cs_client.http_request(method='GET', url_suffix='intel/combined/indicators/v1', params=params) def cs_actors(self, args: Dict[str, str]) -> Dict[str, Any]: params: Dict[str, Any] = self.build_request_params(args) return self._cs_client.http_request(method='GET', url_suffix='intel/combined/actors/v1', params=params) def cs_indicators(self, args: Dict[str, str]) -> Dict[str, Any]: params: Dict[str, Any] = self.build_request_params(args) return self._cs_client.http_request(method='GET', url_suffix='intel/combined/indicators/v1', params=params) def cs_reports(self, args: Dict[str, str]) -> Dict[str, Any]: params: Dict[str, Any] = self.build_request_params(args) return self._cs_client.http_request(method='GET', url_suffix='intel/combined/reports/v1', params=params) ''' HELPER FUNCTIONS ''' def get_dbot_score_type(indicator_type: str) -> Union[Exception, DBotScoreType, str]: """ Returns the dbot score type :param indicator_type: The indicator type :return: The dbot score type """ 
if indicator_type == 'ip': return DBotScoreType.IP elif indicator_type == 'domain': return DBotScoreType.DOMAIN elif indicator_type == 'file' or indicator_type == 'hash': return DBotScoreType.FILE elif indicator_type == 'url': return DBotScoreType.URL else: raise DemistoException('Indicator type is not supported.') def get_score_from_resource(r: Dict[str, Any]) -> int: """ Calculates the DBotScore for the resource :param r: The resource :return: The DBotScore """ malicious_confidence: int = MALICIOUS_DICTIONARY.get(r.get('malicious_confidence'), 0) if malicious_confidence == 3 or MALICIOUS_THRESHOLD == 1: score = 3 elif malicious_confidence == 2 or MALICIOUS_THRESHOLD == 2: score = 2 else: score = 1 return score def get_indicator_hash_type(indicator_value: str) -> Union[str, Exception]: """ Calculates the type of the hash :param indicator_value: The hash value :return: The hash type """ length: int = len(indicator_value) if length == 32: return 'hash_md5' elif length == 40: return 'hash_sha1' elif length == 64: return 'hash_sha256' else: raise DemistoException(f'Invalid hash. Hash length is: {length}. 
Please provide either MD5 (32 length)' f', SHA1 (40 length) or SHA256 (64 length) hash.') def get_indicator_object(indicator_value: Any, indicator_type: str, dbot_score: Common.DBotScore) \ -> Union[Common.IP, Common.URL, Common.File, Common.Domain, None]: """ Returns the corresponding indicator common object :param indicator_value: The indicator value :param indicator_type: The indicator value :param dbot_score: The indicator DBotScore :return: The indicator common object """ if indicator_type == 'ip': return Common.IP( ip=indicator_value, dbot_score=dbot_score ) elif indicator_type == 'url': return Common.URL( url=indicator_value, dbot_score=dbot_score ) elif indicator_type == 'hash': hash_type: Union[str, Exception] = get_indicator_hash_type(indicator_value) if hash_type == 'hash_md5': return Common.File( md5=indicator_value, dbot_score=dbot_score ) elif hash_type == 'hash_sha1': return Common.File( sha1=indicator_value, dbot_score=dbot_score ) else: return Common.File( sha256=indicator_value, dbot_score=dbot_score ) elif indicator_type == 'domain': return Common.Domain( domain=indicator_value, dbot_score=dbot_score ) else: return None def build_indicator(indicator_value: str, indicator_type: str, title: str, client: Client) -> CommandResults: """ Builds an indicator entry :param indicator_value: The indicator value :param indicator_type: The indicator type :param title: The title to show to the user :param client: The integration's client :return: The indicator entry """ res: Dict[str, Any] = client.get_indicator(indicator_value, indicator_type) resources: List[Any] = res.get('resources', []) indicators: List[Union[Common.IP, Common.URL, Common.File, Common.Domain, None]] = list() outputs: List[Dict[str, Any]] = list() md: str = str() if resources: for r in resources: output = get_indicator_outputs(r) outputs.append(output) score = get_score_from_resource(r) dbot_score = Common.DBotScore( indicator=indicator_value, 
indicator_type=get_dbot_score_type(indicator_type), integration_name='CrowdStrike Falcon Intel v2', malicious_description='High confidence', score=score ) indicator = get_indicator_object(indicator_value, indicator_type, dbot_score) indicators.append(indicator) else: md = 'No indicator found.' results: CommandResults = CommandResults( outputs=outputs, outputs_prefix='FalconIntel.Indicator', outputs_key_field='ID', indicators=indicators, readable_output=md if md else tableToMarkdown(name=title, t=outputs, headerTransform=pascalToSpace), raw_response=res ) return results def get_values(items_list: List[Any], return_type: str = 'str', keys: Union[str, List[Any]] = 'value') \ -> Union[str, List[Union[str, Dict]]]: """ Returns the values of list's items :param items_list: The items list :param return_type: Whether to return string or list :param keys: The key to get the data :return: The values list """ new_list: List[Any] = list() if isinstance(keys, str): new_list = [item.get(keys) for item in items_list] elif isinstance(keys, list): new_list = [{underscoreToCamelCase(f): item.get(f) for f in item if f in keys} for item in items_list] if return_type == 'list': return new_list return ', '.join(str(item) for item in new_list) def get_indicator_outputs(resource: Dict[str, Any]) -> Dict[str, Any]: """ Build the output and extra context of an indicator :param resource: The indicator's object :return: The indicator's human readable """ output: Dict[str, Any] = dict() if resource: indicator_id = resource.get('id') indicator_value = resource.get('indicator') indicator_type = resource.get('type') last_update = resource.get('last_update') publish_date = resource.get('publish_date') malicious_confidence = resource.get('malicious_confidence') reports = resource.get('reports') actors = resource.get('actors') malware_families = resource.get('malware_families') kill_chains = resource.get('kill_chains') domain_types = resource.get('domain_types') ip_address_types = 
resource.get('ip_address_types') relations: List[Any] = resource.get('relations', [])[:10] labels: List[Any] = resource.get('labels', [])[:10] output = assign_params(**{ 'ID': indicator_id, 'Type': indicator_type, 'Value': indicator_value, 'LastUpdate': datetime.fromtimestamp(last_update, timezone.utc).isoformat() if last_update else None, 'PublishDate': datetime.fromtimestamp(publish_date, timezone.utc).isoformat() if publish_date else None, 'MaliciousConfidence': malicious_confidence, 'Reports': reports, 'Actors': actors, 'MalwareFamilies': malware_families, 'KillChains': kill_chains, 'DomainTypes': domain_types, 'IPAddressTypes': ip_address_types, 'Relations': [f'{item.get("Type")}: {item.get("Indicator")}' for item in # type: ignore get_values(relations, return_type='list', keys=['indicator', 'type'])], 'Labels': get_values(labels, return_type='list', keys='name') }) return output ''' COMMANDS ''' def run_test_module(client: Client) -> Union[str, Exception]: """ If a client is successfully constructed then an access token was successfully created, therefore the username and password are valid and a connection was made. On top of the above, this function checks for allocated quota and validates the http request to actors & indicators. 
:param client: the client object with an access token :return: ok if got a valid access token and not all the quota is used at the moment """ output: Dict[str, Any] = client.check_quota_status() error = output.get('errors') if error and isinstance(error, list): return error[0] meta = output.get('meta') if meta is not None and isinstance(meta, dict): quota = meta.get('quota') if quota is not None: total = quota.get('total') used = quota.get('used') if total <= used: raise Exception(f'Quota limitation has been reached: {used}') else: client._cs_client.http_request('GET', 'intel/combined/indicators/v1', params={'limit': 1}) client._cs_client.http_request('GET', 'intel/combined/actors/v1', params={'limit': 1}) return 'ok' raise Exception('Quota limitation is unreachable') def file_command(file: str, client: Client) -> CommandResults: return build_indicator(file, 'hash', 'Falcon Intel file reputation:\n', client) def ip_command(ip: str, client: Client) -> CommandResults: return build_indicator(ip, 'ip', 'Falcon Intel IP reputation:\n', client) def url_command(url: str, client: Client) -> CommandResults: return build_indicator(url, 'url', 'Falcon Intel URL reputation:\n', client) def domain_command(domain: str, client: Client) -> CommandResults: return build_indicator(domain, 'domain', 'Falcon Intel domain reputation:\n', client) def cs_actors_command(client: Client, args: Dict[str, str]) -> CommandResults: res: Dict[str, Any] = client.cs_actors(args) resources: List[Any] = res.get('resources', []) outputs: List[Dict[str, Any]] = list() md_outputs: List[Dict[str, Any]] = list() md: str = str() title: str = 'Falcon Intel Actor search:' if resources: for r in resources: image_url = r.get('image', {}).get('url') name = r.get('name') actor_id = r.get('id') url = r.get('url') slug = r.get('slug') short_description = r.get('short_description') first_activity_date = r.get('first_activity_date') last_activity_date = r.get('last_activity_date') active = r.get('active') known_as = 
r.get('known_as') target_industries = r.get('target_industries', []) target_countries = r.get('target_countries', []) origins = r.get('origins', []) motivations = r.get('motivations', []) capability = r.get('capability', {}).get('value') group = r.get('group') region = r.get('region', {}).get('value') kill_chain = r.get('kill_chain') output: Dict[str, Any] = assign_params(**{ 'ImageURL': image_url, 'Name': name, 'ID': actor_id, 'URL': url, 'Slug': slug, 'ShortDescription': short_description, 'FirstActivityDate': datetime.fromtimestamp(first_activity_date, timezone.utc).isoformat() if first_activity_date else None, 'LastActivityDate': datetime.fromtimestamp(last_activity_date, timezone.utc).isoformat() if last_activity_date else None, 'Active': active, 'KnownAs': known_as, 'TargetIndustries': get_values(target_industries, return_type='list'), 'TargetCountries': get_values(target_countries, return_type='list'), 'Origins': get_values(origins, return_type='list'), 'Motivations': get_values(motivations, return_type='list'), 'Capability': capability, 'Group': group, 'Region': region, 'KillChains': kill_chain }) outputs.append(output) md_output: Dict[str, Any] = output for key in ('URL', 'ImageURL'): if key in md_output: value = md_output[key] md_output[key] = f'[{value}]({value})' md_outputs.append(md_output) else: md = 'No actors found.' 
results: CommandResults = CommandResults( outputs=outputs, outputs_key_field='ID', outputs_prefix='FalconIntel.Actor', readable_output=md if md else tableToMarkdown(name=title, t=md_outputs, headerTransform=pascalToSpace), raw_response=res ) return results def cs_indicators_command(client: Client, args: Dict[str, str]) -> CommandResults: res: Dict[str, Any] = client.cs_indicators(args) resources: List[Any] = res.get('resources', []) indicators: List[Union[Common.IP, Common.URL, Common.File, Common.Domain, None]] = list() outputs: List[Dict[str, Any]] = list() md: str = str() title: str = 'Falcon Intel Indicator search:' if resources: for r in resources: output = get_indicator_outputs(r) indicator_value = output.get('Value') outputs.append(output) indicator_type = output.get('Type') if indicator_type in ('hash_md5', 'hash_sha256', 'hash_sha1', 'ip_address', 'url', 'domain'): if indicator_type in ('hash_md5', 'hash_sha1', 'hash_sha256'): indicator_type = 'hash' elif indicator_type == 'ip_address': indicator_type = 'ip' score = get_score_from_resource(r) dbot_score = Common.DBotScore( indicator=indicator_value, indicator_type=get_dbot_score_type(indicator_type), integration_name='CrowdStrike Falcon Intel v2', malicious_description='High confidence', score=score ) indicator = get_indicator_object(indicator_value, indicator_type, dbot_score) indicators.append(indicator) else: md = 'No indicators found.' 
results: CommandResults = CommandResults( outputs=outputs, outputs_prefix='FalconIntel.Indicator', outputs_key_field='ID', readable_output=md if md else tableToMarkdown(name=title, t=outputs, headerTransform=pascalToSpace), raw_response=res, indicators=indicators ) return results def cs_reports_command(client: Client, args: Dict[str, str]) -> CommandResults: res: Dict[str, Any] = client.cs_reports(args) resources: List[Any] = res.get('resources', []) outputs: List[Dict[str, Any]] = list() md_outputs: List[Dict[str, Any]] = list() md: str = str() title: str = 'Falcon Intel Report search:' if resources: for r in resources: report_id: int = r.get('id') url: str = r.get('url') name: str = r.get('name') report_type: str = r.get('type', {}).get('name') sub_type: str = r.get('sub_type', {}).get('name') slug: str = r.get('slug') created_date: int = r.get('created_date') last_modified_date: int = r.get('last_modified_date') short_description: str = r.get('short_description') target_industries: List[Any] = r.get('target_industries', []) target_countries: List[Any] = r.get('target_countries', []) motivations: List[Any] = r.get('motivations', []) tags: List[Any] = r.get('tags', []) actors: List[Any] = r.get('actors', []) output: Dict[str, Any] = assign_params(**{ 'ID': report_id, 'URL': url, 'Name': name, 'Type': report_type, 'SubType': sub_type, 'Slug': slug, 'CreatedDate': datetime.fromtimestamp(created_date, timezone.utc).isoformat() if created_date else None, 'LastModifiedSate': datetime.fromtimestamp(last_modified_date, timezone.utc).isoformat() if last_modified_date else None, 'ShortDescription': short_description, 'TargetIndustries': get_values(target_industries, return_type='list'), 'TargetCountries': get_values(target_countries, return_type='list'), 'Motivations': get_values(motivations, return_type='list'), 'Tags': get_values(tags, return_type='list'), 'Actors': get_values(actors, return_type='list', keys='name') }) outputs.append(output) md_output: Dict[str, Any] = 
output if 'URL' in md_output: value = md_output['URL'] md_output['URL'] = f'[{value}]({value})' md_outputs.append(md_output) else: md = 'No reports found.' results: CommandResults = CommandResults( outputs_prefix='FalconIntel.Report', outputs=outputs, outputs_key_field='ID', readable_output=md if md else tableToMarkdown(name=title, t=outputs, headerTransform=pascalToSpace), raw_response=res ) return results def main(): params: Dict[str, str] = demisto.params() args: Dict[str, str] = demisto.args() try: command: str = demisto.command() LOG(f'Command being called in CrowdStrike Falcon Intel v2 is: {command}') client: Client = Client(params=params) if command == 'test-module': result: Union[str, Exception] = run_test_module(client) return_results(result) elif command == 'file': results: CommandResults = file_command(args['file'], client) return_results(results) elif command == 'ip': results = ip_command(args['ip'], client) return_results(results) elif command == 'url': results = url_command(args['url'], client) return_results(results) elif command == 'domain': results = domain_command(args['domain'], client) return_results(results) elif command == 'cs-actors': results = cs_actors_command(client, args) return_results(results) elif command == 'cs-indicators': results = cs_indicators_command(client, args) return_results(results) elif command == 'cs-reports': results = cs_reports_command(client, args) return_results(results) else: raise NotImplementedError(f'{command} command is not an existing CrowdStrike Falcon Intel v2 integration') except Exception as err: return_error(f'Unexpected error:\n{str(err)}', error=traceback.format_exc()) from CrowdStrikeApiModule import * # noqa: E402 if __name__ in ('__main__', 'builtin', 'builtins'): main()
""" The :mod:`sklearn.model_selection._validation` module includes classes and functions to validate the model. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Gael Varoquaux <gael.varoquaux@normalesup.org> # Olivier Grisel <olivier.grisel@ensta.org> # Raghav RV <rvraghav93@gmail.com> # License: BSD 3 clause import warnings import numbers import time from traceback import format_exc from contextlib import suppress import numpy as np import scipy.sparse as sp from joblib import Parallel, logger from ..base import is_classifier, clone from ..utils import indexable, check_random_state, _safe_indexing from ..utils.validation import _check_fit_params from ..utils.validation import _num_samples from ..utils.validation import _deprecate_positional_args from ..utils.fixes import delayed from ..utils.metaestimators import _safe_split from ..metrics import check_scoring from ..metrics._scorer import _check_multimetric_scoring, _MultimetricScorer from ..exceptions import FitFailedWarning, NotFittedError from ._split import check_cv from ..preprocessing import LabelEncoder __all__ = ['cross_validate', 'cross_val_score', 'cross_val_predict', 'permutation_test_score', 'learning_curve', 'validation_curve'] @_deprecate_positional_args def cross_validate(estimator, X, y=None, *, groups=None, scoring=None, cv=None, n_jobs=None, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', return_train_score=False, return_estimator=False, error_score=np.nan): """Evaluate metric(s) by cross-validation and also record fit/score times. Read more in the :ref:`User Guide <multimetric_cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. Can be for example a list, or an array. y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ default=None The target variable to try to predict in the case of supervised learning. 
groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a "Group" :term:`cv` instance (e.g., :class:`GroupKFold`). scoring : str, callable, list/tuple, or dict, default=None A single str (see :ref:`scoring_parameter`) or a callable (see :ref:`scoring`) to evaluate the predictions on the test set. For evaluating multiple metrics, either give a list of (unique) strings or a dict with names as keys and callables as values. NOTE that when using custom scorers, each scorer should return a single value. Metric functions returning a list/array of values can be wrapped into multiple scorers that return one value each. See :ref:`multimetric_grid_search` for an example. If None, the estimator's score method is used. cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - int, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For int/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. n_jobs : int, default=None Number of jobs to run in parallel. Training the estimator and computing the score are parallelized over the cross-validation splits. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. verbose : int, default=0 The verbosity level. fit_params : dict, default=None Parameters to pass to the fit method of the estimator. 
pre_dispatch : int or str, default='2*n_jobs' Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A str, giving an expression as a function of n_jobs, as in '2*n_jobs' return_train_score : bool, default=False Whether to include train scores. Computing training scores is used to get insights on how different parameter settings impact the overfitting/underfitting trade-off. However computing the scores on the training set can be computationally expensive and is not strictly required to select the parameters that yield the best generalization performance. .. versionadded:: 0.19 .. versionchanged:: 0.21 Default value was changed from ``True`` to ``False`` return_estimator : bool, default=False Whether to return the estimators fitted on each split. .. versionadded:: 0.20 error_score : 'raise' or numeric, default=np.nan Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. .. versionadded:: 0.20 Returns ------- scores : dict of float arrays of shape (n_splits,) Array of scores of the estimator for each run of the cross validation. A dict of arrays containing the score/time arrays for each scorer is returned. The possible keys for this ``dict`` are: ``test_score`` The score array for test scores on each cv split. Suffix ``_score`` in ``test_score`` changes to a specific metric like ``test_r2`` or ``test_auc`` if there are multiple scoring metrics in the scoring parameter. ``train_score`` The score array for train scores on each cv split. 
Suffix ``_score`` in ``train_score`` changes to a specific metric like ``train_r2`` or ``train_auc`` if there are multiple scoring metrics in the scoring parameter. This is available only if ``return_train_score`` parameter is ``True``. ``fit_time`` The time for fitting the estimator on the train set for each cv split. ``score_time`` The time for scoring the estimator on the test set for each cv split. (Note time for scoring on the train set is not included even if ``return_train_score`` is set to ``True`` ``estimator`` The estimator objects for each cv split. This is available only if ``return_estimator`` parameter is set to ``True``. Examples -------- >>> from sklearn import datasets, linear_model >>> from sklearn.model_selection import cross_validate >>> from sklearn.metrics import make_scorer >>> from sklearn.metrics import confusion_matrix >>> from sklearn.svm import LinearSVC >>> diabetes = datasets.load_diabetes() >>> X = diabetes.data[:150] >>> y = diabetes.target[:150] >>> lasso = linear_model.Lasso() Single metric evaluation using ``cross_validate`` >>> cv_results = cross_validate(lasso, X, y, cv=3) >>> sorted(cv_results.keys()) ['fit_time', 'score_time', 'test_score'] >>> cv_results['test_score'] array([0.33150734, 0.08022311, 0.03531764]) Multiple metric evaluation using ``cross_validate`` (please refer the ``scoring`` parameter doc for more information) >>> scores = cross_validate(lasso, X, y, cv=3, ... scoring=('r2', 'neg_mean_squared_error'), ... return_train_score=True) >>> print(scores['test_neg_mean_squared_error']) [-3635.5... -3573.3... -6114.7...] >>> print(scores['train_r2']) [0.28010158 0.39088426 0.22784852] See Also --------- cross_val_score : Run cross-validation for single metric evaluation. cross_val_predict : Get predictions from each split of cross-validation for diagnostic purposes. sklearn.metrics.make_scorer : Make a scorer from a performance metric or loss function. 
""" X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) if callable(scoring): scorers = scoring elif scoring is None or isinstance(scoring, str): scorers = check_scoring(estimator, scoring) else: scorers = _check_multimetric_scoring(estimator, scoring) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) results = parallel( delayed(_fit_and_score)( clone(estimator), X, y, scorers, train, test, verbose, None, fit_params, return_train_score=return_train_score, return_times=True, return_estimator=return_estimator, error_score=error_score) for train, test in cv.split(X, y, groups)) # For callabe scoring, the return type is only know after calling. If the # return type is a dictionary, the error scores can now be inserted with # the correct key. if callable(scoring): _insert_error_scores(results, error_score) results = _aggregate_score_dicts(results) ret = {} ret['fit_time'] = results["fit_time"] ret['score_time'] = results["score_time"] if return_estimator: ret['estimator'] = results["estimator"] test_scores_dict = _normalize_score_results(results["test_scores"]) if return_train_score: train_scores_dict = _normalize_score_results(results["train_scores"]) for name in test_scores_dict: ret['test_%s' % name] = test_scores_dict[name] if return_train_score: key = 'train_%s' % name ret[key] = train_scores_dict[name] return ret def _insert_error_scores(results, error_score): """Insert error in `results` by replacing them inplace with `error_score`. This only applies to multimetric scores because `_fit_and_score` will handle the single metric case. 
""" successful_score = None failed_indices = [] for i, result in enumerate(results): if result["fit_failed"]: failed_indices.append(i) elif successful_score is None: successful_score = result["test_scores"] if successful_score is None: raise NotFittedError("All estimators failed to fit") if isinstance(successful_score, dict): formatted_error = {name: error_score for name in successful_score} for i in failed_indices: results[i]["test_scores"] = formatted_error.copy() if "train_scores" in results[i]: results[i]["train_scores"] = formatted_error.copy() def _normalize_score_results(scores, scaler_score_key='score'): """Creates a scoring dictionary based on the type of `scores`""" if isinstance(scores[0], dict): # multimetric scoring return _aggregate_score_dicts(scores) # scaler return {scaler_score_key: scores} @_deprecate_positional_args def cross_val_score(estimator, X, y=None, *, groups=None, scoring=None, cv=None, n_jobs=None, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', error_score=np.nan): """Evaluate a score by cross-validation Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. Can be for example a list, or an array. y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ default=None The target variable to try to predict in the case of supervised learning. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a "Group" :term:`cv` instance (e.g., :class:`GroupKFold`). scoring : str or callable, default=None A str (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)`` which should return only a single value. Similar to :func:`cross_validate` but only a single metric is permitted. 
If None, the estimator's default scorer (if available) is used. cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - int, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For int/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. n_jobs : int, default=None Number of jobs to run in parallel. Training the estimator and computing the score are parallelized over the cross-validation splits. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. verbose : int, default=0 The verbosity level. fit_params : dict, default=None Parameters to pass to the fit method of the estimator. pre_dispatch : int or str, default='2*n_jobs' Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A str, giving an expression as a function of n_jobs, as in '2*n_jobs' error_score : 'raise' or numeric, default=np.nan Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. 
If a numeric value is given, FitFailedWarning is raised. .. versionadded:: 0.20 Returns ------- scores : ndarray of float of shape=(len(list(cv)),) Array of scores of the estimator for each run of the cross validation. Examples -------- >>> from sklearn import datasets, linear_model >>> from sklearn.model_selection import cross_val_score >>> diabetes = datasets.load_diabetes() >>> X = diabetes.data[:150] >>> y = diabetes.target[:150] >>> lasso = linear_model.Lasso() >>> print(cross_val_score(lasso, X, y, cv=3)) [0.33150734 0.08022311 0.03531764] See Also --------- cross_validate : To run cross-validation on multiple metrics and also to return train scores, fit times and score times. cross_val_predict : Get predictions from each split of cross-validation for diagnostic purposes. sklearn.metrics.make_scorer : Make a scorer from a performance metric or loss function. """ # To ensure multimetric format is not supported scorer = check_scoring(estimator, scoring=scoring) cv_results = cross_validate(estimator=estimator, X=X, y=y, groups=groups, scoring={'score': scorer}, cv=cv, n_jobs=n_jobs, verbose=verbose, fit_params=fit_params, pre_dispatch=pre_dispatch, error_score=error_score) return cv_results['test_score'] def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score=False, return_parameters=False, return_n_test_samples=False, return_times=False, return_estimator=False, split_progress=None, candidate_progress=None, error_score=np.nan): """Fit estimator and compute scores for a given dataset split. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None The target variable to try to predict in the case of supervised learning. 
scorer : A single callable or dict mapping scorer name to the callable If it is a single callable, the return value for ``train_scores`` and ``test_scores`` is a single float. For a dict, it should be one mapping the scorer name to the scorer callable object / function. The callable object / fn should have signature ``scorer(estimator, X, y)``. train : array-like of shape (n_train_samples,) Indices of training samples. test : array-like of shape (n_test_samples,) Indices of test samples. verbose : int The verbosity level. error_score : 'raise' or numeric, default=np.nan Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. return_train_score : bool, default=False Compute and return score on training set. return_parameters : bool, default=False Return parameters that has been used for the estimator. split_progress : {list, tuple} of int, default=None A list or tuple of format (<current_split_id>, <total_num_of_splits>). candidate_progress : {list, tuple} of int, default=None A list or tuple of format (<current_candidate_id>, <total_number_of_candidates>). return_n_test_samples : bool, default=False Whether to return the ``n_test_samples``. return_times : bool, default=False Whether to return the fit/score times. return_estimator : bool, default=False Whether to return the fitted estimator. Returns ------- result : dict with the following attributes train_scores : dict of scorer name -> float Score on training set (for all the scorers), returned only if `return_train_score` is `True`. test_scores : dict of scorer name -> float Score on testing set (for all the scorers). n_test_samples : int Number of test samples. fit_time : float Time spent for fitting in seconds. 
score_time : float Time spent for scoring in seconds. parameters : dict or None The parameters that have been evaluated. estimator : estimator object The fitted estimator. fit_failed : bool The estimator failed to fit. """ if not isinstance(error_score, numbers.Number) and error_score != 'raise': raise ValueError( "error_score must be the string 'raise' or a numeric value. " "(Hint: if using 'raise', please make sure that it has been " "spelled correctly.)" ) progress_msg = "" if verbose > 2: if split_progress is not None: progress_msg = f" {split_progress[0]+1}/{split_progress[1]}" if candidate_progress and verbose > 9: progress_msg += (f"; {candidate_progress[0]+1}/" f"{candidate_progress[1]}") if verbose > 1: if parameters is None: params_msg = '' else: sorted_keys = sorted(parameters) # Ensure deterministic o/p params_msg = (', '.join(f'{k}={parameters[k]}' for k in sorted_keys)) if verbose > 9: start_msg = f"[CV{progress_msg}] START {params_msg}" print(f"{start_msg}{(80 - len(start_msg)) * "."}") # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = _check_fit_params(X, fit_params, train) if parameters is not None: # clone after setting parameters in case any parameters # are estimators (like pipeline steps) # because pipeline doesn't clone steps in fit cloned_parameters = {} for k, v in parameters.items(): cloned_parameters[k] = clone(v, safe=False) estimator = estimator.set_params(**cloned_parameters) start_time = time.time() X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) result = {} try: if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) except Exception as e: # Note fit time as time until error fit_time = time.time() - start_time score_time = 0.0 if error_score == 'raise': raise elif isinstance(error_score, numbers.Number): if isinstance(scorer, dict): test_scores = {name: 
error_score for name in scorer} if return_train_score: train_scores = test_scores.copy() else: test_scores = error_score if return_train_score: train_scores = error_score warnings.warn("Estimator fit failed. The score on this train-test" " partition for these parameters will be set to %f. " "Details: \n%s" % (error_score, format_exc()), FitFailedWarning) result["fit_failed"] = True else: result["fit_failed"] = False fit_time = time.time() - start_time test_scores = _score(estimator, X_test, y_test, scorer, error_score) score_time = time.time() - start_time - fit_time if return_train_score: train_scores = _score( estimator, X_train, y_train, scorer, error_score ) if verbose > 1: total_time = score_time + fit_time end_msg = f"[CV{progress_msg}] END " result_msg = params_msg + (";" if params_msg else "") if verbose > 2 and isinstance(test_scores, dict): for scorer_name in sorted(test_scores): result_msg += f" {scorer_name}: (" if return_train_score: scorer_scores = train_scores[scorer_name] result_msg += f"train={scorer_scores:.3f}, " result_msg += f"test={test_scores[scorer_name]:.3f})" result_msg += f" total time={logger.short_format_time(total_time)}" # Right align the result_msg end_msg += "." * (80 - len(end_msg) - len(result_msg)) end_msg += result_msg print(end_msg) result["test_scores"] = test_scores if return_train_score: result["train_scores"] = train_scores if return_n_test_samples: result["n_test_samples"] = _num_samples(X_test) if return_times: result["fit_time"] = fit_time result["score_time"] = score_time if return_parameters: result["parameters"] = parameters if return_estimator: result["estimator"] = estimator return result def _score(estimator, X_test, y_test, scorer, error_score="raise"): """Compute the score(s) of an estimator on a given test set. Will return a dict of floats if `scorer` is a dict, otherwise a single float is returned. """ if isinstance(scorer, dict): # will cache method calls if needed. 
scorer() returns a dict scorer = _MultimetricScorer(**scorer) try: if y_test is None: scores = scorer(estimator, X_test) else: scores = scorer(estimator, X_test, y_test) except Exception: if error_score == 'raise': raise else: if isinstance(scorer, _MultimetricScorer): scores = {name: error_score for name in scorer._scorers} else: scores = error_score warnings.warn( f"Scoring failed. The score on this train-test partition for " f"these parameters will be set to {error_score}. Details: \n" f"{format_exc()}", UserWarning, ) error_msg = ( "scoring must return a number, got %s (%s) instead. (scorer=%s)" ) if isinstance(scores, dict): for name, score in scores.items(): if hasattr(score, 'item'): with suppress(ValueError): # e.g. unwrap memmapped scalars score = score.item() if not isinstance(score, numbers.Number): raise ValueError(error_msg % (score, type(score), name)) scores[name] = score else: # scalar if hasattr(scores, 'item'): with suppress(ValueError): # e.g. unwrap memmapped scalars scores = scores.item() if not isinstance(scores, numbers.Number): raise ValueError(error_msg % (scores, type(scores), scorer)) return scores @_deprecate_positional_args def cross_val_predict(estimator, X, y=None, *, groups=None, cv=None, n_jobs=None, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', method='predict'): """Generate cross-validated estimates for each input data point The data is split according to the cv parameter. Each sample belongs to exactly one test set, and its prediction is computed with an estimator fitted on the corresponding training set. Passing these predictions into an evaluation metric may not be a valid way to measure generalization performance. Results can differ from :func:`cross_validate` and :func:`cross_val_score` unless all tests sets have equal size and the metric decomposes over samples. Read more in the :ref:`User Guide <cross_validation>`. 
Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. Can be, for example a list, or an array at least 2d. y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ default=None The target variable to try to predict in the case of supervised learning. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a "Group" :term:`cv` instance (e.g., :class:`GroupKFold`). cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - int, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For int/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. n_jobs : int, default=None Number of jobs to run in parallel. Training the estimator and predicting are parallelized over the cross-validation splits. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. verbose : int, default=0 The verbosity level. fit_params : dict, defualt=None Parameters to pass to the fit method of the estimator. pre_dispatch : int or str, default='2*n_jobs' Controls the number of jobs that get dispatched during parallel execution. 
Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A str, giving an expression as a function of n_jobs, as in '2*n_jobs' method : {'predict', 'predict_proba', 'predict_log_proba', \ 'decision_function'}, default='predict' The method to be invoked by `estimator`. Returns ------- predictions : ndarray This is the result of calling `method`. Shape: - When `method` is 'predict' and in special case where `method` is 'decision_function' and the target is binary: (n_samples,) - When `method` is one of {'predict_proba', 'predict_log_proba', 'decision_function'} (unless special case above): (n_samples, n_classes) - If `estimator` is :term:`multioutput`, an extra dimension 'n_outputs' is added to the end of each shape above. See Also -------- cross_val_score : Calculate score for each CV split. cross_validate : Calculate one or more scores and timings for each CV split. Notes ----- In the case that one or more classes are absent in a training portion, a default score needs to be assigned to all instances for that class if ``method`` produces columns per class, as in {'decision_function', 'predict_proba', 'predict_log_proba'}. For ``predict_proba`` this value is 0. In order to ensure finite output, we approximate negative infinity by the minimum finite float value for the dtype in other cases. 
Examples -------- >>> from sklearn import datasets, linear_model >>> from sklearn.model_selection import cross_val_predict >>> diabetes = datasets.load_diabetes() >>> X = diabetes.data[:150] >>> y = diabetes.target[:150] >>> lasso = linear_model.Lasso() >>> y_pred = cross_val_predict(lasso, X, y, cv=3) """ X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) splits = list(cv.split(X, y, groups)) test_indices = np.concatenate([test for _, test in splits]) if not _check_is_permutation(test_indices, _num_samples(X)): raise ValueError('cross_val_predict only works for partitions') # If classification methods produce multiple columns of output, # we need to manually encode classes to ensure consistent column ordering. encode = method in ['decision_function', 'predict_proba', 'predict_log_proba'] and y is not None if encode: y = np.asarray(y) if y.ndim == 1: le = LabelEncoder() y = le.fit_transform(y) elif y.ndim == 2: y_enc = np.zeros_like(y, dtype=int) for i_label in range(y.shape[1]): y_enc[:, i_label] = LabelEncoder().fit_transform(y[:, i_label]) y = y_enc # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) predictions = parallel(delayed(_fit_and_predict)( clone(estimator), X, y, train, test, verbose, fit_params, method) for train, test in splits) inv_test_indices = np.empty(len(test_indices), dtype=int) inv_test_indices[test_indices] = np.arange(len(test_indices)) if sp.issparse(predictions[0]): predictions = sp.vstack(predictions, format=predictions[0].format) elif encode and isinstance(predictions[0], list): # `predictions` is a list of method outputs from each fold. # If each of those is also a list, then treat this as a # multioutput-multiclass task. We need to separately concatenate # the method outputs for each label into an `n_labels` long list. 
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params,
                     method):
    """Fit an estimator on one CV split and apply ``method`` to the test part.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object used to fit the data.

    X : array-like of shape (n_samples, n_features)
        The data to fit.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
        The target variable, or None for unsupervised learning.

    train : array-like of shape (n_train_samples,)
        Indices of training samples.

    test : array-like of shape (n_test_samples,)
        Indices of test samples.

    verbose : int
        The verbosity level.

    fit_params : dict or None
        Parameters passed to ``estimator.fit``.

    method : str
        Name of the estimator method invoked on the test data.

    Returns
    -------
    predictions : sequence
        Result of calling ``getattr(estimator, method)`` on the test split,
        with class columns re-ordered/padded for classifier outputs.
    """
    # Restrict per-sample fit parameters (e.g. sample_weight) to the
    # training indices.
    fit_params = {} if fit_params is None else fit_params
    fit_params = _check_fit_params(X, fit_params, train)

    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, _ = _safe_split(estimator, X, y, test, train)

    if y_train is None:
        estimator.fit(X_train, **fit_params)
    else:
        estimator.fit(X_train, y_train, **fit_params)
    predictions = getattr(estimator, method)(X_test)

    if y is not None and method in ('decision_function', 'predict_proba',
                                    'predict_log_proba'):
        if isinstance(predictions, list):
            # Multioutput task: enforce column order label by label.
            predictions = [
                _enforce_prediction_order(
                    estimator.classes_[label_idx], predictions[label_idx],
                    n_classes=len(set(y[:, label_idx])), method=method)
                for label_idx in range(len(predictions))
            ]
        else:
            # A 2D y array should be a binary label indicator matrix.
            n_classes = len(set(y)) if y.ndim == 1 else y.shape[1]
            predictions = _enforce_prediction_order(
                estimator.classes_, predictions, n_classes, method)
    return predictions


def _enforce_prediction_order(classes, predictions, n_classes, method):
    """Pad and position per-fold prediction columns for missing classes.

    When one or more classes are absent from a training fold, ``method``
    produces fewer columns than folds that saw every class. ``classes``
    (assumed to be integer labels observed in this fold — a subset of the
    ``n_classes`` labels of the full training set) is used to place each
    column at its global position; missing columns are filled with a
    neutral default (0 for predict_proba, the most negative finite float
    for decision_function / predict_log_proba).
    """
    if n_classes != len(classes):
        recommendation = ('To fix this, use a cross-validation technique '
                          'resulting in properly stratified folds')
        warnings.warn('Number of classes in training fold ({}) does '
                      'not match total number of classes ({}). '
                      'Results may not be appropriate for your use case. '
                      '{}'.format(len(classes), n_classes, recommendation),
                      RuntimeWarning)
        if method == 'decision_function':
            if (predictions.ndim == 2 and
                    predictions.shape[1] != len(classes)):
                # e.g. sklearn.svm.SVC with decision_function_shape='ovo'
                # emits n*(n-1)/2 columns that cannot be mapped to classes.
                raise ValueError('Output shape {} of {} does not match '
                                 'number of classes ({}) in fold. '
                                 'Irregular decision_function outputs '
                                 'are not currently supported by '
                                 'cross_val_predict'.format(
                                     predictions.shape, method,
                                     len(classes)))
            if len(classes) <= 2:
                # decision_function returns a 1D array in this case, which
                # cannot be padded out to the full class count.
                raise ValueError('Only {} class/es in training fold, but {} '
                                 'in overall dataset. This '
                                 'is not supported for decision_function '
                                 'with imbalanced folds. {}'.format(
                                     len(classes), n_classes,
                                     recommendation))

        float_min = np.finfo(predictions.dtype).min
        default_values = {'decision_function': float_min,
                          'predict_log_proba': float_min,
                          'predict_proba': 0}
        padded = np.full((_num_samples(predictions), n_classes),
                         default_values[method],
                         dtype=predictions.dtype)
        padded[:, classes] = predictions
        predictions = padded
    return predictions


def _check_is_permutation(indices, n_samples):
    """Return True iff ``indices`` is a reordering of arange(n_samples).

    Parameters
    ----------
    indices : ndarray
        int array to test.
    n_samples : int
        Number of expected elements.

    Returns
    -------
    is_partition : bool
        True iff sorted(indices) equals np.arange(n_samples).
    """
    if len(indices) != n_samples:
        return False
    seen = np.zeros(n_samples, dtype=bool)
    seen[indices] = True
    return bool(np.all(seen))
@_deprecate_positional_args
def permutation_test_score(estimator, X, y, *, groups=None, cv=None,
                           n_permutations=100, n_jobs=None, random_state=0,
                           verbose=0, scoring=None, fit_params=None):
    """Evaluate the significance of a cross-validated score with permutations.

    Permutes targets to generate 'randomized data' and computes the
    empirical p-value against the null hypothesis that features and targets
    are independent: the fraction of permuted datasets on which the
    estimator scored at least as well as on the original data (the original
    score counts as one extra draw). A small p-value suggests a real
    dependency between features and targets that the estimator exploits.

    Read more in the :ref:`User Guide <permutation_test_score>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : array-like of shape at least 2D
        The data to fit.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
        The target variable to try to predict in the case of supervised
        learning.

    groups : array-like of shape (n_samples,), default=None
        Labels to constrain permutation within groups: ``y`` values are
        permuted only among samples sharing a group identifier. Also passed
        on to the cross-validator's ``split`` method.

    cv : int, cross-validation generator or an iterable, default=None
        Determines the cross-validation splitting strategy (default 5-fold;
        :class:`StratifiedKFold` for int/None with a classifier and
        binary/multiclass ``y``, :class:`KFold` otherwise).

    n_permutations : int, default=100
        Number of times to permute ``y``.

    n_jobs : int, default=None
        Number of jobs to run in parallel over the permutations.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.

    random_state : int, RandomState instance or None, default=0
        Pass an int for reproducible permutations of ``y``.

    verbose : int, default=0
        The verbosity level.

    scoring : str or callable, default=None
        Scorer name or callable; if None the estimator's score method is
        used.

    fit_params : dict, default=None
        Parameters to pass to the fit method of the estimator.

    Returns
    -------
    score : float
        The true score without permuting targets.

    permutation_scores : array of shape (n_permutations,)
        The scores obtained for each permutation.

    pvalue : float
        ``(C + 1) / (n_permutations + 1)`` where C is the number of
        permutations whose score >= the true score. The best possible
        p-value is ``1 / (n_permutations + 1)``, the worst is 1.0.

    Notes
    -----
    This function implements Test 1 in Ojala and Garriga, "Permutation
    Tests for Studying Classifier Performance", JMLR (2010) vol. 11.
    """
    X, y, groups = indexable(X, y, groups)

    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    random_state = check_random_state(random_state)

    # Clone the estimator so folds are independent and it is pickle-able.
    score = _permutation_test_score(clone(estimator), X, y, groups, cv,
                                    scorer, fit_params=fit_params)
    permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_permutation_test_score)(
            clone(estimator), X, _shuffle(y, groups, random_state),
            groups, cv, scorer, fit_params=fit_params)
        for _ in range(n_permutations))
    permutation_scores = np.array(permutation_scores)
    pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
    return score, permutation_scores, pvalue


def _permutation_test_score(estimator, X, y, groups, cv, scorer, fit_params):
    """Auxiliary function for permutation_test_score.

    Returns the mean cross-validated score of ``estimator`` over the splits
    of ``cv``.
    """
    fit_params = fit_params if fit_params is not None else {}
    avg_score = []
    for train, test in cv.split(X, y, groups):
        X_train, y_train = _safe_split(estimator, X, y, train)
        X_test, y_test = _safe_split(estimator, X, y, test, train)
        # BUGFIX: index per-sample fit params (e.g. sample_weight) into a
        # *per-fold* variable. The previous code rebound ``fit_params``
        # itself, so every fold after the first sliced an already-sliced
        # copy, producing wrongly indexed (and wrongly sized) parameters.
        fold_fit_params = _check_fit_params(X, fit_params, train)
        estimator.fit(X_train, y_train, **fold_fit_params)
        avg_score.append(scorer(estimator, X_test, y_test))
    return np.mean(avg_score)


def _shuffle(y, groups, random_state):
    """Return a shuffled copy of y, shuffling only within groups if given."""
    if groups is None:
        indices = random_state.permutation(len(y))
    else:
        indices = np.arange(len(groups))
        for group in np.unique(groups):
            this_mask = (groups == group)
            indices[this_mask] = random_state.permutation(indices[this_mask])
    return _safe_indexing(y, indices)
@_deprecate_positional_args
def learning_curve(estimator, X, y, *, groups=None,
                   train_sizes=np.linspace(0.1, 1.0, 5), cv=None,
                   scoring=None, exploit_incremental_learning=False,
                   n_jobs=None, pre_dispatch="all", verbose=0, shuffle=False,
                   random_state=None, error_score=np.nan, return_times=False,
                   fit_params=None):
    """Learning curve.

    Computes cross-validated training and test scores for increasing
    training-set sizes: for every CV split the estimator is trained on
    growing prefixes of the training fold and scored on the fixed test
    fold, and scores are averaged over the k splits per training size.

    Read more in the :ref:`User Guide <learning_curve>`.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    X : array-like of shape (n_samples, n_features)
        Training vector.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Target relative to X; None for unsupervised learning.

    groups : array-like of shape (n_samples,), default=None
        Group labels, only used with a "Group" :term:`cv` instance.

    train_sizes : array-like of shape (n_ticks,), \
            default=np.linspace(0.1, 1.0, 5)
        Relative (float within (0, 1]) or absolute (int) numbers of
        training examples used to generate the curve.

    cv : int, cross-validation generator or an iterable, default=None
        Cross-validation splitting strategy (default 5-fold).

    scoring : str or callable, default=None
        Scorer name or callable ``scorer(estimator, X, y)``.

    exploit_incremental_learning : bool, default=False
        If the estimator supports ``partial_fit``, grow each training
        subset incrementally instead of refitting from scratch.

    n_jobs : int, default=None
        Number of parallel jobs over the (split, size) combinations.

    pre_dispatch : int or str, default='all'
        Number of pre-dispatched parallel jobs.

    verbose : int, default=0
        Controls the verbosity: the higher, the more messages.

    shuffle : bool, default=False
        Shuffle training indices before taking prefixes based on
        ``train_sizes``.

    random_state : int, RandomState instance or None, default=None
        Used when ``shuffle`` is True.

    error_score : 'raise' or numeric, default=np.nan
        Value assigned to the score if fitting fails; 'raise' re-raises.

    return_times : bool, default=False
        Whether to also return fit and score times.

    fit_params : dict, default=None
        Parameters to pass to the fit method of the estimator.

    Returns
    -------
    train_sizes_abs : array of shape (n_unique_ticks,)
        Absolute training-set sizes actually used (duplicates removed).

    train_scores : array of shape (n_ticks, n_cv_folds)
        Scores on training sets.

    test_scores : array of shape (n_ticks, n_cv_folds)
        Scores on test set.

    fit_times : array of shape (n_ticks, n_cv_folds)
        Fit times in seconds; only present if ``return_times`` is True.

    score_times : array of shape (n_ticks, n_cv_folds)
        Score times in seconds; only present if ``return_times`` is True.
    """
    if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
        raise ValueError("An estimator must support the partial_fit interface "
                         "to exploit incremental learning")
    X, y, groups = indexable(X, y, groups)

    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    # Materialize the splits: they are iterated more than once below.
    cv_iter = list(cv.split(X, y, groups))
    scorer = check_scoring(estimator, scoring=scoring)

    # NOTE(review): fold lengths can differ, so bounding by the first fold's
    # size does not guarantee all available training data is ever used.
    n_max_training_samples = len(cv_iter[0][0])
    train_sizes_abs = _translate_train_sizes(train_sizes,
                                             n_max_training_samples)
    n_unique_ticks = train_sizes_abs.shape[0]
    if verbose > 0:
        print("[learning_curve] Training set sizes: " + str(train_sizes_abs))

    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)

    if shuffle:
        rng = check_random_state(random_state)
        cv_iter = ((rng.permutation(train), test) for train, test in cv_iter)

    if exploit_incremental_learning:
        classes = np.unique(y) if is_classifier(estimator) else None
        out = parallel(
            delayed(_incremental_fit_estimator)(
                clone(estimator), X, y, classes, train, test,
                train_sizes_abs, scorer, verbose, return_times,
                error_score=error_score, fit_params=fit_params)
            for train, test in cv_iter)
        out = np.asarray(out).transpose((2, 1, 0))
    else:
        # One job per (split, training-size) pair; order must stay
        # split-major so the reshape below groups scores per tick.
        train_test_proportions = [
            (train[:n_train_samples], test)
            for train, test in cv_iter
            for n_train_samples in train_sizes_abs
        ]
        results = parallel(
            delayed(_fit_and_score)(
                clone(estimator), X, y, scorer, train, test, verbose,
                parameters=None, fit_params=fit_params,
                return_train_score=True, error_score=error_score,
                return_times=return_times)
            for train, test in train_test_proportions)
        results = _aggregate_score_dicts(results)
        train_scores = results["train_scores"].reshape(-1, n_unique_ticks).T
        test_scores = results["test_scores"].reshape(-1, n_unique_ticks).T
        out = [train_scores, test_scores]
        if return_times:
            out.append(results["fit_time"].reshape(-1, n_unique_ticks).T)
            out.append(results["score_time"].reshape(-1, n_unique_ticks).T)

    if return_times:
        return train_sizes_abs, out[0], out[1], out[2], out[3]
    return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
    """Determine absolute sizes of training subsets and validate 'train_sizes'.

    Examples:
        _translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
        _translate_train_sizes([5, 10], 10) -> [5, 10]

    Parameters
    ----------
    train_sizes : array-like of shape (n_ticks,)
        Numbers of training examples that will be used to generate the
        learning curve. If the dtype is float, each value is interpreted
        as a fraction of 'n_max_training_samples' and must lie in (0, 1];
        integer values are absolute sample counts in
        (0, n_max_training_samples].

    n_max_training_samples : int
        Maximum number of training samples (upper bound of 'train_sizes').

    Returns
    -------
    train_sizes_abs : array of shape (n_unique_ticks,)
        Absolute numbers of training examples, sorted with duplicates
        removed (a RuntimeWarning is emitted when duplicates are dropped).

    Raises
    ------
    ValueError
        If any requested size is outside the valid range.
    """
    train_sizes_abs = np.asarray(train_sizes)
    n_ticks = train_sizes_abs.shape[0]
    n_min_required_samples = np.min(train_sizes_abs)
    n_max_required_samples = np.max(train_sizes_abs)
    if np.issubdtype(train_sizes_abs.dtype, np.floating):
        if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
            raise ValueError("train_sizes has been interpreted as fractions "
                             "of the maximum number of training samples and "
                             "must be within (0, 1], but is within [%f, %f]."
                             % (n_min_required_samples,
                                n_max_required_samples))
        train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype(
            dtype=int, copy=False)
        train_sizes_abs = np.clip(train_sizes_abs, 1,
                                  n_max_training_samples)
    else:
        if (n_min_required_samples <= 0 or
                n_max_required_samples > n_max_training_samples):
            raise ValueError("train_sizes has been interpreted as absolute "
                             "numbers of training samples and must be within "
                             "(0, %d], but is within [%d, %d]."
                             % (n_max_training_samples,
                                n_min_required_samples,
                                n_max_required_samples))

    train_sizes_abs = np.unique(train_sizes_abs)
    if n_ticks > train_sizes_abs.shape[0]:
        # BUGFIX: the message previously ended "'train_sizes' %d instead of
        # %d)." — an unbalanced closing parenthesis and missing separator.
        warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
                      "of ticks will be less than the size of "
                      "'train_sizes': %d instead of %d."
                      % (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)

    return train_sizes_abs


def _incremental_fit_estimator(estimator, X, y, classes, train, test,
                               train_sizes, scorer, verbose,
                               return_times, error_score, fit_params):
    """Train estimator on training subsets incrementally and compute scores.

    Each tick in ``train_sizes`` is paired with only the *new* chunk of
    training samples added at that tick, so ``partial_fit`` sees every
    sample exactly once; scoring is done on the full prefix and on the
    test split.
    """
    train_scores, test_scores, fit_times, score_times = [], [], [], []
    # np.split(train, train_sizes) cuts at each tick; dropping the last
    # chunk leaves exactly one incremental chunk per tick.
    partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
    if fit_params is None:
        fit_params = {}
    for n_train_samples, partial_train in partitions:
        train_subset = train[:n_train_samples]
        X_train, y_train = _safe_split(estimator, X, y, train_subset)
        X_partial_train, y_partial_train = _safe_split(estimator, X, y,
                                                       partial_train)
        X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
        start_fit = time.time()
        if y_partial_train is None:
            estimator.partial_fit(X_partial_train, classes=classes,
                                  **fit_params)
        else:
            estimator.partial_fit(X_partial_train, y_partial_train,
                                  classes=classes, **fit_params)
        fit_time = time.time() - start_fit
        fit_times.append(fit_time)

        start_score = time.time()
        test_scores.append(
            _score(estimator, X_test, y_test, scorer, error_score)
        )
        train_scores.append(
            _score(estimator, X_train, y_train, scorer, error_score)
        )
        score_time = time.time() - start_score
        score_times.append(score_time)

    ret = ((train_scores, test_scores, fit_times, score_times)
           if return_times else (train_scores, test_scores))

    return np.array(ret).T
@_deprecate_positional_args
def validation_curve(estimator, X, y, *, param_name, param_range, groups=None,
                     cv=None, scoring=None, n_jobs=None, pre_dispatch="all",
                     verbose=0, error_score=np.nan, fit_params=None):
    """Validation curve.

    Computes training and test scores for an estimator while varying a
    single parameter over ``param_range`` — similar to a one-parameter grid
    search, but also reporting training scores, as a utility for plotting.

    Read more in the :ref:`User Guide <validation_curve>`.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    X : array-like of shape (n_samples, n_features)
        Training vector.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
        Target relative to X; None for unsupervised learning.

    param_name : str
        Name of the parameter that will be varied.

    param_range : array-like of shape (n_values,)
        The values of the parameter that will be evaluated.

    groups : array-like of shape (n_samples,), default=None
        Group labels, only used with a "Group" :term:`cv` instance.

    cv : int, cross-validation generator or an iterable, default=None
        Cross-validation splitting strategy (default 5-fold).

    scoring : str or callable, default=None
        Scorer name or callable ``scorer(estimator, X, y)``.

    n_jobs : int, default=None
        Number of parallel jobs over (parameter value, split) combinations.

    pre_dispatch : int or str, default='all'
        Number of pre-dispatched parallel jobs.

    verbose : int, default=0
        Controls the verbosity: the higher, the more messages.

    error_score : 'raise' or numeric, default=np.nan
        Value assigned to the score if fitting fails; 'raise' re-raises.

    fit_params : dict, default=None
        Parameters to pass to the fit method of the estimator.

    Returns
    -------
    train_scores : array of shape (n_ticks, n_cv_folds)
        Scores on training sets.

    test_scores : array of shape (n_ticks, n_cv_folds)
        Scores on test set.
    """
    X, y, groups = indexable(X, y, groups)

    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    n_params = len(param_range)

    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    results = parallel(
        delayed(_fit_and_score)(
            clone(estimator), X, y, scorer, train, test, verbose,
            parameters={param_name: candidate}, fit_params=fit_params,
            return_train_score=True, error_score=error_score)
        # NOTE do not change order of iteration to allow one time cv
        # splitters: splits are the outer loop, parameter values the inner.
        for train, test in cv.split(X, y, groups)
        for candidate in param_range)

    results = _aggregate_score_dicts(results)
    train_scores = results["train_scores"].reshape(-1, n_params).T
    test_scores = results["test_scores"].reshape(-1, n_params).T

    return train_scores, test_scores
def _aggregate_score_dicts(scores):
    """Aggregate a list of per-fold score dicts into a dict of arrays.

    Turns
        [{'prec': 0.1, 'acc': 1.0}, {'prec': 0.1, 'acc': 1.0}, ...]
    into
        {'prec': np.array([0.1, ...]), 'acc': np.array([1.0, ...])}

    Parameters
    ----------
    scores : list of dict
        Scores for all scorers, one dict per fold, in row-major order.
        Numeric values are collected into a numpy array; any other value
        type (e.g. fitted estimators) is collected into a plain list.

    Example
    -------
    >>> scores = [{'a': 1, 'b':10}, {'a': 2, 'b':2}, {'a': 3, 'b':3},
    ...           {'a': 10, 'b': 10}]             # doctest: +SKIP
    >>> _aggregate_score_dicts(scores)            # doctest: +SKIP
    {'a': array([1, 2, 3, 10]), 'b': array([10, 2, 3, 10])}
    """
    aggregated = {}
    for key in scores[0]:
        values = [fold_scores[key] for fold_scores in scores]
        if isinstance(scores[0][key], numbers.Number):
            aggregated[key] = np.asarray(values)
        else:
            aggregated[key] = values
    return aggregated
""" The :mod:`sklearn.model_selection._validation` module includes classes and functions to validate the model. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Gael Varoquaux <gael.varoquaux@normalesup.org> # Olivier Grisel <olivier.grisel@ensta.org> # Raghav RV <rvraghav93@gmail.com> # License: BSD 3 clause import warnings import numbers import time from traceback import format_exc from contextlib import suppress import numpy as np import scipy.sparse as sp from joblib import Parallel, logger from ..base import is_classifier, clone from ..utils import indexable, check_random_state, _safe_indexing from ..utils.validation import _check_fit_params from ..utils.validation import _num_samples from ..utils.validation import _deprecate_positional_args from ..utils.fixes import delayed from ..utils.metaestimators import _safe_split from ..metrics import check_scoring from ..metrics._scorer import _check_multimetric_scoring, _MultimetricScorer from ..exceptions import FitFailedWarning, NotFittedError from ._split import check_cv from ..preprocessing import LabelEncoder __all__ = ['cross_validate', 'cross_val_score', 'cross_val_predict', 'permutation_test_score', 'learning_curve', 'validation_curve'] @_deprecate_positional_args def cross_validate(estimator, X, y=None, *, groups=None, scoring=None, cv=None, n_jobs=None, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', return_train_score=False, return_estimator=False, error_score=np.nan): """Evaluate metric(s) by cross-validation and also record fit/score times. Read more in the :ref:`User Guide <multimetric_cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. Can be for example a list, or an array. y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ default=None The target variable to try to predict in the case of supervised learning. 
groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a "Group" :term:`cv` instance (e.g., :class:`GroupKFold`). scoring : str, callable, list/tuple, or dict, default=None A single str (see :ref:`scoring_parameter`) or a callable (see :ref:`scoring`) to evaluate the predictions on the test set. For evaluating multiple metrics, either give a list of (unique) strings or a dict with names as keys and callables as values. NOTE that when using custom scorers, each scorer should return a single value. Metric functions returning a list/array of values can be wrapped into multiple scorers that return one value each. See :ref:`multimetric_grid_search` for an example. If None, the estimator's score method is used. cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - int, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For int/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. n_jobs : int, default=None Number of jobs to run in parallel. Training the estimator and computing the score are parallelized over the cross-validation splits. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. verbose : int, default=0 The verbosity level. fit_params : dict, default=None Parameters to pass to the fit method of the estimator. 
pre_dispatch : int or str, default='2*n_jobs' Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A str, giving an expression as a function of n_jobs, as in '2*n_jobs' return_train_score : bool, default=False Whether to include train scores. Computing training scores is used to get insights on how different parameter settings impact the overfitting/underfitting trade-off. However computing the scores on the training set can be computationally expensive and is not strictly required to select the parameters that yield the best generalization performance. .. versionadded:: 0.19 .. versionchanged:: 0.21 Default value was changed from ``True`` to ``False`` return_estimator : bool, default=False Whether to return the estimators fitted on each split. .. versionadded:: 0.20 error_score : 'raise' or numeric, default=np.nan Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. .. versionadded:: 0.20 Returns ------- scores : dict of float arrays of shape (n_splits,) Array of scores of the estimator for each run of the cross validation. A dict of arrays containing the score/time arrays for each scorer is returned. The possible keys for this ``dict`` are: ``test_score`` The score array for test scores on each cv split. Suffix ``_score`` in ``test_score`` changes to a specific metric like ``test_r2`` or ``test_auc`` if there are multiple scoring metrics in the scoring parameter. ``train_score`` The score array for train scores on each cv split. 
Suffix ``_score`` in ``train_score`` changes to a specific metric like ``train_r2`` or ``train_auc`` if there are multiple scoring metrics in the scoring parameter. This is available only if ``return_train_score`` parameter is ``True``. ``fit_time`` The time for fitting the estimator on the train set for each cv split. ``score_time`` The time for scoring the estimator on the test set for each cv split. (Note time for scoring on the train set is not included even if ``return_train_score`` is set to ``True`` ``estimator`` The estimator objects for each cv split. This is available only if ``return_estimator`` parameter is set to ``True``. Examples -------- >>> from sklearn import datasets, linear_model >>> from sklearn.model_selection import cross_validate >>> from sklearn.metrics import make_scorer >>> from sklearn.metrics import confusion_matrix >>> from sklearn.svm import LinearSVC >>> diabetes = datasets.load_diabetes() >>> X = diabetes.data[:150] >>> y = diabetes.target[:150] >>> lasso = linear_model.Lasso() Single metric evaluation using ``cross_validate`` >>> cv_results = cross_validate(lasso, X, y, cv=3) >>> sorted(cv_results.keys()) ['fit_time', 'score_time', 'test_score'] >>> cv_results['test_score'] array([0.33150734, 0.08022311, 0.03531764]) Multiple metric evaluation using ``cross_validate`` (please refer the ``scoring`` parameter doc for more information) >>> scores = cross_validate(lasso, X, y, cv=3, ... scoring=('r2', 'neg_mean_squared_error'), ... return_train_score=True) >>> print(scores['test_neg_mean_squared_error']) [-3635.5... -3573.3... -6114.7...] >>> print(scores['train_r2']) [0.28010158 0.39088426 0.22784852] See Also --------- cross_val_score : Run cross-validation for single metric evaluation. cross_val_predict : Get predictions from each split of cross-validation for diagnostic purposes. sklearn.metrics.make_scorer : Make a scorer from a performance metric or loss function. 
""" X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) if callable(scoring): scorers = scoring elif scoring is None or isinstance(scoring, str): scorers = check_scoring(estimator, scoring) else: scorers = _check_multimetric_scoring(estimator, scoring) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) results = parallel( delayed(_fit_and_score)( clone(estimator), X, y, scorers, train, test, verbose, None, fit_params, return_train_score=return_train_score, return_times=True, return_estimator=return_estimator, error_score=error_score) for train, test in cv.split(X, y, groups)) # For callabe scoring, the return type is only know after calling. If the # return type is a dictionary, the error scores can now be inserted with # the correct key. if callable(scoring): _insert_error_scores(results, error_score) results = _aggregate_score_dicts(results) ret = {} ret['fit_time'] = results["fit_time"] ret['score_time'] = results["score_time"] if return_estimator: ret['estimator'] = results["estimator"] test_scores_dict = _normalize_score_results(results["test_scores"]) if return_train_score: train_scores_dict = _normalize_score_results(results["train_scores"]) for name in test_scores_dict: ret['test_%s' % name] = test_scores_dict[name] if return_train_score: key = 'train_%s' % name ret[key] = train_scores_dict[name] return ret def _insert_error_scores(results, error_score): """Insert error in `results` by replacing them inplace with `error_score`. This only applies to multimetric scores because `_fit_and_score` will handle the single metric case. 
""" successful_score = None failed_indices = [] for i, result in enumerate(results): if result["fit_failed"]: failed_indices.append(i) elif successful_score is None: successful_score = result["test_scores"] if successful_score is None: raise NotFittedError("All estimators failed to fit") if isinstance(successful_score, dict): formatted_error = {name: error_score for name in successful_score} for i in failed_indices: results[i]["test_scores"] = formatted_error.copy() if "train_scores" in results[i]: results[i]["train_scores"] = formatted_error.copy() def _normalize_score_results(scores, scaler_score_key='score'): """Creates a scoring dictionary based on the type of `scores`""" if isinstance(scores[0], dict): # multimetric scoring return _aggregate_score_dicts(scores) # scaler return {scaler_score_key: scores} @_deprecate_positional_args def cross_val_score(estimator, X, y=None, *, groups=None, scoring=None, cv=None, n_jobs=None, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', error_score=np.nan): """Evaluate a score by cross-validation Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. Can be for example a list, or an array. y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ default=None The target variable to try to predict in the case of supervised learning. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a "Group" :term:`cv` instance (e.g., :class:`GroupKFold`). scoring : str or callable, default=None A str (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)`` which should return only a single value. Similar to :func:`cross_validate` but only a single metric is permitted. 
@_deprecate_positional_args
def cross_val_score(estimator, X, y=None, *, groups=None, scoring=None,
                    cv=None, n_jobs=None, verbose=0, fit_params=None,
                    pre_dispatch='2*n_jobs', error_score=np.nan):
    """Evaluate a score by cross-validation.

    Convenience wrapper around :func:`cross_validate` for the common case
    of a single metric. Read more in the
    :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : array-like of shape (n_samples, n_features)
        The data to fit. Can be for example a list, or an array.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
            default=None
        The target variable to try to predict in the case of
        supervised learning.

    groups : array-like of shape (n_samples,), default=None
        Group labels for the samples used while splitting the dataset into
        train/test set. Only used in conjunction with a "Group" :term:`cv`
        instance (e.g., :class:`GroupKFold`).

    scoring : str or callable, default=None
        A str (see model evaluation documentation) or a scorer callable
        object / function with signature ``scorer(estimator, X, y)`` which
        should return only a single value. Unlike :func:`cross_validate`,
        multimetric scoring is not permitted here. If None, the estimator's
        default scorer (if available) is used.

    cv : int, cross-validation generator or an iterable, default=None
        Determines the cross-validation splitting strategy. See
        :func:`cross_validate` for the accepted values.

        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.

    n_jobs : int, default=None
        Number of jobs to run in parallel over the cross-validation splits.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`.

    verbose : int, default=0
        The verbosity level.

    fit_params : dict, default=None
        Parameters to pass to the fit method of the estimator.

    pre_dispatch : int or str, default='2*n_jobs'
        Controls the number of jobs dispatched during parallel execution;
        see :func:`cross_validate` for the accepted values.

    error_score : 'raise' or numeric, default=np.nan
        Value to assign to the score if an error occurs in estimator
        fitting. If set to 'raise', the error is raised. If a numeric value
        is given, FitFailedWarning is raised.

        .. versionadded:: 0.20

    Returns
    -------
    scores : ndarray of float of shape=(len(list(cv)),)
        Array of scores of the estimator for each run of the cross
        validation.

    Examples
    --------
    >>> from sklearn import datasets, linear_model
    >>> from sklearn.model_selection import cross_val_score
    >>> diabetes = datasets.load_diabetes()
    >>> X = diabetes.data[:150]
    >>> y = diabetes.target[:150]
    >>> lasso = linear_model.Lasso()
    >>> print(cross_val_score(lasso, X, y, cv=3))
    [0.33150734 0.08022311 0.03531764]

    See Also
    --------
    cross_validate : To run cross-validation on multiple metrics and also to
        return train scores, fit times and score times.

    cross_val_predict : Get predictions from each split of cross-validation
        for diagnostic purposes.

    sklearn.metrics.make_scorer : Make a scorer from a performance metric or
        loss function.
    """
    # Resolving the scorer here (rather than passing `scoring` through)
    # ensures the multimetric format is rejected before dispatch.
    scorer = check_scoring(estimator, scoring=scoring)

    cv_results = cross_validate(
        estimator=estimator, X=X, y=y, groups=groups,
        scoring={'score': scorer}, cv=cv,
        n_jobs=n_jobs, verbose=verbose,
        fit_params=fit_params,
        pre_dispatch=pre_dispatch,
        error_score=error_score,
    )
    return cv_results['test_score']
scorer : A single callable or dict mapping scorer name to the callable If it is a single callable, the return value for ``train_scores`` and ``test_scores`` is a single float. For a dict, it should be one mapping the scorer name to the scorer callable object / function. The callable object / fn should have signature ``scorer(estimator, X, y)``. train : array-like of shape (n_train_samples,) Indices of training samples. test : array-like of shape (n_test_samples,) Indices of test samples. verbose : int The verbosity level. error_score : 'raise' or numeric, default=np.nan Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. return_train_score : bool, default=False Compute and return score on training set. return_parameters : bool, default=False Return parameters that has been used for the estimator. split_progress : {list, tuple} of int, default=None A list or tuple of format (<current_split_id>, <total_num_of_splits>). candidate_progress : {list, tuple} of int, default=None A list or tuple of format (<current_candidate_id>, <total_number_of_candidates>). return_n_test_samples : bool, default=False Whether to return the ``n_test_samples``. return_times : bool, default=False Whether to return the fit/score times. return_estimator : bool, default=False Whether to return the fitted estimator. Returns ------- result : dict with the following attributes train_scores : dict of scorer name -> float Score on training set (for all the scorers), returned only if `return_train_score` is `True`. test_scores : dict of scorer name -> float Score on testing set (for all the scorers). n_test_samples : int Number of test samples. fit_time : float Time spent for fitting in seconds. 
score_time : float Time spent for scoring in seconds. parameters : dict or None The parameters that have been evaluated. estimator : estimator object The fitted estimator. fit_failed : bool The estimator failed to fit. """ if not isinstance(error_score, numbers.Number) and error_score != 'raise': raise ValueError( "error_score must be the string 'raise' or a numeric value. " "(Hint: if using 'raise', please make sure that it has been " "spelled correctly.)" ) progress_msg = "" if verbose > 2: if split_progress is not None: progress_msg = f" {split_progress[0]+1}/{split_progress[1]}" if candidate_progress and verbose > 9: progress_msg += (f"; {candidate_progress[0]+1}/" f"{candidate_progress[1]}") if verbose > 1: if parameters is None: params_msg = '' else: sorted_keys = sorted(parameters) # Ensure deterministic o/p params_msg = (', '.join(f'{k}={parameters[k]}' for k in sorted_keys)) if verbose > 9: start_msg = f"[CV{progress_msg}] START {params_msg}" print(f"{start_msg}{(80 - len(start_msg)) * '.'}") # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = _check_fit_params(X, fit_params, train) if parameters is not None: # clone after setting parameters in case any parameters # are estimators (like pipeline steps) # because pipeline doesn't clone steps in fit cloned_parameters = {} for k, v in parameters.items(): cloned_parameters[k] = clone(v, safe=False) estimator = estimator.set_params(**cloned_parameters) start_time = time.time() X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) result = {} try: if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) except Exception as e: # Note fit time as time until error fit_time = time.time() - start_time score_time = 0.0 if error_score == 'raise': raise elif isinstance(error_score, numbers.Number): if isinstance(scorer, dict): test_scores = {name: 
error_score for name in scorer} if return_train_score: train_scores = test_scores.copy() else: test_scores = error_score if return_train_score: train_scores = error_score warnings.warn("Estimator fit failed. The score on this train-test" " partition for these parameters will be set to %f. " "Details: \n%s" % (error_score, format_exc()), FitFailedWarning) result["fit_failed"] = True else: result["fit_failed"] = False fit_time = time.time() - start_time test_scores = _score(estimator, X_test, y_test, scorer, error_score) score_time = time.time() - start_time - fit_time if return_train_score: train_scores = _score( estimator, X_train, y_train, scorer, error_score ) if verbose > 1: total_time = score_time + fit_time end_msg = f"[CV{progress_msg}] END " result_msg = params_msg + (";" if params_msg else "") if verbose > 2 and isinstance(test_scores, dict): for scorer_name in sorted(test_scores): result_msg += f" {scorer_name}: (" if return_train_score: scorer_scores = train_scores[scorer_name] result_msg += f"train={scorer_scores:.3f}, " result_msg += f"test={test_scores[scorer_name]:.3f})" result_msg += f" total time={logger.short_format_time(total_time)}" # Right align the result_msg end_msg += "." * (80 - len(end_msg) - len(result_msg)) end_msg += result_msg print(end_msg) result["test_scores"] = test_scores if return_train_score: result["train_scores"] = train_scores if return_n_test_samples: result["n_test_samples"] = _num_samples(X_test) if return_times: result["fit_time"] = fit_time result["score_time"] = score_time if return_parameters: result["parameters"] = parameters if return_estimator: result["estimator"] = estimator return result def _score(estimator, X_test, y_test, scorer, error_score="raise"): """Compute the score(s) of an estimator on a given test set. Will return a dict of floats if `scorer` is a dict, otherwise a single float is returned. """ if isinstance(scorer, dict): # will cache method calls if needed. 
scorer() returns a dict scorer = _MultimetricScorer(**scorer) try: if y_test is None: scores = scorer(estimator, X_test) else: scores = scorer(estimator, X_test, y_test) except Exception: if error_score == 'raise': raise else: if isinstance(scorer, _MultimetricScorer): scores = {name: error_score for name in scorer._scorers} else: scores = error_score warnings.warn( f"Scoring failed. The score on this train-test partition for " f"these parameters will be set to {error_score}. Details: \n" f"{format_exc()}", UserWarning, ) error_msg = ( "scoring must return a number, got %s (%s) instead. (scorer=%s)" ) if isinstance(scores, dict): for name, score in scores.items(): if hasattr(score, 'item'): with suppress(ValueError): # e.g. unwrap memmapped scalars score = score.item() if not isinstance(score, numbers.Number): raise ValueError(error_msg % (score, type(score), name)) scores[name] = score else: # scalar if hasattr(scores, 'item'): with suppress(ValueError): # e.g. unwrap memmapped scalars scores = scores.item() if not isinstance(scores, numbers.Number): raise ValueError(error_msg % (scores, type(scores), scorer)) return scores @_deprecate_positional_args def cross_val_predict(estimator, X, y=None, *, groups=None, cv=None, n_jobs=None, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', method='predict'): """Generate cross-validated estimates for each input data point The data is split according to the cv parameter. Each sample belongs to exactly one test set, and its prediction is computed with an estimator fitted on the corresponding training set. Passing these predictions into an evaluation metric may not be a valid way to measure generalization performance. Results can differ from :func:`cross_validate` and :func:`cross_val_score` unless all tests sets have equal size and the metric decomposes over samples. Read more in the :ref:`User Guide <cross_validation>`. 
@_deprecate_positional_args
def cross_val_predict(estimator, X, y=None, *, groups=None, cv=None,
                      n_jobs=None, verbose=0, fit_params=None,
                      pre_dispatch='2*n_jobs', method='predict'):
    """Generate cross-validated estimates for each input data point.

    The data is split according to the cv parameter. Each sample belongs
    to exactly one test set, and its prediction is computed with an
    estimator fitted on the corresponding training set.

    Passing these predictions into an evaluation metric may not be a valid
    way to measure generalization performance. Results can differ from
    :func:`cross_validate` and :func:`cross_val_score` unless all tests sets
    have equal size and the metric decomposes over samples.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.

    X : array-like of shape (n_samples, n_features)
        The data to fit. Can be, for example a list, or an array at least 2d.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
            default=None
        The target variable to try to predict in the case of
        supervised learning.

    groups : array-like of shape (n_samples,), default=None
        Group labels for the samples used while splitting the dataset into
        train/test set. Only used in conjunction with a "Group" :term:`cv`
        instance (e.g., :class:`GroupKFold`).

    cv : int, cross-validation generator or an iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 5-fold cross validation,
        - int, to specify the number of folds in a `(Stratified)KFold`,
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For int/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.

    n_jobs : int, default=None
        Number of jobs to run in parallel. Training the estimator and
        predicting are parallelized over the cross-validation splits.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    verbose : int, default=0
        The verbosity level.

    fit_params : dict, default=None
        Parameters to pass to the fit method of the estimator.

    pre_dispatch : int or str, default='2*n_jobs'
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:

            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs

            - An int, giving the exact number of total jobs that are
              spawned

            - A str, giving an expression as a function of n_jobs,
              as in '2*n_jobs'

    method : {'predict', 'predict_proba', 'predict_log_proba', \
              'decision_function'}, default='predict'
        The method to be invoked by `estimator`.

    Returns
    -------
    predictions : ndarray
        This is the result of calling `method`. Shape:

            - When `method` is 'predict' and in special case where `method` is
              'decision_function' and the target is binary: (n_samples,)
            - When `method` is one of {'predict_proba', 'predict_log_proba',
              'decision_function'} (unless special case above):
              (n_samples, n_classes)
            - If `estimator` is :term:`multioutput`, an extra dimension
              'n_outputs' is added to the end of each shape above.

    See Also
    --------
    cross_val_score : Calculate score for each CV split.

    cross_validate : Calculate one or more scores and timings for each CV
        split.

    Notes
    -----
    In the case that one or more classes are absent in a training portion, a
    default score needs to be assigned to all instances for that class if
    ``method`` produces columns per class, as in {'decision_function',
    'predict_proba', 'predict_log_proba'}.  For ``predict_proba`` this value
    is 0.  In order to ensure finite output, we approximate negative infinity
    by the minimum finite float value for the dtype in other cases.

    Examples
    --------
    >>> from sklearn import datasets, linear_model
    >>> from sklearn.model_selection import cross_val_predict
    >>> diabetes = datasets.load_diabetes()
    >>> X = diabetes.data[:150]
    >>> y = diabetes.target[:150]
    >>> lasso = linear_model.Lasso()
    >>> y_pred = cross_val_predict(lasso, X, y, cv=3)
    """
    X, y, groups = indexable(X, y, groups)

    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    splits = list(cv.split(X, y, groups))

    test_indices = np.concatenate([test for _, test in splits])
    # Every sample must appear in exactly one test fold, otherwise the
    # stacked out-of-fold predictions cannot be mapped back to the input.
    if not _check_is_permutation(test_indices, _num_samples(X)):
        raise ValueError('cross_val_predict only works for partitions')

    # If classification methods produce multiple columns of output,
    # we need to manually encode classes to ensure consistent column ordering.
    encode = method in ['decision_function', 'predict_proba',
                        'predict_log_proba'] and y is not None
    if encode:
        y = np.asarray(y)
        if y.ndim == 1:
            le = LabelEncoder()
            y = le.fit_transform(y)
        elif y.ndim == 2:
            y_enc = np.zeros_like(y, dtype=int)
            for i_label in range(y.shape[1]):
                y_enc[:, i_label] = LabelEncoder().fit_transform(y[:, i_label])
            y = y_enc

    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    predictions = parallel(delayed(_fit_and_predict)(
        clone(estimator), X, y, train, test, verbose, fit_params, method)
        for train, test in splits)

    inv_test_indices = np.empty(len(test_indices), dtype=int)
    inv_test_indices[test_indices] = np.arange(len(test_indices))

    if sp.issparse(predictions[0]):
        predictions = sp.vstack(predictions, format=predictions[0].format)
    elif encode and isinstance(predictions[0], list):
        # `predictions` is a list of method outputs from each fold.
        # If each of those is also a list, then treat this as a
        # multioutput-multiclass task. We need to separately concatenate
        # the method outputs for each label into an `n_labels` long list.
        n_labels = y.shape[1]
        concat_pred = []
        for i_label in range(n_labels):
            label_preds = np.concatenate([p[i_label] for p in predictions])
            concat_pred.append(label_preds)
        predictions = concat_pred
    else:
        predictions = np.concatenate(predictions)

    if isinstance(predictions, list):
        return [p[inv_test_indices] for p in predictions]
    else:
        return predictions[inv_test_indices]
Returns ------- predictions : sequence Result of calling 'estimator.method' """ # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = _check_fit_params(X, fit_params, train) X_train, y_train = _safe_split(estimator, X, y, train) X_test, _ = _safe_split(estimator, X, y, test, train) if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) func = getattr(estimator, method) predictions = func(X_test) encode = method in ['decision_function', 'predict_proba', 'predict_log_proba'] and y is not None if encode: if isinstance(predictions, list): predictions = [_enforce_prediction_order( estimator.classes_[i_label], predictions[i_label], n_classes=len(set(y[:, i_label])), method=method) for i_label in range(len(predictions))] else: # A 2D y array should be a binary label indicator matrix n_classes = len(set(y)) if y.ndim == 1 else y.shape[1] predictions = _enforce_prediction_order( estimator.classes_, predictions, n_classes, method) return predictions def _enforce_prediction_order(classes, predictions, n_classes, method): """Ensure that prediction arrays have correct column order When doing cross-validation, if one or more classes are not present in the subset of data used for training, then the output prediction array might not have the same columns as other folds. Use the list of class names (assumed to be ints) to enforce the correct column order. Note that `classes` is the list of classes in this fold (a subset of the classes in the full training set) and `n_classes` is the number of classes in the full training set. """ if n_classes != len(classes): recommendation = ( 'To fix this, use a cross-validation ' 'technique resulting in properly ' 'stratified folds') warnings.warn('Number of classes in training fold ({}) does ' 'not match total number of classes ({}). ' 'Results may not be appropriate for your use case. 
' '{}'.format(len(classes), n_classes, recommendation), RuntimeWarning) if method == 'decision_function': if (predictions.ndim == 2 and predictions.shape[1] != len(classes)): # This handles the case when the shape of predictions # does not match the number of classes used to train # it with. This case is found when sklearn.svm.SVC is # set to `decision_function_shape='ovo'`. raise ValueError('Output shape {} of {} does not match ' 'number of classes ({}) in fold. ' 'Irregular decision_function outputs ' 'are not currently supported by ' 'cross_val_predict'.format( predictions.shape, method, len(classes))) if len(classes) <= 2: # In this special case, `predictions` contains a 1D array. raise ValueError('Only {} class/es in training fold, but {} ' 'in overall dataset. This ' 'is not supported for decision_function ' 'with imbalanced folds. {}'.format( len(classes), n_classes, recommendation)) float_min = np.finfo(predictions.dtype).min default_values = {'decision_function': float_min, 'predict_log_proba': float_min, 'predict_proba': 0} predictions_for_all_classes = np.full((_num_samples(predictions), n_classes), default_values[method], dtype=predictions.dtype) predictions_for_all_classes[:, classes] = predictions predictions = predictions_for_all_classes return predictions def _check_is_permutation(indices, n_samples): """Check whether indices is a reordering of the array np.arange(n_samples) Parameters ---------- indices : ndarray int array to test n_samples : int number of expected elements Returns ------- is_partition : bool True iff sorted(indices) is np.arange(n) """ if len(indices) != n_samples: return False hit = np.zeros(n_samples, dtype=bool) hit[indices] = True if not np.all(hit): return False return True @_deprecate_positional_args def permutation_test_score(estimator, X, y, *, groups=None, cv=None, n_permutations=100, n_jobs=None, random_state=0, verbose=0, scoring=None, fit_params=None): """Evaluate the significance of a cross-validated score with 
permutations Permutes targets to generate 'randomized data' and compute the empirical p-value against the null hypothesis that features and targets are independent. The p-value represents the fraction of randomized data sets where the estimator performed as well or better than in the original data. A small p-value suggests that there is a real dependency between features and targets which has been used by the estimator to give good predictions. A large p-value may be due to lack of real dependency between features and targets or the estimator was not able to use the dependency to give good predictions. Read more in the :ref:`User Guide <permutation_test_score>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None The target variable to try to predict in the case of supervised learning. groups : array-like of shape (n_samples,), default=None Labels to constrain permutation within groups, i.e. ``y`` values are permuted among samples with the same group identifier. When not specified, ``y`` values are permuted among all samples. When a grouped cross-validator is used, the group labels are also passed on to the ``split`` method of the cross-validator. The cross-validator uses them for grouping the samples while splitting the dataset into train/test set. scoring : str or callable, default=None A single str (see :ref:`scoring_parameter`) or a callable (see :ref:`scoring`) to evaluate the predictions on the test set. If None the estimator's score method is used. cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. 
Possible inputs for cv are: - None, to use the default 5-fold cross validation, - int, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For int/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. n_permutations : int, default=100 Number of times to permute ``y``. n_jobs : int, default=None Number of jobs to run in parallel. Training the estimator and computing the cross-validated score are parallelized over the permutations. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. random_state : int, RandomState instance or None, default=0 Pass an int for reproducible output for permutation of ``y`` values among samples. See :term:`Glossary <random_state>`. verbose : int, default=0 The verbosity level. fit_params : dict, default=None Parameters to pass to the fit method of the estimator. .. versionadded:: 0.24 Returns ------- score : float The true score without permuting targets. permutation_scores : array of shape (n_permutations,) The scores obtained for each permutations. pvalue : float The p-value, which approximates the probability that the score would be obtained by chance. This is calculated as: `(C + 1) / (n_permutations + 1)` Where C is the number of permutations whose score >= the true score. The best possible p-value is 1/(n_permutations + 1), the worst is 1.0. Notes ----- This function implements Test 1 in: Ojala and Garriga. `Permutation Tests for Studying Classifier Performance <http://www.jmlr.org/papers/volume11/ojala10a/ojala10a.pdf>`_. 
   The Journal of Machine Learning Research (2010) vol. 11
    """
    X, y, groups = indexable(X, y, groups)

    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    random_state = check_random_state(random_state)

    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    score = _permutation_test_score(clone(estimator), X, y, groups, cv, scorer,
                                    fit_params=fit_params)
    permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_permutation_test_score)(
            clone(estimator), X, _shuffle(y, groups, random_state),
            groups, cv, scorer, fit_params=fit_params)
        for _ in range(n_permutations))
    permutation_scores = np.array(permutation_scores)
    # Add 1 to both numerator and denominator: the observed (unpermuted)
    # score itself counts as one permutation, so the p-value can never be
    # exactly 0 -- the best achievable value is 1 / (n_permutations + 1).
    pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
    return score, permutation_scores, pvalue


def _permutation_test_score(estimator, X, y, groups, cv, scorer, fit_params):
    """Auxiliary function for permutation_test_score.

    Fits and scores ``estimator`` on every CV fold and returns the mean
    test score over the folds.
    """
    # Adjust length of sample weights
    fit_params = fit_params if fit_params is not None else {}
    avg_score = []
    for train, test in cv.split(X, y, groups):
        X_train, y_train = _safe_split(estimator, X, y, train)
        X_test, y_test = _safe_split(estimator, X, y, test, train)
        # Index per-sample fit parameters (e.g. sample_weight) down to the
        # training subset so their length matches X_train.
        fit_params = _check_fit_params(X, fit_params, train)
        estimator.fit(X_train, y_train, **fit_params)
        avg_score.append(scorer(estimator, X_test, y_test))
    return np.mean(avg_score)


def _shuffle(y, groups, random_state):
    """Return a shuffled copy of y eventually shuffle among same groups."""
    if groups is None:
        # No groups: permute all samples freely.
        indices = random_state.permutation(len(y))
    else:
        # Permute indices only *within* each group, so labels never move
        # across group boundaries.
        indices = np.arange(len(groups))
        for group in np.unique(groups):
            this_mask = (groups == group)
            indices[this_mask] = random_state.permutation(indices[this_mask])
    return _safe_indexing(y, indices)


@_deprecate_positional_args
def learning_curve(estimator, X, y, *, groups=None,
                   train_sizes=np.linspace(0.1, 1.0, 5), cv=None,
                   scoring=None, exploit_incremental_learning=False,
                   n_jobs=None,
pre_dispatch="all", verbose=0, shuffle=False, random_state=None, error_score=np.nan, return_times=False, fit_params=None): """Learning curve. Determines cross-validated training and test scores for different training set sizes. A cross-validation generator splits the whole dataset k times in training and test data. Subsets of the training set with varying sizes will be used to train the estimator and a score for each training subset size and the test set will be computed. Afterwards, the scores will be averaged over all k runs for each training subset size. Read more in the :ref:`User Guide <learning_curve>`. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods An object of that type which is cloned for each validation. X : array-like of shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like of shape (n_samples,) or (n_samples, n_outputs) Target relative to X for classification or regression; None for unsupervised learning. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a "Group" :term:`cv` instance (e.g., :class:`GroupKFold`). train_sizes : array-like of shape (n_ticks,), \ default=np.linspace(0.1, 1.0, 5) Relative or absolute numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of the maximum size of the training set (that is determined by the selected validation method), i.e. it has to be within (0, 1]. Otherwise it is interpreted as absolute sizes of the training sets. Note that for classification the number of samples usually have to be big enough to contain at least one sample from each class. cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. 
Possible inputs for cv are: - None, to use the default 5-fold cross validation, - int, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For int/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. scoring : str or callable, default=None A str (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. exploit_incremental_learning : bool, default=False If the estimator supports incremental learning, this will be used to speed up fitting for different training set sizes. n_jobs : int, default=None Number of jobs to run in parallel. Training the estimator and computing the score are parallelized over the different training and test sets. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. pre_dispatch : int or str, default='all' Number of predispatched jobs for parallel execution (default is all). The option can reduce the allocated memory. The str can be an expression like '2*n_jobs'. verbose : int, default=0 Controls the verbosity: the higher, the more messages. shuffle : bool, default=False Whether to shuffle training data before taking prefixes of it based on``train_sizes``. random_state : int, RandomState instance or None, default=None Used when ``shuffle`` is True. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. error_score : 'raise' or numeric, default=np.nan Value to assign to the score if an error occurs in estimator fitting. 
If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. .. versionadded:: 0.20 return_times : bool, default=False Whether to return the fit and score times. fit_params : dict, default=None Parameters to pass to the fit method of the estimator. .. versionadded:: 0.24 Returns ------- train_sizes_abs : array of shape (n_unique_ticks,) Numbers of training examples that has been used to generate the learning curve. Note that the number of ticks might be less than n_ticks because duplicate entries will be removed. train_scores : array of shape (n_ticks, n_cv_folds) Scores on training sets. test_scores : array of shape (n_ticks, n_cv_folds) Scores on test set. fit_times : array of shape (n_ticks, n_cv_folds) Times spent for fitting in seconds. Only present if ``return_times`` is True. score_times : array of shape (n_ticks, n_cv_folds) Times spent for scoring in seconds. Only present if ``return_times`` is True. Notes ----- See :ref:`examples/model_selection/plot_learning_curve.py <sphx_glr_auto_examples_model_selection_plot_learning_curve.py>` """ if exploit_incremental_learning and not hasattr(estimator, "partial_fit"): raise ValueError("An estimator must support the partial_fit interface " "to exploit incremental learning") X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) # Store it as list as we will be iterating over the list multiple times cv_iter = list(cv.split(X, y, groups)) scorer = check_scoring(estimator, scoring=scoring) n_max_training_samples = len(cv_iter[0][0]) # Because the lengths of folds can be significantly different, it is # not guaranteed that we use all of the available training data when we # use the first 'n_max_training_samples' samples. 
train_sizes_abs = _translate_train_sizes(train_sizes, n_max_training_samples) n_unique_ticks = train_sizes_abs.shape[0] if verbose > 0: print("[learning_curve] Training set sizes: " + str(train_sizes_abs)) parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) if shuffle: rng = check_random_state(random_state) cv_iter = ((rng.permutation(train), test) for train, test in cv_iter) if exploit_incremental_learning: classes = np.unique(y) if is_classifier(estimator) else None out = parallel(delayed(_incremental_fit_estimator)( clone(estimator), X, y, classes, train, test, train_sizes_abs, scorer, verbose, return_times, error_score=error_score, fit_params=fit_params) for train, test in cv_iter ) out = np.asarray(out).transpose((2, 1, 0)) else: train_test_proportions = [] for train, test in cv_iter: for n_train_samples in train_sizes_abs: train_test_proportions.append((train[:n_train_samples], test)) results = parallel(delayed(_fit_and_score)( clone(estimator), X, y, scorer, train, test, verbose, parameters=None, fit_params=fit_params, return_train_score=True, error_score=error_score, return_times=return_times) for train, test in train_test_proportions ) results = _aggregate_score_dicts(results) train_scores = results["train_scores"].reshape(-1, n_unique_ticks).T test_scores = results["test_scores"].reshape(-1, n_unique_ticks).T out = [train_scores, test_scores] if return_times: fit_times = results["fit_time"].reshape(-1, n_unique_ticks).T score_times = results["score_time"].reshape(-1, n_unique_ticks).T out.extend([fit_times, score_times]) ret = train_sizes_abs, out[0], out[1] if return_times: ret = ret + (out[2], out[3]) return ret def _translate_train_sizes(train_sizes, n_max_training_samples): """Determine absolute sizes of training subsets and validate 'train_sizes'. 
Examples: _translate_train_sizes([0.5, 1.0], 10) -> [5, 10] _translate_train_sizes([5, 10], 10) -> [5, 10] Parameters ---------- train_sizes : array-like of shape (n_ticks,) Numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of 'n_max_training_samples', i.e. it has to be within (0, 1]. n_max_training_samples : int Maximum number of training samples (upper bound of 'train_sizes'). Returns ------- train_sizes_abs : array of shape (n_unique_ticks,) Numbers of training examples that will be used to generate the learning curve. Note that the number of ticks might be less than n_ticks because duplicate entries will be removed. """ train_sizes_abs = np.asarray(train_sizes) n_ticks = train_sizes_abs.shape[0] n_min_required_samples = np.min(train_sizes_abs) n_max_required_samples = np.max(train_sizes_abs) if np.issubdtype(train_sizes_abs.dtype, np.floating): if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0: raise ValueError("train_sizes has been interpreted as fractions " "of the maximum number of training samples and " "must be within (0, 1], but is within [%f, %f]." % (n_min_required_samples, n_max_required_samples)) train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype( dtype=int, copy=False) train_sizes_abs = np.clip(train_sizes_abs, 1, n_max_training_samples) else: if (n_min_required_samples <= 0 or n_max_required_samples > n_max_training_samples): raise ValueError("train_sizes has been interpreted as absolute " "numbers of training samples and must be within " "(0, %d], but is within [%d, %d]." % (n_max_training_samples, n_min_required_samples, n_max_required_samples)) train_sizes_abs = np.unique(train_sizes_abs) if n_ticks > train_sizes_abs.shape[0]: warnings.warn("Removed duplicate entries from 'train_sizes'. Number " "of ticks will be less than the size of " "'train_sizes' %d instead of %d)." 
                  % (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)

    return train_sizes_abs


def _incremental_fit_estimator(estimator, X, y, classes, train, test,
                               train_sizes, scorer, verbose,
                               return_times, error_score, fit_params):
    """Train estimator on training subsets incrementally and compute scores."""
    train_scores, test_scores, fit_times, score_times = [], [], [], []
    # Split ``train`` at each train-size boundary: ``partial_train`` holds
    # only the samples *added* at each step, so each sample is fed to
    # partial_fit exactly once.  The trailing chunk produced by np.split
    # (samples beyond the largest train size) is dropped via [:-1].
    partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
    if fit_params is None:
        fit_params = {}
    for n_train_samples, partial_train in partitions:
        # Cumulative training subset -- used for scoring, not for fitting.
        train_subset = train[:n_train_samples]
        X_train, y_train = _safe_split(estimator, X, y, train_subset)
        X_partial_train, y_partial_train = _safe_split(estimator, X, y,
                                                       partial_train)
        X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
        start_fit = time.time()
        if y_partial_train is None:
            # No targets (e.g. unsupervised estimator).
            estimator.partial_fit(X_partial_train, classes=classes,
                                  **fit_params)
        else:
            estimator.partial_fit(X_partial_train, y_partial_train,
                                  classes=classes, **fit_params)
        fit_time = time.time() - start_fit
        fit_times.append(fit_time)
        start_score = time.time()
        test_scores.append(
            _score(estimator, X_test, y_test, scorer, error_score)
        )
        train_scores.append(
            _score(estimator, X_train, y_train, scorer, error_score)
        )
        score_time = time.time() - start_score
        score_times.append(score_time)
    ret = ((train_scores, test_scores, fit_times, score_times)
           if return_times else (train_scores, test_scores))

    return np.array(ret).T


@_deprecate_positional_args
def validation_curve(estimator, X, y, *, param_name, param_range, groups=None,
                     cv=None, scoring=None, n_jobs=None, pre_dispatch="all",
                     verbose=0, error_score=np.nan, fit_params=None):
    """Validation curve.

    Determine training and test scores for varying parameter values.

    Compute scores for an estimator with different values of a specified
    parameter. This is similar to grid search with one parameter. However, this
    will also compute training scores and is merely a utility for plotting the
    results.
Read more in the :ref:`User Guide <validation_curve>`. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods An object of that type which is cloned for each validation. X : array-like of shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None Target relative to X for classification or regression; None for unsupervised learning. param_name : str Name of the parameter that will be varied. param_range : array-like of shape (n_values,) The values of the parameter that will be evaluated. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a "Group" :term:`cv` instance (e.g., :class:`GroupKFold`). cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - int, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For int/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. scoring : str or callable, default=None A str (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. n_jobs : int, default=None Number of jobs to run in parallel. Training the estimator and computing the score are parallelized over the combinations of each parameter value and each cross-validation split. 
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. pre_dispatch : int or str, default='all' Number of predispatched jobs for parallel execution (default is all). The option can reduce the allocated memory. The str can be an expression like '2*n_jobs'. verbose : int, default=0 Controls the verbosity: the higher, the more messages. fit_params : dict, default=None Parameters to pass to the fit method of the estimator. .. versionadded:: 0.24 error_score : 'raise' or numeric, default=np.nan Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. .. versionadded:: 0.20 Returns ------- train_scores : array of shape (n_ticks, n_cv_folds) Scores on training sets. test_scores : array of shape (n_ticks, n_cv_folds) Scores on test set. Notes ----- See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py` """ X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) results = parallel(delayed(_fit_and_score)( clone(estimator), X, y, scorer, train, test, verbose, parameters={param_name: v}, fit_params=fit_params, return_train_score=True, error_score=error_score) # NOTE do not change order of iteration to allow one time cv splitters for train, test in cv.split(X, y, groups) for v in param_range) n_params = len(param_range) results = _aggregate_score_dicts(results) train_scores = results["train_scores"].reshape(-1, n_params).T test_scores = results["test_scores"].reshape(-1, n_params).T return train_scores, test_scores def _aggregate_score_dicts(scores): """Aggregate the list of dict to dict of np ndarray The aggregated output of _aggregate_score_dicts will be a list of dict of form 
    [{'prec': 0.1, 'acc':1.0}, {'prec': 0.1, 'acc':1.0}, ...]

    Convert it to a dict of array {'prec': np.array([0.1 ...]), ...}

    Parameters
    ----------

    scores : list of dict
        List of dicts of the scores for all scorers. This is a flat list,
        assumed originally to be of row major order.

    Example
    -------

    >>> scores = [{'a': 1, 'b':10}, {'a': 2, 'b':2}, {'a': 3, 'b':3},
    ...           {'a': 10, 'b': 10}]                         # doctest: +SKIP
    >>> _aggregate_score_dicts(scores)                        # doctest: +SKIP
    {'a': array([1, 2, 3, 10]),
     'b': array([10, 2, 3, 10])}
    """
    # Numeric values are stacked into an ndarray per key; non-numeric
    # values are kept as a plain Python list.  The type of the first dict's
    # value decides the representation for that key.
    return {
        key: np.asarray([score[key] for score in scores])
        if isinstance(scores[0][key], numbers.Number)
        else [score[key] for score in scores]
        for key in scores[0]
    }
#!/usr/bin/env python3

import os
import re
import sys
import logging

import botocore.credentials
import botocore.session
import boto3

logger = logging.getLogger("ssm-session")


class InstanceResolver():
    """Resolve SSM-managed instances by Instance ID, Name tag, hostname or IP.

    Combines the SSM inventory (authoritative list of reachable instances)
    with EC2 ``describe_instances`` details (Name tag, private/public IPs).
    """

    def __init__(self, args):
        # aws-cli compatible MFA cache - cached STS credentials are shared
        # with aws-cli itself, avoiding repeated MFA prompts.
        cli_cache = os.path.join(os.path.expanduser('~'), '.aws/cli/cache')

        # Construct boto3 session with MFA cache
        session = boto3.session.Session(profile_name=args.profile, region_name=args.region)
        session._session.get_component('credential_provider').get_provider('assume-role').cache = botocore.credentials.JSONFileCache(cli_cache)

        # Create boto3 clients from session
        self.ssm_client = session.client('ssm')
        self.ec2_client = session.client('ec2')

    def get_list(self):
        """Return ``{instance_id: details}`` for all instances known to SSM.

        Each ``details`` dict has keys: InstanceId, InstanceName, HostName,
        Addresses.  EC2 attributes are merged in where available; managed
        instances ("mi-...") keep their SSM inventory data only.
        """
        def _try_append(_list, _dict, _key):
            # Append _dict[_key] only if the key is present.
            if _key in _dict:
                _list.append(_dict[_key])

        items = {}

        # List instances from SSM
        logger.debug("Fetching SSM inventory")
        paginator = self.ssm_client.get_paginator('get_inventory')
        response_iterator = paginator.paginate()
        for inventory in response_iterator:
            for entity in inventory["Entities"]:
                try:
                    content = entity['Data']['AWS:InstanceInformation']["Content"][0]

                    # At the moment we only support EC2 Instances and ManagedInstances
                    if content["ResourceType"] not in ["EC2Instance", "ManagedInstance"]:
                        logger.warning("Unknown instance type: %s: %s", entity['Id'], content['ResourceType'])
                        logger.debug(entity)
                        continue

                    # Ignore Terminated instances
                    if content.get("InstanceStatus") == "Terminated":
                        logger.debug("Ignoring terminated instance: %s", entity)
                        continue

                    # Add to the list.
                    # BUGFIX: only record the IP address when SSM actually
                    # reports one - a [None] entry would crash the
                    # " ".join() in print_list() and pollute the address
                    # matching in resolve_instance().
                    instance_id = content['InstanceId']
                    items[instance_id] = {
                        "InstanceId": instance_id,
                        "InstanceName": "",
                        "HostName": content.get("ComputerName", ""),
                        "Addresses": [content["IpAddress"]] if content.get("IpAddress") else [],
                    }
                    logger.debug("Added instance: %s: %r", instance_id, items[instance_id])
                except (KeyError, ValueError):
                    logger.warning("SSM inventory entity not recognised: %s", entity)
                    continue

        # Add attributes from EC2 - only "i-..." ids can be described there,
        # managed instances ("mi-...") are unknown to EC2.
        paginator = self.ec2_client.get_paginator('describe_instances')
        ec2_instance_ids = list(filter(lambda x: x.startswith("i-"), items))
        tries = 5
        while tries:
            # The SSM inventory sometimes returns instances that have been terminated
            # a short while ago which makes the following call fail
            # with InvalidInstanceID.NotFound exception. We'll try and remove the invalid
            # instance ids a {tries} times or until we succeed. If unsuccessful we'll remove
            # the list obtained from SSM without extra details (host name, public IPs, etc).
            # This mostly / only affects accounts with high churn of starting / stopping
            # instances - most users will pass this loop only once.
            try:
                response_iterator = paginator.paginate(InstanceIds=ec2_instance_ids)
                for reservations in response_iterator:
                    for reservation in reservations['Reservations']:
                        for instance in reservation['Instances']:
                            instance_id = instance['InstanceId']
                            if not instance_id in items:
                                continue

                            # Find instance IPs - replace the SSM-reported
                            # address with the authoritative EC2 ones.
                            items[instance_id]['Addresses'] = []
                            _try_append(items[instance_id]['Addresses'], instance, 'PrivateIpAddress')
                            _try_append(items[instance_id]['Addresses'], instance, 'PublicIpAddress')

                            # Find instance name from tag Name.
                            # BUGFIX: untagged instances have no 'Tags' key
                            # at all - instance['Tags'] raised KeyError.
                            for tag in instance.get('Tags', []):
                                if tag['Key'] == 'Name' and tag['Value']:
                                    items[instance_id]['InstanceName'] = tag['Value']

                            logger.debug("Updated instance: %s: %r", instance_id, items[instance_id])
                return items

            except botocore.exceptions.ClientError as ex:
                if ex.response.get('Error', {}).get('Code', '') != 'InvalidInstanceID.NotFound':
                    raise
                message = ex.response.get('Error', {}).get('Message', '')
                if not message.startswith("The instance ID") or not message.endswith("not exist"):
                    # BUGFIX: 'message' was passed as a lazy %-argument
                    # without a placeholder in the format string, so it was
                    # dropped and logging reported a formatting error.
                    logger.warning("Unexpected InvalidInstanceID.NotFound message: %s", message)
                # Try to extract instance ids ...
                remove_instance_ids = re.findall('i-[0-9a-f]+', message)
                logger.debug("Removing non-existent InstanceIds: %s", remove_instance_ids)
                # Remove the failed ids from the list and try again
                ec2_instance_ids = list(set(ec2_instance_ids) - set(remove_instance_ids))
                tries -= 1

        if not tries:
            logger.warning("Unable to list instance details. Some instance names and IPs may be missing.")

        return items

    def print_list(self):
        """Print all known instances in aligned columns."""
        hostname_len = 1    # Minimum of 1 char, otherwise f-string below fails for empty hostnames
        instname_len = 1

        items = self.get_list().values()
        if not items:
            logger.warning("No instances registered in SSM!")
            return

        items = list(items)
        items.sort(key=lambda x: x.get('InstanceName') or x.get('HostName'))

        # First pass: find the widest hostname / instance name for alignment.
        for item in items:
            hostname_len = max(hostname_len, len(item['HostName']))
            instname_len = max(instname_len, len(item['InstanceName']))

        for item in items:
            # BUGFIX: the original re-used double quotes inside a
            # double-quoted f-string, which is a SyntaxError before
            # Python 3.12 (PEP 701).  Single quotes keep it portable.
            print(f"{item['InstanceId']:20} {item['HostName']:{hostname_len}} {item['InstanceName']:{instname_len}} {' '.join(item['Addresses'])}")

    def resolve_instance(self, instance):
        """Translate *instance* (ID, Name tag, hostname or IP) to an Instance ID.

        Returns None when nothing matches; exits with status 1 when the
        name is ambiguous.
        """
        # Is it a valid Instance ID?
        if re.match('^i-[a-f0-9]+$', instance):
            return instance

        # It is not - find it in the list
        instances = []
        items = self.get_list()
        for instance_id in items:
            item = items[instance_id]
            if instance.lower() in [item['HostName'].lower(), item['InstanceName'].lower()] + item['Addresses']:
                instances.append(instance_id)

        if not instances:
            return None

        if len(instances) > 1:
            logger.warning("Found %d instances for '%s': %s", len(instances), instance, " ".join(instances))
            logger.warning("Use INSTANCE_ID to connect to a specific one")
            # sys.exit() instead of quit() - quit() only exists when the
            # 'site' module has been loaded.
            sys.exit(1)

        # Found only one instance - return it
        return instances[0]
#!/usr/bin/env python3

import os
import re
import sys
import logging

import botocore.credentials
import botocore.session
import boto3

logger = logging.getLogger("ssm-session")


class InstanceResolver():
    """Resolve SSM-managed instances by Instance ID, Name tag, hostname or IP.

    Combines the SSM inventory (authoritative list of reachable instances)
    with EC2 ``describe_instances`` details (Name tag, private/public IPs).
    """

    def __init__(self, args):
        # aws-cli compatible MFA cache - cached STS credentials are shared
        # with aws-cli itself, avoiding repeated MFA prompts.
        cli_cache = os.path.join(os.path.expanduser('~'), '.aws/cli/cache')

        # Construct boto3 session with MFA cache
        session = boto3.session.Session(profile_name=args.profile, region_name=args.region)
        session._session.get_component('credential_provider').get_provider('assume-role').cache = botocore.credentials.JSONFileCache(cli_cache)

        # Create boto3 clients from session
        self.ssm_client = session.client('ssm')
        self.ec2_client = session.client('ec2')

    def get_list(self):
        """Return ``{instance_id: details}`` for all instances known to SSM.

        Each ``details`` dict has keys: InstanceId, InstanceName, HostName,
        Addresses.  EC2 attributes are merged in where available; managed
        instances ("mi-...") keep their SSM inventory data only.
        """
        def _try_append(_list, _dict, _key):
            # Append _dict[_key] only if the key is present.
            if _key in _dict:
                _list.append(_dict[_key])

        items = {}

        # List instances from SSM
        logger.debug("Fetching SSM inventory")
        paginator = self.ssm_client.get_paginator('get_inventory')
        response_iterator = paginator.paginate()
        for inventory in response_iterator:
            for entity in inventory["Entities"]:
                try:
                    content = entity['Data']['AWS:InstanceInformation']["Content"][0]

                    # At the moment we only support EC2 Instances and ManagedInstances
                    if content["ResourceType"] not in ["EC2Instance", "ManagedInstance"]:
                        logger.warning("Unknown instance type: %s: %s", entity['Id'], content['ResourceType'])
                        logger.debug(entity)
                        continue

                    # Ignore Terminated instances
                    if content.get("InstanceStatus") == "Terminated":
                        logger.debug("Ignoring terminated instance: %s", entity)
                        continue

                    # Add to the list.
                    # BUGFIX: only record the IP address when SSM actually
                    # reports one - a [None] entry would crash the
                    # " ".join() in print_list() and pollute the address
                    # matching in resolve_instance().
                    instance_id = content['InstanceId']
                    items[instance_id] = {
                        "InstanceId": instance_id,
                        "InstanceName": "",
                        "HostName": content.get("ComputerName", ""),
                        "Addresses": [content["IpAddress"]] if content.get("IpAddress") else [],
                    }
                    logger.debug("Added instance: %s: %r", instance_id, items[instance_id])
                except (KeyError, ValueError):
                    logger.warning("SSM inventory entity not recognised: %s", entity)
                    continue

        # Add attributes from EC2 - only "i-..." ids can be described there,
        # managed instances ("mi-...") are unknown to EC2.
        paginator = self.ec2_client.get_paginator('describe_instances')
        ec2_instance_ids = list(filter(lambda x: x.startswith("i-"), items))
        tries = 5
        while tries:
            # The SSM inventory sometimes returns instances that have been terminated
            # a short while ago which makes the following call fail
            # with InvalidInstanceID.NotFound exception. We'll try and remove the invalid
            # instance ids a {tries} times or until we succeed. If unsuccessful we'll remove
            # the list obtained from SSM without extra details (host name, public IPs, etc).
            # This mostly / only affects accounts with high churn of starting / stopping
            # instances - most users will pass this loop only once.
            try:
                response_iterator = paginator.paginate(InstanceIds=ec2_instance_ids)
                for reservations in response_iterator:
                    for reservation in reservations['Reservations']:
                        for instance in reservation['Instances']:
                            instance_id = instance['InstanceId']
                            if not instance_id in items:
                                continue

                            # Find instance IPs - replace the SSM-reported
                            # address with the authoritative EC2 ones.
                            items[instance_id]['Addresses'] = []
                            _try_append(items[instance_id]['Addresses'], instance, 'PrivateIpAddress')
                            _try_append(items[instance_id]['Addresses'], instance, 'PublicIpAddress')

                            # Find instance name from tag Name.
                            # BUGFIX: untagged instances have no 'Tags' key
                            # at all - instance['Tags'] raised KeyError.
                            for tag in instance.get('Tags', []):
                                if tag['Key'] == 'Name' and tag['Value']:
                                    items[instance_id]['InstanceName'] = tag['Value']

                            logger.debug("Updated instance: %s: %r", instance_id, items[instance_id])
                return items

            except botocore.exceptions.ClientError as ex:
                if ex.response.get('Error', {}).get('Code', '') != 'InvalidInstanceID.NotFound':
                    raise
                message = ex.response.get('Error', {}).get('Message', '')
                if not message.startswith("The instance ID") or not message.endswith("not exist"):
                    # BUGFIX: 'message' was passed as a lazy %-argument
                    # without a placeholder in the format string, so it was
                    # dropped and logging reported a formatting error.
                    logger.warning("Unexpected InvalidInstanceID.NotFound message: %s", message)
                # Try to extract instance ids ...
                remove_instance_ids = re.findall('i-[0-9a-f]+', message)
                logger.debug("Removing non-existent InstanceIds: %s", remove_instance_ids)
                # Remove the failed ids from the list and try again
                ec2_instance_ids = list(set(ec2_instance_ids) - set(remove_instance_ids))
                tries -= 1

        if not tries:
            logger.warning("Unable to list instance details. Some instance names and IPs may be missing.")

        return items

    def print_list(self):
        """Print all known instances in aligned columns."""
        hostname_len = 1    # Minimum of 1 char, otherwise f-string below fails for empty hostnames
        instname_len = 1

        items = self.get_list().values()
        if not items:
            logger.warning("No instances registered in SSM!")
            return

        items = list(items)
        items.sort(key=lambda x: x.get('InstanceName') or x.get('HostName'))

        # First pass: find the widest hostname / instance name for alignment.
        for item in items:
            hostname_len = max(hostname_len, len(item['HostName']))
            instname_len = max(instname_len, len(item['InstanceName']))

        for item in items:
            print(f"{item['InstanceId']:20} {item['HostName']:{hostname_len}} {item['InstanceName']:{instname_len}} {' '.join(item['Addresses'])}")

    def resolve_instance(self, instance):
        """Translate *instance* (ID, Name tag, hostname or IP) to an Instance ID.

        Returns None when nothing matches; exits with status 1 when the
        name is ambiguous.
        """
        # Is it a valid Instance ID?
        if re.match('^i-[a-f0-9]+$', instance):
            return instance

        # It is not - find it in the list
        instances = []
        items = self.get_list()
        for instance_id in items:
            item = items[instance_id]
            if instance.lower() in [item['HostName'].lower(), item['InstanceName'].lower()] + item['Addresses']:
                instances.append(instance_id)

        if not instances:
            return None

        if len(instances) > 1:
            logger.warning("Found %d instances for '%s': %s", len(instances), instance, " ".join(instances))
            logger.warning("Use INSTANCE_ID to connect to a specific one")
            # sys.exit() instead of quit() - quit() only exists when the
            # 'site' module has been loaded.
            sys.exit(1)

        # Found only one instance - return it
        return instances[0]
import sys
import traceback

import discord
from discord.ext import commands


class ErrorHander(commands.Cog):
    """Centralised error handling for all bot commands.

    NOTE(review): the misspelled class name is kept because it is the
    cog's public identifier (referenced from setup()).
    """

    def __init__(self, bot):
        self.bot = bot

    async def __build_error_embed(self, title, description=discord.Embed.Empty, color=discord.Color.red()):
        """Return a uniform error embed (red by default)."""
        e = discord.Embed(title=title, description=description, color=color)
        return e

    @staticmethod
    def _format_missing_perms(missing_perms):
        """Format permission names as a human-readable 'A, B, and C' list."""
        missing = [perms.replace("_", ' ').replace("guild", "server").title() for perms in missing_perms]
        if len(missing) > 2:
            # BUGFIX: the original re-used the f-string's own quote
            # character inside the replacement field, which is a
            # SyntaxError before Python 3.12 (PEP 701).
            return f"{', '.join(missing[:-1])}, and {missing[-1]}"
        return " and ".join(missing)

    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        """Report command errors to the user; dump unhandled ones to stderr."""
        # Commands with their own local error handler take care of themselves.
        if hasattr(ctx.command, "on_error"):
            return

        ignored = (commands.CommandNotFound, commands.UserInputError, discord.errors.Forbidden)

        # Unwrap CommandInvokeError and friends to the original exception.
        error = getattr(error, "original", error)

        if isinstance(error, ignored):
            return
        elif isinstance(error, commands.NoPrivateMessage):
            try:
                return await ctx.author.send(f":x: | **{ctx.command} can not be used in Private Messages**")
            except Exception:
                # Best effort only - the user may have DMs disabled.
                pass
        elif isinstance(error, commands.CommandOnCooldown):
            e = discord.Embed(title=f":clock1: | Oh? No, you can use the command again after {round(error.retry_after)}s", color=discord.Color.orange())
            return await ctx.send(embed=e)
        elif isinstance(error, commands.BotMissingPermissions):
            fmt = self._format_missing_perms(error.missing_perms)
            e = await self.__build_error_embed(discord.Embed.Empty, f":x: | I am missing some permissions to run this command: ```{fmt}```")
            return await ctx.send(embed=e)
        elif isinstance(error, commands.NSFWChannelRequired):
            e = await self.__build_error_embed(":x: | This command can be used only in NSFW channels")
            return await ctx.send(embed=e)
        elif isinstance(error, commands.DisabledCommand):
            # BUGFIX: typo "dissabled" in the user-facing message.
            e = await self.__build_error_embed(":x: | This command is disabled")
            return await ctx.send(embed=e)
        elif isinstance(error, commands.MissingPermissions):
            fmt = self._format_missing_perms(error.missing_perms)
            e = await self.__build_error_embed(discord.Embed.Empty, f":x: | You need permissions to run this command: ```{fmt}```")
            return await ctx.send(embed=e)
        elif isinstance(error, commands.CheckFailure):
            e = await self.__build_error_embed(":x: | You don't have permissions to use this command")
            # BUGFIX: without this return the handled error was also dumped
            # to stderr below, unlike every other handled branch.
            return await ctx.send(embed=e)

        # NOTE: the original also had an `elif isinstance(error,
        # commands.UserInputError): pass` branch; it was unreachable because
        # UserInputError is already in `ignored` above, so it was removed.

        # Ignore all other exception types, but print them in stderr.
        print(f"Ignoring exception in command {ctx.command}:", file=sys.stderr)
        traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)


def setup(bot):
    """Extension entry point used by bot.load_extension()."""
    bot.add_cog(ErrorHander(bot))
import discord
from discord.ext import commands
import sys
import traceback


class ErrorHander(commands.Cog):
    """Global command-error handler.

    Answers common, user-correctable failures (cooldowns, missing
    permissions, NSFW-only, disabled commands) with a short embed;
    anything unexpected is logged to stderr with a full traceback.
    """

    def __init__(self, bot):
        self.bot = bot

    async def __build_error_embed(self, title, description=discord.Embed.Empty,
                                  color=discord.Color.red()):
        """Return a ready-to-send error embed (red by default)."""
        e = discord.Embed(title=title, description=description, color=color)
        return e

    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        """Route a command error to a user-facing reply or a stderr log.

        Parameters
        ----------
        ctx : commands.Context
            Invocation context of the failing command.
        error : Exception
            Raised error; ``CommandInvokeError`` is unwrapped first.
        """
        # Commands with their own local handler deal with their own failures.
        if hasattr(ctx.command, "on_error"):
            return

        ignored = (commands.CommandNotFound, commands.UserInputError,
                   discord.errors.Forbidden)

        # Unwrap CommandInvokeError to get at the real cause.
        error = getattr(error, "original", error)

        if isinstance(error, ignored):
            return
        elif isinstance(error, commands.NoPrivateMessage):
            try:
                return await ctx.author.send(
                    f":x: | **{ctx.command} can not be used in Private Messages**"
                )
            except discord.HTTPException:
                # BUGFIX: was a bare `except:`; narrow it so programming
                # errors are not silently swallowed. DMs may be closed —
                # fall through to the stderr log below.
                pass
        elif isinstance(error, commands.CommandOnCooldown):
            e = discord.Embed(
                title=f":clock1: | Oh? No, you can use the command again after {round(error.retry_after)}s",
                color=discord.Color.orange(),
            )
            return await ctx.send(embed=e)
        elif isinstance(error, commands.BotMissingPermissions):
            missing = [perms.replace("_", ' ').replace("guild", "server").title()
                       for perms in error.missing_perms]
            if len(missing) > 2:
                fmt = f"{', '.join(missing[:-1])}, and {missing[-1]}"
            else:
                fmt = " and ".join(missing)
            e = await self.__build_error_embed(
                discord.Embed.Empty,
                f":x: | I am missing some permissions to run this command: ```{fmt}```",
            )
            return await ctx.send(embed=e)
        elif isinstance(error, commands.NSFWChannelRequired):
            e = await self.__build_error_embed(
                ":x: | This command can be used only in NSFW channels"
            )
            return await ctx.send(embed=e)
        elif isinstance(error, commands.DisabledCommand):
            # BUGFIX: "dissabled" typo in the user-facing message.
            e = await self.__build_error_embed(":x: | This command is disabled")
            return await ctx.send(embed=e)
        # NOTE: a former `elif isinstance(error, commands.UserInputError)` branch
        # was removed — it was unreachable because UserInputError is in
        # `ignored` and already returned above.
        elif isinstance(error, commands.MissingPermissions):
            missing = [perms.replace("_", ' ').replace("guild", "server").title()
                       for perms in error.missing_perms]
            if len(missing) > 2:
                fmt = f"{', '.join(missing[:-1])}, and {missing[-1]}"
            else:
                fmt = " and ".join(missing)
            e = await self.__build_error_embed(
                discord.Embed.Empty,
                f":x: | You need permissions to run this command: ```{fmt}```",
            )
            return await ctx.send(embed=e)
        elif isinstance(error, commands.CheckFailure):
            e = await self.__build_error_embed(
                ":x: | You don't have permissions to use this command"
            )
            # BUGFIX: return so a handled failure isn't also logged below,
            # matching every other handled branch.
            return await ctx.send(embed=e)

        # Ignore all other exception types, but print them to stderr.
        print(f"Ignoring exception in command {ctx.command}:", file=sys.stderr)
        traceback.print_exception(type(error), error, error.__traceback__,
                                  file=sys.stderr)


def setup(bot):
    bot.add_cog(ErrorHander(bot))
import torch, cv2
from torch.utils.data import Dataset
import json
from tqdm import tqdm
import os
from PIL import Image
from torchvision import transforms as T

from .ray_utils import *


class YourOwnDataset(Dataset):
    """Blender-style (transforms_*.json) NeRF dataset.

    Loads camera poses and RGBA images, pre-computes per-pixel rays, and
    exposes either a flat buffer of rays (training) or per-image stacks
    (evaluation), depending on ``is_stack``.

    Parameters
    ----------
    datadir : str
        Root directory containing ``transforms_{split}.json`` and images.
    split : str
        Which transforms file to read ('train', 'test', ...).
    downsample : float
        Factor by which to shrink images (1.0 = native resolution).
    is_stack : bool
        If False, concatenate all rays/colors into one flat buffer;
        if True, keep one entry per image.
    N_vis : int
        If >= 0, subsample the frames to roughly this many for visualization.
    """

    def __init__(self, datadir, split='train', downsample=1.0, is_stack=False,
                 N_vis=-1):
        self.N_vis = N_vis
        self.root_dir = datadir
        self.split = split
        self.is_stack = is_stack
        self.downsample = downsample
        self.define_transforms()

        # Axis-aligned scene bounds; blender2opencv flips y/z camera axes.
        self.scene_bbox = torch.tensor([[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]])
        self.blender2opencv = np.array(
            [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
        self.read_meta()
        self.define_proj_mat()

        self.white_bg = True
        self.near_far = [0.1, 100.0]

        self.center = torch.mean(self.scene_bbox, axis=0).float().view(1, 1, 3)
        self.radius = (self.scene_bbox[1] - self.center).float().view(1, 1, 3)
        # NOTE: a duplicate `self.downsample = downsample` assignment that
        # followed here was removed — it was already set above.

    def read_depth(self, filename):
        """Load a PFM depth map as a float32 array."""
        depth = np.array(read_pfm(filename)[0], dtype=np.float32)  # (800, 800)
        return depth

    def read_meta(self):
        """Parse the transforms JSON and build rays/colors for every frame."""
        with open(os.path.join(self.root_dir,
                               f"transforms_{self.split}.json"), 'r') as f:
            self.meta = json.load(f)

        w = int(self.meta['w'] / self.downsample)
        h = int(self.meta['h'] / self.downsample)
        self.img_wh = [w, h]
        # Focal lengths derived from the camera opening angles.
        self.focal_x = 0.5 * w / np.tan(0.5 * self.meta['camera_angle_x'])
        self.focal_y = 0.5 * h / np.tan(0.5 * self.meta['camera_angle_y'])
        self.cx, self.cy = self.meta['cx'], self.meta['cy']

        # Ray directions for all pixels, same for all images (same H, W, focal).
        self.directions = get_ray_directions(
            h, w, [self.focal_x, self.focal_y], center=[self.cx, self.cy])  # (h, w, 3)
        self.directions = self.directions / torch.norm(
            self.directions, dim=-1, keepdim=True)
        self.intrinsics = torch.tensor(
            [[self.focal_x, 0, self.cx],
             [0, self.focal_y, self.cy],
             [0, 0, 1]]).float()

        self.image_paths = []
        self.poses = []
        self.all_rays = []
        self.all_rgbs = []
        self.all_masks = []
        self.all_depth = []

        img_eval_interval = (1 if self.N_vis < 0
                             else len(self.meta['frames']) // self.N_vis)
        idxs = list(range(0, len(self.meta['frames']), img_eval_interval))
        for i in tqdm(idxs, desc=f'Loading data {self.split} ({len(idxs)})'):
            frame = self.meta['frames'][i]
            pose = np.array(frame['transform_matrix']) @ self.blender2opencv
            c2w = torch.FloatTensor(pose)
            self.poses += [c2w]

            # BUGFIX: the original nested double quotes inside a double-quoted
            # f-string (f"{frame["file_path"]}.png"), a SyntaxError before
            # Python 3.12.
            image_path = os.path.join(self.root_dir,
                                      f"{frame['file_path']}.png")
            self.image_paths += [image_path]
            img = Image.open(image_path)

            if self.downsample != 1.0:
                img = img.resize(self.img_wh, Image.LANCZOS)
            img = self.transform(img)  # (4, h, w)
            img = img.view(-1, w * h).permute(1, 0)  # (h*w, 4) RGBA
            if img.shape[-1] == 4:
                # Blend alpha onto a white background.
                img = img[:, :3] * img[:, -1:] + (1 - img[:, -1:])
            self.all_rgbs += [img]

            rays_o, rays_d = get_rays(self.directions, c2w)  # both (h*w, 3)
            self.all_rays += [torch.cat([rays_o, rays_d], 1)]  # (h*w, 6)

        self.poses = torch.stack(self.poses)
        if not self.is_stack:
            self.all_rays = torch.cat(self.all_rays, 0)   # (N*h*w, 6)
            self.all_rgbs = torch.cat(self.all_rgbs, 0)   # (N*h*w, 3)
        else:
            self.all_rays = torch.stack(self.all_rays, 0)  # (N, h*w, 6)
            self.all_rgbs = torch.stack(self.all_rgbs, 0).reshape(
                -1, *self.img_wh[::-1], 3)  # (N, h, w, 3)

    def define_transforms(self):
        self.transform = T.ToTensor()

    def define_proj_mat(self):
        # World-to-image projection per pose: K @ inv(c2w)[:3].
        self.proj_mat = self.intrinsics.unsqueeze(0) @ torch.inverse(self.poses)[:, :3]

    def world2ndc(self, points, lindisp=None):
        """Map world-space points into the normalized scene cube."""
        device = points.device
        return (points - self.center.to(device)) / self.radius.to(device)

    def __len__(self):
        return len(self.all_rgbs)

    def __getitem__(self, idx):
        if self.split == 'train':  # use data in the buffers
            sample = {'rays': self.all_rays[idx], 'rgbs': self.all_rgbs[idx]}
        else:  # create data for each image separately
            img = self.all_rgbs[idx]
            rays = self.all_rays[idx]
            # BUGFIX: the original read `self.all_masks[idx]` here, but
            # all_masks is never populated, so every non-train access raised
            # IndexError; the value was also unused, so the read is removed.
            sample = {'rays': rays, 'rgbs': img}
        return sample
import torch,cv2
from torch.utils.data import Dataset
import json
from tqdm import tqdm
import os
from PIL import Image
from torchvision import transforms as T

from .ray_utils import *


class YourOwnDataset(Dataset):
    """Blender-style (transforms_*.json) NeRF dataset.

    Loads camera poses and RGBA images, pre-computes per-pixel rays, and
    exposes either a flat ray buffer (``is_stack=False``) or per-image
    stacks (``is_stack=True``).
    """

    def __init__(self, datadir, split='train', downsample=1.0, is_stack=False, N_vis=-1):
        # N_vis >= 0 subsamples the frames to roughly that many images.
        self.N_vis = N_vis
        self.root_dir = datadir
        self.split = split
        self.is_stack = is_stack
        self.downsample = downsample
        self.define_transforms()

        # Axis-aligned scene bounds; blender2opencv flips the y/z camera axes.
        self.scene_bbox = torch.tensor([[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]])
        self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
        self.read_meta()
        self.define_proj_mat()

        self.white_bg = True
        self.near_far = [0.1,100.0]

        self.center = torch.mean(self.scene_bbox, axis=0).float().view(1, 1, 3)
        self.radius = (self.scene_bbox[1] - self.center).float().view(1, 1, 3)
        # NOTE(review): duplicate assignment — downsample was already set above.
        self.downsample=downsample

    def read_depth(self, filename):
        """Load a PFM depth map as a float32 array."""
        depth = np.array(read_pfm(filename)[0], dtype=np.float32)  # (800, 800)
        return depth

    def read_meta(self):
        """Parse the transforms JSON and build rays/colors for every frame."""
        with open(os.path.join(self.root_dir, f"transforms_{self.split}.json"), 'r') as f:
            self.meta = json.load(f)

        w, h = int(self.meta['w']/self.downsample), int(self.meta['h']/self.downsample)
        self.img_wh = [w,h]
        # Focal lengths derived from the camera opening angles.
        self.focal_x = 0.5 * w / np.tan(0.5 * self.meta['camera_angle_x'])  # original focal length
        self.focal_y = 0.5 * h / np.tan(0.5 * self.meta['camera_angle_y'])  # original focal length
        self.cx, self.cy = self.meta['cx'],self.meta['cy']

        # ray directions for all pixels, same for all images (same H, W, focal)
        self.directions = get_ray_directions(h, w, [self.focal_x,self.focal_y], center=[self.cx, self.cy])  # (h, w, 3)
        self.directions = self.directions / torch.norm(self.directions, dim=-1, keepdim=True)
        self.intrinsics = torch.tensor([[self.focal_x,0,self.cx],[0,self.focal_y,self.cy],[0,0,1]]).float()

        self.image_paths = []
        self.poses = []
        self.all_rays = []
        self.all_rgbs = []
        self.all_masks = []
        self.all_depth = []

        # Evaluation subsampling: keep every img_eval_interval-th frame.
        img_eval_interval = 1 if self.N_vis < 0 else len(self.meta['frames']) // self.N_vis
        idxs = list(range(0, len(self.meta['frames']), img_eval_interval))
        for i in tqdm(idxs, desc=f'Loading data {self.split} ({len(idxs)})'):#img_list:#
            frame = self.meta['frames'][i]
            pose = np.array(frame['transform_matrix']) @ self.blender2opencv
            c2w = torch.FloatTensor(pose)
            self.poses += [c2w]

            image_path = os.path.join(self.root_dir, f"{frame['file_path']}.png")
            self.image_paths += [image_path]
            img = Image.open(image_path)

            if self.downsample!=1.0:
                img = img.resize(self.img_wh, Image.LANCZOS)
            img = self.transform(img)  # (4, h, w)
            img = img.view(-1, w*h).permute(1, 0)  # (h*w, 4) RGBA
            if img.shape[-1]==4:
                img = img[:, :3] * img[:, -1:] + (1 - img[:, -1:])  # blend A to RGB
            self.all_rgbs += [img]

            rays_o, rays_d = get_rays(self.directions, c2w)  # both (h*w, 3)
            self.all_rays += [torch.cat([rays_o, rays_d], 1)]  # (h*w, 6)

        self.poses = torch.stack(self.poses)
        if not self.is_stack:
            self.all_rays = torch.cat(self.all_rays, 0)  # (len(self.meta['frames])*h*w, 3)
            self.all_rgbs = torch.cat(self.all_rgbs, 0)  # (len(self.meta['frames])*h*w, 3)
#             self.all_depth = torch.cat(self.all_depth, 0)  # (len(self.meta['frames])*h*w, 3)
        else:
            self.all_rays = torch.stack(self.all_rays, 0)  # (len(self.meta['frames]),h*w, 3)
            self.all_rgbs = torch.stack(self.all_rgbs, 0).reshape(-1,*self.img_wh[::-1], 3)  # (len(self.meta['frames]),h,w,3)
#             self.all_masks = torch.stack(self.all_masks, 0).reshape(-1,*self.img_wh[::-1])  # (len(self.meta['frames]),h,w,3)

    def define_transforms(self):
        self.transform = T.ToTensor()

    def define_proj_mat(self):
        # World-to-image projection per pose: K @ inv(c2w)[:3].
        self.proj_mat = self.intrinsics.unsqueeze(0) @ torch.inverse(self.poses)[:,:3]

    def world2ndc(self,points,lindisp=None):
        """Map world-space points into the normalized scene cube."""
        device = points.device
        return (points - self.center.to(device)) / self.radius.to(device)

    def __len__(self):
        return len(self.all_rgbs)

    def __getitem__(self, idx):
        if self.split == 'train':  # use data in the buffers
            sample = {'rays': self.all_rays[idx], 'rgbs': self.all_rgbs[idx]}
        else:  # create data for each image separately
            img = self.all_rgbs[idx]
            rays = self.all_rays[idx]
            # NOTE(review): all_masks is never filled in read_meta, so this
            # lookup raises IndexError for any non-train access — confirm
            # whether mask loading was dropped intentionally.
            mask = self.all_masks[idx]  # for quantity evaluation

            sample = {'rays': rays, 'rgbs': img}
        return sample
"""
Credits:

This file was adopted from: https://github.com/pydata/xarray # noqa
Source file: https://github.com/pydata/xarray/blob/1d7bcbdc75b6d556c04e2c7d7a042e4379e15303/xarray/backends/rasterio_.py # noqa
"""

import os
import re
import warnings
from collections import OrderedDict
from distutils.version import LooseVersion

import numpy as np
import rasterio
from rasterio.vrt import WarpedVRT
from xarray import DataArray, Dataset
from xarray.backends.common import BackendArray
from xarray.backends.file_manager import CachingFileManager
from xarray.backends.locks import SerializableLock
from xarray.core import indexing
from xarray.core.utils import is_scalar

from rioxarray.exceptions import RioXarrayError
from rioxarray.rioxarray import affine_to_coords

# TODO: should this be GDAL_LOCK instead?
RASTERIO_LOCK = SerializableLock()


class RasterioArrayWrapper(BackendArray):
    """A wrapper around rasterio dataset objects"""

    def __init__(self, manager, lock, vrt_params=None, masked=False):
        from rasterio.vrt import WarpedVRT

        self.manager = manager
        self.lock = lock
        self.masked = masked

        # cannot save riods as an attribute: this would break pickleability
        riods = manager.acquire()
        if vrt_params is not None:
            riods = WarpedVRT(riods, **vrt_params)
        self.vrt_params = vrt_params
        self._shape = (riods.count, riods.height, riods.width)

        dtypes = riods.dtypes
        if not np.all(np.asarray(dtypes) == dtypes[0]):
            raise ValueError("All bands should have the same dtype")

        # Masked reads are promoted to float64 so NaN can represent nodata.
        self._dtype = np.dtype("float64") if self.masked else np.dtype(dtypes[0])

    @property
    def dtype(self):
        return self._dtype

    @property
    def shape(self):
        return self._shape

    def _get_indexer(self, key):
        """Get indexer for rasterio array.

        Parameter
        ---------
        key: tuple of int

        Returns
        -------
        band_key: an indexer for the 1st dimension
        window: two tuples. Each consists of (start, stop).
        squeeze_axis: axes to be squeezed
        np_ind: indexer for loaded numpy array

        See also
        --------
        indexing.decompose_indexer
        """
        if len(key) != 3:
            raise RioXarrayError("rasterio datasets should always be 3D")

        # bands cannot be windowed but they can be listed
        band_key = key[0]
        np_inds = []
        # bands (axis=0) cannot be windowed but they can be listed
        if isinstance(band_key, slice):
            start, stop, step = band_key.indices(self.shape[0])
            band_key = np.arange(start, stop, step)
        # be sure we give out a list; rasterio band indexes are 1-based
        band_key = (np.asarray(band_key) + 1).tolist()
        if isinstance(band_key, list):  # if band_key is not a scalar
            np_inds.append(slice(None))

        # but other dims can only be windowed
        window = []
        squeeze_axis = []
        for i, (k, n) in enumerate(zip(key[1:], self.shape[1:])):
            if isinstance(k, slice):
                # step is always positive. see indexing.decompose_indexer
                start, stop, step = k.indices(n)
                np_inds.append(slice(None, None, step))
            elif is_scalar(k):
                # windowed operations will always return an array
                # we will have to squeeze it later
                squeeze_axis.append(-(2 - i))
                start = k
                stop = k + 1
            else:
                start, stop = np.min(k), np.max(k) + 1
                np_inds.append(k - start)
            window.append((start, stop))

        if isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray):
            # do outer-style indexing
            np_inds[-2:] = np.ix_(*np_inds[-2:])

        return band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds)

    def _getitem(self, key):
        from rasterio.vrt import WarpedVRT

        band_key, window, squeeze_axis, np_inds = self._get_indexer(key)

        if not band_key or any(start == stop for (start, stop) in window):
            # no need to do IO
            shape = (len(band_key),) + tuple(stop - start for (start, stop) in window)
            out = np.zeros(shape, dtype=self.dtype)
        else:
            with self.lock:
                riods = self.manager.acquire(needs_lock=False)
                if self.vrt_params is not None:
                    riods = WarpedVRT(riods, **self.vrt_params)
                out = riods.read(band_key, window=window, masked=self.masked)
                if self.masked:
                    out = np.ma.filled(out.astype(self.dtype), np.nan)

        if squeeze_axis:
            out = np.squeeze(out, axis=squeeze_axis)
        return out[np_inds]

    def __getitem__(self, key):
        return indexing.explicit_indexing_adapter(
            key, self.shape, indexing.IndexingSupport.OUTER, self._getitem
        )


def _parse_envi(meta):
    """Parse ENVI metadata into Python data structures.

    See the link for information on the ENVI header file format:
    http://www.harrisgeospatial.com/docs/enviheaderfiles.html

    Parameters
    ----------
    meta : dict
        Dictionary of keys and str values to parse, as returned by the
        rasterio tags(ns='ENVI') call.

    Returns
    -------
    parsed_meta : dict
        Dictionary containing the original keys and the parsed values
    """

    def parsevec(s):
        # NOTE(review): np.fromstring with sep= is deprecated in newer numpy;
        # kept as-is to preserve its lenient parsing — confirm before swapping
        # for np.fromiter/float splitting.
        return np.fromstring(s.strip("{}"), dtype="float", sep=",")

    def default(s):
        return s.strip("{}")

    parse = {"wavelength": parsevec, "fwhm": parsevec}
    parsed_meta = {k: parse.get(k, default)(v) for k, v in meta.items()}
    return parsed_meta


def _parse_tags(tags):
    """Best-effort conversion of raster tag strings to vectors/ints/floats."""

    def parsevec(s):
        return np.fromstring(s.strip("{}"), dtype="float", sep=",")

    parsed_tags = {}
    for key, value in tags.items():
        if value.startswith("{") and value.endswith("}"):
            new_val = parsevec(value)
            # leave the original string if the vector parse came up empty
            value = new_val if len(new_val) else value
        else:
            try:
                value = int(value)
            except (TypeError, ValueError):
                try:
                    value = float(value)
                except (TypeError, ValueError):
                    pass
        parsed_tags[key] = value
    return parsed_tags


def build_subdataset_filter(group_names, variable_names):
    """
    Example::
        'HDF4_EOS:EOS_GRID:"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf":
        MODIS_Grid_2D:sur_refl_b01_1'

    Parameters
    ----------
    group_names: str or list or tuple
        Name or names of netCDF groups to filter by.
    variable_names: str or list or tuple
        Name or names of netCDF variables to filter by.

    Returns
    -------
    re.SRE_Pattern: output of re.compile()
    """
    variable_query = r"\w+"
    if variable_names is not None:
        if not isinstance(variable_names, (tuple, list)):
            variable_names = [variable_names]
        variable_names = [re.escape(variable_name) for variable_name in variable_names]
        # BUGFIX: the original nested double quotes inside a double-quoted
        # rf-string (rf"(?:{"|".join(...)})"), a SyntaxError before Python 3.12.
        variable_query = rf"(?:{'|'.join(variable_names)})"
    if group_names is not None:
        if not isinstance(group_names, (tuple, list)):
            group_names = [group_names]
        group_names = [re.escape(group_name) for group_name in group_names]
        # Same nested-quote BUGFIX as variable_query above.
        group_query = rf"(?:{'|'.join(group_names)})"
    else:
        return re.compile(r"".join([r".*(?:\:/|\:)(/+)?", variable_query, r"$"]))
    return re.compile(
        r"".join(
            [r".*(?:\:/|\:)(/+)?", group_query, r"[:/](/+)?", variable_query, r"$"]
        )
    )


def _rio_transform(riods):
    """
    Get the transform from a rasterio dataset
    reguardless of rasterio version.
    """
    try:
        return riods.transform
    except AttributeError:
        return riods.affine  # rasterio < 1.0


def _get_rasterio_attrs(riods, masked):
    """
    Get rasterio specific attributes/encoding

    Parameters
    ----------
    riods : rasterio.DatasetReader or rasterio.vrt.WarpedVRT
        Open dataset to read attributes from.
    masked : bool
        When True, the nodata value goes into ``encoding`` instead of
        ``attrs`` (masked reads already map nodata to NaN).

    Returns
    -------
    (dict, dict): attrs and encoding dictionaries.
    """
    # Add rasterio attributes
    attrs = _parse_tags(riods.tags(1))
    encoding = dict()
    # Affine transformation matrix (always available)
    # This describes coefficients mapping pixel coordinates to CRS
    # For serialization store as tuple of 6 floats, the last row being
    # always (0, 0, 1) per definition (see
    # https://github.com/sgillies/affine)
    attrs["transform"] = tuple(_rio_transform(riods))[:6]
    if hasattr(riods, "nodata") and riods.nodata is not None:
        # The nodata values for the raster bands
        if masked:
            encoding["_FillValue"] = riods.nodata
        else:
            attrs["_FillValue"] = riods.nodata
    if hasattr(riods, "scales"):
        # The scale values for the raster bands
        attrs["scales"] = riods.scales
    if hasattr(riods, "offsets"):
        # The offset values for the raster bands
        attrs["offsets"] = riods.offsets
    if hasattr(riods, "descriptions") and any(riods.descriptions):
        if len(riods.descriptions) == 1:
            attrs["long_name"] = riods.descriptions[0]
        else:
            # Descriptions for each dataset band
            attrs["long_name"] = riods.descriptions
    if hasattr(riods, "units") and any(riods.units):
        # A list of units string for each dataset band
        if len(riods.units) == 1:
            attrs["units"] = riods.units[0]
        else:
            attrs["units"] = riods.units

    return attrs, encoding


def _parse_driver_tags(riods, attrs, coords):
    """Fold driver-specific tag metadata into attrs/coords in place."""
    # Parse extra metadata from tags, if supported
    parsers = {"ENVI": _parse_envi}

    driver = riods.driver
    if driver in parsers:
        meta = parsers[driver](riods.tags(ns=driver))

        for k, v in meta.items():
            # Add values as coordinates if they match the band count,
            # as attributes otherwise
            if isinstance(v, (list, np.ndarray)) and len(v) == riods.count:
                coords[k] = ("band", np.asarray(v))
            else:
                attrs[k] = v


def _load_subdatasets(
    riods, group, variable, parse_coordinates, chunks, cache, lock, masked
):
    """
    Load in rasterio subdatasets
    """
    base_tags = _parse_tags(riods.tags())
    dim_groups = {}
    subdataset_filter = None
    if any((group, variable)):
        subdataset_filter = build_subdataset_filter(group, variable)
    for iii, subdataset in enumerate(riods.subdatasets):
        if subdataset_filter is not None and not subdataset_filter.match(subdataset):
            continue
        with rasterio.open(subdataset) as rds:
            shape = rds.shape
        rioda = open_rasterio(
            subdataset,
            # only parse coordinates for the first dataset of each shape
            parse_coordinates=shape not in dim_groups and parse_coordinates,
            chunks=chunks,
            cache=cache,
            lock=lock,
            masked=masked,
            default_name=subdataset.split(":")[-1].lstrip("/").replace("/", "_"),
        )
        if shape not in dim_groups:
            dim_groups[shape] = {rioda.name: rioda}
        else:
            dim_groups[shape][rioda.name] = rioda

    if len(dim_groups) > 1:
        dataset = [
            Dataset(dim_group, attrs=base_tags) for dim_group in dim_groups.values()
        ]
    elif not dim_groups:
        dataset = Dataset(attrs=base_tags)
    else:
        dataset = Dataset(list(dim_groups.values())[0], attrs=base_tags)
    return dataset


def _prepare_dask(result, riods, filename, chunks):
    """
    Prepare the data for dask computations
    """
    from dask.base import tokenize

    # augment the token with the file modification time
    try:
        mtime = os.path.getmtime(filename)
    except OSError:
        # the filename is probably an s3 bucket rather than a regular file
        mtime = None

    if chunks in (True, "auto"):
        from dask.array.core import normalize_chunks
        import dask

        if LooseVersion(dask.__version__) < LooseVersion("0.18.0"):
            msg = (
                "Automatic chunking requires dask.__version__ >= 0.18.0 . "
                "You currently have version %s" % dask.__version__
            )
            raise NotImplementedError(msg)
        block_shape = (1,) + riods.block_shapes[0]
        chunks = normalize_chunks(
            chunks=(1, "auto", "auto"),
            shape=(riods.count, riods.height, riods.width),
            dtype=riods.dtypes[0],
            previous_chunks=tuple((c,) for c in block_shape),
        )
    token = tokenize(filename, mtime, chunks)
    name_prefix = "open_rasterio-%s" % token
    return result.chunk(chunks, name_prefix=name_prefix, token=token)


def open_rasterio(
    filename,
    parse_coordinates=None,
    chunks=None,
    cache=None,
    lock=None,
    masked=False,
    variable=None,
    group=None,
    default_name=None,
    **open_kwargs,
):
    """Open a file with rasterio (experimental).

    This should work with any file that rasterio can open (most often:
    geoTIFF). The x and y coordinates are generated automatically from the
    file's geoinformation, shifted to the center of each pixel (see
    `"PixelIsArea" Raster Space
    <http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_
    for more information).

    You can generate 2D coordinates from the file's attributes with::

        from affine import Affine
        da = xr.open_rasterio('path_to_file.tif')
        transform = Affine.from_gdal(*da.attrs['transform'])
        nx, ny = da.sizes['x'], da.sizes['y']
        x, y = np.meshgrid(np.arange(nx)+0.5, np.arange(ny)+0.5) * transform

    Parameters
    ----------
    filename: str, rasterio.DatasetReader, or rasterio.WarpedVRT
        Path to the file to open. Or already open rasterio dataset.
    parse_coordinates: bool, optional
        Whether to parse the x and y coordinates out of the file's
        ``transform`` attribute or not. The default is to automatically
        parse the coordinates only if they are rectilinear (1D).
        It can be useful to set ``parse_coordinates=False``
        if your files are very large or if you don't need the coordinates.
    chunks: int, tuple or dict, optional
        Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
        ``{'x': 5, 'y': 5}``. If chunks is provided, it used to load the new
        DataArray into a dask array. Chunks can also be set to
        ``True`` or ``"auto"`` to choose sensible chunk sizes according to
        ``dask.config.get("array.chunk-size").
    cache: bool, optional
        If True, cache data loaded from the underlying datastore in memory as
        NumPy arrays when accessed to avoid reading from the underlying data-
        store multiple times. Defaults to True unless you specify the `chunks`
        argument to use dask, in which case it defaults to False.
    lock: False, True or threading.Lock, optional
        If chunks is provided, this argument is passed on to
        :py:func:`dask.array.from_array`. By default, a global lock is
        used to avoid issues with concurrent access to the same file when using
        dask's multithreaded backend.
    masked: bool, optional
        If True, read the mask and to set values to NaN. Defaults to False.
    variable: str or list or tuple, optional
        Variable name or names to use to filter loading.
    group: str or list or tuple, optional
        Group name or names to use to filter loading.
    default_name: str, optional
        The name of the data array if none exists. Default is None.
    **open_kwargs: kwargs, optional
        Optional keyword arguments to pass into rasterio.open().

    Returns
    -------
    data : DataArray
        The newly created DataArray.
    """
    parse_coordinates = True if parse_coordinates is None else parse_coordinates

    vrt_params = None
    if isinstance(filename, rasterio.io.DatasetReader):
        filename = filename.name
    elif isinstance(filename, rasterio.vrt.WarpedVRT):
        # Re-create the VRT from its source so the manager can reopen it.
        vrt = filename
        filename = vrt.src_dataset.name
        vrt_params = dict(
            crs=vrt.crs.to_string(),
            resampling=vrt.resampling,
            src_nodata=vrt.src_nodata,
            dst_nodata=vrt.dst_nodata,
            tolerance=vrt.tolerance,
            transform=vrt.transform,
            width=vrt.width,
            height=vrt.height,
            warp_extras=vrt.warp_extras,
        )

    if lock is None:
        lock = RASTERIO_LOCK

    # ensure default for sharing is False
    # ref https://github.com/mapbox/rasterio/issues/1504
    open_kwargs["sharing"] = open_kwargs.get("sharing", False)
    manager = CachingFileManager(
        rasterio.open, filename, lock=lock, mode="r", kwargs=open_kwargs
    )
    riods = manager.acquire()

    # open the subdatasets if they exist
    if riods.subdatasets:
        return _load_subdatasets(
            riods=riods,
            group=group,
            variable=variable,
            parse_coordinates=parse_coordinates,
            chunks=chunks,
            cache=cache,
            lock=lock,
            masked=masked,
        )

    if vrt_params is not None:
        riods = WarpedVRT(riods, **vrt_params)

    if cache is None:
        cache = chunks is None

    # Get bands
    if riods.count < 1:
        raise ValueError("Unknown dims")
    coords = OrderedDict()
    coords["band"] = np.asarray(riods.indexes)

    # parse tags
    attrs, encoding = _get_rasterio_attrs(riods=riods, masked=masked)
    _parse_driver_tags(riods=riods, attrs=attrs, coords=coords)

    # Get geospatial coordinates
    transform = _rio_transform(riods)
    if parse_coordinates and transform.is_rectilinear:
        # 1d coordinates
        coords.update(affine_to_coords(riods.transform, riods.width, riods.height))
    elif parse_coordinates:
        # 2d coordinates
        warnings.warn(
            "The file coordinates' transformation isn't "
            "rectilinear: xarray won't parse the coordinates "
            "in this case. Set `parse_coordinates=False` to "
            "suppress this warning.",
            RuntimeWarning,
            stacklevel=3,
        )

    data = indexing.LazilyOuterIndexedArray(
        RasterioArrayWrapper(manager, lock, vrt_params, masked=masked)
    )

    # this lets you write arrays loaded with rasterio
    data = indexing.CopyOnWriteArray(data)
    if cache and chunks is None:
        data = indexing.MemoryCachedArray(data)

    # create the output data array
    da_name = attrs.pop("NETCDF_VARNAME", default_name)
    result = DataArray(
        data=data, dims=("band", "y", "x"), coords=coords, attrs=attrs, name=da_name
    )
    result.encoding = encoding

    if hasattr(riods, "crs") and riods.crs:
        result.rio.write_crs(riods.crs, inplace=True)

    if chunks is not None:
        result = _prepare_dask(result, riods, filename, chunks)

    # Make the file closeable
    result._file_obj = manager

    return result
""" Credits: This file was adopted from: https://github.com/pydata/xarray # noqa Source file: https://github.com/pydata/xarray/blob/1d7bcbdc75b6d556c04e2c7d7a042e4379e15303/xarray/backends/rasterio_.py # noqa """ import os import re import warnings from collections import OrderedDict from distutils.version import LooseVersion import numpy as np import rasterio from rasterio.vrt import WarpedVRT from xarray import DataArray, Dataset from xarray.backends.common import BackendArray from xarray.backends.file_manager import CachingFileManager from xarray.backends.locks import SerializableLock from xarray.core import indexing from xarray.core.utils import is_scalar from rioxarray.exceptions import RioXarrayError from rioxarray.rioxarray import affine_to_coords # TODO: should this be GDAL_LOCK instead? RASTERIO_LOCK = SerializableLock() class RasterioArrayWrapper(BackendArray): """A wrapper around rasterio dataset objects""" def __init__(self, manager, lock, vrt_params=None, masked=False): from rasterio.vrt import WarpedVRT self.manager = manager self.lock = lock self.masked = masked # cannot save riods as an attribute: this would break pickleability riods = manager.acquire() if vrt_params is not None: riods = WarpedVRT(riods, **vrt_params) self.vrt_params = vrt_params self._shape = (riods.count, riods.height, riods.width) dtypes = riods.dtypes if not np.all(np.asarray(dtypes) == dtypes[0]): raise ValueError("All bands should have the same dtype") self._dtype = np.dtype("float64") if self.masked else np.dtype(dtypes[0]) @property def dtype(self): return self._dtype @property def shape(self): return self._shape def _get_indexer(self, key): """ Get indexer for rasterio array. Parameter --------- key: tuple of int Returns ------- band_key: an indexer for the 1st dimension window: two tuples. Each consists of (start, stop). 
squeeze_axis: axes to be squeezed np_ind: indexer for loaded numpy array See also -------- indexing.decompose_indexer """ if len(key) != 3: raise RioXarrayError("rasterio datasets should always be 3D") # bands cannot be windowed but they can be listed band_key = key[0] np_inds = [] # bands (axis=0) cannot be windowed but they can be listed if isinstance(band_key, slice): start, stop, step = band_key.indices(self.shape[0]) band_key = np.arange(start, stop, step) # be sure we give out a list band_key = (np.asarray(band_key) + 1).tolist() if isinstance(band_key, list): # if band_key is not a scalar np_inds.append(slice(None)) # but other dims can only be windowed window = [] squeeze_axis = [] for i, (k, n) in enumerate(zip(key[1:], self.shape[1:])): if isinstance(k, slice): # step is always positive. see indexing.decompose_indexer start, stop, step = k.indices(n) np_inds.append(slice(None, None, step)) elif is_scalar(k): # windowed operations will always return an array # we will have to squeeze it later squeeze_axis.append(-(2 - i)) start = k stop = k + 1 else: start, stop = np.min(k), np.max(k) + 1 np_inds.append(k - start) window.append((start, stop)) if isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray): # do outer-style indexing np_inds[-2:] = np.ix_(*np_inds[-2:]) return band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds) def _getitem(self, key): from rasterio.vrt import WarpedVRT band_key, window, squeeze_axis, np_inds = self._get_indexer(key) if not band_key or any(start == stop for (start, stop) in window): # no need to do IO shape = (len(band_key),) + tuple(stop - start for (start, stop) in window) out = np.zeros(shape, dtype=self.dtype) else: with self.lock: riods = self.manager.acquire(needs_lock=False) if self.vrt_params is not None: riods = WarpedVRT(riods, **self.vrt_params) out = riods.read(band_key, window=window, masked=self.masked) if self.masked: out = np.ma.filled(out.astype(self.dtype), np.nan) if squeeze_axis: out = 
np.squeeze(out, axis=squeeze_axis) return out[np_inds] def __getitem__(self, key): return indexing.explicit_indexing_adapter( key, self.shape, indexing.IndexingSupport.OUTER, self._getitem ) def _parse_envi(meta): """Parse ENVI metadata into Python data structures. See the link for information on the ENVI header file format: http://www.harrisgeospatial.com/docs/enviheaderfiles.html Parameters ---------- meta : dict Dictionary of keys and str values to parse, as returned by the rasterio tags(ns='ENVI') call. Returns ------- parsed_meta : dict Dictionary containing the original keys and the parsed values """ def parsevec(s): return np.fromstring(s.strip("{}"), dtype="float", sep=",") def default(s): return s.strip("{}") parse = {"wavelength": parsevec, "fwhm": parsevec} parsed_meta = {k: parse.get(k, default)(v) for k, v in meta.items()} return parsed_meta def _parse_tags(tags): def parsevec(s): return np.fromstring(s.strip("{}"), dtype="float", sep=",") parsed_tags = {} for key, value in tags.items(): if value.startswith("{") and value.endswith("}"): new_val = parsevec(value) value = new_val if len(new_val) else value else: try: value = int(value) except (TypeError, ValueError): try: value = float(value) except (TypeError, ValueError): pass parsed_tags[key] = value return parsed_tags def build_subdataset_filter(group_names, variable_names): """ Example:: 'HDF4_EOS:EOS_GRID:"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf": MODIS_Grid_2D:sur_refl_b01_1' Parameters ---------- group_names: str or list or tuple Name or names of netCDF groups to filter by. variable_names: str or list or tuple Name or names of netCDF variables to filter by. 
Returns ------- re.SRE_Pattern: output of re.compile() """ variable_query = r"\w+" if variable_names is not None: if not isinstance(variable_names, (tuple, list)): variable_names = [variable_names] variable_names = [re.escape(variable_name) for variable_name in variable_names] variable_query = rf"(?:{'|'.join(variable_names)})" if group_names is not None: if not isinstance(group_names, (tuple, list)): group_names = [group_names] group_names = [re.escape(group_name) for group_name in group_names] group_query = rf"(?:{'|'.join(group_names)})" else: return re.compile(r"".join([r".*(?:\:/|\:)(/+)?", variable_query, r"$"])) return re.compile( r"".join( [r".*(?:\:/|\:)(/+)?", group_query, r"[:/](/+)?", variable_query, r"$"] ) ) def _rio_transform(riods): """ Get the transform from a rasterio dataset reguardless of rasterio version. """ try: return riods.transform except AttributeError: return riods.affine # rasterio < 1.0 def _get_rasterio_attrs(riods, masked): """ Get rasterio specific attributes/encoding """ # Add rasterio attributes attrs = _parse_tags(riods.tags(1)) encoding = dict() # Affine transformation matrix (always available) # This describes coefficients mapping pixel coordinates to CRS # For serialization store as tuple of 6 floats, the last row being # always (0, 0, 1) per definition (see # https://github.com/sgillies/affine) attrs["transform"] = tuple(_rio_transform(riods))[:6] if hasattr(riods, "nodata") and riods.nodata is not None: # The nodata values for the raster bands if masked: encoding["_FillValue"] = riods.nodata else: attrs["_FillValue"] = riods.nodata if hasattr(riods, "scales"): # The scale values for the raster bands attrs["scales"] = riods.scales if hasattr(riods, "offsets"): # The offset values for the raster bands attrs["offsets"] = riods.offsets if hasattr(riods, "descriptions") and any(riods.descriptions): if len(riods.descriptions) == 1: attrs["long_name"] = riods.descriptions[0] else: # Descriptions for each dataset band 
attrs["long_name"] = riods.descriptions if hasattr(riods, "units") and any(riods.units): # A list of units string for each dataset band if len(riods.units) == 1: attrs["units"] = riods.units[0] else: attrs["units"] = riods.units return attrs, encoding def _parse_driver_tags(riods, attrs, coords): # Parse extra metadata from tags, if supported parsers = {"ENVI": _parse_envi} driver = riods.driver if driver in parsers: meta = parsers[driver](riods.tags(ns=driver)) for k, v in meta.items(): # Add values as coordinates if they match the band count, # as attributes otherwise if isinstance(v, (list, np.ndarray)) and len(v) == riods.count: coords[k] = ("band", np.asarray(v)) else: attrs[k] = v def _load_subdatasets( riods, group, variable, parse_coordinates, chunks, cache, lock, masked ): """ Load in rasterio subdatasets """ base_tags = _parse_tags(riods.tags()) dim_groups = {} subdataset_filter = None if any((group, variable)): subdataset_filter = build_subdataset_filter(group, variable) for iii, subdataset in enumerate(riods.subdatasets): if subdataset_filter is not None and not subdataset_filter.match(subdataset): continue with rasterio.open(subdataset) as rds: shape = rds.shape rioda = open_rasterio( subdataset, parse_coordinates=shape not in dim_groups and parse_coordinates, chunks=chunks, cache=cache, lock=lock, masked=masked, default_name=subdataset.split(":")[-1].lstrip("/").replace("/", "_"), ) if shape not in dim_groups: dim_groups[shape] = {rioda.name: rioda} else: dim_groups[shape][rioda.name] = rioda if len(dim_groups) > 1: dataset = [ Dataset(dim_group, attrs=base_tags) for dim_group in dim_groups.values() ] elif not dim_groups: dataset = Dataset(attrs=base_tags) else: dataset = Dataset(list(dim_groups.values())[0], attrs=base_tags) return dataset def _prepare_dask(result, riods, filename, chunks): """ Prepare the data for dask computations """ from dask.base import tokenize # augment the token with the file modification time try: mtime = 
os.path.getmtime(filename) except OSError: # the filename is probably an s3 bucket rather than a regular file mtime = None if chunks in (True, "auto"): from dask.array.core import normalize_chunks import dask if LooseVersion(dask.__version__) < LooseVersion("0.18.0"): msg = ( "Automatic chunking requires dask.__version__ >= 0.18.0 . " "You currently have version %s" % dask.__version__ ) raise NotImplementedError(msg) block_shape = (1,) + riods.block_shapes[0] chunks = normalize_chunks( chunks=(1, "auto", "auto"), shape=(riods.count, riods.height, riods.width), dtype=riods.dtypes[0], previous_chunks=tuple((c,) for c in block_shape), ) token = tokenize(filename, mtime, chunks) name_prefix = "open_rasterio-%s" % token return result.chunk(chunks, name_prefix=name_prefix, token=token) def open_rasterio( filename, parse_coordinates=None, chunks=None, cache=None, lock=None, masked=False, variable=None, group=None, default_name=None, **open_kwargs, ): """Open a file with rasterio (experimental). This should work with any file that rasterio can open (most often: geoTIFF). The x and y coordinates are generated automatically from the file's geoinformation, shifted to the center of each pixel (see `"PixelIsArea" Raster Space <http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_ for more information). You can generate 2D coordinates from the file's attributes with:: from affine import Affine da = xr.open_rasterio('path_to_file.tif') transform = Affine.from_gdal(*da.attrs['transform']) nx, ny = da.sizes['x'], da.sizes['y'] x, y = np.meshgrid(np.arange(nx)+0.5, np.arange(ny)+0.5) * transform Parameters ---------- filename: str, rasterio.DatasetReader, or rasterio.WarpedVRT Path to the file to open. Or already open rasterio dataset. parse_coordinates: bool, optional Whether to parse the x and y coordinates out of the file's ``transform`` attribute or not. 
The default is to automatically parse the coordinates only if they are rectilinear (1D). It can be useful to set ``parse_coordinates=False`` if your files are very large or if you don't need the coordinates. chunks: int, tuple or dict, optional Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or ``{'x': 5, 'y': 5}``. If chunks is provided, it used to load the new DataArray into a dask array. Chunks can also be set to ``True`` or ``"auto"`` to choose sensible chunk sizes according to ``dask.config.get("array.chunk-size"). cache: bool, optional If True, cache data loaded from the underlying datastore in memory as NumPy arrays when accessed to avoid reading from the underlying data- store multiple times. Defaults to True unless you specify the `chunks` argument to use dask, in which case it defaults to False. lock: False, True or threading.Lock, optional If chunks is provided, this argument is passed on to :py:func:`dask.array.from_array`. By default, a global lock is used to avoid issues with concurrent access to the same file when using dask's multithreaded backend. masked: bool, optional If True, read the mask and to set values to NaN. Defaults to False. variable: str or list or tuple, optional Variable name or names to use to filter loading. group: str or list or tuple, optional Group name or names to use to filter loading. default_name: str, optional The name of the data array if none exists. Default is None. **open_kwargs: kwargs, optional Optional keyword arguments to pass into rasterio.open(). Returns ------- data : DataArray The newly created DataArray. 
""" parse_coordinates = True if parse_coordinates is None else parse_coordinates vrt_params = None if isinstance(filename, rasterio.io.DatasetReader): filename = filename.name elif isinstance(filename, rasterio.vrt.WarpedVRT): vrt = filename filename = vrt.src_dataset.name vrt_params = dict( crs=vrt.crs.to_string(), resampling=vrt.resampling, src_nodata=vrt.src_nodata, dst_nodata=vrt.dst_nodata, tolerance=vrt.tolerance, transform=vrt.transform, width=vrt.width, height=vrt.height, warp_extras=vrt.warp_extras, ) if lock is None: lock = RASTERIO_LOCK # ensure default for sharing is False # ref https://github.com/mapbox/rasterio/issues/1504 open_kwargs["sharing"] = open_kwargs.get("sharing", False) manager = CachingFileManager( rasterio.open, filename, lock=lock, mode="r", kwargs=open_kwargs ) riods = manager.acquire() # open the subdatasets if they exist if riods.subdatasets: return _load_subdatasets( riods=riods, group=group, variable=variable, parse_coordinates=parse_coordinates, chunks=chunks, cache=cache, lock=lock, masked=masked, ) if vrt_params is not None: riods = WarpedVRT(riods, **vrt_params) if cache is None: cache = chunks is None # Get bands if riods.count < 1: raise ValueError("Unknown dims") coords = OrderedDict() coords["band"] = np.asarray(riods.indexes) # parse tags attrs, encoding = _get_rasterio_attrs(riods=riods, masked=masked) _parse_driver_tags(riods=riods, attrs=attrs, coords=coords) # Get geospatial coordinates transform = _rio_transform(riods) if parse_coordinates and transform.is_rectilinear: # 1d coordinates coords.update(affine_to_coords(riods.transform, riods.width, riods.height)) elif parse_coordinates: # 2d coordinates warnings.warn( "The file coordinates' transformation isn't " "rectilinear: xarray won't parse the coordinates " "in this case. 
Set `parse_coordinates=False` to " "suppress this warning.", RuntimeWarning, stacklevel=3, ) data = indexing.LazilyOuterIndexedArray( RasterioArrayWrapper(manager, lock, vrt_params, masked=masked) ) # this lets you write arrays loaded with rasterio data = indexing.CopyOnWriteArray(data) if cache and chunks is None: data = indexing.MemoryCachedArray(data) # create the output data array da_name = attrs.pop("NETCDF_VARNAME", default_name) result = DataArray( data=data, dims=("band", "y", "x"), coords=coords, attrs=attrs, name=da_name ) result.encoding = encoding if hasattr(riods, "crs") and riods.crs: result.rio.write_crs(riods.crs, inplace=True) if chunks is not None: result = _prepare_dask(result, riods, filename, chunks) # Make the file closeable result._file_obj = manager return result
import os from django.db import models from django.utils.text import slugify from django.contrib.postgres.fields import ArrayField from django.db.models.signals import post_save from django.dispatch import receiver from notifications.signals import notify from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode from django.utils.encoding import force_bytes, force_text from authors.apps.authentication.models import User from authors.apps.profiles.models import Profile from authors.apps.core.email_with_celery import SendEmail class TimestampedModel(models.Model): ''' Model to take care of when an instance occurs in the database Appends created at and updated at fields using datetime.now()''' # Timestamp shows when an object was first created in the database created_at = models.DateTimeField(auto_now_add=True) # represents when an object was last changed updated_at = models.DateTimeField(auto_now=True) class Meta: abstract = True # It is a good practice to have ordering in reverse chronology. # ordering = ['-created_at', '-updated_at'] class Article(TimestampedModel): slug = models.SlugField(db_index=True, max_length=255, unique=True) title = models.CharField(db_index=True, max_length=255) description = models.TextField() body = models.TextField() tagList = ArrayField(models.CharField( max_length=255), default=None, null=True, blank=True) image = models.ImageField( upload_to='myphoto/%Y/%m/%d/', null=True, max_length=255) # blank = True # a many-to-many field will map to a serializer field that # requires at least one input, unless the model field has blank=True like = models.ManyToManyField(User, blank=True, related_name='like') # define related_name argument for 'Article.like' or 'Article.dislike'. 
# to ensure that the fields were not conflicting with each other, dislike = models.ManyToManyField(User, blank=True, related_name='dislike') # Bookmarked is set as False bookmarked = models.BooleanField(default=False) # An author is the creator of the article, usually the current logged in user. # I create a foreign key r/ship. # This r/ship can help returns all articles of a particular author. author = models.ForeignKey( 'authentication.User', on_delete=models.CASCADE, related_name='articles' ) ratings_counter = models.IntegerField(default=0) prepopulated_fields = {"slug": ("title",)} def _get_unique_slug(self): slug = slugify(self.title) unique_slug = slug num = 1 while Article.objects.filter(slug=unique_slug).exists(): unique_slug = '{}-{}'.format(slug, num) num += 1 return unique_slug def save(self, *args, **kwargs): ''' Creates a slug based on Article title Example: Title: ArticleOne Slug: ArticleOne-1 ''' self.slug = self._get_unique_slug() super(Article, self).save(*args, **kwargs) def updaterate(self, rating): ''' ''' self.ratings_counter = rating def __str__(self): ''' Returns a title of the article as object representation''' return self.title class Comment(TimestampedModel): ''' Comment class implementation ''' body = models.TextField() author = models.ForeignKey('authentication.User', on_delete=models.CASCADE) article = models.ForeignKey(Article, on_delete=models.CASCADE) likes = models.ManyToManyField('authentication.User', related_name='likes', blank=True) dislikes = models.ManyToManyField('authentication.User', related_name='dislikes', blank=True) def __str__(self): return self.body class ArticleRating(models.Model): """ Defines the ratings fields for a rater """ rater = models.ForeignKey( 'authentication.User', on_delete=models.CASCADE, related_name='articlesrating' ) note =models.TextField() article = models.ForeignKey( Article, on_delete=models.CASCADE, related_name="articlerating") rating = models.IntegerField() def __str__(self): return 
self.note class Report(TimestampedModel): """Reporting an article model""" body = models.TextField() author = models.ForeignKey('authentication.User', on_delete=models.CASCADE) article = models.ForeignKey(Article, on_delete=models.CASCADE) def __str__(self): return self.body @receiver(post_save, sender=Article) def send_notifications_to_all_users(sender, instance, created, *args, **kwargs): """Create a Signal that sends email to all users that follow the author. Arguments: sender {[type]} -- [Instance of ] created {[type]} -- [If the article is posted.] """ if instance and created: users_following = instance.author.profile.get_followers( instance.author.profile) users_follow = [u.user for u in users_following if u.get_notifications] link = f'{os.getenv('HEROKU_BACKEND_URL')}/api/articles/{instance.slug}' users_foll = [u.user.id for u in users_following] if users_foll: uuid = urlsafe_base64_encode(force_bytes(users_foll[0]) ).decode("utf-8") subscription = f'{os.getenv('HEROKU_BACKEND_URL')}/api/users/subscription/{uuid}/' SendEmail( template="create_article.html", context={ "article": instance, "author": instance.author, "url_link": link, "subscription": subscription }, subject="New Article", e_to=[u.email for u in users_follow], ).send() @receiver(post_save, sender=Comment) def send_notifications_to_all_users_on_comments(sender, instance, created, *args, **kwargs): """Create a Signal that sends email to all users that follow the author. Arguments: sender {[type]} -- [Instance of ] created {[type]} -- [If the article is posted.] 
""" if instance and created: user_following = Profile.objects.all() user_follow = [u.user for u in user_following if \ u.has_favorited(instance.article) and u.get_notifications] author = User.objects.get(email=instance.author) if author: comment = Comment.objects.get(id=instance.id) link = f'{os.getenv('HEROKU_BACKEND_URL')}/api/articles/{comment.article.slug}/comments/{instance.id}' uuid = urlsafe_base64_encode(force_bytes(author.id) ).decode("utf-8") subscription = f'{os.getenv('HEROKU_BACKEND_URL')}/api/users/subscription/{uuid}/' SendEmail( template="comment_notification.html", context={ "article": instance.article, "comment": instance, "url_link": link, "subscription": subscription }, subject=" New Comment.", e_to=[u.email for u in user_follow], ).send()
import os from django.db import models from django.utils.text import slugify from django.contrib.postgres.fields import ArrayField from django.db.models.signals import post_save from django.dispatch import receiver from notifications.signals import notify from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode from django.utils.encoding import force_bytes, force_text from authors.apps.authentication.models import User from authors.apps.profiles.models import Profile from authors.apps.core.email_with_celery import SendEmail class TimestampedModel(models.Model): ''' Model to take care of when an instance occurs in the database Appends created at and updated at fields using datetime.now()''' # Timestamp shows when an object was first created in the database created_at = models.DateTimeField(auto_now_add=True) # represents when an object was last changed updated_at = models.DateTimeField(auto_now=True) class Meta: abstract = True # It is a good practice to have ordering in reverse chronology. # ordering = ['-created_at', '-updated_at'] class Article(TimestampedModel): slug = models.SlugField(db_index=True, max_length=255, unique=True) title = models.CharField(db_index=True, max_length=255) description = models.TextField() body = models.TextField() tagList = ArrayField(models.CharField( max_length=255), default=None, null=True, blank=True) image = models.ImageField( upload_to='myphoto/%Y/%m/%d/', null=True, max_length=255) # blank = True # a many-to-many field will map to a serializer field that # requires at least one input, unless the model field has blank=True like = models.ManyToManyField(User, blank=True, related_name='like') # define related_name argument for 'Article.like' or 'Article.dislike'. 
# to ensure that the fields were not conflicting with each other, dislike = models.ManyToManyField(User, blank=True, related_name='dislike') # Bookmarked is set as False bookmarked = models.BooleanField(default=False) # An author is the creator of the article, usually the current logged in user. # I create a foreign key r/ship. # This r/ship can help returns all articles of a particular author. author = models.ForeignKey( 'authentication.User', on_delete=models.CASCADE, related_name='articles' ) ratings_counter = models.IntegerField(default=0) prepopulated_fields = {"slug": ("title",)} def _get_unique_slug(self): slug = slugify(self.title) unique_slug = slug num = 1 while Article.objects.filter(slug=unique_slug).exists(): unique_slug = '{}-{}'.format(slug, num) num += 1 return unique_slug def save(self, *args, **kwargs): ''' Creates a slug based on Article title Example: Title: ArticleOne Slug: ArticleOne-1 ''' self.slug = self._get_unique_slug() super(Article, self).save(*args, **kwargs) def updaterate(self, rating): ''' ''' self.ratings_counter = rating def __str__(self): ''' Returns a title of the article as object representation''' return self.title class Comment(TimestampedModel): ''' Comment class implementation ''' body = models.TextField() author = models.ForeignKey('authentication.User', on_delete=models.CASCADE) article = models.ForeignKey(Article, on_delete=models.CASCADE) likes = models.ManyToManyField('authentication.User', related_name='likes', blank=True) dislikes = models.ManyToManyField('authentication.User', related_name='dislikes', blank=True) def __str__(self): return self.body class ArticleRating(models.Model): """ Defines the ratings fields for a rater """ rater = models.ForeignKey( 'authentication.User', on_delete=models.CASCADE, related_name='articlesrating' ) note =models.TextField() article = models.ForeignKey( Article, on_delete=models.CASCADE, related_name="articlerating") rating = models.IntegerField() def __str__(self): return 
self.note class Report(TimestampedModel): """Reporting an article model""" body = models.TextField() author = models.ForeignKey('authentication.User', on_delete=models.CASCADE) article = models.ForeignKey(Article, on_delete=models.CASCADE) def __str__(self): return self.body @receiver(post_save, sender=Article) def send_notifications_to_all_users(sender, instance, created, *args, **kwargs): """Create a Signal that sends email to all users that follow the author. Arguments: sender {[type]} -- [Instance of ] created {[type]} -- [If the article is posted.] """ if instance and created: users_following = instance.author.profile.get_followers( instance.author.profile) users_follow = [u.user for u in users_following if u.get_notifications] link = f'{os.getenv("HEROKU_BACKEND_URL")}/api/articles/{instance.slug}' users_foll = [u.user.id for u in users_following] if users_foll: uuid = urlsafe_base64_encode(force_bytes(users_foll[0]) ).decode("utf-8") subscription = f'{os.getenv("HEROKU_BACKEND_URL")}/api/users/subscription/{uuid}/' SendEmail( template="create_article.html", context={ "article": instance, "author": instance.author, "url_link": link, "subscription": subscription }, subject="New Article", e_to=[u.email for u in users_follow], ).send() @receiver(post_save, sender=Comment) def send_notifications_to_all_users_on_comments(sender, instance, created, *args, **kwargs): """Create a Signal that sends email to all users that follow the author. Arguments: sender {[type]} -- [Instance of ] created {[type]} -- [If the article is posted.] 
""" if instance and created: user_following = Profile.objects.all() user_follow = [u.user for u in user_following if \ u.has_favorited(instance.article) and u.get_notifications] author = User.objects.get(email=instance.author) if author: comment = Comment.objects.get(id=instance.id) link = f'{os.getenv("HEROKU_BACKEND_URL")}/api/articles/{comment.article.slug}/comments/{instance.id}' uuid = urlsafe_base64_encode(force_bytes(author.id) ).decode("utf-8") subscription = f'{os.getenv("HEROKU_BACKEND_URL")}/api/users/subscription/{uuid}/' SendEmail( template="comment_notification.html", context={ "article": instance.article, "comment": instance, "url_link": link, "subscription": subscription }, subject=" New Comment.", e_to=[u.email for u in user_follow], ).send()
import os from typing import Tuple import boto3 from botocore.exceptions import ClientError from dotenv import load_dotenv load_dotenv() AWS_REGION = 'eu-west-1' def handler(event, context): subject, message = parse_params(event, context) sender = os.getenv('SENDER_EMAIL') recipient = os.getenv('RECIPIENT_EMAIL') send_email(subject, message, sender, recipient) def parse_params(event, _) -> Tuple[str, str]: data = event['body'] if 'subject' not in data: raise Exception('"subject" param is required') if 'message' not in data: raise Exception('"message" param is required') return data['subject'], data['message'] def send_email(subject: str, message: str, sender: str, recipient: str): client = boto3.client('ses', region_name=AWS_REGION) try: response = client.send_email( Destination={ 'ToAddresses': [recipient], }, Message={ 'Body': { 'Text': { 'Charset': 'UTF-8', 'Data': message, }, }, 'Subject': { 'Charset': 'UTF-8', 'Data': subject, }, }, Source=sender, ) except ClientError as e: raise Exception(e.response['Error']['Message']) else: print(f"Email sent! Message ID: {response["MessageId"]}")
import os from typing import Tuple import boto3 from botocore.exceptions import ClientError from dotenv import load_dotenv load_dotenv() AWS_REGION = 'eu-west-1' def handler(event, context): subject, message = parse_params(event, context) sender = os.getenv('SENDER_EMAIL') recipient = os.getenv('RECIPIENT_EMAIL') send_email(subject, message, sender, recipient) def parse_params(event, _) -> Tuple[str, str]: data = event['body'] if 'subject' not in data: raise Exception('"subject" param is required') if 'message' not in data: raise Exception('"message" param is required') return data['subject'], data['message'] def send_email(subject: str, message: str, sender: str, recipient: str): client = boto3.client('ses', region_name=AWS_REGION) try: response = client.send_email( Destination={ 'ToAddresses': [recipient], }, Message={ 'Body': { 'Text': { 'Charset': 'UTF-8', 'Data': message, }, }, 'Subject': { 'Charset': 'UTF-8', 'Data': subject, }, }, Source=sender, ) except ClientError as e: raise Exception(e.response['Error']['Message']) else: print(f"Email sent! Message ID: {response['MessageId']}")
import datetime import logging import math import time import torch from os import path as osp from basicsr.data import build_dataloader, build_dataset from basicsr.data.data_sampler import EnlargedSampler from basicsr.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher from basicsr.models import build_model from basicsr.utils import (AvgTimer, MessageLogger, check_resume, get_env_info, get_root_logger, get_time_str, init_tb_logger, init_wandb_logger, make_exp_dirs, mkdir_and_rename, scandir) from basicsr.utils.options import copy_opt_file, dict2str, parse_options def init_tb_loggers(opt): # initialize wandb logger before tensorboard logger to allow proper sync if (opt['logger'].get('wandb') is not None) and (opt['logger']['wandb'].get('project') is not None) and ('debug' not in opt['name']): assert opt['logger'].get('use_tb_logger') is True, ('should turn on tensorboard when using wandb') init_wandb_logger(opt) tb_logger = None if opt['logger'].get('use_tb_logger') and 'debug' not in opt['name']: tb_logger = init_tb_logger(log_dir=osp.join(opt['root_path'], 'tb_logger', opt['name'])) return tb_logger def create_train_val_dataloader(opt, logger): # create train and val dataloaders train_loader, val_loader = None, None for phase, dataset_opt in opt['datasets'].items(): if phase == 'train': dataset_enlarge_ratio = dataset_opt.get('dataset_enlarge_ratio', 1) train_set = build_dataset(dataset_opt) train_sampler = EnlargedSampler(train_set, opt['world_size'], opt['rank'], dataset_enlarge_ratio) train_loader = build_dataloader( train_set, dataset_opt, num_gpu=opt['num_gpu'], dist=opt['dist'], sampler=train_sampler, seed=opt['manual_seed']) num_iter_per_epoch = math.ceil( len(train_set) * dataset_enlarge_ratio / (dataset_opt['batch_size_per_gpu'] * opt['world_size'])) total_iters = int(opt['train']['total_iter']) total_epochs = math.ceil(total_iters / (num_iter_per_epoch)) logger.info('Training statistics:' f'\n\tNumber of train images: {len(train_set)}' 
f'\n\tDataset enlarge ratio: {dataset_enlarge_ratio}' f'\n\tBatch size per gpu: {dataset_opt['batch_size_per_gpu']}' f'\n\tWorld size (gpu number): {opt['world_size']}' f'\n\tRequire iter number per epoch: {num_iter_per_epoch}' f'\n\tTotal epochs: {total_epochs}; iters: {total_iters}.') elif phase == 'val': val_set = build_dataset(dataset_opt) val_loader = build_dataloader( val_set, dataset_opt, num_gpu=opt['num_gpu'], dist=opt['dist'], sampler=None, seed=opt['manual_seed']) logger.info(f'Number of val images/folders in {dataset_opt['name']}: {len(val_set)}') else: raise ValueError(f'Dataset phase {phase} is not recognized.') return train_loader, train_sampler, val_loader, total_epochs, total_iters def load_resume_state(opt): resume_state_path = None if opt['auto_resume']: state_path = osp.join('experiments', opt['name'], 'training_states') if osp.isdir(state_path): states = list(scandir(state_path, suffix='state', recursive=False, full_path=False)) if len(states) != 0: states = [float(v.split('.state')[0]) for v in states] resume_state_path = osp.join(state_path, f'{max(states):.0f}.state') opt['path']['resume_state'] = resume_state_path else: if opt['path'].get('resume_state'): resume_state_path = opt['path']['resume_state'] if resume_state_path is None: resume_state = None else: device_id = torch.cuda.current_device() resume_state = torch.load(resume_state_path, map_location=lambda storage, loc: storage.cuda(device_id)) check_resume(opt, resume_state['iter']) return resume_state def train_pipeline(root_path): # parse options, set distributed setting, set ramdom seed opt, args = parse_options(root_path, is_train=True) opt['root_path'] = root_path torch.backends.cudnn.benchmark = True # torch.backends.cudnn.deterministic = True # load resume states if necessary resume_state = load_resume_state(opt) # mkdir for experiments and logger if resume_state is None: make_exp_dirs(opt) if opt['logger'].get('use_tb_logger') and 'debug' not in opt['name'] and opt['rank'] == 
0: mkdir_and_rename(osp.join(opt['root_path'], 'tb_logger', opt['name'])) # copy the yml file to the experiment root copy_opt_file(args.opt, opt['path']['experiments_root']) # WARNING: should not use get_root_logger in the above codes, including the called functions # Otherwise the logger will not be properly initialized log_file = osp.join(opt['path']['log'], f"train_{opt["name"]}_{get_time_str()}.log") logger = get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=log_file) logger.info(get_env_info()) logger.info(dict2str(opt)) # initialize wandb and tb loggers tb_logger = init_tb_loggers(opt) # create train and validation dataloaders result = create_train_val_dataloader(opt, logger) train_loader, train_sampler, val_loader, total_epochs, total_iters = result # create model model = build_model(opt) if resume_state: # resume training model.resume_training(resume_state) # handle optimizers and schedulers logger.info(f"Resuming training from epoch: {resume_state["epoch"]}, " f"iter: {resume_state["iter"]}.") start_epoch = resume_state['epoch'] current_iter = resume_state['iter'] else: start_epoch = 0 current_iter = 0 # create message logger (formatted outputs) msg_logger = MessageLogger(opt, current_iter, tb_logger) # dataloader prefetcher prefetch_mode = opt['datasets']['train'].get('prefetch_mode') if prefetch_mode is None or prefetch_mode == 'cpu': prefetcher = CPUPrefetcher(train_loader) elif prefetch_mode == 'cuda': prefetcher = CUDAPrefetcher(train_loader, opt) logger.info(f'Use {prefetch_mode} prefetch dataloader') if opt['datasets']['train'].get('pin_memory') is not True: raise ValueError('Please set pin_memory=True for CUDAPrefetcher.') else: raise ValueError(f'Wrong prefetch_mode {prefetch_mode}.' 
"Supported ones are: None, 'cuda', 'cpu'.") # training logger.info(f'Start training from epoch: {start_epoch}, iter: {current_iter}') data_timer, iter_timer = AvgTimer(), AvgTimer() start_time = time.time() for epoch in range(start_epoch, total_epochs + 1): train_sampler.set_epoch(epoch) prefetcher.reset() train_data = prefetcher.next() while train_data is not None: data_timer.record() current_iter += 1 if current_iter > total_iters: break # update learning rate model.update_learning_rate(current_iter, warmup_iter=opt['train'].get('warmup_iter', -1)) # training model.feed_data(train_data) model.optimize_parameters(current_iter) iter_timer.record() if current_iter == 1: # reset start time in msg_logger for more accurate eta_time # not work in resume mode msg_logger.reset_start_time() # log if current_iter % opt['logger']['print_freq'] == 0: log_vars = {'epoch': epoch, 'iter': current_iter} log_vars.update({'lrs': model.get_current_learning_rate()}) log_vars.update({'time': iter_timer.get_avg_time(), 'data_time': data_timer.get_avg_time()}) log_vars.update(model.get_current_log()) msg_logger(log_vars) # save models and training states if current_iter % opt['logger']['save_checkpoint_freq'] == 0: logger.info('Saving models and training states.') model.save(epoch, current_iter) # validation if opt.get('val') is not None and (current_iter % opt['val']['val_freq'] == 0): model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img']) data_timer.start() iter_timer.start() train_data = prefetcher.next() # end of iter # end of epoch consumed_time = str(datetime.timedelta(seconds=int(time.time() - start_time))) logger.info(f'End of training. 
Time consumed: {consumed_time}') logger.info('Save the latest model.') model.save(epoch=-1, current_iter=-1) # -1 stands for the latest if opt.get('val') is not None: model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img']) if tb_logger: tb_logger.close() if __name__ == '__main__': root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir)) train_pipeline(root_path)
import datetime
import logging
import math
import time
import torch
from os import path as osp

from basicsr.data import build_dataloader, build_dataset
from basicsr.data.data_sampler import EnlargedSampler
from basicsr.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher
from basicsr.models import build_model
from basicsr.utils import (AvgTimer, MessageLogger, check_resume, get_env_info, get_root_logger, get_time_str,
                           init_tb_logger, init_wandb_logger, make_exp_dirs, mkdir_and_rename, scandir)
from basicsr.utils.options import copy_opt_file, dict2str, parse_options


def init_tb_loggers(opt):
    """Initialize the experiment loggers requested in ``opt['logger']``.

    Wandb is initialized first (when a wandb project is configured) so that
    tensorboard events created afterwards are synced to it.  Experiments whose
    name contains ``'debug'`` get no loggers at all.

    Args:
        opt (dict): Full option dictionary; only the ``logger``, ``name`` and
            ``root_path`` entries are read here.

    Returns:
        Tensorboard logger instance, or ``None`` when tensorboard is disabled.
    """
    # initialize wandb logger before tensorboard logger to allow proper sync
    if (opt['logger'].get('wandb') is not None) and (opt['logger']['wandb'].get('project') is not None) and ('debug' not in opt['name']):
        # wandb piggybacks on tensorboard events, so tensorboard must be on too
        assert opt['logger'].get('use_tb_logger') is True, ('should turn on tensorboard when using wandb')
        init_wandb_logger(opt)
    tb_logger = None
    if opt['logger'].get('use_tb_logger') and 'debug' not in opt['name']:
        tb_logger = init_tb_logger(log_dir=osp.join(opt['root_path'], 'tb_logger', opt['name']))
    return tb_logger


def create_train_val_dataloader(opt, logger):
    """Build the train dataloader (and the optional val dataloader).

    Iterates over ``opt['datasets']`` and dispatches on the phase key; only
    ``'train'`` and ``'val'`` phases are accepted.

    Args:
        opt (dict): Full option dictionary (``datasets``, ``world_size``,
            ``rank``, ``num_gpu``, ``dist``, ``manual_seed``, ``train`` used).
        logger: Root logger used to report dataset statistics.

    Returns:
        tuple: ``(train_loader, train_sampler, val_loader, total_epochs,
        total_iters)``.  ``val_loader`` is ``None`` when no val phase is
        configured.

    Raises:
        ValueError: For any phase other than ``'train'``/``'val'``.
    """
    # create train and val dataloaders
    train_loader, val_loader = None, None
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            # EnlargedSampler repeats the dataset by this ratio so small
            # datasets do not restart an epoch (and its workers) too often
            dataset_enlarge_ratio = dataset_opt.get('dataset_enlarge_ratio', 1)
            train_set = build_dataset(dataset_opt)
            train_sampler = EnlargedSampler(train_set, opt['world_size'], opt['rank'], dataset_enlarge_ratio)
            train_loader = build_dataloader(
                train_set,
                dataset_opt,
                num_gpu=opt['num_gpu'],
                dist=opt['dist'],
                sampler=train_sampler,
                seed=opt['manual_seed'])

            # iterations per epoch account for the enlarge ratio and the
            # global (all-gpu) batch size
            num_iter_per_epoch = math.ceil(
                len(train_set) * dataset_enlarge_ratio / (dataset_opt['batch_size_per_gpu'] * opt['world_size']))
            total_iters = int(opt['train']['total_iter'])
            total_epochs = math.ceil(total_iters / (num_iter_per_epoch))
            logger.info('Training statistics:'
                        f'\n\tNumber of train images: {len(train_set)}'
                        f'\n\tDataset enlarge ratio: {dataset_enlarge_ratio}'
                        f'\n\tBatch size per gpu: {dataset_opt["batch_size_per_gpu"]}'
                        f'\n\tWorld size (gpu number): {opt["world_size"]}'
                        f'\n\tRequire iter number per epoch: {num_iter_per_epoch}'
                        f'\n\tTotal epochs: {total_epochs}; iters: {total_iters}.')
        elif phase == 'val':
            val_set = build_dataset(dataset_opt)
            val_loader = build_dataloader(
                val_set, dataset_opt, num_gpu=opt['num_gpu'], dist=opt['dist'], sampler=None, seed=opt['manual_seed'])
            logger.info(f'Number of val images/folders in {dataset_opt["name"]}: {len(val_set)}')
        else:
            raise ValueError(f'Dataset phase {phase} is not recognized.')

    return train_loader, train_sampler, val_loader, total_epochs, total_iters


def load_resume_state(opt):
    """Locate and load a training-state checkpoint, if any.

    With ``opt['auto_resume']`` set, the newest ``*.state`` file under
    ``experiments/<name>/training_states`` is picked (file names are numeric
    iteration counts); otherwise ``opt['path']['resume_state']`` is honoured.

    Returns:
        The deserialized state dict (mapped onto the current CUDA device), or
        ``None`` when there is nothing to resume from.
    """
    resume_state_path = None
    if opt['auto_resume']:
        state_path = osp.join('experiments', opt['name'], 'training_states')
        if osp.isdir(state_path):
            states = list(scandir(state_path, suffix='state', recursive=False, full_path=False))
            if len(states) != 0:
                # file stems are iteration numbers, e.g. '10000.state';
                # pick the largest (latest) one
                states = [float(v.split('.state')[0]) for v in states]
                resume_state_path = osp.join(state_path, f'{max(states):.0f}.state')
                opt['path']['resume_state'] = resume_state_path
    else:
        if opt['path'].get('resume_state'):
            resume_state_path = opt['path']['resume_state']

    if resume_state_path is None:
        resume_state = None
    else:
        # map the checkpoint straight onto this process's CUDA device
        device_id = torch.cuda.current_device()
        resume_state = torch.load(resume_state_path, map_location=lambda storage, loc: storage.cuda(device_id))
        check_resume(opt, resume_state['iter'])
    return resume_state


def train_pipeline(root_path):
    """Run the full BasicSR training pipeline rooted at ``root_path``.

    Parses options, prepares directories/loggers, builds dataloaders and the
    model (optionally resuming), then runs the epoch/iteration training loop
    with periodic logging, checkpointing and validation.
    """
    # parse options, set distributed setting, set random seed
    opt, args = parse_options(root_path, is_train=True)
    opt['root_path'] = root_path

    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True

    # load resume states if necessary
    resume_state = load_resume_state(opt)
    # mkdir for experiments and logger
    if resume_state is None:
        make_exp_dirs(opt)
        # only rank 0 renames a stale tb_logger dir to avoid races in DDP
        if opt['logger'].get('use_tb_logger') and 'debug' not in opt['name'] and opt['rank'] == 0:
            mkdir_and_rename(osp.join(opt['root_path'], 'tb_logger', opt['name']))

    # copy the yml file to the experiment root
    copy_opt_file(args.opt, opt['path']['experiments_root'])

    # WARNING: should not use get_root_logger in the above codes, including the called functions
    # Otherwise the logger will not be properly initialized
    log_file = osp.join(opt['path']['log'], f"train_{opt['name']}_{get_time_str()}.log")
    logger = get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=log_file)
    logger.info(get_env_info())
    logger.info(dict2str(opt))
    # initialize wandb and tb loggers
    tb_logger = init_tb_loggers(opt)

    # create train and validation dataloaders
    result = create_train_val_dataloader(opt, logger)
    train_loader, train_sampler, val_loader, total_epochs, total_iters = result

    # create model
    model = build_model(opt)
    if resume_state:  # resume training
        model.resume_training(resume_state)  # handle optimizers and schedulers
        logger.info(f"Resuming training from epoch: {resume_state['epoch']}, " f"iter: {resume_state['iter']}.")
        start_epoch = resume_state['epoch']
        current_iter = resume_state['iter']
    else:
        start_epoch = 0
        current_iter = 0

    # create message logger (formatted outputs)
    msg_logger = MessageLogger(opt, current_iter, tb_logger)

    # dataloader prefetcher: CPU is the default; CUDA overlaps H2D copies
    # with compute but requires pinned memory
    prefetch_mode = opt['datasets']['train'].get('prefetch_mode')
    if prefetch_mode is None or prefetch_mode == 'cpu':
        prefetcher = CPUPrefetcher(train_loader)
    elif prefetch_mode == 'cuda':
        prefetcher = CUDAPrefetcher(train_loader, opt)
        logger.info(f'Use {prefetch_mode} prefetch dataloader')
        if opt['datasets']['train'].get('pin_memory') is not True:
            raise ValueError('Please set pin_memory=True for CUDAPrefetcher.')
    else:
        raise ValueError(f'Wrong prefetch_mode {prefetch_mode}.' "Supported ones are: None, 'cuda', 'cpu'.")

    # training
    logger.info(f'Start training from epoch: {start_epoch}, iter: {current_iter}')
    data_timer, iter_timer = AvgTimer(), AvgTimer()
    start_time = time.time()

    for epoch in range(start_epoch, total_epochs + 1):
        # reshuffle per epoch so every rank sees a different ordering
        train_sampler.set_epoch(epoch)
        prefetcher.reset()
        train_data = prefetcher.next()

        while train_data is not None:
            data_timer.record()

            current_iter += 1
            if current_iter > total_iters:
                break
            # update learning rate
            model.update_learning_rate(current_iter, warmup_iter=opt['train'].get('warmup_iter', -1))
            # training
            model.feed_data(train_data)
            model.optimize_parameters(current_iter)
            iter_timer.record()
            if current_iter == 1:
                # reset start time in msg_logger for more accurate eta_time
                # not work in resume mode
                msg_logger.reset_start_time()
            # log
            if current_iter % opt['logger']['print_freq'] == 0:
                log_vars = {'epoch': epoch, 'iter': current_iter}
                log_vars.update({'lrs': model.get_current_learning_rate()})
                log_vars.update({'time': iter_timer.get_avg_time(), 'data_time': data_timer.get_avg_time()})
                log_vars.update(model.get_current_log())
                msg_logger(log_vars)

            # save models and training states
            if current_iter % opt['logger']['save_checkpoint_freq'] == 0:
                logger.info('Saving models and training states.')
                model.save(epoch, current_iter)

            # validation
            if opt.get('val') is not None and (current_iter % opt['val']['val_freq'] == 0):
                model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img'])

            data_timer.start()
            iter_timer.start()
            train_data = prefetcher.next()
        # end of iter

    # end of epoch

    consumed_time = str(datetime.timedelta(seconds=int(time.time() - start_time)))
    logger.info(f'End of training. Time consumed: {consumed_time}')
    logger.info('Save the latest model.')
    model.save(epoch=-1, current_iter=-1)  # -1 stands for the latest
    if opt.get('val') is not None:
        # final validation with the latest weights
        model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img'])
    if tb_logger:
        tb_logger.close()


if __name__ == '__main__':
    # repo root is two levels above this file (basicsr/train.py -> repo/)
    root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
    train_pipeline(root_path)
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import re
import unittest
from collections import namedtuple
from unittest import mock

import pytest
import sqlalchemy
from cryptography.fernet import Fernet
from parameterized import parameterized

from airflow import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models import Connection, crypto
from airflow.providers.sqlite.hooks.sqlite import SqliteHook
from tests.test_utils.config import conf_vars

ConnectionParts = namedtuple("ConnectionParts", ["conn_type", "login", "password", "host", "port", "schema"])


class UriTestCaseConfig:
    """One URI round-trip test case: an input URI plus the Connection attributes it should produce."""

    def __init__(
        self,
        test_conn_uri: str,
        test_conn_attributes: dict,
        description: str,
    ):
        """
        :param test_conn_uri: URI that we use to create connection
        :param test_conn_attributes: we expect a connection object created with `test_uri` to have these
            attributes
        :param description: human-friendly name appended to parameterized test
        """
        self.test_uri = test_conn_uri
        self.test_conn_attributes = test_conn_attributes
        self.description = description

    @staticmethod
    def uri_test_name(func, num, param):
        """Build a readable parameterized-test name from the case description.

        Fix: the original nested double quotes inside a double-quoted f-string
        (``.replace(" ", "_")``), which is a SyntaxError on every Python
        before 3.12 (quote reuse in f-strings only arrived with PEP 701).
        """
        return f"{func.__name__}_{num}_{param.args[0].description.replace(' ', '_')}"


class TestConnection(unittest.TestCase):
    """Tests for airflow.models.Connection: extra encryption, URI parsing/round-tripping,
    env-var resolution, hook construction, secret masking and connection testing."""

    def setUp(self):
        crypto._fernet = None
        # capture calls to the secret masker so tests can assert what got masked
        patcher = mock.patch('airflow.models.connection.mask_secret', autospec=True)
        self.mask_secret = patcher.start()
        self.addCleanup(patcher.stop)

    def tearDown(self):
        crypto._fernet = None

    @conf_vars({('core', 'fernet_key'): ''})
    def test_connection_extra_no_encryption(self):
        """
        Tests extras on a new connection without encryption. The fernet key
        is set to a non-base64-encoded string and the extra is stored without
        encryption.
        """
        test_connection = Connection(extra='testextra')
        assert not test_connection.is_extra_encrypted
        assert test_connection.extra == 'testextra'

    @conf_vars({('core', 'fernet_key'): Fernet.generate_key().decode()})
    def test_connection_extra_with_encryption(self):
        """
        Tests extras on a new connection with encryption.
        """
        test_connection = Connection(extra='testextra')
        assert test_connection.is_extra_encrypted
        assert test_connection.extra == 'testextra'

    def test_connection_extra_with_encryption_rotate_fernet_key(self):
        """
        Tests rotating encrypted extras.
        """
        key1 = Fernet.generate_key()
        key2 = Fernet.generate_key()

        with conf_vars({('core', 'fernet_key'): key1.decode()}):
            test_connection = Connection(extra='testextra')
            assert test_connection.is_extra_encrypted
            assert test_connection.extra == 'testextra'
            assert Fernet(key1).decrypt(test_connection._extra.encode()) == b'testextra'

        # Test decrypt of old value with new key
        with conf_vars({('core', 'fernet_key'): ','.join([key2.decode(), key1.decode()])}):
            crypto._fernet = None
            assert test_connection.extra == 'testextra'

            # Test decrypt of new value with new key
            test_connection.rotate_fernet_key()
            assert test_connection.is_extra_encrypted
            assert test_connection.extra == 'testextra'
            assert Fernet(key2).decrypt(test_connection._extra.encode()) == b'testextra'

    test_from_uri_params = [
        UriTestCaseConfig(
            test_conn_uri='scheme://user:password@host%2Flocation:1234/schema',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host/location',
                schema='schema',
                login='user',
                password='password',
                port=1234,
                extra=None,
            ),
            description='without extras',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?'
            'extra1=a%20value&extra2=%2Fpath%2F',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host/location',
                schema='schema',
                login='user',
                password='password',
                port=1234,
                extra_dejson={'extra1': 'a value', 'extra2': '/path/'},
            ),
            description='with extras',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?__extra__=single+value',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host/location',
                schema='schema',
                login='user',
                password='password',
                port=1234,
                extra='single value',
            ),
            description='with extras single value',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?'
            '__extra__=arbitrary+string+%2A%29%2A%24',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host/location',
                schema='schema',
                login='user',
                password='password',
                port=1234,
                extra='arbitrary string *)*$',
            ),
            description='with extra non-json',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?'
            '__extra__=%5B%22list%22%2C+%22of%22%2C+%22values%22%5D',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host/location',
                schema='schema',
                login='user',
                password='password',
                port=1234,
                extra_dejson=['list', 'of', 'values'],
            ),
            description='with extras list',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?'
            '__extra__=%7B%22my_val%22%3A+%5B%22list%22%2C+%22of%22%2C+%22values%22%5D%2C+%22extra%22%3A+%7B%22nested%22%3A+%7B%22json%22%3A+%22val%22%7D%7D%7D',  # noqa: E501
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host/location',
                schema='schema',
                login='user',
                password='password',
                port=1234,
                extra_dejson={'my_val': ['list', 'of', 'values'], 'extra': {'nested': {'json': 'val'}}},
            ),
            description='with nested json',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?extra1=a%20value&extra2=',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host/location',
                schema='schema',
                login='user',
                password='password',
                port=1234,
                extra_dejson={'extra1': 'a value', 'extra2': ''},
            ),
            description='with empty extras',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://user:password@host%2Flocation%3Ax%3Ay:1234/schema?'
            'extra1=a%20value&extra2=%2Fpath%2F',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host/location:x:y',
                schema='schema',
                login='user',
                password='password',
                port=1234,
                extra_dejson={'extra1': 'a value', 'extra2': '/path/'},
            ),
            description='with colon in hostname',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://user:password%20with%20space@host%2Flocation%3Ax%3Ay:1234/schema',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host/location:x:y',
                schema='schema',
                login='user',
                password='password with space',
                port=1234,
            ),
            description='with encoded password',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://domain%2Fuser:password@host%2Flocation%3Ax%3Ay:1234/schema',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host/location:x:y',
                schema='schema',
                login='domain/user',
                password='password',
                port=1234,
            ),
            description='with encoded user',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://user:password%20with%20space@host:1234/schema%2Ftest',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host',
                schema='schema/test',
                login='user',
                password='password with space',
                port=1234,
            ),
            description='with encoded schema',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://user:password%20with%20space@host:1234',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host',
                schema='',
                login='user',
                password='password with space',
                port=1234,
            ),
            description='no schema',
        ),
        UriTestCaseConfig(
            test_conn_uri='google-cloud-platform://?extra__google_cloud_platform__key_'
            'path=%2Fkeys%2Fkey.json&extra__google_cloud_platform__scope='
            'https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fcloud-platform&extra'
            '__google_cloud_platform__project=airflow',
            test_conn_attributes=dict(
                conn_type='google_cloud_platform',
                host='',
                schema='',
                login=None,
                password=None,
                port=None,
                extra_dejson=dict(
                    extra__google_cloud_platform__key_path='/keys/key.json',
                    extra__google_cloud_platform__scope='https://www.googleapis.com/auth/cloud-platform',
                    extra__google_cloud_platform__project='airflow',
                ),
            ),
            description='with underscore',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://host:1234',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host',
                schema='',
                login=None,
                password=None,
                port=1234,
            ),
            description='without auth info',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://%2FTmP%2F:1234',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='/TmP/',
                schema='',
                login=None,
                password=None,
                port=1234,
            ),
            description='with path',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme:///airflow',
            test_conn_attributes=dict(
                conn_type='scheme',
                schema='airflow',
            ),
            description='schema only',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://@:1234',
            test_conn_attributes=dict(
                conn_type='scheme',
                port=1234,
            ),
            description='port only',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://:password%2F%21%40%23%24%25%5E%26%2A%28%29%7B%7D@',
            test_conn_attributes=dict(
                conn_type='scheme',
                password='password/!@#$%^&*(){}',
            ),
            description='password only',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://login%2F%21%40%23%24%25%5E%26%2A%28%29%7B%7D@',
            test_conn_attributes=dict(
                conn_type='scheme',
                login='login/!@#$%^&*(){}',
            ),
            description='login only',
        ),
    ]

    @parameterized.expand([(x,) for x in test_from_uri_params], UriTestCaseConfig.uri_test_name)
    def test_connection_from_uri(self, test_config: UriTestCaseConfig):
        """A Connection built from a URI must expose the expected attributes,
        and its password/extra must be registered with the secret masker."""
        connection = Connection(uri=test_config.test_uri)
        for conn_attr, expected_val in test_config.test_conn_attributes.items():
            actual_val = getattr(connection, conn_attr)
            # Fix: the original asserted `expected_val is None` inside the
            # `expected_val is None` branch — a tautology that checked nothing.
            if expected_val is None:
                assert actual_val is None
            else:
                assert actual_val == expected_val

        expected_calls = []
        if test_config.test_conn_attributes.get('password'):
            expected_calls.append(mock.call(test_config.test_conn_attributes['password']))

        if test_config.test_conn_attributes.get('extra_dejson'):
            expected_calls.append(mock.call(test_config.test_conn_attributes['extra_dejson']))

        self.mask_secret.assert_has_calls(expected_calls)

    @parameterized.expand([(x,) for x in test_from_uri_params], UriTestCaseConfig.uri_test_name)
    def test_connection_get_uri_from_uri(self, test_config: UriTestCaseConfig):
        """
        This test verifies that when we create a conn_1 from URI, and we generate a URI from that conn, that
        when we create a conn_2 from the generated URI, we get an equivalent conn.
        1. Parse URI to create `Connection` object, `connection`.
        2. Using this connection, generate URI `generated_uri`.
        3. Using this `generated_uri`, parse and create new Connection `new_conn`.
        4. Verify that `new_conn` has same attributes as `connection`.
        """
        connection = Connection(uri=test_config.test_uri)
        generated_uri = connection.get_uri()
        new_conn = Connection(uri=generated_uri)
        assert connection.conn_type == new_conn.conn_type
        assert connection.login == new_conn.login
        assert connection.password == new_conn.password
        assert connection.host == new_conn.host
        assert connection.port == new_conn.port
        assert connection.schema == new_conn.schema
        assert connection.extra_dejson == new_conn.extra_dejson

    @parameterized.expand([(x,) for x in test_from_uri_params], UriTestCaseConfig.uri_test_name)
    def test_connection_get_uri_from_conn(self, test_config: UriTestCaseConfig):
        """
        This test verifies that if we create conn_1 from attributes (rather than from URI), and we generate a
        URI, that when we create conn_2 from this URI, we get an equivalent conn.
        1. Build conn init params using `test_conn_attributes` and store in `conn_kwargs`
        2. Instantiate conn `connection` from `conn_kwargs`.
        3. Generate uri `get_uri` from this conn.
        4. Create conn `new_conn` from this uri.
        5. Verify `new_conn` has same attributes as `connection`.
        """
        conn_kwargs = {}
        for k, v in test_config.test_conn_attributes.items():
            if k == 'extra_dejson':
                # the constructor takes serialized `extra`, not the parsed dict
                conn_kwargs.update({'extra': json.dumps(v)})
            else:
                conn_kwargs.update({k: v})

        connection = Connection(conn_id='test_conn', **conn_kwargs)  # type: ignore
        gen_uri = connection.get_uri()
        new_conn = Connection(conn_id='test_conn', uri=gen_uri)
        for conn_attr, expected_val in test_config.test_conn_attributes.items():
            actual_val = getattr(new_conn, conn_attr)
            if expected_val is None:
                assert actual_val is None
            else:
                assert actual_val == expected_val

    @parameterized.expand(
        [
            (
                "http://:password@host:80/database",
                ConnectionParts(
                    conn_type="http", login='', password="password", host="host", port=80, schema="database"
                ),
            ),
            (
                "http://user:@host:80/database",
                ConnectionParts(
                    conn_type="http", login="user", password=None, host="host", port=80, schema="database"
                ),
            ),
            (
                "http://user:password@/database",
                ConnectionParts(
                    conn_type="http", login="user", password="password", host="", port=None, schema="database"
                ),
            ),
            (
                "http://user:password@host:80/",
                ConnectionParts(
                    conn_type="http", login="user", password="password", host="host", port=80, schema=""
                ),
            ),
            (
                "http://user:password@/",
                ConnectionParts(
                    conn_type="http", login="user", password="password", host="", port=None, schema=""
                ),
            ),
            (
                "postgresql://user:password@%2Ftmp%2Fz6rqdzqh%2Fexample%3Awest1%3Atestdb/testdb",
                ConnectionParts(
                    conn_type="postgres",
                    login="user",
                    password="password",
                    host="/tmp/z6rqdzqh/example:west1:testdb",
                    port=None,
                    schema="testdb",
                ),
            ),
            (
                "postgresql://user@%2Ftmp%2Fz6rqdzqh%2Fexample%3Aeurope-west1%3Atestdb/testdb",
                ConnectionParts(
                    conn_type="postgres",
                    login="user",
                    password=None,
                    host="/tmp/z6rqdzqh/example:europe-west1:testdb",
                    port=None,
                    schema="testdb",
                ),
            ),
            (
                "postgresql://%2Ftmp%2Fz6rqdzqh%2Fexample%3Aeurope-west1%3Atestdb",
                ConnectionParts(
                    conn_type="postgres",
                    login=None,
                    password=None,
                    host="/tmp/z6rqdzqh/example:europe-west1:testdb",
                    port=None,
                    schema="",
                ),
            ),
        ]
    )
    def test_connection_from_with_auth_info(self, uri, uri_parts):
        """URI parsing must handle empty/missing auth components and encoded socket paths."""
        connection = Connection(uri=uri)

        assert connection.conn_type == uri_parts.conn_type
        assert connection.login == uri_parts.login
        assert connection.password == uri_parts.password
        assert connection.host == uri_parts.host
        assert connection.port == uri_parts.port
        assert connection.schema == uri_parts.schema

    @parameterized.expand(
        [
            ('{"extra": null}', None),
            ('{"extra": "hi"}', 'hi'),
            ('{"extra": {"yo": "hi"}}', '{"yo": "hi"}'),
            ('{"extra": "{\\"yo\\": \\"hi\\"}"}', '{"yo": "hi"}'),
        ]
    )
    def test_from_json_extra(self, extra, expected):
        """json serialization should support extra stored as object _or_ as string"""
        assert Connection.from_json(extra).extra == expected

    @parameterized.expand(
        [
            ('{"conn_type": "abc-abc"}', 'abc_abc'),
            ('{"conn_type": "abc_abc"}', 'abc_abc'),
            ('{"conn_type": "postgresql"}', 'postgres'),
        ]
    )
    def test_from_json_conn_type(self, val, expected):
        """two conn_type normalizations are applied: replace - with _ and postgresql with postgres"""
        assert Connection.from_json(val).conn_type == expected

    @parameterized.expand(
        [
            ('{"port": 1}', 1),
            ('{"port": "1"}', 1),
            ('{"port": null}', None),
        ]
    )
    def test_from_json_port(self, val, expected):
        """port is coerced to int when given as a string, and null maps to None"""
        assert Connection.from_json(val).port == expected

    @parameterized.expand(
        [
            ('pass :/!@#$%^&*(){}"', 'pass :/!@#$%^&*(){}"'),  # these are the same
            (None, None),
            ('', None),  # this is a consequence of the password getter
        ]
    )
    def test_from_json_special_characters(self, val, expected):
        """passwords round-trip through json unchanged; empty string reads back as None"""
        json_val = json.dumps(dict(password=val))
        assert Connection.from_json(json_val).password == expected

    @mock.patch.dict(
        'os.environ',
        {
            'AIRFLOW_CONN_TEST_URI': 'postgresql://username:password@ec2.compute.com:5432/the_database',
        },
    )
    def test_using_env_var(self):
        conn = SqliteHook.get_connection(conn_id='test_uri')
        assert 'ec2.compute.com' == conn.host
        assert 'the_database' == conn.schema
        assert 'username' == conn.login
        assert 'password' == conn.password
        assert 5432 == conn.port

        self.mask_secret.assert_called_once_with('password')

    @mock.patch.dict(
        'os.environ',
        {
            'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgresql://ec2.compute.com/the_database',
        },
    )
    def test_using_unix_socket_env_var(self):
        conn = SqliteHook.get_connection(conn_id='test_uri_no_creds')
        assert 'ec2.compute.com' == conn.host
        assert 'the_database' == conn.schema
        assert conn.login is None
        assert conn.password is None
        assert conn.port is None

    def test_param_setup(self):
        conn = Connection(
            conn_id='local_mysql',
            conn_type='mysql',
            host='localhost',
            login='airflow',
            password='airflow',
            schema='airflow',
        )
        assert 'localhost' == conn.host
        assert 'airflow' == conn.schema
        assert 'airflow' == conn.login
        assert 'airflow' == conn.password
        assert conn.port is None

    def test_env_var_priority(self):
        # the env-var backend must shadow the metastore connection of the same id
        conn = SqliteHook.get_connection(conn_id='airflow_db')
        assert 'ec2.compute.com' != conn.host

        with mock.patch.dict(
            'os.environ',
            {
                'AIRFLOW_CONN_AIRFLOW_DB': 'postgresql://username:password@ec2.compute.com:5432/the_database',
            },
        ):
            conn = SqliteHook.get_connection(conn_id='airflow_db')
            assert 'ec2.compute.com' == conn.host
            assert 'the_database' == conn.schema
            assert 'username' == conn.login
            assert 'password' == conn.password
            assert 5432 == conn.port

    @mock.patch.dict(
        'os.environ',
        {
            'AIRFLOW_CONN_TEST_URI': 'postgresql://username:password@ec2.compute.com:5432/the_database',
            'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgresql://ec2.compute.com/the_database',
        },
    )
    def test_dbapi_get_uri(self):
        conn = BaseHook.get_connection(conn_id='test_uri')
        hook = conn.get_hook()
        assert 'postgresql://username:password@ec2.compute.com:5432/the_database' == hook.get_uri()
        conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
        hook2 = conn2.get_hook()
        assert 'postgresql://ec2.compute.com/the_database' == hook2.get_uri()

    @mock.patch.dict(
        'os.environ',
        {
            'AIRFLOW_CONN_TEST_URI': 'postgresql://username:password@ec2.compute.com:5432/the_database',
            'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgresql://ec2.compute.com/the_database',
        },
    )
    def test_dbapi_get_sqlalchemy_engine(self):
        conn = BaseHook.get_connection(conn_id='test_uri')
        hook = conn.get_hook()
        engine = hook.get_sqlalchemy_engine()
        assert isinstance(engine, sqlalchemy.engine.Engine)
        assert 'postgresql://username:password@ec2.compute.com:5432/the_database' == str(engine.url)

    @mock.patch.dict(
        'os.environ',
        {
            'AIRFLOW_CONN_TEST_URI': 'postgresql://username:password@ec2.compute.com:5432/the_database',
            'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgresql://ec2.compute.com/the_database',
        },
    )
    def test_get_connections_env_var(self):
        conns = SqliteHook.get_connection(conn_id='test_uri')
        assert conns.host == 'ec2.compute.com'
        assert conns.schema == 'the_database'
        assert conns.login == 'username'
        assert conns.password == 'password'
        assert conns.port == 5432

    def test_connection_mixed(self):
        # NOTE: the matched message below (including the missing space after
        # the period) must stay byte-identical to the production error text.
        with pytest.raises(
            AirflowException,
            match=re.escape(
                "You must create an object using the URI or individual values (conn_type, host, login, "
                "password, schema, port or extra).You can't mix these two ways to create this object."
            ),
        ):
            Connection(conn_id="TEST_ID", uri="mysql://", schema="AAA")

    def test_masking_from_db(self):
        """Test secrets are masked when loaded directly from the DB"""
        from airflow.settings import Session

        session = Session()

        try:
            conn = Connection(
                conn_id=f"test-{os.getpid()}",
                conn_type="http",
                password="s3cr3t",
                extra='{"apikey":"masked too"}',
            )
            session.add(conn)
            session.flush()

            # Make sure we re-load it, not just get the cached object back
            session.expunge(conn)

            self.mask_secret.reset_mock()

            from_db = session.query(Connection).get(conn.id)
            from_db.extra_dejson

            assert self.mask_secret.mock_calls == [
                # We should have called it _again_ when loading from the DB
                mock.call("s3cr3t"),
                mock.call({"apikey": "masked too"}),
            ]
        finally:
            session.rollback()

    @mock.patch.dict(
        'os.environ',
        {
            'AIRFLOW_CONN_TEST_URI': 'sqlite://',
        },
    )
    def test_connection_test_success(self):
        conn = Connection(conn_id='test_uri', conn_type='sqlite')
        res = conn.test_connection()
        assert res[0] is True
        assert res[1] == 'Connection successfully tested'

    @mock.patch.dict(
        'os.environ',
        {
            'AIRFLOW_CONN_TEST_URI_NO_HOOK': 'fs://',
        },
    )
    def test_connection_test_no_hook(self):
        conn = Connection(conn_id='test_uri_no_hook', conn_type='fs')
        res = conn.test_connection()
        assert res[0] is False
        assert res[1] == 'Unknown hook type "fs"'

    @mock.patch.dict(
        'os.environ',
        {
            'AIRFLOW_CONN_TEST_URI_HOOK_METHOD_MISSING': 'grpc://',
        },
    )
    def test_connection_test_hook_method_missing(self):
        conn = Connection(conn_id='test_uri_hook_method_missing', conn_type='grpc')
        res = conn.test_connection()
        assert res[0] is False
        assert res[1] == "Hook GrpcHook doesn't implement or inherit test_connection method"

    def test_extra_warnings_non_json(self):
        with pytest.warns(DeprecationWarning, match='non-JSON'):
            Connection(conn_id='test_extra', conn_type='none', extra='hi')

    def test_extra_warnings_non_dict_json(self):
        with pytest.warns(DeprecationWarning, match='not parse as a dictionary'):
            Connection(conn_id='test_extra', conn_type='none', extra='"hi"')
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import json import os import re import unittest from collections import namedtuple from unittest import mock import pytest import sqlalchemy from cryptography.fernet import Fernet from parameterized import parameterized from airflow import AirflowException from airflow.hooks.base import BaseHook from airflow.models import Connection, crypto from airflow.providers.sqlite.hooks.sqlite import SqliteHook from tests.test_utils.config import conf_vars ConnectionParts = namedtuple("ConnectionParts", ["conn_type", "login", "password", "host", "port", "schema"]) class UriTestCaseConfig: def __init__( self, test_conn_uri: str, test_conn_attributes: dict, description: str, ): """ :param test_conn_uri: URI that we use to create connection :param test_conn_attributes: we expect a connection object created with `test_uri` to have these attributes :param description: human-friendly name appended to parameterized test """ self.test_uri = test_conn_uri self.test_conn_attributes = test_conn_attributes self.description = description @staticmethod def uri_test_name(func, num, param): return f"{func.__name__}_{num}_{param.args[0].description.replace(' ', '_')}" class TestConnection(unittest.TestCase): def setUp(self): 
crypto._fernet = None patcher = mock.patch('airflow.models.connection.mask_secret', autospec=True) self.mask_secret = patcher.start() self.addCleanup(patcher.stop) def tearDown(self): crypto._fernet = None @conf_vars({('core', 'fernet_key'): ''}) def test_connection_extra_no_encryption(self): """ Tests extras on a new connection without encryption. The fernet key is set to a non-base64-encoded string and the extra is stored without encryption. """ test_connection = Connection(extra='testextra') assert not test_connection.is_extra_encrypted assert test_connection.extra == 'testextra' @conf_vars({('core', 'fernet_key'): Fernet.generate_key().decode()}) def test_connection_extra_with_encryption(self): """ Tests extras on a new connection with encryption. """ test_connection = Connection(extra='testextra') assert test_connection.is_extra_encrypted assert test_connection.extra == 'testextra' def test_connection_extra_with_encryption_rotate_fernet_key(self): """ Tests rotating encrypted extras. 
""" key1 = Fernet.generate_key() key2 = Fernet.generate_key() with conf_vars({('core', 'fernet_key'): key1.decode()}): test_connection = Connection(extra='testextra') assert test_connection.is_extra_encrypted assert test_connection.extra == 'testextra' assert Fernet(key1).decrypt(test_connection._extra.encode()) == b'testextra' # Test decrypt of old value with new key with conf_vars({('core', 'fernet_key'): ','.join([key2.decode(), key1.decode()])}): crypto._fernet = None assert test_connection.extra == 'testextra' # Test decrypt of new value with new key test_connection.rotate_fernet_key() assert test_connection.is_extra_encrypted assert test_connection.extra == 'testextra' assert Fernet(key2).decrypt(test_connection._extra.encode()) == b'testextra' test_from_uri_params = [ UriTestCaseConfig( test_conn_uri='scheme://user:password@host%2Flocation:1234/schema', test_conn_attributes=dict( conn_type='scheme', host='host/location', schema='schema', login='user', password='password', port=1234, extra=None, ), description='without extras', ), UriTestCaseConfig( test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?' 'extra1=a%20value&extra2=%2Fpath%2F', test_conn_attributes=dict( conn_type='scheme', host='host/location', schema='schema', login='user', password='password', port=1234, extra_dejson={'extra1': 'a value', 'extra2': '/path/'}, ), description='with extras', ), UriTestCaseConfig( test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?__extra__=single+value', test_conn_attributes=dict( conn_type='scheme', host='host/location', schema='schema', login='user', password='password', port=1234, extra='single value', ), description='with extras single value', ), UriTestCaseConfig( test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?' 
'__extra__=arbitrary+string+%2A%29%2A%24', test_conn_attributes=dict( conn_type='scheme', host='host/location', schema='schema', login='user', password='password', port=1234, extra='arbitrary string *)*$', ), description='with extra non-json', ), UriTestCaseConfig( test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?' '__extra__=%5B%22list%22%2C+%22of%22%2C+%22values%22%5D', test_conn_attributes=dict( conn_type='scheme', host='host/location', schema='schema', login='user', password='password', port=1234, extra_dejson=['list', 'of', 'values'], ), description='with extras list', ), UriTestCaseConfig( test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?' '__extra__=%7B%22my_val%22%3A+%5B%22list%22%2C+%22of%22%2C+%22values%22%5D%2C+%22extra%22%3A+%7B%22nested%22%3A+%7B%22json%22%3A+%22val%22%7D%7D%7D', # noqa: E501 test_conn_attributes=dict( conn_type='scheme', host='host/location', schema='schema', login='user', password='password', port=1234, extra_dejson={'my_val': ['list', 'of', 'values'], 'extra': {'nested': {'json': 'val'}}}, ), description='with nested json', ), UriTestCaseConfig( test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?extra1=a%20value&extra2=', test_conn_attributes=dict( conn_type='scheme', host='host/location', schema='schema', login='user', password='password', port=1234, extra_dejson={'extra1': 'a value', 'extra2': ''}, ), description='with empty extras', ), UriTestCaseConfig( test_conn_uri='scheme://user:password@host%2Flocation%3Ax%3Ay:1234/schema?' 
'extra1=a%20value&extra2=%2Fpath%2F', test_conn_attributes=dict( conn_type='scheme', host='host/location:x:y', schema='schema', login='user', password='password', port=1234, extra_dejson={'extra1': 'a value', 'extra2': '/path/'}, ), description='with colon in hostname', ), UriTestCaseConfig( test_conn_uri='scheme://user:password%20with%20space@host%2Flocation%3Ax%3Ay:1234/schema', test_conn_attributes=dict( conn_type='scheme', host='host/location:x:y', schema='schema', login='user', password='password with space', port=1234, ), description='with encoded password', ), UriTestCaseConfig( test_conn_uri='scheme://domain%2Fuser:password@host%2Flocation%3Ax%3Ay:1234/schema', test_conn_attributes=dict( conn_type='scheme', host='host/location:x:y', schema='schema', login='domain/user', password='password', port=1234, ), description='with encoded user', ), UriTestCaseConfig( test_conn_uri='scheme://user:password%20with%20space@host:1234/schema%2Ftest', test_conn_attributes=dict( conn_type='scheme', host='host', schema='schema/test', login='user', password='password with space', port=1234, ), description='with encoded schema', ), UriTestCaseConfig( test_conn_uri='scheme://user:password%20with%20space@host:1234', test_conn_attributes=dict( conn_type='scheme', host='host', schema='', login='user', password='password with space', port=1234, ), description='no schema', ), UriTestCaseConfig( test_conn_uri='google-cloud-platform://?extra__google_cloud_platform__key_' 'path=%2Fkeys%2Fkey.json&extra__google_cloud_platform__scope=' 'https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fcloud-platform&extra' '__google_cloud_platform__project=airflow', test_conn_attributes=dict( conn_type='google_cloud_platform', host='', schema='', login=None, password=None, port=None, extra_dejson=dict( extra__google_cloud_platform__key_path='/keys/key.json', extra__google_cloud_platform__scope='https://www.googleapis.com/auth/cloud-platform', extra__google_cloud_platform__project='airflow', ), ), 
description='with underscore', ), UriTestCaseConfig( test_conn_uri='scheme://host:1234', test_conn_attributes=dict( conn_type='scheme', host='host', schema='', login=None, password=None, port=1234, ), description='without auth info', ), UriTestCaseConfig( test_conn_uri='scheme://%2FTmP%2F:1234', test_conn_attributes=dict( conn_type='scheme', host='/TmP/', schema='', login=None, password=None, port=1234, ), description='with path', ), UriTestCaseConfig( test_conn_uri='scheme:///airflow', test_conn_attributes=dict( conn_type='scheme', schema='airflow', ), description='schema only', ), UriTestCaseConfig( test_conn_uri='scheme://@:1234', test_conn_attributes=dict( conn_type='scheme', port=1234, ), description='port only', ), UriTestCaseConfig( test_conn_uri='scheme://:password%2F%21%40%23%24%25%5E%26%2A%28%29%7B%7D@', test_conn_attributes=dict( conn_type='scheme', password='password/!@#$%^&*(){}', ), description='password only', ), UriTestCaseConfig( test_conn_uri='scheme://login%2F%21%40%23%24%25%5E%26%2A%28%29%7B%7D@', test_conn_attributes=dict( conn_type='scheme', login='login/!@#$%^&*(){}', ), description='login only', ), ] @parameterized.expand([(x,) for x in test_from_uri_params], UriTestCaseConfig.uri_test_name) def test_connection_from_uri(self, test_config: UriTestCaseConfig): connection = Connection(uri=test_config.test_uri) for conn_attr, expected_val in test_config.test_conn_attributes.items(): actual_val = getattr(connection, conn_attr) if expected_val is None: assert expected_val is None if isinstance(expected_val, dict): assert expected_val == actual_val else: assert expected_val == actual_val expected_calls = [] if test_config.test_conn_attributes.get('password'): expected_calls.append(mock.call(test_config.test_conn_attributes['password'])) if test_config.test_conn_attributes.get('extra_dejson'): expected_calls.append(mock.call(test_config.test_conn_attributes['extra_dejson'])) self.mask_secret.assert_has_calls(expected_calls) 
@parameterized.expand([(x,) for x in test_from_uri_params], UriTestCaseConfig.uri_test_name) def test_connection_get_uri_from_uri(self, test_config: UriTestCaseConfig): """ This test verifies that when we create a conn_1 from URI, and we generate a URI from that conn, that when we create a conn_2 from the generated URI, we get an equivalent conn. 1. Parse URI to create `Connection` object, `connection`. 2. Using this connection, generate URI `generated_uri`.. 3. Using this`generated_uri`, parse and create new Connection `new_conn`. 4. Verify that `new_conn` has same attributes as `connection`. """ connection = Connection(uri=test_config.test_uri) generated_uri = connection.get_uri() new_conn = Connection(uri=generated_uri) assert connection.conn_type == new_conn.conn_type assert connection.login == new_conn.login assert connection.password == new_conn.password assert connection.host == new_conn.host assert connection.port == new_conn.port assert connection.schema == new_conn.schema assert connection.extra_dejson == new_conn.extra_dejson @parameterized.expand([(x,) for x in test_from_uri_params], UriTestCaseConfig.uri_test_name) def test_connection_get_uri_from_conn(self, test_config: UriTestCaseConfig): """ This test verifies that if we create conn_1 from attributes (rather than from URI), and we generate a URI, that when we create conn_2 from this URI, we get an equivalent conn. 1. Build conn init params using `test_conn_attributes` and store in `conn_kwargs` 2. Instantiate conn `connection` from `conn_kwargs`. 3. Generate uri `get_uri` from this conn. 4. Create conn `new_conn` from this uri. 5. Verify `new_conn` has same attributes as `connection`. 
""" conn_kwargs = {} for k, v in test_config.test_conn_attributes.items(): if k == 'extra_dejson': conn_kwargs.update({'extra': json.dumps(v)}) else: conn_kwargs.update({k: v}) connection = Connection(conn_id='test_conn', **conn_kwargs) # type: ignore gen_uri = connection.get_uri() new_conn = Connection(conn_id='test_conn', uri=gen_uri) for conn_attr, expected_val in test_config.test_conn_attributes.items(): actual_val = getattr(new_conn, conn_attr) if expected_val is None: assert actual_val is None else: assert actual_val == expected_val @parameterized.expand( [ ( "http://:password@host:80/database", ConnectionParts( conn_type="http", login='', password="password", host="host", port=80, schema="database" ), ), ( "http://user:@host:80/database", ConnectionParts( conn_type="http", login="user", password=None, host="host", port=80, schema="database" ), ), ( "http://user:password@/database", ConnectionParts( conn_type="http", login="user", password="password", host="", port=None, schema="database" ), ), ( "http://user:password@host:80/", ConnectionParts( conn_type="http", login="user", password="password", host="host", port=80, schema="" ), ), ( "http://user:password@/", ConnectionParts( conn_type="http", login="user", password="password", host="", port=None, schema="" ), ), ( "postgresql://user:password@%2Ftmp%2Fz6rqdzqh%2Fexample%3Awest1%3Atestdb/testdb", ConnectionParts( conn_type="postgres", login="user", password="password", host="/tmp/z6rqdzqh/example:west1:testdb", port=None, schema="testdb", ), ), ( "postgresql://user@%2Ftmp%2Fz6rqdzqh%2Fexample%3Aeurope-west1%3Atestdb/testdb", ConnectionParts( conn_type="postgres", login="user", password=None, host="/tmp/z6rqdzqh/example:europe-west1:testdb", port=None, schema="testdb", ), ), ( "postgresql://%2Ftmp%2Fz6rqdzqh%2Fexample%3Aeurope-west1%3Atestdb", ConnectionParts( conn_type="postgres", login=None, password=None, host="/tmp/z6rqdzqh/example:europe-west1:testdb", port=None, schema="", ), ), ] ) def 
test_connection_from_with_auth_info(self, uri, uri_parts):
        # Each case checks that partial/empty auth components (empty login,
        # missing password, socket-style encoded hosts, ...) parse as expected.
        connection = Connection(uri=uri)

        assert connection.conn_type == uri_parts.conn_type
        assert connection.login == uri_parts.login
        assert connection.password == uri_parts.password
        assert connection.host == uri_parts.host
        assert connection.port == uri_parts.port
        assert connection.schema == uri_parts.schema

    @parameterized.expand(
        [
            ('{"extra": null}', None),
            ('{"extra": "hi"}', 'hi'),
            ('{"extra": {"yo": "hi"}}', '{"yo": "hi"}'),
            ('{"extra": "{\\"yo\\": \\"hi\\"}"}', '{"yo": "hi"}'),
        ]
    )
    def test_from_json_extra(self, extra, expected):
        """json serialization should support extra stored as object _or_ as string"""
        assert Connection.from_json(extra).extra == expected

    @parameterized.expand(
        [
            ('{"conn_type": "abc-abc"}', 'abc_abc'),
            ('{"conn_type": "abc_abc"}', 'abc_abc'),
            ('{"conn_type": "postgresql"}', 'postgres'),
        ]
    )
    def test_from_json_conn_type(self, val, expected):
        """two conn_type normalizations are applied: replace - with _ and postgresql with postgres"""
        assert Connection.from_json(val).conn_type == expected

    @parameterized.expand(
        [
            ('{"port": 1}', 1),
            ('{"port": "1"}', 1),
            ('{"port": null}', None),
        ]
    )
    def test_from_json_port(self, val, expected):
        """port should be coerced to an int whether stored as number or string; JSON null maps to None"""
        assert Connection.from_json(val).port == expected

    @parameterized.expand(
        [
            ('pass :/!@#$%^&*(){}"', 'pass :/!@#$%^&*(){}"'),  # these are the same
            (None, None),
            ('', None),  # this is a consequence of the password getter
        ]
    )
    def test_from_json_special_characters(self, val, expected):
        """special characters in the password should round-trip through JSON serialization unchanged"""
        json_val = json.dumps(dict(password=val))
        assert Connection.from_json(json_val).password == expected

    @mock.patch.dict(
        'os.environ',
        {
            'AIRFLOW_CONN_TEST_URI': 'postgresql://username:password@ec2.compute.com:5432/the_database',
        },
    )
    def test_using_env_var(self):
        conn =
SqliteHook.get_connection(conn_id='test_uri') assert 'ec2.compute.com' == conn.host assert 'the_database' == conn.schema assert 'username' == conn.login assert 'password' == conn.password assert 5432 == conn.port self.mask_secret.assert_called_once_with('password') @mock.patch.dict( 'os.environ', { 'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgresql://ec2.compute.com/the_database', }, ) def test_using_unix_socket_env_var(self): conn = SqliteHook.get_connection(conn_id='test_uri_no_creds') assert 'ec2.compute.com' == conn.host assert 'the_database' == conn.schema assert conn.login is None assert conn.password is None assert conn.port is None def test_param_setup(self): conn = Connection( conn_id='local_mysql', conn_type='mysql', host='localhost', login='airflow', password='airflow', schema='airflow', ) assert 'localhost' == conn.host assert 'airflow' == conn.schema assert 'airflow' == conn.login assert 'airflow' == conn.password assert conn.port is None def test_env_var_priority(self): conn = SqliteHook.get_connection(conn_id='airflow_db') assert 'ec2.compute.com' != conn.host with mock.patch.dict( 'os.environ', { 'AIRFLOW_CONN_AIRFLOW_DB': 'postgresql://username:password@ec2.compute.com:5432/the_database', }, ): conn = SqliteHook.get_connection(conn_id='airflow_db') assert 'ec2.compute.com' == conn.host assert 'the_database' == conn.schema assert 'username' == conn.login assert 'password' == conn.password assert 5432 == conn.port @mock.patch.dict( 'os.environ', { 'AIRFLOW_CONN_TEST_URI': 'postgresql://username:password@ec2.compute.com:5432/the_database', 'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgresql://ec2.compute.com/the_database', }, ) def test_dbapi_get_uri(self): conn = BaseHook.get_connection(conn_id='test_uri') hook = conn.get_hook() assert 'postgresql://username:password@ec2.compute.com:5432/the_database' == hook.get_uri() conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds') hook2 = conn2.get_hook() assert 'postgresql://ec2.compute.com/the_database' == 
hook2.get_uri() @mock.patch.dict( 'os.environ', { 'AIRFLOW_CONN_TEST_URI': 'postgresql://username:password@ec2.compute.com:5432/the_database', 'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgresql://ec2.compute.com/the_database', }, ) def test_dbapi_get_sqlalchemy_engine(self): conn = BaseHook.get_connection(conn_id='test_uri') hook = conn.get_hook() engine = hook.get_sqlalchemy_engine() assert isinstance(engine, sqlalchemy.engine.Engine) assert 'postgresql://username:password@ec2.compute.com:5432/the_database' == str(engine.url) @mock.patch.dict( 'os.environ', { 'AIRFLOW_CONN_TEST_URI': 'postgresql://username:password@ec2.compute.com:5432/the_database', 'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgresql://ec2.compute.com/the_database', }, ) def test_get_connections_env_var(self): conns = SqliteHook.get_connection(conn_id='test_uri') assert conns.host == 'ec2.compute.com' assert conns.schema == 'the_database' assert conns.login == 'username' assert conns.password == 'password' assert conns.port == 5432 def test_connection_mixed(self): with pytest.raises( AirflowException, match=re.escape( "You must create an object using the URI or individual values (conn_type, host, login, " "password, schema, port or extra).You can't mix these two ways to create this object." 
), ): Connection(conn_id="TEST_ID", uri="mysql://", schema="AAA") def test_masking_from_db(self): """Test secrets are masked when loaded directly from the DB""" from airflow.settings import Session session = Session() try: conn = Connection( conn_id=f"test-{os.getpid()}", conn_type="http", password="s3cr3t", extra='{"apikey":"masked too"}', ) session.add(conn) session.flush() # Make sure we re-load it, not just get the cached object back session.expunge(conn) self.mask_secret.reset_mock() from_db = session.query(Connection).get(conn.id) from_db.extra_dejson assert self.mask_secret.mock_calls == [ # We should have called it _again_ when loading from the DB mock.call("s3cr3t"), mock.call({"apikey": "masked too"}), ] finally: session.rollback() @mock.patch.dict( 'os.environ', { 'AIRFLOW_CONN_TEST_URI': 'sqlite://', }, ) def test_connection_test_success(self): conn = Connection(conn_id='test_uri', conn_type='sqlite') res = conn.test_connection() assert res[0] is True assert res[1] == 'Connection successfully tested' @mock.patch.dict( 'os.environ', { 'AIRFLOW_CONN_TEST_URI_NO_HOOK': 'fs://', }, ) def test_connection_test_no_hook(self): conn = Connection(conn_id='test_uri_no_hook', conn_type='fs') res = conn.test_connection() assert res[0] is False assert res[1] == 'Unknown hook type "fs"' @mock.patch.dict( 'os.environ', { 'AIRFLOW_CONN_TEST_URI_HOOK_METHOD_MISSING': 'grpc://', }, ) def test_connection_test_hook_method_missing(self): conn = Connection(conn_id='test_uri_hook_method_missing', conn_type='grpc') res = conn.test_connection() assert res[0] is False assert res[1] == "Hook GrpcHook doesn't implement or inherit test_connection method" def test_extra_warnings_non_json(self): with pytest.warns(DeprecationWarning, match='non-JSON'): Connection(conn_id='test_extra', conn_type='none', extra='hi') def test_extra_warnings_non_dict_json(self): with pytest.warns(DeprecationWarning, match='not parse as a dictionary'): Connection(conn_id='test_extra', conn_type='none', 
extra='"hi"')
import dataclasses import datetime import json import logging import re from typing import Iterable import discord from bot.consts import Colors, DesignatedChannels, OwnerDesignatedChannels from bot.messaging.events import Events from bot.services.base_service import BaseService log = logging.getLogger(__name__) MESSAGE_BATCH_SIZE = 20 @dataclasses.dataclass() class MessageDto: id: int content: str guild: int author: int channel: int time: datetime.datetime @dataclasses.dataclass() class MessageEditDto: id: int content: str time: datetime.datetime class MessageHandlingService(BaseService): def __init__(self, *, bot): super().__init__(bot) self.message_batch = {} self.message_edit_batch = [] async def batch_send_message(self, message: discord.Message): """ Batch the messages to send them all at once to the api to avoid sending hundreds a second """ if len(self.message_batch) > MESSAGE_BATCH_SIZE: # Copy the list values and clear the batch list BEFORE # we send them. This way we can accept new messages while # the current batch is being sent batch_values = list(self.message_batch.values()) self.message_batch.clear() await self.bot.message_route.batch_create_message(batch_values, raise_on_error=False) self.message_batch[message.id] = MessageDto(message.id, message.content, message.guild.id, message.author.id, message.channel.id, datetime.datetime.utcnow()) async def batch_send_message_edit(self, id: int, content: str): """ Batch the message edits to send them all at once to the api to avoid sending hundreds a second """ if message := self.message_batch.get(id, None): self.message_batch[message.id].content = content return if len(self.message_edit_batch) > MESSAGE_BATCH_SIZE: # Copy the list and clear the batch edit list BEFORE # we send them. 
This way we can accept new message edits while # the current batch is being sent batch_values = list(self.message_edit_batch) self.message_edit_batch.clear() await self.bot.message_route.batch_edit_message(batch_values, raise_on_error=False) self.message_edit_batch.append(MessageEditDto(id, content, datetime.datetime.utcnow())) @BaseService.Listener(Events.on_guild_message_received) async def on_guild_message_received(self, message: discord.Message) -> None: log.info(f'Message from {message.author}: "{message.content}" Guild {message.guild.id}') # Check if the message is a discord message link and check if this server has # Enabled embed message links await self.handle_message_links(message) # Primary entry point for handling commands await self.bot.process_commands(message) if not message.content: log.warning('Invalid Message event received') return await self.batch_send_message(message) @BaseService.Listener(Events.on_dm_message_received) async def on_dm_message_received(self, message: discord.Message) -> None: embed = discord.Embed(title=f'Bot Direct Message', color=Colors.ClemsonOrange, description=f'{message.content}') embed.set_footer(text=message.author, icon_url=message.author.display_avatar.url) log.info(f'Message from {message.author}: "{message.content}" Guild Unknown (DM)') await self.messenger.publish(Events.on_broadcast_designated_channel, OwnerDesignatedChannels.bot_dm_log, embed) await message.author.send( 'Hello there, I dont currently support DM commands. 
Please run my commands in a server') # https://discordpy.readthedocs.io/en/latest/faq.html#how-do-i-send-a-dm @BaseService.Listener(Events.on_message_edit) async def on_message_edit(self, before: discord.Message, after: discord.Message): log.info(f'Message edited in #{before.channel.name} By: \ {self.get_full_name(before.author)} \nBefore: {before.content} \nAfter: {after.content}') await self.batch_send_message_edit(after.id, after.content) embed = discord.Embed(title=f':repeat: **Message Edited in #{before.channel.name}**', color=Colors.ClemsonOrange) embed.add_field(name=f'Message Link', value=f'[Click Here]({after.jump_url})') before_chunk = self.split_string_chunks(before.content, 900) after_chunk = self.split_string_chunks(after.content, 900) for i, val in enumerate(before_chunk): embed.add_field(name='**Before**' if i == 0 else 'Cont...', value=f'```{val}```', inline=False) for i, val in enumerate(after_chunk): embed.add_field(name='**After**' if i == 0 else 'Cont...', value=f'```{val}```', inline=False) embed.set_footer(text=f'{self.get_full_name(before.author)}', icon_url=before.author.display_avatar.url) await self.bot.messenger.publish(Events.on_send_in_designated_channel, DesignatedChannels.message_log, after.guild.id, embed) @BaseService.Listener(Events.on_raw_message_edit) async def on_raw_message_edit(self, payload): message = await self.bot.message_route.get_message(payload.message_id) channel = self.bot.get_channel(payload.channel_id) try: if message is not None: log.info(f'Uncached message edited in #{channel.name} By: \ {message["userId"]} \nBefore: {message["content"]} \nAfter: {payload.data["content"]}') await self.batch_send_message_edit(message['id'], payload.data['content']) embed = discord.Embed(title=f':repeat: **Uncached message edited in #{channel.name}**', color=Colors.ClemsonOrange) before_chunk = self.split_string_chunks(message['content'], 900) after_chunk = self.split_string_chunks(payload.data['content'], 900) for i, val in 
enumerate(before_chunk): embed.add_field(name='**Before**' if i == 0 else 'Cont...', value=f'```{val}```', inline=False) for i, val in enumerate(after_chunk): embed.add_field(name='**After**' if i == 0 else 'Cont...', value=f'```{val}```', inline=False) embed.set_footer(text=f'Author id: {payload.data['author']['id']}') await self.bot.messenger.publish(Events.on_send_in_designated_channel, DesignatedChannels.message_log, int(payload.data['guild_id']), embed) else: try: log.info(f'Uncached message edited in #{channel.name} By: \ {payload.data["author"]["id"]} \nBefore: Unknown Content \nAfter: {payload.data["Content"]}') except KeyError: log.error(json.dumps(payload.data)) embed = discord.Embed(title=f':repeat: **Uncached message edited in #{channel.name}**', color=Colors.ClemsonOrange) embed.add_field(name='Before', value='Unknown, message not stored in the database', inline=False) after_chunk = self.split_string_chunks(payload.data['content'], 900) for i, val in enumerate(after_chunk): embed.add_field(name='**After**' if i == 0 else 'Cont...', value=f'```{val}```', inline=False) embed.set_footer(text=f'Author id: {payload.data['author']['id']}') await self.bot.messenger.publish(Events.on_send_in_designated_channel, DesignatedChannels.message_log, int(payload.data['guild_id']), embed) except KeyError as e: log.error(f'raw_message_edit Error: {e} \n') @BaseService.Listener(Events.on_message_delete) async def on_message_delete(self, message: discord.Message): log.info(f'Uncached message deleted in #{message.channel.name} by \ {self.get_full_name(message.author)}: {message.content}') embed = discord.Embed(title=f':wastebasket: **Message Deleted in #{message.channel.name}**', color=Colors.ClemsonOrange) message_chunk = self.split_string_chunks(message.content, 900) for i, val in enumerate(message_chunk): embed.add_field(name='**Message**' if i == 0 else 'Cont...', value=f'```{val}```', inline=False) embed.set_footer(text=f'{self.get_full_name(message.author)}', 
icon_url=message.author.display_avatar.url) await self.bot.messenger.publish(Events.on_send_in_designated_channel, DesignatedChannels.message_log, message.guild.id, embed) @BaseService.Listener(Events.on_raw_message_delete) async def on_raw_message_delete(self, payload): message = await self.bot.message_route.get_message(payload.message_id) channel = self.bot.get_channel(payload.channel_id) log.info(f'Uncached message deleted id:{payload.message_id} in #{channel.name}') if message is not None: embed = discord.Embed(title=f':wastebasket: **Uncached message deleted in #{channel.name}**', color=Colors.ClemsonOrange) message_chunk = self.split_string_chunks(message['content'], 900) for i, val in enumerate(message_chunk): embed.add_field(name='**Message**' if i == 0 else 'Cont...', value=f'```{val}```', inline=False) else: embed = discord.Embed(title=f':wastebasket: **Uncached message deleted in #{channel.name}**', color=Colors.ClemsonOrange) embed.add_field(name='Message', value='Unknown, message not in the database', inline=False) await self.bot.messenger.publish(Events.on_send_in_designated_channel, DesignatedChannels.message_log, int(payload.guild_id), embed) async def handle_message_links(self, message: discord.Message) -> None: """ Searches all incoming messages for a discord message link and replies to the context with that message Args: message (discord.Message): the original message containing the link """ pattern = r'^http(s)?:\/\/(www.)?discord(app)?.com\/channels\/(?P<guild_id>\d{18})\/(?P<channel_id>\d{18})\/(?P<message_id>\d{18})\n*$' # noqa: E501 result = re.search(pattern, message.content) if not result: return if not await self.bot.guild_route.get_can_embed_link(message.guild.id): return matches = result.groupdict() avi = message.author.display_avatar.url source_channel = message.channel link_channel = await self.bot.fetch_channel(matches['channel_id']) link_message = await link_channel.fetch_message(matches['message_id']) if len(link_message.embeds) > 
0: embed = link_message.embeds[0] full_name = f'{self.get_full_name(message.author)}' embed.add_field(name=f'Quoted by:', value=f'{full_name} from [Click Me]({link_message.jump_url})') await message.delete() await source_channel.send(embed=embed) return embed = discord.Embed(title=f'Message linked from #{link_channel.name}', color=Colors.ClemsonOrange) embed.set_author(name=f'Quoted by: {self.get_full_name(message.author)}', icon_url=avi) if link_message.content: embed.add_field(name='Content', value=link_message.content, inline=False) image = None if link_message.attachments: att = link_message.attachments[0] image = att.proxy_url embed.add_field(name="Attachments", value=f"[{att.filename}]({att.url})", inline=False) if image: embed.set_image(url=image) embed.add_field(name='Author', value=f'{self.get_full_name(link_message.author)}', inline=True) embed.add_field(name='Message Link', value=f'[Click Me]({link_message.jump_url})', inline=True) await source_channel.send(embed=embed) await message.delete() def get_full_name(self, author) -> str: return f'{author.name}#{author.discriminator}' def split_string_chunks(self, string: str, n: int) -> Iterable[str]: return (string[i: i + n] for i in range(0, len(string), n)) async def load_service(self): pass
import dataclasses
import datetime
import json
import logging
import re
from typing import Iterable

import discord

from bot.consts import Colors, DesignatedChannels, OwnerDesignatedChannels
from bot.messaging.events import Events
from bot.services.base_service import BaseService

log = logging.getLogger(__name__)

# Messages/edits are buffered and flushed to the API in groups of this size
# to avoid one HTTP round-trip per Discord event.
MESSAGE_BATCH_SIZE = 20


@dataclasses.dataclass()
class MessageDto:
    """Payload for persisting one message through the batch-create route."""
    id: int
    content: str
    guild: int
    author: int
    channel: int
    time: datetime.datetime


@dataclasses.dataclass()
class MessageEditDto:
    """Payload for persisting one message edit through the batch-edit route."""
    id: int
    content: str
    time: datetime.datetime


class MessageHandlingService(BaseService):
    """Persists guild messages/edits and audit-logs edit/delete events."""

    def __init__(self, *, bot):
        super().__init__(bot)
        # id -> MessageDto awaiting flush; a dict so a subsequent edit can
        # overwrite the pending content in place (see batch_send_message_edit).
        self.message_batch = {}
        # MessageEditDto entries awaiting flush.
        self.message_edit_batch = []

    async def batch_send_message(self, message: discord.Message):
        """
        Batch the messages to send them all at once to the api
        to avoid sending hundreds a second
        """
        if len(self.message_batch) > MESSAGE_BATCH_SIZE:
            # Copy the list values and clear the batch list BEFORE
            # we send them. This way we can accept new messages while
            # the current batch is being sent
            batch_values = list(self.message_batch.values())
            self.message_batch.clear()
            await self.bot.message_route.batch_create_message(batch_values, raise_on_error=False)

        self.message_batch[message.id] = MessageDto(message.id,
                                                    message.content,
                                                    message.guild.id,
                                                    message.author.id,
                                                    message.channel.id,
                                                    datetime.datetime.utcnow())

    async def batch_send_message_edit(self, id: int, content: str):
        """
        Batch the message edits to send them all at once to the api
        to avoid sending hundreds a second
        """
        # If the original message has not been flushed yet, just update its
        # pending content instead of queueing a separate edit.
        if message := self.message_batch.get(id, None):
            self.message_batch[message.id].content = content
            return

        if len(self.message_edit_batch) > MESSAGE_BATCH_SIZE:
            # Copy the list and clear the batch edit list BEFORE
            # we send them. This way we can accept new message edits while
            # the current batch is being sent
            batch_values = list(self.message_edit_batch)
            self.message_edit_batch.clear()
            await self.bot.message_route.batch_edit_message(batch_values, raise_on_error=False)

        self.message_edit_batch.append(MessageEditDto(id, content, datetime.datetime.utcnow()))

    @BaseService.Listener(Events.on_guild_message_received)
    async def on_guild_message_received(self, message: discord.Message) -> None:
        log.info(f'Message from {message.author}: "{message.content}" Guild {message.guild.id}')

        # Check if the message is a discord message link and check if this server has
        # Enabled embed message links
        await self.handle_message_links(message)

        # Primary entry point for handling commands
        await self.bot.process_commands(message)

        if not message.content:
            log.warning('Invalid Message event received')
            return

        await self.batch_send_message(message)

    @BaseService.Listener(Events.on_dm_message_received)
    async def on_dm_message_received(self, message: discord.Message) -> None:
        """Forward DMs to the owner's DM-log channel and tell the user DMs are unsupported."""
        embed = discord.Embed(title=f'Bot Direct Message', color=Colors.ClemsonOrange, description=f'{message.content}')
        embed.set_footer(text=message.author, icon_url=message.author.display_avatar.url)
        log.info(f'Message from {message.author}: "{message.content}" Guild Unknown (DM)')
        # NOTE(review): other listeners publish via self.bot.messenger;
        # confirm self.messenger is the same object on BaseService.
        await self.messenger.publish(Events.on_broadcast_designated_channel,
                                     OwnerDesignatedChannels.bot_dm_log,
                                     embed)
        await message.author.send(
            'Hello there, I dont currently support DM commands. Please run my commands in a server')
        # https://discordpy.readthedocs.io/en/latest/faq.html#how-do-i-send-a-dm

    @BaseService.Listener(Events.on_message_edit)
    async def on_message_edit(self, before: discord.Message, after: discord.Message):
        """Persist an edit of a cached message and audit-log the before/after."""
        log.info(f'Message edited in #{before.channel.name} By: \
            {self.get_full_name(before.author)} \nBefore: {before.content} \nAfter: {after.content}')

        await self.batch_send_message_edit(after.id, after.content)

        embed = discord.Embed(title=f':repeat: **Message Edited in #{before.channel.name}**',
                              color=Colors.ClemsonOrange)
        embed.add_field(name=f'Message Link', value=f'[Click Here]({after.jump_url})')
        # Embed fields cap at 1024 chars; 900 leaves room for the code fences.
        before_chunk = self.split_string_chunks(before.content, 900)
        after_chunk = self.split_string_chunks(after.content, 900)
        for i, val in enumerate(before_chunk):
            embed.add_field(name='**Before**' if i == 0 else 'Cont...', value=f'```{val}```', inline=False)
        for i, val in enumerate(after_chunk):
            embed.add_field(name='**After**' if i == 0 else 'Cont...', value=f'```{val}```', inline=False)
        embed.set_footer(text=f'{self.get_full_name(before.author)}', icon_url=before.author.display_avatar.url)

        await self.bot.messenger.publish(Events.on_send_in_designated_channel,
                                         DesignatedChannels.message_log,
                                         after.guild.id,
                                         embed)

    @BaseService.Listener(Events.on_raw_message_edit)
    async def on_raw_message_edit(self, payload):
        """Persist/audit-log an edit of a message that is not in the gateway cache."""
        message = await self.bot.message_route.get_message(payload.message_id)
        channel = self.bot.get_channel(payload.channel_id)

        try:
            if message is not None:
                log.info(f'Uncached message edited in #{channel.name} By: \
                    {message["userId"]} \nBefore: {message["content"]} \nAfter: {payload.data["content"]}')

                await self.batch_send_message_edit(message['id'], payload.data['content'])

                embed = discord.Embed(title=f':repeat: **Uncached message edited in #{channel.name}**',
                                      color=Colors.ClemsonOrange)
                before_chunk = self.split_string_chunks(message['content'], 900)
                after_chunk = self.split_string_chunks(payload.data['content'], 900)
                for i, val in enumerate(before_chunk):
                    embed.add_field(name='**Before**' if i == 0 else 'Cont...', value=f'```{val}```', inline=False)
                for i, val in enumerate(after_chunk):
                    embed.add_field(name='**After**' if i == 0 else 'Cont...', value=f'```{val}```', inline=False)
                embed.set_footer(text=f'Author id: {payload.data["author"]["id"]}')

                await self.bot.messenger.publish(Events.on_send_in_designated_channel,
                                                 DesignatedChannels.message_log,
                                                 int(payload.data['guild_id']),
                                                 embed)
            else:
                try:
                    # BUGFIX: the raw edit payload key is 'content', not
                    # 'Content' — the capitalized key always raised KeyError
                    # and the inner except silently dropped this log line.
                    log.info(f'Uncached message edited in #{channel.name} By: \
                        {payload.data["author"]["id"]} \nBefore: Unknown Content \nAfter: {payload.data["content"]}')
                except KeyError:
                    log.error(json.dumps(payload.data))

                embed = discord.Embed(title=f':repeat: **Uncached message edited in #{channel.name}**',
                                      color=Colors.ClemsonOrange)
                embed.add_field(name='Before', value='Unknown, message not stored in the database', inline=False)
                after_chunk = self.split_string_chunks(payload.data['content'], 900)
                for i, val in enumerate(after_chunk):
                    embed.add_field(name='**After**' if i == 0 else 'Cont...', value=f'```{val}```', inline=False)
                embed.set_footer(text=f'Author id: {payload.data["author"]["id"]}')

                await self.bot.messenger.publish(Events.on_send_in_designated_channel,
                                                 DesignatedChannels.message_log,
                                                 int(payload.data['guild_id']),
                                                 embed)
        except KeyError as e:
            # Raw edit payloads for embeds/pins may lack 'content'/'author';
            # skip logging rather than crash the listener.
            log.error(f'raw_message_edit Error: {e} \n')

    @BaseService.Listener(Events.on_message_delete)
    async def on_message_delete(self, message: discord.Message):
        """Audit-log deletion of a cached message with its full content."""
        log.info(f'Uncached message deleted in #{message.channel.name} by \
            {self.get_full_name(message.author)}: {message.content}')

        embed = discord.Embed(title=f':wastebasket: **Message Deleted in #{message.channel.name}**',
                              color=Colors.ClemsonOrange)
        message_chunk = self.split_string_chunks(message.content, 900)
        for i, val in enumerate(message_chunk):
            embed.add_field(name='**Message**' if i == 0 else 'Cont...', value=f'```{val}```', inline=False)
        embed.set_footer(text=f'{self.get_full_name(message.author)}', icon_url=message.author.display_avatar.url)

        await self.bot.messenger.publish(Events.on_send_in_designated_channel,
                                         DesignatedChannels.message_log,
                                         message.guild.id,
                                         embed)

    @BaseService.Listener(Events.on_raw_message_delete)
    async def on_raw_message_delete(self, payload):
        """Audit-log deletion of an uncached message, recovering content from the API when possible."""
        message = await self.bot.message_route.get_message(payload.message_id)
        channel = self.bot.get_channel(payload.channel_id)
        log.info(f'Uncached message deleted id:{payload.message_id} in #{channel.name}')

        if message is not None:
            embed = discord.Embed(title=f':wastebasket: **Uncached message deleted in #{channel.name}**',
                                  color=Colors.ClemsonOrange)
            message_chunk = self.split_string_chunks(message['content'], 900)
            for i, val in enumerate(message_chunk):
                embed.add_field(name='**Message**' if i == 0 else 'Cont...', value=f'```{val}```', inline=False)
        else:
            embed = discord.Embed(title=f':wastebasket: **Uncached message deleted in #{channel.name}**',
                                  color=Colors.ClemsonOrange)
            embed.add_field(name='Message', value='Unknown, message not in the database', inline=False)

        await self.bot.messenger.publish(Events.on_send_in_designated_channel,
                                         DesignatedChannels.message_log,
                                         int(payload.guild_id),
                                         embed)

    async def handle_message_links(self, message: discord.Message) -> None:
        """
        Searches all incoming messages for a discord message link and replies
        to the context with that message

        Args:
            message (discord.Message): the original message containing the link
        """
        # NOTE(review): \d{18} only matches 18-digit snowflakes; Discord IDs
        # can be 17-19 digits — confirm whether shorter/longer IDs should match.
        pattern = r'^http(s)?:\/\/(www.)?discord(app)?.com\/channels\/(?P<guild_id>\d{18})\/(?P<channel_id>\d{18})\/(?P<message_id>\d{18})\n*$'  # noqa: E501
        result = re.search(pattern, message.content)
        if not result:
            return
        # Respect the per-guild opt-out for link embedding.
        if not await self.bot.guild_route.get_can_embed_link(message.guild.id):
            return

        matches = result.groupdict()
        avi = message.author.display_avatar.url
        source_channel = message.channel
        link_channel = await self.bot.fetch_channel(matches['channel_id'])
        link_message = await link_channel.fetch_message(matches['message_id'])

        if len(link_message.embeds) > 0:
            # Linked message already has an embed: re-post it with attribution.
            embed = link_message.embeds[0]
            full_name = f'{self.get_full_name(message.author)}'
            embed.add_field(name=f'Quoted by:', value=f'{full_name} from [Click Me]({link_message.jump_url})')
            await message.delete()
            await source_channel.send(embed=embed)
            return

        embed = discord.Embed(title=f'Message linked from #{link_channel.name}', color=Colors.ClemsonOrange)
        embed.set_author(name=f'Quoted by: {self.get_full_name(message.author)}', icon_url=avi)
        if link_message.content:
            embed.add_field(name='Content', value=link_message.content, inline=False)

        image = None
        if link_message.attachments:
            # Only the first attachment is surfaced as the embed image.
            att = link_message.attachments[0]
            image = att.proxy_url
            embed.add_field(name="Attachments", value=f"[{att.filename}]({att.url})", inline=False)
        if image:
            embed.set_image(url=image)

        embed.add_field(name='Author', value=f'{self.get_full_name(link_message.author)}', inline=True)
        embed.add_field(name='Message Link', value=f'[Click Me]({link_message.jump_url})', inline=True)

        await source_channel.send(embed=embed)
        # Remove the bare link message once the quote embed has been posted.
        await message.delete()

    def get_full_name(self, author) -> str:
        # Legacy name#discriminator format.
        return f'{author.name}#{author.discriminator}'

    def split_string_chunks(self, string: str, n: int) -> Iterable[str]:
        # Lazy generator of n-sized slices; used to fit text into embed fields.
        return (string[i: i + n] for i in range(0, len(string), n))

    async def load_service(self):
        # No startup work required for this service.
        pass
import contextlib
import ipaddress
import json
import os
import random
import re
import time
import warnings
from collections import Counter
from typing import Any, Dict, List, Optional, Set, Union

import requests
import test_infra.utils.waiting
import waiting
import yaml
from assisted_service_client import models
from assisted_service_client.models.operator_type import OperatorType
from junit_report import JunitTestCase
from netaddr import IPAddress, IPNetwork
from test_infra import consts, utils
from test_infra.assisted_service_api import InventoryClient
from test_infra.controllers.load_balancer_controller import LoadBalancerController
from test_infra.controllers.node_controllers import Node
from test_infra.helper_classes.cluster_host import ClusterHost
from test_infra.helper_classes.config import BaseClusterConfig, BaseInfraEnvConfig
from test_infra.helper_classes.entity import Entity
from test_infra.helper_classes.events_handler import EventsHandler
from test_infra.helper_classes.infra_env import InfraEnv
from test_infra.helper_classes.nodes import Nodes
from test_infra.tools import static_network, terraform_utils
from test_infra.utils import Path, log, logs_utils, network_utils, operators_utils
from test_infra.utils.entity_name import ClusterName


class Cluster(Entity):
    """Test harness around one assisted-installer cluster: creation, host/network
    configuration, installation, and wait-for-state helpers."""

    MINIMUM_NODES_TO_WAIT = 1
    EVENTS_THRESHOLD = 500  # TODO - remove EVENTS_THRESHOLD after removing it from kni-assisted-installer-auto

    _config: BaseClusterConfig

    def __init__(
        self,
        api_client: InventoryClient,
        config: BaseClusterConfig,
        infra_env_config: BaseInfraEnvConfig,
        nodes: Optional[Nodes] = None,
    ):
        super().__init__(api_client, config, nodes)
        self._infra_env_config = infra_env_config
        self._infra_env = None

        # Update infraEnv configurations
        self._infra_env_config.cluster_id = config.cluster_id
        self._infra_env_config.openshift_version = self._config.openshift_version
        self._infra_env_config.pull_secret = self._config.pull_secret

        self._high_availability_mode = config.high_availability_mode
        self.name = config.cluster_name.get()

    @property
    def kubeconfig_path(self):
        return self._config.kubeconfig_path

    @property
    def iso_download_path(self):
        return self._config.iso_download_path

    @property
    def enable_image_download(self):
        return self._config.download_image

    def _update_day2_config(self, api_client: InventoryClient, cluster_id: str):
        """Mirror an existing (day-2) cluster's settings into the local config."""
        day2_cluster: models.cluster.Cluster = api_client.cluster_get(cluster_id)

        self.update_config(
            openshift_version=day2_cluster.openshift_version,
            cluster_name=ClusterName(day2_cluster.name),
            additional_ntp_source=day2_cluster.additional_ntp_source,
            user_managed_networking=day2_cluster.user_managed_networking,
            high_availability_mode=day2_cluster.high_availability_mode,
            olm_operators=day2_cluster.monitored_operators,
            base_dns_domain=day2_cluster.base_dns_domain,
            vip_dhcp_allocation=day2_cluster.vip_dhcp_allocation,
        )

    def _create(self) -> str:
        """Create the cluster via the API (or adopt an existing one) and return its id."""
        disk_encryption = models.DiskEncryption(
            enable_on=self._config.disk_encryption_roles,
            mode=self._config.disk_encryption_mode,
        )

        # A pre-set cluster_id means day-2: adopt instead of create.
        if self._config.cluster_id:
            log.info(f"Fetching day2 cluster with id {self._config.cluster_id}")
            self._update_day2_config(self.api_client, self._config.cluster_id)
            return self._config.cluster_id

        cluster = self.api_client.create_cluster(
            self._config.cluster_name.get(),
            ssh_public_key=self._config.ssh_public_key,
            openshift_version=self._config.openshift_version,
            pull_secret=self._config.pull_secret,
            base_dns_domain=self._config.base_dns_domain,
            vip_dhcp_allocation=self._config.vip_dhcp_allocation,
            additional_ntp_source=self._config.additional_ntp_source,
            user_managed_networking=self._config.user_managed_networking,
            high_availability_mode=self._config.high_availability_mode,
            olm_operators=[{"name": name} for name in self._config.olm_operators],
            network_type=self._config.network_type,
            disk_encryption=disk_encryption,
        )

        self._config.cluster_id = cluster.id
        return cluster.id

    def delete(self):
        self.api_client.delete_cluster(self.id)

    def get_details(self):
        return self.api_client.cluster_get(self.id)

    def get_cluster_name(self):
        return self.get_details().name

    def get_hosts(self):
        return self.api_client.get_cluster_hosts(self.id)

    def get_host_ids(self):
        return [host["id"] for host in self.get_hosts()]

    def get_host_ids_names_mapping(self):
        return {host["id"]: host["requested_hostname"] for host in self.get_hosts()}

    def get_host_assigned_roles(self):
        hosts = self.get_hosts()
        return {h["id"]: h["role"] for h in hosts}

    def get_operators(self):
        return self.api_client.get_cluster_operators(self.id)

    # TODO remove in favor of generate_infra_env
    def generate_image(self):
        warnings.warn("generate_image is deprecated. Use generate_infra_env instead.", DeprecationWarning)
        self.api_client.generate_image(cluster_id=self.id, ssh_key=self._config.ssh_public_key)

    def generate_infra_env(
        self, static_network_config=None, iso_image_type=None, ssh_key=None, ignition_info=None, proxy=None
    ) -> InfraEnv:
        """Build an InfraEnv for this cluster, overriding config fields where arguments are given."""
        self._infra_env_config.ssh_public_key = ssh_key or self._config.ssh_public_key
        self._infra_env_config.iso_image_type = iso_image_type or self._config.iso_image_type
        self._infra_env_config.static_network_config = static_network_config
        self._infra_env_config.ignition_config_override = ignition_info
        self._infra_env_config.proxy = proxy or self._config.proxy

        infra_env = InfraEnv(api_client=self.api_client, config=self._infra_env_config)
        self._infra_env = infra_env
        return infra_env

    def update_infra_env_proxy(self, proxy: models.Proxy) -> None:
        self._infra_env_config.proxy = proxy
        self._infra_env.update_proxy(proxy=proxy)

    def download_infra_env_image(self, iso_download_path=None) -> Path:
        iso_download_path = iso_download_path or self._config.iso_download_path
        return self._infra_env.download_image(iso_download_path=iso_download_path)

    @JunitTestCase()
    def generate_and_download_infra_env(
        self,
        iso_download_path=None,
        static_network_config=None,
        iso_image_type=None,
        ssh_key=None,
        ignition_info=None,
        proxy=None,
    ) -> Path:
        # For static-IP setups, derive the network config from terraform state
        # unless the caller supplied one.
        if self._config.is_static_ip and static_network_config is None:
            static_network_config = static_network.generate_static_network_data_from_tf(self.nodes.controller.tf_folder)
        self.generate_infra_env(
            static_network_config=static_network_config,
            iso_image_type=iso_image_type,
            ssh_key=ssh_key,
            ignition_info=ignition_info,
            proxy=proxy,
        )
        return self.download_infra_env_image(iso_download_path=iso_download_path or self._config.iso_download_path)

    @JunitTestCase()
    def generate_and_download_image(
        self, iso_download_path=None, static_network_config=None, iso_image_type=None, ssh_key=None
    ):
        warnings.warn(
            "generate_and_download_image is deprecated. Use generate_and_download_infra_env instead.",
            DeprecationWarning,
        )

        iso_download_path = iso_download_path or self._config.iso_download_path
        # ensure file path exists before downloading
        if not os.path.exists(iso_download_path):
            utils.recreate_folder(os.path.dirname(iso_download_path), force_recreate=False)

        self.api_client.generate_and_download_image(
            cluster_id=self.id,
            ssh_key=ssh_key or self._config.ssh_public_key,
            image_path=iso_download_path,
            image_type=iso_image_type or self._config.iso_image_type,
            static_network_config=static_network_config,
        )

    def wait_until_hosts_are_disconnected(self, nodes_count: int = None):
        statuses = [consts.NodesStatus.DISCONNECTED]
        test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
            client=self.api_client,
            cluster_id=self.id,
            nodes_count=nodes_count or self.nodes.nodes_count,
            statuses=statuses,
            timeout=consts.DISCONNECTED_TIMEOUT,
        )

    @JunitTestCase()
    def wait_until_hosts_are_discovered(self, allow_insufficient=False, nodes_count: int = None):
        statuses = [consts.NodesStatus.PENDING_FOR_INPUT, consts.NodesStatus.KNOWN]
        if allow_insufficient:
            statuses.append(consts.NodesStatus.INSUFFICIENT)
        test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
            client=self.api_client,
            cluster_id=self.id,
            nodes_count=nodes_count or self.nodes.nodes_count,
            statuses=statuses,
            timeout=consts.NODES_REGISTERED_TIMEOUT,
        )

    def _get_matching_hosts(self, host_type, count):
        """Pick up to `count` hosts whose requested hostname contains `host_type`."""
        hosts = self.get_hosts()
        return [{"id": h["id"], "role": host_type} for h in hosts if host_type in h["requested_hostname"]][:count]

    def set_cluster_name(self, cluster_name: str):
        log.info(f"Setting Cluster Name:{cluster_name} for cluster: {self.id}")
        self.update_config(cluster_name=ClusterName(prefix=cluster_name, suffix=None))
        self.api_client.update_cluster(self.id, {"name": cluster_name})

    def select_installation_disk(self, host_id: str, disk_paths: List[dict]) -> None:
        self._infra_env.select_host_installation_disk(host_id=host_id, disk_paths=disk_paths)

    def set_ocs(self, properties=None):
        self.set_olm_operator(consts.OperatorType.OCS, properties=properties)

    def set_cnv(self, properties=None):
        self.set_olm_operator(consts.OperatorType.CNV, properties=properties)

    def unset_ocs(self):
        self.unset_olm_operator(consts.OperatorType.OCS)

    def unset_cnv(self):
        self.unset_olm_operator(consts.OperatorType.CNV)

    def unset_olm_operator(self, operator_name):
        """Remove one OLM operator from the cluster, keeping all other non-builtin ones."""
        log.info(f"Unsetting {operator_name} for cluster: {self.id}")
        cluster = self.api_client.cluster_get(self.id)

        olm_operators = []
        for operator in cluster.monitored_operators:
            if operator.name == operator_name or operator.operator_type == OperatorType.BUILTIN:
                continue
            olm_operators.append({"name": operator.name, "properties": operator.properties})

        self.api_client.update_cluster(self.id, {"olm_operators": olm_operators})

    def set_olm_operator(self, operator_name, properties=None):
        """Add one OLM operator to the cluster (no-op if it is already monitored)."""
        log.info(f"Setting {operator_name} for cluster: {self.id}")
        cluster = self.api_client.cluster_get(self.id)

        if operator_name in [o.name for o in cluster.monitored_operators]:
            return

        olm_operators = []
        for operator in cluster.monitored_operators:
            if operator.operator_type == OperatorType.BUILTIN:
                continue
            olm_operators.append({"name": operator.name, "properties": operator.properties})
        olm_operators.append({"name": operator_name, "properties": properties})

        self._config.olm_operators = olm_operators
        self.api_client.update_cluster(self.id, {"olm_operators": olm_operators})

    def set_host_roles(self, num_masters: int = None, num_workers: int = None, requested_roles=None):
        """Assign master/worker roles to hosts by hostname match and return the assignments."""
        if requested_roles is None:
            requested_roles = Counter(
                master=num_masters or self.nodes.masters_count, worker=num_workers or self.nodes.workers_count
            )
        assigned_roles = self._get_matching_hosts(host_type=consts.NodeRoles.MASTER, count=requested_roles["master"])
        assigned_roles.extend(
            self._get_matching_hosts(host_type=consts.NodeRoles.WORKER, count=requested_roles["worker"])
        )

        for role in assigned_roles:
            self._infra_env.update_host(host_id=role["id"], host_role=role["role"])

        return assigned_roles

    def set_specific_host_role(self, host, role):
        self._infra_env.update_host(host_id=host["id"], host_role=role)

    def set_network_params(self, controller=None):
        """Configure cluster networking (VIPs / machine networks) based on the platform and config."""
        # Controller argument is here only for backward compatibility TODO - Remove after QE refactor all e2e tests
        controller = controller or self.nodes.controller  # TODO - Remove after QE refactor all e2e tests

        if self._config.platform == consts.Platforms.NONE:
            log.info("On None platform, leaving network management to the user")
            api_vip = ingress_vip = machine_networks = None
        elif self._config.vip_dhcp_allocation or self._high_availability_mode == consts.HighAvailabilityMode.NONE:
            log.info("Letting access VIPs be deducted from machine networks")
            api_vip = ingress_vip = None
            machine_networks = self.get_machine_networks()
        else:
            log.info("Assigning VIPs statically")
            access_vips = controller.get_ingress_and_api_vips()
            api_vip = access_vips["api_vip"]
            ingress_vip = access_vips["ingress_vip"]
            machine_networks = None

        # Dual-stack clusters need every machine address, not just the primary CIDR.
        if self._config.is_ipv4 and self._config.is_ipv6:
            machine_networks = controller.get_all_machine_addresses()

        self.set_advanced_networking(
            vip_dhcp_allocation=self._config.vip_dhcp_allocation,
            cluster_networks=self._config.cluster_networks,
            service_networks=self._config.service_networks,
            machine_networks=machine_networks,
            api_vip=api_vip,
            ingress_vip=ingress_vip,
        )

    def get_primary_machine_cidr(self):
        cidr = self.nodes.controller.get_primary_machine_cidr()

        if not cidr:
            # Support controllers which the machine cidr is not configurable. taking it from the AI instead
            matching_cidrs = self.get_cluster_matching_cidrs(Cluster.get_cluster_hosts(self.get_details()))

            if not matching_cidrs:
                raise RuntimeError("No matching cidr for DHCP")

            cidr = next(iter(matching_cidrs))

        return cidr

    def get_machine_networks(self):
        """Collect primary + provisioning machine CIDRs, falling back to the service's view."""
        networks = []

        primary_machine_cidr = self.nodes.controller.get_primary_machine_cidr()
        if primary_machine_cidr:
            networks.append(primary_machine_cidr)

        secondary_machine_cidr = self.nodes.controller.get_provisioning_cidr()
        if secondary_machine_cidr:
            networks.append(secondary_machine_cidr)

        if not networks:
            # Support controllers which the machine cidr is not configurable. taking it from the AI instead
            networks = self.get_cluster_matching_cidrs(Cluster.get_cluster_hosts(self.get_details()))

            if not networks:
                raise RuntimeError("No matching cidr for DHCP")

        return networks

    def set_ingress_and_api_vips(self, vips):
        # BUGFIX: the inner dict keys previously reused double quotes inside a
        # double-quoted f-string, which is a SyntaxError on Python < 3.12
        # (quote reuse was only allowed by PEP 701 in 3.12).
        log.info(f"Setting API VIP:{vips['api_vip']} and ingress VIP:{vips['ingress_vip']} for cluster: {self.id}")
        self.api_client.update_cluster(self.id, vips)

    def set_ssh_key(self, ssh_key: str):
        log.info(f"Setting SSH key:{ssh_key} for cluster: {self.id}")
        self.update_config(ssh_public_key=ssh_key)
        self.api_client.update_cluster(self.id, {"ssh_public_key": ssh_key})

    def set_base_dns_domain(self, base_dns_domain: str):
        log.info(f"Setting base DNS domain:{base_dns_domain} for cluster: {self.id}")
        self.update_config(base_dns_domain=base_dns_domain)
        self.api_client.update_cluster(self.id, {"base_dns_domain": base_dns_domain})

    def set_advanced_networking(
        self,
        vip_dhcp_allocation: Optional[bool] = None,
        cluster_networks: Optional[List[models.ClusterNetwork]] = None,
        service_networks: Optional[List[models.ServiceNetwork]] = None,
        machine_networks: Optional[List[models.MachineNetwork]] = None,
        api_vip: Optional[str] = None,
        ingress_vip: Optional[str] = None,
    ):
        """Update networking settings; None arguments fall back to the stored config."""
        if machine_networks is None:
            machine_networks = self._config.machine_networks
        else:
            # Caller passes raw CIDR strings; wrap them in API models.
            machine_networks = [models.MachineNetwork(cidr=cidr) for cidr in machine_networks]

        if vip_dhcp_allocation is None:
            vip_dhcp_allocation = self._config.vip_dhcp_allocation

        advanced_networking = {
            "vip_dhcp_allocation": vip_dhcp_allocation,
            "cluster_networks": cluster_networks if cluster_networks is not None else self._config.cluster_networks,
            "service_networks": service_networks if service_networks is not None else self._config.service_networks,
            "machine_networks": machine_networks,
            "api_vip": api_vip if api_vip is not None else self._config.api_vip,
            "ingress_vip": ingress_vip if ingress_vip is not None else self._config.ingress_vip,
        }

        log.info(f"Updating advanced networking with {advanced_networking} for cluster: {self.id}")

        self.update_config(**advanced_networking)
        self.api_client.update_cluster(self.id, advanced_networking)

    def set_pull_secret(self, pull_secret: str):
        log.info(f"Setting pull secret:{pull_secret} for cluster: {self.id}")
        self.update_config(pull_secret=pull_secret)
        self.api_client.update_cluster(self.id, {"pull_secret": pull_secret})

    def set_host_name(self, host_id, requested_name):
        log.info(f"Setting Required Host Name:{requested_name}, for Host ID: {host_id}")
        self._infra_env.update_host(host_id=host_id, host_name=requested_name)

    def set_additional_ntp_source(self, ntp_source: List[str]):
        """Accept a list or comma-joined string of NTP sources and push it to the cluster."""
        log.info(f"Setting Additional NTP source:{ntp_source}")
        if isinstance(ntp_source, list):
            ntp_source_string = ",".join(ntp_source)
        elif isinstance(ntp_source, str):
            ntp_source_string = ntp_source
        else:
            raise TypeError(
                f"ntp_source must be a string or a list of strings, got: {ntp_source}," f" type: {type(ntp_source)}"
            )
        self.update_config(additional_ntp_source=ntp_source_string)
        self.api_client.update_cluster(self.id, {"additional_ntp_source": ntp_source_string})

    def patch_discovery_ignition(self, ignition):
        self._infra_env.patch_discovery_ignition(ignition_info=ignition)

    def set_proxy_values(self, proxy_values: models.Proxy) -> None:
        log.info(f"Setting proxy values {proxy_values} for cluster: {self.id}")
        self.update_config(proxy=proxy_values)
        self.api_client.set_cluster_proxy(
            self.id,
            http_proxy=self._config.proxy.http_proxy,
            https_proxy=self._config.proxy.https_proxy,
            no_proxy=self._config.proxy.no_proxy,
        )

    @JunitTestCase()
    def start_install(self):
        self.api_client.install_cluster(cluster_id=self.id)

    def wait_for_logs_complete(self, timeout, interval=60, check_host_logs_only=False):
        logs_utils.wait_for_logs_complete(
            client=self.api_client,
            cluster_id=self.id,
            timeout=timeout,
            interval=interval,
            check_host_logs_only=check_host_logs_only,
        )

    def wait_for_installing_in_progress(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
        test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.NodesStatus.INSTALLING_IN_PROGRESS],
            nodes_count=nodes_count,
            timeout=consts.INSTALLING_IN_PROGRESS_TIMEOUT,
        )

    def wait_for_write_image_to_disk(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
        test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
            client=self.api_client,
            cluster_id=self.id,
            stages=[consts.HostsProgressStages.WRITE_IMAGE_TO_DISK, consts.HostsProgressStages.REBOOTING],
            nodes_count=nodes_count,
        )

    def wait_for_host_status(self, statuses, fall_on_error_status=True, nodes_count: int = MINIMUM_NODES_TO_WAIT):
        test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=statuses,
            nodes_count=nodes_count,
            fall_on_error_status=fall_on_error_status,
        )

    def wait_for_specific_host_status(self, host, statuses, nodes_count: int = MINIMUM_NODES_TO_WAIT):
        test_infra.utils.waiting.wait_till_specific_host_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            host_name=host.get("requested_hostname"),
            statuses=statuses,
            nodes_count=nodes_count,
        )

    def wait_for_specific_host_stage(self, host: dict, stage: str, inclusive: bool = True):
        # `inclusive` controls whether the given stage itself counts as reached.
        index = consts.all_host_stages.index(stage)
        test_infra.utils.waiting.wait_till_specific_host_is_in_stage(
            client=self.api_client,
            cluster_id=self.id,
            host_name=host.get("requested_hostname"),
            stages=consts.all_host_stages[index:] if inclusive else consts.all_host_stages[index + 1 :],
        )

    def wait_for_cluster_in_error_status(self):
        utils.wait_till_cluster_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.ClusterStatus.ERROR],
            timeout=consts.ERROR_TIMEOUT,
        )

    def wait_for_pending_for_input_status(self):
        utils.wait_till_cluster_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.ClusterStatus.PENDING_FOR_INPUT],
            timeout=consts.PENDING_USER_ACTION_TIMEOUT,
        )

    def wait_for_at_least_one_host_to_boot_during_install(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
        test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
            client=self.api_client,
            cluster_id=self.id,
            stages=[consts.HostsProgressStages.REBOOTING],
            nodes_count=nodes_count,
        )

    def wait_for_non_bootstrap_masters_to_reach_configuring_state_during_install(self, num_masters: int = None):
        num_masters = num_masters if num_masters is not None else self.nodes.masters_count
        # The bootstrap master is excluded, hence num_masters - 1.
        test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
            client=self.api_client,
            cluster_id=self.id,
            stages=[consts.HostsProgressStages.CONFIGURING],
            nodes_count=num_masters - 1,
        )

    def wait_for_non_bootstrap_masters_to_reach_joined_state_during_install(self, num_masters: int = None):
        num_masters = num_masters if num_masters is not None else self.nodes.masters_count
        # The bootstrap master is excluded, hence num_masters - 1.
        test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
            client=self.api_client,
            cluster_id=self.id,
            stages=[consts.HostsProgressStages.JOINED],
            nodes_count=num_masters - 1,
        )

    def wait_for_hosts_stage(self, stage: str, inclusive: bool = True):
        index = consts.all_host_stages.index(stage)
        test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
            client=self.api_client,
            cluster_id=self.id,
            stages=consts.all_host_stages[index:] if inclusive else consts.all_host_stages[index + 1 :],
            nodes_count=self.nodes.nodes_count,
        )

    @JunitTestCase()
    def start_install_and_wait_for_installed(
        self,
        wait_for_hosts=True,
        wait_for_operators=True,
        wait_for_cluster_install=True,
        download_kubeconfig=True,
    ):
        """Kick off installation and optionally block on hosts/operators/cluster/kubeconfig."""
        self.start_install()
        if wait_for_hosts:
            self.wait_for_hosts_to_install()
        if wait_for_operators:
            self.wait_for_operators_to_finish()
        if wait_for_cluster_install:
            self.wait_for_install()
        if download_kubeconfig:
            self.download_kubeconfig()

    def disable_worker_hosts(self):
        hosts = self.get_hosts_by_role(consts.NodeRoles.WORKER)
        for host in hosts:
            self.disable_host(host)

    def disable_host(self, host):
        host_name = host["requested_hostname"]
        log.info(f"Going to disable host: {host_name} in cluster: {self.id}")
        self._infra_env.unbind_host(host_id=host["id"])

    def enable_host(self, host):
        host_name = host["requested_hostname"]
        log.info(f"Going to enable host: {host_name} in cluster: {self.id}")
        self._infra_env.bind_host(host_id=host["id"], cluster_id=self.id)

    def delete_host(self, host):
        host_id = host["id"]
        log.info(f"Going to delete host: {host_id} in cluster: {self.id}")
        self._infra_env.delete_host(host_id=host_id)

    def cancel_install(self):
        self.api_client.cancel_cluster_install(cluster_id=self.id)

    def get_bootstrap_hostname(self):
        hosts = self.get_hosts_by_role(consts.NodeRoles.MASTER)
        for host in hosts:
            if host.get("bootstrap"):
                log.info("Bootstrap node is: %s", host["requested_hostname"])
                return host["requested_hostname"]

    def get_hosts_by_role(self, role, hosts=None):
        hosts = hosts or self.api_client.get_cluster_hosts(self.id)
        nodes_by_role = []
        for host in hosts:
            if host["role"] == role:
                nodes_by_role.append(host)
        log.info(f"Found hosts: {nodes_by_role}, that has the role: {role}")
        return nodes_by_role

    def get_random_host_by_role(self, role):
        return random.choice(self.get_hosts_by_role(role))

    def get_reboot_required_hosts(self):
        return self.api_client.get_hosts_in_statuses(
            cluster_id=self.id, statuses=[consts.NodesStatus.RESETING_PENDING_USER_ACTION]
        )

    def reboot_required_nodes_into_iso_after_reset(self):
        hosts_to_reboot = self.get_reboot_required_hosts()
        self.nodes.run_for_given_nodes_by_cluster_hosts(cluster_hosts=hosts_to_reboot, func_name="reset")

    def wait_for_one_host_to_be_in_wrong_boot_order(self, fall_on_error_status=True):
        test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
            status_info=consts.HostStatusInfo.WRONG_BOOT_ORDER,
            fall_on_error_status=fall_on_error_status,
            timeout=consts.PENDING_USER_ACTION_TIMEOUT,
        )

    def wait_for_at_least_one_host_to_be_in_reboot_timeout(self, fall_on_error_status=True, nodes_count=1):
        test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
            status_info=consts.HostStatusInfo.REBOOT_TIMEOUT,
            nodes_count=nodes_count,
            fall_on_error_status=fall_on_error_status,
            timeout=consts.PENDING_USER_ACTION_TIMEOUT,
        )

    def wait_for_hosts_to_be_in_wrong_boot_order(
        self, nodes_count, timeout=consts.PENDING_USER_ACTION_TIMEOUT, fall_on_error_status=True
    ):
        test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
            status_info=consts.HostStatusInfo.WRONG_BOOT_ORDER,
            nodes_count=nodes_count,
            timeout=timeout,
            fall_on_error_status=fall_on_error_status,
        )

    def wait_for_ready_to_install(self):
        utils.wait_till_cluster_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.ClusterStatus.READY],
            timeout=consts.READY_TIMEOUT,
        )
        # This code added due to BZ:1909997, temporarily checking if help to prevent unexpected failure
        time.sleep(10)
        utils.wait_till_cluster_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.ClusterStatus.READY],
            timeout=consts.READY_TIMEOUT,
        )

    def is_in_cancelled_status(self):
        return utils.is_cluster_in_status(
            client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.CANCELLED]
        )

    def is_in_error(self):
        return utils.is_cluster_in_status(
            client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.ERROR]
        )

    def is_finalizing(self):
        return utils.is_cluster_in_status(
            client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.FINALIZING]
        )

    def is_installing(self):
        return utils.is_cluster_in_status(
            client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSTALLING]
        )

    def reset_install(self):
        self.api_client.reset_cluster_install(cluster_id=self.id)

    def is_in_insufficient_status(self):
        return utils.is_cluster_in_status(
            client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSUFFICIENT]
        )

    def wait_for_hosts_to_install(
        self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT, fall_on_error_status=True, nodes_count: int = None
    ):
        # NOTE(review): this uses ClusterStatus.INSTALLED as a host status;
        # presumably the string value matches the host status — confirm.
        test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.ClusterStatus.INSTALLED],
            nodes_count=nodes_count or self.nodes.nodes_count,
            timeout=timeout,
            fall_on_error_status=fall_on_error_status,
        )

    def wait_for_operators_to_finish(self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT, fall_on_error_status=True):
        operators = self.get_operators()

        # When errors are tolerated, FAILED also counts as "finished".
        if fall_on_error_status:
            statuses = [consts.OperatorStatus.AVAILABLE]
        else:
            statuses = [consts.OperatorStatus.AVAILABLE, consts.OperatorStatus.FAILED]

        operators_utils.wait_till_all_operators_are_in_status(
            client=self.api_client,
            cluster_id=self.id,
            operators_count=len(operators_utils.filter_operators_by_type(operators, OperatorType.BUILTIN)),
            operator_types=[OperatorType.BUILTIN],
            statuses=statuses,
            # NOTE(review): fall_on_error_status is deliberately False here —
            # error handling is expressed through `statuses` above; confirm.
            fall_on_error_status=False,
        )
operators_utils.wait_till_all_operators_are_in_status( client=self.api_client, cluster_id=self.id, operators_count=len(operators_utils.filter_operators_by_type(operators, OperatorType.OLM)), operator_types=[OperatorType.OLM], statuses=[consts.OperatorStatus.AVAILABLE, consts.OperatorStatus.FAILED], timeout=timeout, fall_on_error_status=fall_on_error_status, ) def is_operator_in_status(self, operator_name, status): return operators_utils.is_operator_in_status( operators=self.get_operators(), operator_name=operator_name, status=status ) def wait_for_install(self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT): utils.wait_till_cluster_is_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSTALLED], timeout=timeout, ) def _set_hostnames_and_roles(self): cluster_id = self.id hosts = self.to_cluster_hosts(self.api_client.get_cluster_hosts(cluster_id)) nodes = self.nodes.get_nodes(refresh=True) for host in hosts: if host.has_hostname(): continue name = self.find_matching_node_name(host, nodes) assert name is not None, ( f"Failed to find matching node for host with mac address {host.macs()}" f" nodes: {[(n.name, n.ips, n.macs) for n in nodes]}" ) if self.nodes.nodes_count == 1: role = None else: role = consts.NodeRoles.MASTER if consts.NodeRoles.MASTER in name else consts.NodeRoles.WORKER self._infra_env.update_host(host_id=host.get_id(), host_role=role, host_name=name) def _ha_not_none(self): return ( self._high_availability_mode != consts.HighAvailabilityMode.NONE and self._config.platform != consts.Platforms.NONE ) def download_image(self, iso_download_path: str = None) -> Path: if self._infra_env is None: log.warning("No infra_env found. 
Generating infra_env and downloading ISO") return self.generate_and_download_infra_env( iso_download_path=iso_download_path or self._config.iso_download_path, iso_image_type=self._config.iso_image_type, ) return self._infra_env.download_image(iso_download_path) @JunitTestCase() def prepare_for_installation(self, **kwargs): super(Cluster, self).prepare_for_installation(**kwargs) self.nodes.wait_for_networking() self._set_hostnames_and_roles() if self._high_availability_mode != consts.HighAvailabilityMode.NONE: self.set_host_roles(len(self.nodes.get_masters()), len(self.nodes.get_workers())) self.set_network_params(controller=self.nodes.controller) # in case of None platform we need to specify dns records before hosts are ready if self._config.platform == consts.Platforms.NONE: self._configure_load_balancer() self.nodes.controller.set_dns_for_user_managed_network() elif self._high_availability_mode == consts.HighAvailabilityMode.NONE: main_cidr = self.get_primary_machine_cidr() ip = Cluster.get_ip_for_single_node(self.api_client, self.id, main_cidr) self.nodes.controller.set_single_node_ip(ip) self.nodes.controller.set_dns(api_vip=ip, ingress_vip=ip) self.wait_for_ready_to_install() # in case of regular cluster, need to set dns after vips exits # in our case when nodes are ready, vips will be there for sure if self._ha_not_none(): vips_info = self.__class__.get_vips_from_cluster(self.api_client, self.id) self.nodes.controller.set_dns(api_vip=vips_info["api_vip"], ingress_vip=vips_info["ingress_vip"]) def download_kubeconfig_no_ingress(self, kubeconfig_path: str = None): self.api_client.download_kubeconfig_no_ingress(self.id, kubeconfig_path or self._config.kubeconfig_path) def download_kubeconfig(self, kubeconfig_path: str = None): self.api_client.download_kubeconfig(self.id, kubeconfig_path or self._config.kubeconfig_path) def download_installation_logs(self, cluster_tar_path): self.api_client.download_cluster_logs(self.id, cluster_tar_path) def 
get_install_config(self): return yaml.safe_load(self.api_client.get_cluster_install_config(self.id)) def get_admin_credentials(self): return self.api_client.get_cluster_admin_credentials(self.id) def register_dummy_host(self): dummy_host_id = "b164df18-0ff1-4b85-9121-059f10f58f71" self.api_client.register_host(self.id, dummy_host_id) def host_get_next_step(self, host_id): return self.api_client.host_get_next_step(self.id, host_id) def host_post_step_result(self, host_id, step_type, step_id, exit_code, output): self.api_client.host_post_step_result( self.id, host_id, step_type=step_type, step_id=step_id, exit_code=exit_code, output=output ) def host_update_install_progress(self, host_id, current_stage, progress_info=None): self.api_client.host_update_progress(self.id, host_id, current_stage, progress_info=progress_info) def host_complete_install(self): self.api_client.complete_cluster_installation(cluster_id=self.id, is_success=True) def setup_nodes(self, nodes, infra_env_config: BaseInfraEnvConfig): self._infra_env = InfraEnv.generate( self.api_client, infra_env_config, iso_image_type=self._config.iso_image_type ) self._infra_env.download_image(iso_download_path=self._config.iso_download_path) nodes.start_all() self.wait_until_hosts_are_discovered() return nodes.create_nodes_cluster_hosts_mapping(cluster=self) def wait_for_cluster_validation( self, validation_section, validation_id, statuses, timeout=consts.VALIDATION_TIMEOUT, interval=2 ): log.info("Wait until cluster %s validation %s is in status %s", self.id, validation_id, statuses) try: waiting.wait( lambda: self.is_cluster_validation_in_status( validation_section=validation_section, validation_id=validation_id, statuses=statuses ), timeout_seconds=timeout, sleep_seconds=interval, waiting_for=f"Cluster validation to be in status {statuses}", ) except BaseException: log.error( "Cluster validation status is: %s", utils.get_cluster_validation_value( self.api_client.cluster_get(self.id), validation_section, 
validation_id ), ) raise def is_cluster_validation_in_status(self, validation_section, validation_id, statuses): log.info("Is cluster %s validation %s in status %s", self.id, validation_id, statuses) try: return ( utils.get_cluster_validation_value( self.api_client.cluster_get(self.id), validation_section, validation_id ) in statuses ) except BaseException: log.exception("Failed to get cluster %s validation info", self.id) def wait_for_host_validation( self, host_id, validation_section, validation_id, statuses, timeout=consts.VALIDATION_TIMEOUT, interval=2 ): log.info("Wait until host %s validation %s is in status %s", host_id, validation_id, statuses) try: waiting.wait( lambda: self.is_host_validation_in_status( host_id=host_id, validation_section=validation_section, validation_id=validation_id, statuses=statuses, ), timeout_seconds=timeout, sleep_seconds=interval, waiting_for=f"Host validation to be in status {statuses}", ) except BaseException: log.error( "Host validation status is: %s", utils.get_host_validation_value( self.api_client.cluster_get(self.id), host_id, validation_section, validation_id ), ) raise def is_host_validation_in_status(self, host_id, validation_section, validation_id, statuses): log.info("Is host %s validation %s in status %s", host_id, validation_id, statuses) try: return ( utils.get_host_validation_value( self.api_client.cluster_get(self.id), host_id, validation_section, validation_id ) in statuses ) except BaseException: log.exception("Failed to get cluster %s validation info", self.id) def wait_for_cluster_to_be_in_installing_pending_user_action_status(self): utils.wait_till_cluster_is_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSTALLING_PENDING_USER_ACTION], timeout=consts.PENDING_USER_ACTION_TIMEOUT, ) def wait_for_cluster_to_be_in_installing_status(self): utils.wait_till_cluster_is_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSTALLING], 
timeout=consts.START_CLUSTER_INSTALLATION_TIMEOUT, ) def wait_for_cluster_to_be_in_finalizing_status(self): utils.wait_till_cluster_is_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.FINALIZING, consts.ClusterStatus.INSTALLED], timeout=consts.CLUSTER_INSTALLATION_TIMEOUT, break_statuses=[consts.ClusterStatus.ERROR], ) def wait_for_cluster_to_be_in_status(self, statuses, timeout=consts.ERROR_TIMEOUT): utils.wait_till_cluster_is_in_status( client=self.api_client, cluster_id=self.id, statuses=statuses, timeout=timeout, ) @classmethod def reset_cluster_and_wait_for_ready(cls, cluster): # Reset cluster install cluster.reset_install() assert cluster.is_in_insufficient_status() # Reboot required nodes into ISO cluster.reboot_required_nodes_into_iso_after_reset() # Wait for hosts to be rediscovered cluster.wait_until_hosts_are_discovered() cluster.wait_for_ready_to_install() def get_events(self, host_id="", infra_env_id=""): warnings.warn( "Cluster.get_events is now deprecated, use EventsHandler.get_events instead", PendingDeprecationWarning, ) handler = EventsHandler(self.api_client) return handler.get_events(host_id, self.id, infra_env_id) def _configure_load_balancer(self): main_cidr = self.get_primary_machine_cidr() secondary_cidr = self.nodes.controller.get_provisioning_cidr() master_ips = self.get_master_ips(self.api_client, self.id, main_cidr) + self.get_master_ips( self.api_client, self.id, secondary_cidr ) worker_ips = self.get_worker_ips(self.api_client, self.id, main_cidr) load_balancer_ip = str(IPNetwork(main_cidr).ip + 1) tf = terraform_utils.TerraformUtils(working_dir=self.nodes.controller.tf_folder) lb_controller = LoadBalancerController(tf) lb_controller.set_load_balancing_config(load_balancer_ip, master_ips, worker_ips) @classmethod def _get_namespace_index(cls, libvirt_network_if): # Hack to retrieve namespace index - does not exist in tests matcher = re.match(r"^tt(\d+)$", libvirt_network_if) return 
int(matcher.groups()[0]) if matcher is not None else 0 def wait_for_event(self, event_to_find, reference_time, params_list=None, host_id="", infra_env_id="", timeout=10): warnings.warn( "Cluster.wait_for_event is now deprecated, use EventsHandler.wait_for_event instead", PendingDeprecationWarning, ) handler = EventsHandler(self.api_client) return handler.wait_for_event( event_to_find, reference_time, params_list, host_id, infra_env_id, self.id, timeout ) @staticmethod def get_inventory_host_nics_data(host: dict, ipv4_first=True): def get_network_interface_ip(interface): addresses = ( interface.ipv4_addresses + interface.ipv6_addresses if ipv4_first else interface.ipv6_addresses + interface.ipv4_addresses ) return addresses[0].split("/")[0] if len(addresses) > 0 else None inventory = models.Inventory(**json.loads(host["inventory"])) interfaces_list = [models.Interface(**interface) for interface in inventory.interfaces] return [ { "name": interface.name, "model": interface.product, "mac": interface.mac_address, "ip": get_network_interface_ip(interface), "speed": interface.speed_mbps, } for interface in interfaces_list ] @staticmethod def get_hosts_nics_data(hosts: list, ipv4_first=True): return [Cluster.get_inventory_host_nics_data(h, ipv4_first=ipv4_first) for h in hosts] @staticmethod def get_cluster_hosts(cluster: models.cluster.Cluster) -> List[ClusterHost]: return [ClusterHost(h) for h in cluster.hosts] @staticmethod def to_cluster_hosts(hosts: List[Dict[str, Any]]) -> List[ClusterHost]: return [ClusterHost(models.Host(**h)) for h in hosts] def get_cluster_cidrs(self, hosts: List[ClusterHost]) -> Set[str]: cidrs = set() for host in hosts: ips = [] if self.nodes.is_ipv4: ips += host.ipv4_addresses() if self.nodes.is_ipv6: ips += host.ipv6_addresses() for host_ip in ips: cidr = network_utils.get_cidr_by_interface(host_ip) cidrs.add(cidr) return cidrs def get_cluster_matching_cidrs(self, hosts: List[ClusterHost]) -> Set[str]: cluster_cidrs = 
self.get_cluster_cidrs(hosts) matching_cidrs = set() for cidr in cluster_cidrs: for host in hosts: interfaces = [] if self.nodes.is_ipv4: interfaces += host.ipv4_addresses() if self.nodes.is_ipv6: interfaces += host.ipv6_addresses() if not network_utils.any_interface_in_cidr(interfaces, cidr): break matching_cidrs.add(cidr) return matching_cidrs @staticmethod def get_ip_for_single_node(client, cluster_id, machine_cidr, ipv4_first=True): cluster_info = client.cluster_get(cluster_id).to_dict() if len(cluster_info["hosts"]) == 0: raise Exception("No host found") network = IPNetwork(machine_cidr) interfaces = Cluster.get_inventory_host_nics_data(cluster_info["hosts"][0], ipv4_first=ipv4_first) for intf in interfaces: ip = intf["ip"] if IPAddress(ip) in network: return ip raise Exception("IP for single node not found") @staticmethod def get_ips_for_role(client, cluster_id, network, role): cluster_info = client.cluster_get(cluster_id).to_dict() ret = [] net = IPNetwork(network) hosts_interfaces = Cluster.get_hosts_nics_data([h for h in cluster_info["hosts"] if h["role"] == role]) for host_interfaces in hosts_interfaces: for intf in host_interfaces: ip = IPAddress(intf["ip"]) if ip in net: ret = ret + [intf["ip"]] return ret @staticmethod def get_master_ips(client, cluster_id, network): return Cluster.get_ips_for_role(client, cluster_id, network, consts.NodeRoles.MASTER) @staticmethod def get_worker_ips(client, cluster_id, network): return Cluster.get_ips_for_role(client, cluster_id, network, consts.NodeRoles.WORKER) @staticmethod def get_vips_from_cluster(client, cluster_id): cluster_info = client.cluster_get(cluster_id) return dict(api_vip=cluster_info.api_vip, ingress_vip=cluster_info.ingress_vip) def get_host_disks(self, host, filter=None): hosts = self.get_hosts() selected_host = [h for h in hosts if h["id"] == host["id"]] disks = json.loads(selected_host[0]["inventory"])["disks"] if not filter: return [disk for disk in disks] else: return [disk for disk in disks if 
filter(disk)] def get_inventory_host_ips_data(self, host: dict): nics = self.get_inventory_host_nics_data(host) return [nic["ip"] for nic in nics] # needed for None platform and single node # we need to get ip where api is running def get_kube_api_ip(self, hosts): for host in hosts: for ip in self.get_inventory_host_ips_data(host): if self.is_kubeapi_service_ready(ip): return ip def get_api_vip(self, cluster): cluster = cluster or self.get_details() api_vip = cluster.api_vip if not api_vip and cluster.user_managed_networking: log.info("API VIP is not set, searching for api ip on masters") masters = self.get_hosts_by_role(consts.NodeRoles.MASTER, hosts=cluster.to_dict()["hosts"]) api_vip = self._wait_for_api_vip(masters) log.info("api vip is %s", api_vip) return api_vip def _wait_for_api_vip(self, hosts, timeout=180): """Enable some grace time for waiting for API's availability.""" return waiting.wait( lambda: self.get_kube_api_ip(hosts=hosts), timeout_seconds=timeout, sleep_seconds=5, waiting_for="API's IP" ) def find_matching_node_name(self, host: ClusterHost, nodes: List[Node]) -> Union[str, None]: # Looking for node matches the given host by its mac address (which is unique) for node in nodes: for mac in node.macs: if mac.lower() in host.macs(): return node.name # IPv6 static ips if self._config.is_static_ip: mappings = static_network.get_name_to_mac_addresses_mapping(self.nodes.controller.tf_folder) for mac in host.macs(): for name, macs in mappings.items(): if mac in macs: return name return None @staticmethod def is_kubeapi_service_ready(ip_or_dns): """Validate if kube-api is ready on given address.""" with contextlib.suppress(ValueError): # IPv6 addresses need to be surrounded with square-brackets # to differentiate them from domain names if ipaddress.ip_address(ip_or_dns).version == 6: ip_or_dns = f"[{ip_or_dns}]" try: response = requests.get(f"https://{ip_or_dns}:6443/readyz", verify=False, timeout=1) return response.ok except BaseException: return False 
def wait_and_kill_installer(self, host): # Wait for specific host to be in installing in progress self.wait_for_specific_host_status(host=host, statuses=[consts.NodesStatus.INSTALLING_IN_PROGRESS]) # Kill installer to simulate host error selected_node = self.nodes.get_node_from_cluster_host(host) selected_node.kill_installer() def get_api_vip_from_cluster(api_client, cluster_info: Union[dict, models.cluster.Cluster], pull_secret): import warnings from tests.config import ClusterConfig, InfraEnvConfig warnings.warn( "Soon get_api_vip_from_cluster will be deprecated. Avoid using or adding new functionality to " "this function. The function and solution for that case have not been determined yet. It might be " "on another module, or as a classmethod within Cluster class." " For more information see https://issues.redhat.com/browse/MGMT-4975", PendingDeprecationWarning, ) if isinstance(cluster_info, dict): cluster_info = models.cluster.Cluster(**cluster_info) cluster = Cluster( api_client=api_client, infra_env_config=InfraEnvConfig(), config=ClusterConfig( cluster_name=ClusterName(cluster_info.name), pull_secret=pull_secret, ssh_public_key=cluster_info.ssh_public_key, cluster_id=cluster_info.id, ), nodes=None, ) return cluster.get_api_vip(cluster=cluster_info)
import contextlib
import ipaddress
import json
import os
import random
import re
import time
import warnings
from collections import Counter
from typing import Any, Dict, List, Optional, Set, Union

import requests
import test_infra.utils.waiting
import waiting
import yaml
from assisted_service_client import models
from assisted_service_client.models.operator_type import OperatorType
from junit_report import JunitTestCase
from netaddr import IPAddress, IPNetwork
from test_infra import consts, utils
from test_infra.assisted_service_api import InventoryClient
from test_infra.controllers.load_balancer_controller import LoadBalancerController
from test_infra.controllers.node_controllers import Node
from test_infra.helper_classes.cluster_host import ClusterHost
from test_infra.helper_classes.config import BaseClusterConfig, BaseInfraEnvConfig
from test_infra.helper_classes.entity import Entity
from test_infra.helper_classes.events_handler import EventsHandler
from test_infra.helper_classes.infra_env import InfraEnv
from test_infra.helper_classes.nodes import Nodes
from test_infra.tools import static_network, terraform_utils
from test_infra.utils import Path, log, logs_utils, network_utils, operators_utils
from test_infra.utils.entity_name import ClusterName


class Cluster(Entity):
    """Test-infra wrapper for an assisted-installer cluster and its lifecycle."""

    # Minimum hosts to wait for in boot-stage waiters
    MINIMUM_NODES_TO_WAIT = 1
    EVENTS_THRESHOLD = 500  # TODO - remove EVENTS_THRESHOLD after removing it from kni-assisted-installer-auto

    _config: BaseClusterConfig

    def __init__(
        self,
        api_client: InventoryClient,
        config: BaseClusterConfig,
        infra_env_config: BaseInfraEnvConfig,
        nodes: Optional[Nodes] = None,
    ):
        """Create (or attach to) a cluster; propagates cluster settings into the infra-env config."""
        super().__init__(api_client, config, nodes)
        self._infra_env_config = infra_env_config
        self._infra_env = None

        # Update infraEnv configurations
        self._infra_env_config.cluster_id = config.cluster_id
        self._infra_env_config.openshift_version = self._config.openshift_version
        self._infra_env_config.pull_secret = self._config.pull_secret

        self._high_availability_mode = config.high_availability_mode
        self.name = config.cluster_name.get()

    @property
    def kubeconfig_path(self):
        """Configured path for the downloaded kubeconfig."""
        return self._config.kubeconfig_path

    @property
    def iso_download_path(self):
        """Configured path for the downloaded discovery ISO."""
        return self._config.iso_download_path

    @property
    def enable_image_download(self):
        """Whether the discovery image should be downloaded."""
        return self._config.download_image

    def _update_day2_config(self, api_client: InventoryClient, cluster_id: str):
        """Refresh local config from an existing (day-2) cluster fetched by id."""
        day2_cluster: models.cluster.Cluster = api_client.cluster_get(cluster_id)

        self.update_config(
            **dict(
                openshift_version=day2_cluster.openshift_version,
                cluster_name=ClusterName(day2_cluster.name),
                additional_ntp_source=day2_cluster.additional_ntp_source,
                user_managed_networking=day2_cluster.user_managed_networking,
                high_availability_mode=day2_cluster.high_availability_mode,
                olm_operators=day2_cluster.monitored_operators,
                base_dns_domain=day2_cluster.base_dns_domain,
                vip_dhcp_allocation=day2_cluster.vip_dhcp_allocation,
            )
        )

    def _create(self) -> str:
        """Create the cluster via the API (or adopt an existing id) and return its id."""
        disk_encryption = models.DiskEncryption(
            enable_on=self._config.disk_encryption_roles,
            mode=self._config.disk_encryption_mode,
        )

        if self._config.cluster_id:
            log.info(f"Fetching day2 cluster with id {self._config.cluster_id}")
            self._update_day2_config(self.api_client, self._config.cluster_id)
            return self._config.cluster_id

        cluster = self.api_client.create_cluster(
            self._config.cluster_name.get(),
            ssh_public_key=self._config.ssh_public_key,
            openshift_version=self._config.openshift_version,
            pull_secret=self._config.pull_secret,
            base_dns_domain=self._config.base_dns_domain,
            vip_dhcp_allocation=self._config.vip_dhcp_allocation,
            additional_ntp_source=self._config.additional_ntp_source,
            user_managed_networking=self._config.user_managed_networking,
            high_availability_mode=self._config.high_availability_mode,
            olm_operators=[{"name": name} for name in self._config.olm_operators],
            network_type=self._config.network_type,
            disk_encryption=disk_encryption,
        )

        self._config.cluster_id = cluster.id
        return cluster.id

    def delete(self):
        """Delete the cluster from the service."""
        self.api_client.delete_cluster(self.id)

    def get_details(self):
        """Return the full cluster model from the API."""
        return self.api_client.cluster_get(self.id)

    def get_cluster_name(self):
        """Return the cluster's current name."""
        return self.get_details().name

    def get_hosts(self):
        """Return raw host dicts for the cluster."""
        return self.api_client.get_cluster_hosts(self.id)

    def get_host_ids(self):
        """Return the ids of all cluster hosts."""
        return [host["id"] for host in self.get_hosts()]

    def get_host_ids_names_mapping(self):
        """Return {host_id: requested_hostname} for all hosts."""
        return {host["id"]: host["requested_hostname"] for host in self.get_hosts()}

    def get_host_assigned_roles(self):
        """Return {host_id: role} for all hosts."""
        hosts = self.get_hosts()
        return {h["id"]: h["role"] for h in hosts}

    def get_operators(self):
        """Return the cluster's monitored operators."""
        return self.api_client.get_cluster_operators(self.id)

    # TODO remove in favor of generate_infra_env
    def generate_image(self):
        """Deprecated: generate a discovery image directly on the cluster."""
        warnings.warn("generate_image is deprecated. Use generate_infra_env instead.", DeprecationWarning)
        self.api_client.generate_image(cluster_id=self.id, ssh_key=self._config.ssh_public_key)

    def generate_infra_env(
        self, static_network_config=None, iso_image_type=None, ssh_key=None, ignition_info=None, proxy=None
    ) -> InfraEnv:
        """Create (and remember) an InfraEnv built from this cluster's config plus overrides."""
        self._infra_env_config.ssh_public_key = ssh_key or self._config.ssh_public_key
        self._infra_env_config.iso_image_type = iso_image_type or self._config.iso_image_type
        self._infra_env_config.static_network_config = static_network_config
        self._infra_env_config.ignition_config_override = ignition_info
        self._infra_env_config.proxy = proxy or self._config.proxy
        infra_env = InfraEnv(api_client=self.api_client, config=self._infra_env_config)

        self._infra_env = infra_env
        return infra_env

    def update_infra_env_proxy(self, proxy: models.Proxy) -> None:
        """Update the proxy settings on the existing infra-env."""
        self._infra_env_config.proxy = proxy
        self._infra_env.update_proxy(proxy=proxy)

    def download_infra_env_image(self, iso_download_path=None) -> Path:
        """Download the infra-env discovery ISO to the given (or configured) path."""
        iso_download_path = iso_download_path or self._config.iso_download_path
        return self._infra_env.download_image(iso_download_path=iso_download_path)

    @JunitTestCase()
    def generate_and_download_infra_env(
        self,
        iso_download_path=None,
        static_network_config=None,
        iso_image_type=None,
        ssh_key=None,
        ignition_info=None,
        proxy=None,
    ) -> Path:
        """Generate an infra-env (deriving static-network config from TF if needed) and download its ISO."""
        if self._config.is_static_ip and static_network_config is None:
            static_network_config = static_network.generate_static_network_data_from_tf(self.nodes.controller.tf_folder)

        self.generate_infra_env(
            static_network_config=static_network_config,
            iso_image_type=iso_image_type,
            ssh_key=ssh_key,
            ignition_info=ignition_info,
            proxy=proxy,
        )
        return self.download_infra_env_image(iso_download_path=iso_download_path or self._config.iso_download_path)

    @JunitTestCase()
    def generate_and_download_image(
        self, iso_download_path=None, static_network_config=None, iso_image_type=None, ssh_key=None
    ):
        """Deprecated: generate and download a discovery image via the cluster endpoint."""
        warnings.warn(
            "generate_and_download_image is deprecated. Use generate_and_download_infra_env instead.",
            DeprecationWarning,
        )
        iso_download_path = iso_download_path or self._config.iso_download_path

        # ensure file path exists before downloading
        if not os.path.exists(iso_download_path):
            utils.recreate_folder(os.path.dirname(iso_download_path), force_recreate=False)

        self.api_client.generate_and_download_image(
            cluster_id=self.id,
            ssh_key=ssh_key or self._config.ssh_public_key,
            image_path=iso_download_path,
            image_type=iso_image_type or self._config.iso_image_type,
            static_network_config=static_network_config,
        )

    def wait_until_hosts_are_disconnected(self, nodes_count: int = None):
        """Block until all hosts are DISCONNECTED."""
        statuses = [consts.NodesStatus.DISCONNECTED]
        test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
            client=self.api_client,
            cluster_id=self.id,
            nodes_count=nodes_count or self.nodes.nodes_count,
            statuses=statuses,
            timeout=consts.DISCONNECTED_TIMEOUT,
        )

    @JunitTestCase()
    def wait_until_hosts_are_discovered(self, allow_insufficient=False, nodes_count: int = None):
        """Block until all hosts register as PENDING_FOR_INPUT/KNOWN (optionally INSUFFICIENT)."""
        statuses = [consts.NodesStatus.PENDING_FOR_INPUT, consts.NodesStatus.KNOWN]
        if allow_insufficient:
            statuses.append(consts.NodesStatus.INSUFFICIENT)
        test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
            client=self.api_client,
            cluster_id=self.id,
            nodes_count=nodes_count or self.nodes.nodes_count,
            statuses=statuses,
            timeout=consts.NODES_REGISTERED_TIMEOUT,
        )

    def _get_matching_hosts(self, host_type, count):
        """Return up to `count` {id, role} pairs for hosts whose hostname contains `host_type`."""
        hosts = self.get_hosts()
        return [{"id": h["id"], "role": host_type} for h in hosts if host_type in h["requested_hostname"]][:count]

    def set_cluster_name(self, cluster_name: str):
        """Rename the cluster (both locally and via the API)."""
        log.info(f"Setting Cluster Name:{cluster_name} for cluster: {self.id}")
        self.update_config(cluster_name=ClusterName(prefix=cluster_name, suffix=None))
        self.api_client.update_cluster(self.id, {"name": cluster_name})

    def select_installation_disk(self, host_id: str, disk_paths: List[dict]) -> None:
        """Select which disk(s) a host installs onto."""
        self._infra_env.select_host_installation_disk(host_id=host_id, disk_paths=disk_paths)

    def set_ocs(self, properties=None):
        """Enable the OCS OLM operator."""
        self.set_olm_operator(consts.OperatorType.OCS, properties=properties)

    def set_cnv(self, properties=None):
        """Enable the CNV OLM operator."""
        self.set_olm_operator(consts.OperatorType.CNV, properties=properties)

    def unset_ocs(self):
        """Disable the OCS OLM operator."""
        self.unset_olm_operator(consts.OperatorType.OCS)

    def unset_cnv(self):
        """Disable the CNV OLM operator."""
        self.unset_olm_operator(consts.OperatorType.CNV)

    def unset_olm_operator(self, operator_name):
        """Remove one OLM operator from the cluster, keeping the other OLM operators."""
        log.info(f"Unsetting {operator_name} for cluster: {self.id}")
        cluster = self.api_client.cluster_get(self.id)

        olm_operators = []
        for operator in cluster.monitored_operators:
            if operator.name == operator_name or operator.operator_type == OperatorType.BUILTIN:
                continue
            olm_operators.append({"name": operator.name, "properties": operator.properties})

        self.api_client.update_cluster(self.id, {"olm_operators": olm_operators})

    def set_olm_operator(self, operator_name, properties=None):
        """Add one OLM operator to the cluster (no-op if already monitored)."""
        log.info(f"Setting {operator_name} for cluster: {self.id}")
        cluster = self.api_client.cluster_get(self.id)

        if operator_name in [o.name for o in cluster.monitored_operators]:
            return

        olm_operators = []
        for operator in cluster.monitored_operators:
            if operator.operator_type == OperatorType.BUILTIN:
                continue
            olm_operators.append({"name": operator.name, "properties": operator.properties})
        olm_operators.append({"name": operator_name, "properties": properties})

        self._config.olm_operators = olm_operators
        self.api_client.update_cluster(self.id, {"olm_operators": olm_operators})

    def set_host_roles(self, num_masters: int = None, num_workers: int = None, requested_roles=None):
        """Assign master/worker roles by hostname matching; returns the assignments."""
        if requested_roles is None:
            requested_roles = Counter(
                master=num_masters or self.nodes.masters_count, worker=num_workers or self.nodes.workers_count
            )
        assigned_roles = self._get_matching_hosts(host_type=consts.NodeRoles.MASTER, count=requested_roles["master"])
        assigned_roles.extend(
            self._get_matching_hosts(host_type=consts.NodeRoles.WORKER, count=requested_roles["worker"])
        )

        for role in assigned_roles:
            self._infra_env.update_host(host_id=role["id"], host_role=role["role"])

        return assigned_roles

    def set_specific_host_role(self, host, role):
        """Assign a role to a single host."""
        self._infra_env.update_host(host_id=host["id"], host_role=role)

    def set_network_params(self, controller=None):
        """Configure VIPs / machine networks according to platform and HA mode."""
        # Controller argument is here only for backward compatibility TODO - Remove after QE refactor all e2e tests
        controller = controller or self.nodes.controller  # TODO - Remove after QE refactor all e2e tests

        if self._config.platform == consts.Platforms.NONE:
            log.info("On None platform, leaving network management to the user")
            api_vip = ingress_vip = machine_networks = None
        elif self._config.vip_dhcp_allocation or self._high_availability_mode == consts.HighAvailabilityMode.NONE:
            log.info("Letting access VIPs be deducted from machine networks")
            api_vip = ingress_vip = None
            machine_networks = self.get_machine_networks()
        else:
            log.info("Assigning VIPs statically")
            access_vips = controller.get_ingress_and_api_vips()
            api_vip = access_vips["api_vip"]
            ingress_vip = access_vips["ingress_vip"]
            machine_networks = None

        # Dual-stack clusters always carry explicit machine networks
        if self._config.is_ipv4 and self._config.is_ipv6:
            machine_networks = controller.get_all_machine_addresses()

        self.set_advanced_networking(
            vip_dhcp_allocation=self._config.vip_dhcp_allocation,
            cluster_networks=self._config.cluster_networks,
            service_networks=self._config.service_networks,
            machine_networks=machine_networks,
            api_vip=api_vip,
            ingress_vip=ingress_vip,
        )

    def get_primary_machine_cidr(self):
        """Return the primary machine CIDR, falling back to host-derived CIDRs."""
        cidr = self.nodes.controller.get_primary_machine_cidr()

        if not cidr:
            # Support controllers which the machine cidr is not configurable. taking it from the AI instead
            matching_cidrs = self.get_cluster_matching_cidrs(Cluster.get_cluster_hosts(self.get_details()))

            if not matching_cidrs:
                raise RuntimeError("No matching cidr for DHCP")

            cidr = next(iter(matching_cidrs))

        return cidr

    def get_machine_networks(self):
        """Return primary + provisioning machine CIDRs, falling back to host-derived CIDRs."""
        networks = []

        primary_machine_cidr = self.nodes.controller.get_primary_machine_cidr()
        if primary_machine_cidr:
            networks.append(primary_machine_cidr)

        secondary_machine_cidr = self.nodes.controller.get_provisioning_cidr()
        if secondary_machine_cidr:
            networks.append(secondary_machine_cidr)

        if not networks:
            # Support controllers which the machine cidr is not configurable. taking it from the AI instead
            networks = self.get_cluster_matching_cidrs(Cluster.get_cluster_hosts(self.get_details()))

            if not networks:
                raise RuntimeError("No matching cidr for DHCP")

        return networks

    def set_ingress_and_api_vips(self, vips):
        """Set the API and ingress VIPs via the API."""
        log.info(f"Setting API VIP:{vips['api_vip']} and ingress VIP:{vips['ingress_vip']} for cluster: {self.id}")
        self.api_client.update_cluster(self.id, vips)

    def set_ssh_key(self, ssh_key: str):
        """Set the cluster SSH public key (locally and via the API)."""
        log.info(f"Setting SSH key:{ssh_key} for cluster: {self.id}")
        self.update_config(ssh_public_key=ssh_key)
        self.api_client.update_cluster(self.id, {"ssh_public_key": ssh_key})

    def set_base_dns_domain(self, base_dns_domain: str):
        """Set the cluster base DNS domain (locally and via the API)."""
        log.info(f"Setting base DNS domain:{base_dns_domain} for cluster: {self.id}")
        self.update_config(base_dns_domain=base_dns_domain)
        self.api_client.update_cluster(self.id, {"base_dns_domain": base_dns_domain})

    def set_advanced_networking(
        self,
        vip_dhcp_allocation: Optional[bool] = None,
        cluster_networks: Optional[List[models.ClusterNetwork]] = None,
        service_networks: Optional[List[models.ServiceNetwork]] = None,
        machine_networks: Optional[List[models.MachineNetwork]] = None,
        api_vip: Optional[str] = None,
        ingress_vip: Optional[str] = None,
    ):
        """Update networking settings; unset arguments fall back to the stored config."""
        if machine_networks is None:
            machine_networks = self._config.machine_networks
        else:
            # Plain CIDR strings are wrapped into MachineNetwork models
            machine_networks = [models.MachineNetwork(cidr=cidr) for cidr in machine_networks]

        if vip_dhcp_allocation is None:
            vip_dhcp_allocation = self._config.vip_dhcp_allocation

        advanced_networking = {
            "vip_dhcp_allocation": vip_dhcp_allocation,
            "cluster_networks": cluster_networks if cluster_networks is not None else self._config.cluster_networks,
            "service_networks": service_networks if service_networks is not None else self._config.service_networks,
            "machine_networks": machine_networks,
            "api_vip": api_vip if api_vip is not None else self._config.api_vip,
            "ingress_vip": ingress_vip if ingress_vip is not None else self._config.ingress_vip,
        }

        log.info(f"Updating advanced networking with {advanced_networking} for cluster: {self.id}")

        self.update_config(**advanced_networking)
        self.api_client.update_cluster(self.id, advanced_networking)

    def set_pull_secret(self, pull_secret: str):
        """Set the pull secret (locally and via the API)."""
        log.info(f"Setting pull secret:{pull_secret} for cluster: {self.id}")
        self.update_config(pull_secret=pull_secret)
        self.api_client.update_cluster(self.id, {"pull_secret": pull_secret})

    def set_host_name(self, host_id, requested_name):
        """Set a host's requested hostname."""
        log.info(f"Setting Required Host Name:{requested_name}, for Host ID: {host_id}")
        self._infra_env.update_host(host_id=host_id, host_name=requested_name)

    def set_additional_ntp_source(self, ntp_source: List[str]):
        """Set additional NTP sources (list is joined with commas)."""
        log.info(f"Setting Additional NTP source:{ntp_source}")
        if isinstance(ntp_source, List):
            ntp_source_string = ",".join(ntp_source)
        elif isinstance(ntp_source, str):
            ntp_source_string = ntp_source
        else:
            raise TypeError(
                f"ntp_source must be a string or a list of strings, got: {ntp_source}," f" type: {type(ntp_source)}"
            )
        self.update_config(additional_ntp_source=ntp_source_string)
        self.api_client.update_cluster(self.id, {"additional_ntp_source": ntp_source_string})

    # NOTE(review): definition continues beyond this chunk
    def patch_discovery_ignition(self, ignition):
self._infra_env.patch_discovery_ignition(ignition_info=ignition) def set_proxy_values(self, proxy_values: models.Proxy) -> None: log.info(f"Setting proxy values {proxy_values} for cluster: {self.id}") self.update_config(proxy=proxy_values) self.api_client.set_cluster_proxy( self.id, http_proxy=self._config.proxy.http_proxy, https_proxy=self._config.proxy.https_proxy, no_proxy=self._config.proxy.no_proxy, ) @JunitTestCase() def start_install(self): self.api_client.install_cluster(cluster_id=self.id) def wait_for_logs_complete(self, timeout, interval=60, check_host_logs_only=False): logs_utils.wait_for_logs_complete( client=self.api_client, cluster_id=self.id, timeout=timeout, interval=interval, check_host_logs_only=check_host_logs_only, ) def wait_for_installing_in_progress(self, nodes_count: int = MINIMUM_NODES_TO_WAIT): test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.NodesStatus.INSTALLING_IN_PROGRESS], nodes_count=nodes_count, timeout=consts.INSTALLING_IN_PROGRESS_TIMEOUT, ) def wait_for_write_image_to_disk(self, nodes_count: int = MINIMUM_NODES_TO_WAIT): test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage( client=self.api_client, cluster_id=self.id, stages=[consts.HostsProgressStages.WRITE_IMAGE_TO_DISK, consts.HostsProgressStages.REBOOTING], nodes_count=nodes_count, ) def wait_for_host_status(self, statuses, fall_on_error_status=True, nodes_count: int = MINIMUM_NODES_TO_WAIT): test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status( client=self.api_client, cluster_id=self.id, statuses=statuses, nodes_count=nodes_count, fall_on_error_status=fall_on_error_status, ) def wait_for_specific_host_status(self, host, statuses, nodes_count: int = MINIMUM_NODES_TO_WAIT): test_infra.utils.waiting.wait_till_specific_host_is_in_status( client=self.api_client, cluster_id=self.id, host_name=host.get("requested_hostname"), statuses=statuses, nodes_count=nodes_count, ) def 
wait_for_specific_host_stage(self, host: dict, stage: str, inclusive: bool = True): index = consts.all_host_stages.index(stage) test_infra.utils.waiting.wait_till_specific_host_is_in_stage( client=self.api_client, cluster_id=self.id, host_name=host.get("requested_hostname"), stages=consts.all_host_stages[index:] if inclusive else consts.all_host_stages[index + 1 :], ) def wait_for_cluster_in_error_status(self): utils.wait_till_cluster_is_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.ERROR], timeout=consts.ERROR_TIMEOUT, ) def wait_for_pending_for_input_status(self): utils.wait_till_cluster_is_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.PENDING_FOR_INPUT], timeout=consts.PENDING_USER_ACTION_TIMEOUT, ) def wait_for_at_least_one_host_to_boot_during_install(self, nodes_count: int = MINIMUM_NODES_TO_WAIT): test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage( client=self.api_client, cluster_id=self.id, stages=[consts.HostsProgressStages.REBOOTING], nodes_count=nodes_count, ) def wait_for_non_bootstrap_masters_to_reach_configuring_state_during_install(self, num_masters: int = None): num_masters = num_masters if num_masters is not None else self.nodes.masters_count test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage( client=self.api_client, cluster_id=self.id, stages=[consts.HostsProgressStages.CONFIGURING], nodes_count=num_masters - 1, ) def wait_for_non_bootstrap_masters_to_reach_joined_state_during_install(self, num_masters: int = None): num_masters = num_masters if num_masters is not None else self.nodes.masters_count test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage( client=self.api_client, cluster_id=self.id, stages=[consts.HostsProgressStages.JOINED], nodes_count=num_masters - 1, ) def wait_for_hosts_stage(self, stage: str, inclusive: bool = True): index = consts.all_host_stages.index(stage) 
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage( client=self.api_client, cluster_id=self.id, stages=consts.all_host_stages[index:] if inclusive else consts.all_host_stages[index + 1 :], nodes_count=self.nodes.nodes_count, ) @JunitTestCase() def start_install_and_wait_for_installed( self, wait_for_hosts=True, wait_for_operators=True, wait_for_cluster_install=True, download_kubeconfig=True, ): self.start_install() if wait_for_hosts: self.wait_for_hosts_to_install() if wait_for_operators: self.wait_for_operators_to_finish() if wait_for_cluster_install: self.wait_for_install() if download_kubeconfig: self.download_kubeconfig() def disable_worker_hosts(self): hosts = self.get_hosts_by_role(consts.NodeRoles.WORKER) for host in hosts: self.disable_host(host) def disable_host(self, host): host_name = host["requested_hostname"] log.info(f"Going to disable host: {host_name} in cluster: {self.id}") self._infra_env.unbind_host(host_id=host["id"]) def enable_host(self, host): host_name = host["requested_hostname"] log.info(f"Going to enable host: {host_name} in cluster: {self.id}") self._infra_env.bind_host(host_id=host["id"], cluster_id=self.id) def delete_host(self, host): host_id = host["id"] log.info(f"Going to delete host: {host_id} in cluster: {self.id}") self._infra_env.delete_host(host_id=host_id) def cancel_install(self): self.api_client.cancel_cluster_install(cluster_id=self.id) def get_bootstrap_hostname(self): hosts = self.get_hosts_by_role(consts.NodeRoles.MASTER) for host in hosts: if host.get("bootstrap"): log.info("Bootstrap node is: %s", host["requested_hostname"]) return host["requested_hostname"] def get_hosts_by_role(self, role, hosts=None): hosts = hosts or self.api_client.get_cluster_hosts(self.id) nodes_by_role = [] for host in hosts: if host["role"] == role: nodes_by_role.append(host) log.info(f"Found hosts: {nodes_by_role}, that has the role: {role}") return nodes_by_role def get_random_host_by_role(self, role): return 
random.choice(self.get_hosts_by_role(role)) def get_reboot_required_hosts(self): return self.api_client.get_hosts_in_statuses( cluster_id=self.id, statuses=[consts.NodesStatus.RESETING_PENDING_USER_ACTION] ) def reboot_required_nodes_into_iso_after_reset(self): hosts_to_reboot = self.get_reboot_required_hosts() self.nodes.run_for_given_nodes_by_cluster_hosts(cluster_hosts=hosts_to_reboot, func_name="reset") def wait_for_one_host_to_be_in_wrong_boot_order(self, fall_on_error_status=True): test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION], status_info=consts.HostStatusInfo.WRONG_BOOT_ORDER, fall_on_error_status=fall_on_error_status, timeout=consts.PENDING_USER_ACTION_TIMEOUT, ) def wait_for_at_least_one_host_to_be_in_reboot_timeout(self, fall_on_error_status=True, nodes_count=1): test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION], status_info=consts.HostStatusInfo.REBOOT_TIMEOUT, nodes_count=nodes_count, fall_on_error_status=fall_on_error_status, timeout=consts.PENDING_USER_ACTION_TIMEOUT, ) def wait_for_hosts_to_be_in_wrong_boot_order( self, nodes_count, timeout=consts.PENDING_USER_ACTION_TIMEOUT, fall_on_error_status=True ): test_infra.utils.waiting.wait_till_all_hosts_are_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION], status_info=consts.HostStatusInfo.WRONG_BOOT_ORDER, nodes_count=nodes_count, timeout=timeout, fall_on_error_status=fall_on_error_status, ) def wait_for_ready_to_install(self): utils.wait_till_cluster_is_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.READY], timeout=consts.READY_TIMEOUT, ) # This code added due to BZ:1909997, temporarily checking if help to prevent unexpected failure time.sleep(10) 
utils.wait_till_cluster_is_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.READY], timeout=consts.READY_TIMEOUT, ) def is_in_cancelled_status(self): return utils.is_cluster_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.CANCELLED] ) def is_in_error(self): return utils.is_cluster_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.ERROR] ) def is_finalizing(self): return utils.is_cluster_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.FINALIZING] ) def is_installing(self): return utils.is_cluster_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSTALLING] ) def reset_install(self): self.api_client.reset_cluster_install(cluster_id=self.id) def is_in_insufficient_status(self): return utils.is_cluster_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSUFFICIENT] ) def wait_for_hosts_to_install( self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT, fall_on_error_status=True, nodes_count: int = None ): test_infra.utils.waiting.wait_till_all_hosts_are_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSTALLED], nodes_count=nodes_count or self.nodes.nodes_count, timeout=timeout, fall_on_error_status=fall_on_error_status, ) def wait_for_operators_to_finish(self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT, fall_on_error_status=True): operators = self.get_operators() if fall_on_error_status: statuses = [consts.OperatorStatus.AVAILABLE] else: statuses = [consts.OperatorStatus.AVAILABLE, consts.OperatorStatus.FAILED] operators_utils.wait_till_all_operators_are_in_status( client=self.api_client, cluster_id=self.id, operators_count=len(operators_utils.filter_operators_by_type(operators, OperatorType.BUILTIN)), operator_types=[OperatorType.BUILTIN], statuses=statuses, timeout=timeout, fall_on_error_status=False, ) 
operators_utils.wait_till_all_operators_are_in_status( client=self.api_client, cluster_id=self.id, operators_count=len(operators_utils.filter_operators_by_type(operators, OperatorType.OLM)), operator_types=[OperatorType.OLM], statuses=[consts.OperatorStatus.AVAILABLE, consts.OperatorStatus.FAILED], timeout=timeout, fall_on_error_status=fall_on_error_status, ) def is_operator_in_status(self, operator_name, status): return operators_utils.is_operator_in_status( operators=self.get_operators(), operator_name=operator_name, status=status ) def wait_for_install(self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT): utils.wait_till_cluster_is_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSTALLED], timeout=timeout, ) def _set_hostnames_and_roles(self): cluster_id = self.id hosts = self.to_cluster_hosts(self.api_client.get_cluster_hosts(cluster_id)) nodes = self.nodes.get_nodes(refresh=True) for host in hosts: if host.has_hostname(): continue name = self.find_matching_node_name(host, nodes) assert name is not None, ( f"Failed to find matching node for host with mac address {host.macs()}" f" nodes: {[(n.name, n.ips, n.macs) for n in nodes]}" ) if self.nodes.nodes_count == 1: role = None else: role = consts.NodeRoles.MASTER if consts.NodeRoles.MASTER in name else consts.NodeRoles.WORKER self._infra_env.update_host(host_id=host.get_id(), host_role=role, host_name=name) def _ha_not_none(self): return ( self._high_availability_mode != consts.HighAvailabilityMode.NONE and self._config.platform != consts.Platforms.NONE ) def download_image(self, iso_download_path: str = None) -> Path: if self._infra_env is None: log.warning("No infra_env found. 
Generating infra_env and downloading ISO") return self.generate_and_download_infra_env( iso_download_path=iso_download_path or self._config.iso_download_path, iso_image_type=self._config.iso_image_type, ) return self._infra_env.download_image(iso_download_path) @JunitTestCase() def prepare_for_installation(self, **kwargs): super(Cluster, self).prepare_for_installation(**kwargs) self.nodes.wait_for_networking() self._set_hostnames_and_roles() if self._high_availability_mode != consts.HighAvailabilityMode.NONE: self.set_host_roles(len(self.nodes.get_masters()), len(self.nodes.get_workers())) self.set_network_params(controller=self.nodes.controller) # in case of None platform we need to specify dns records before hosts are ready if self._config.platform == consts.Platforms.NONE: self._configure_load_balancer() self.nodes.controller.set_dns_for_user_managed_network() elif self._high_availability_mode == consts.HighAvailabilityMode.NONE: main_cidr = self.get_primary_machine_cidr() ip = Cluster.get_ip_for_single_node(self.api_client, self.id, main_cidr) self.nodes.controller.set_single_node_ip(ip) self.nodes.controller.set_dns(api_vip=ip, ingress_vip=ip) self.wait_for_ready_to_install() # in case of regular cluster, need to set dns after vips exits # in our case when nodes are ready, vips will be there for sure if self._ha_not_none(): vips_info = self.__class__.get_vips_from_cluster(self.api_client, self.id) self.nodes.controller.set_dns(api_vip=vips_info["api_vip"], ingress_vip=vips_info["ingress_vip"]) def download_kubeconfig_no_ingress(self, kubeconfig_path: str = None): self.api_client.download_kubeconfig_no_ingress(self.id, kubeconfig_path or self._config.kubeconfig_path) def download_kubeconfig(self, kubeconfig_path: str = None): self.api_client.download_kubeconfig(self.id, kubeconfig_path or self._config.kubeconfig_path) def download_installation_logs(self, cluster_tar_path): self.api_client.download_cluster_logs(self.id, cluster_tar_path) def 
get_install_config(self): return yaml.safe_load(self.api_client.get_cluster_install_config(self.id)) def get_admin_credentials(self): return self.api_client.get_cluster_admin_credentials(self.id) def register_dummy_host(self): dummy_host_id = "b164df18-0ff1-4b85-9121-059f10f58f71" self.api_client.register_host(self.id, dummy_host_id) def host_get_next_step(self, host_id): return self.api_client.host_get_next_step(self.id, host_id) def host_post_step_result(self, host_id, step_type, step_id, exit_code, output): self.api_client.host_post_step_result( self.id, host_id, step_type=step_type, step_id=step_id, exit_code=exit_code, output=output ) def host_update_install_progress(self, host_id, current_stage, progress_info=None): self.api_client.host_update_progress(self.id, host_id, current_stage, progress_info=progress_info) def host_complete_install(self): self.api_client.complete_cluster_installation(cluster_id=self.id, is_success=True) def setup_nodes(self, nodes, infra_env_config: BaseInfraEnvConfig): self._infra_env = InfraEnv.generate( self.api_client, infra_env_config, iso_image_type=self._config.iso_image_type ) self._infra_env.download_image(iso_download_path=self._config.iso_download_path) nodes.start_all() self.wait_until_hosts_are_discovered() return nodes.create_nodes_cluster_hosts_mapping(cluster=self) def wait_for_cluster_validation( self, validation_section, validation_id, statuses, timeout=consts.VALIDATION_TIMEOUT, interval=2 ): log.info("Wait until cluster %s validation %s is in status %s", self.id, validation_id, statuses) try: waiting.wait( lambda: self.is_cluster_validation_in_status( validation_section=validation_section, validation_id=validation_id, statuses=statuses ), timeout_seconds=timeout, sleep_seconds=interval, waiting_for=f"Cluster validation to be in status {statuses}", ) except BaseException: log.error( "Cluster validation status is: %s", utils.get_cluster_validation_value( self.api_client.cluster_get(self.id), validation_section, 
validation_id ), ) raise def is_cluster_validation_in_status(self, validation_section, validation_id, statuses): log.info("Is cluster %s validation %s in status %s", self.id, validation_id, statuses) try: return ( utils.get_cluster_validation_value( self.api_client.cluster_get(self.id), validation_section, validation_id ) in statuses ) except BaseException: log.exception("Failed to get cluster %s validation info", self.id) def wait_for_host_validation( self, host_id, validation_section, validation_id, statuses, timeout=consts.VALIDATION_TIMEOUT, interval=2 ): log.info("Wait until host %s validation %s is in status %s", host_id, validation_id, statuses) try: waiting.wait( lambda: self.is_host_validation_in_status( host_id=host_id, validation_section=validation_section, validation_id=validation_id, statuses=statuses, ), timeout_seconds=timeout, sleep_seconds=interval, waiting_for=f"Host validation to be in status {statuses}", ) except BaseException: log.error( "Host validation status is: %s", utils.get_host_validation_value( self.api_client.cluster_get(self.id), host_id, validation_section, validation_id ), ) raise def is_host_validation_in_status(self, host_id, validation_section, validation_id, statuses): log.info("Is host %s validation %s in status %s", host_id, validation_id, statuses) try: return ( utils.get_host_validation_value( self.api_client.cluster_get(self.id), host_id, validation_section, validation_id ) in statuses ) except BaseException: log.exception("Failed to get cluster %s validation info", self.id) def wait_for_cluster_to_be_in_installing_pending_user_action_status(self): utils.wait_till_cluster_is_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSTALLING_PENDING_USER_ACTION], timeout=consts.PENDING_USER_ACTION_TIMEOUT, ) def wait_for_cluster_to_be_in_installing_status(self): utils.wait_till_cluster_is_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSTALLING], 
timeout=consts.START_CLUSTER_INSTALLATION_TIMEOUT, ) def wait_for_cluster_to_be_in_finalizing_status(self): utils.wait_till_cluster_is_in_status( client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.FINALIZING, consts.ClusterStatus.INSTALLED], timeout=consts.CLUSTER_INSTALLATION_TIMEOUT, break_statuses=[consts.ClusterStatus.ERROR], ) def wait_for_cluster_to_be_in_status(self, statuses, timeout=consts.ERROR_TIMEOUT): utils.wait_till_cluster_is_in_status( client=self.api_client, cluster_id=self.id, statuses=statuses, timeout=timeout, ) @classmethod def reset_cluster_and_wait_for_ready(cls, cluster): # Reset cluster install cluster.reset_install() assert cluster.is_in_insufficient_status() # Reboot required nodes into ISO cluster.reboot_required_nodes_into_iso_after_reset() # Wait for hosts to be rediscovered cluster.wait_until_hosts_are_discovered() cluster.wait_for_ready_to_install() def get_events(self, host_id="", infra_env_id=""): warnings.warn( "Cluster.get_events is now deprecated, use EventsHandler.get_events instead", PendingDeprecationWarning, ) handler = EventsHandler(self.api_client) return handler.get_events(host_id, self.id, infra_env_id) def _configure_load_balancer(self): main_cidr = self.get_primary_machine_cidr() secondary_cidr = self.nodes.controller.get_provisioning_cidr() master_ips = self.get_master_ips(self.api_client, self.id, main_cidr) + self.get_master_ips( self.api_client, self.id, secondary_cidr ) worker_ips = self.get_worker_ips(self.api_client, self.id, main_cidr) load_balancer_ip = str(IPNetwork(main_cidr).ip + 1) tf = terraform_utils.TerraformUtils(working_dir=self.nodes.controller.tf_folder) lb_controller = LoadBalancerController(tf) lb_controller.set_load_balancing_config(load_balancer_ip, master_ips, worker_ips) @classmethod def _get_namespace_index(cls, libvirt_network_if): # Hack to retrieve namespace index - does not exist in tests matcher = re.match(r"^tt(\d+)$", libvirt_network_if) return 
int(matcher.groups()[0]) if matcher is not None else 0 def wait_for_event(self, event_to_find, reference_time, params_list=None, host_id="", infra_env_id="", timeout=10): warnings.warn( "Cluster.wait_for_event is now deprecated, use EventsHandler.wait_for_event instead", PendingDeprecationWarning, ) handler = EventsHandler(self.api_client) return handler.wait_for_event( event_to_find, reference_time, params_list, host_id, infra_env_id, self.id, timeout ) @staticmethod def get_inventory_host_nics_data(host: dict, ipv4_first=True): def get_network_interface_ip(interface): addresses = ( interface.ipv4_addresses + interface.ipv6_addresses if ipv4_first else interface.ipv6_addresses + interface.ipv4_addresses ) return addresses[0].split("/")[0] if len(addresses) > 0 else None inventory = models.Inventory(**json.loads(host["inventory"])) interfaces_list = [models.Interface(**interface) for interface in inventory.interfaces] return [ { "name": interface.name, "model": interface.product, "mac": interface.mac_address, "ip": get_network_interface_ip(interface), "speed": interface.speed_mbps, } for interface in interfaces_list ] @staticmethod def get_hosts_nics_data(hosts: list, ipv4_first=True): return [Cluster.get_inventory_host_nics_data(h, ipv4_first=ipv4_first) for h in hosts] @staticmethod def get_cluster_hosts(cluster: models.cluster.Cluster) -> List[ClusterHost]: return [ClusterHost(h) for h in cluster.hosts] @staticmethod def to_cluster_hosts(hosts: List[Dict[str, Any]]) -> List[ClusterHost]: return [ClusterHost(models.Host(**h)) for h in hosts] def get_cluster_cidrs(self, hosts: List[ClusterHost]) -> Set[str]: cidrs = set() for host in hosts: ips = [] if self.nodes.is_ipv4: ips += host.ipv4_addresses() if self.nodes.is_ipv6: ips += host.ipv6_addresses() for host_ip in ips: cidr = network_utils.get_cidr_by_interface(host_ip) cidrs.add(cidr) return cidrs def get_cluster_matching_cidrs(self, hosts: List[ClusterHost]) -> Set[str]: cluster_cidrs = 
self.get_cluster_cidrs(hosts) matching_cidrs = set() for cidr in cluster_cidrs: for host in hosts: interfaces = [] if self.nodes.is_ipv4: interfaces += host.ipv4_addresses() if self.nodes.is_ipv6: interfaces += host.ipv6_addresses() if not network_utils.any_interface_in_cidr(interfaces, cidr): break matching_cidrs.add(cidr) return matching_cidrs @staticmethod def get_ip_for_single_node(client, cluster_id, machine_cidr, ipv4_first=True): cluster_info = client.cluster_get(cluster_id).to_dict() if len(cluster_info["hosts"]) == 0: raise Exception("No host found") network = IPNetwork(machine_cidr) interfaces = Cluster.get_inventory_host_nics_data(cluster_info["hosts"][0], ipv4_first=ipv4_first) for intf in interfaces: ip = intf["ip"] if IPAddress(ip) in network: return ip raise Exception("IP for single node not found") @staticmethod def get_ips_for_role(client, cluster_id, network, role): cluster_info = client.cluster_get(cluster_id).to_dict() ret = [] net = IPNetwork(network) hosts_interfaces = Cluster.get_hosts_nics_data([h for h in cluster_info["hosts"] if h["role"] == role]) for host_interfaces in hosts_interfaces: for intf in host_interfaces: ip = IPAddress(intf["ip"]) if ip in net: ret = ret + [intf["ip"]] return ret @staticmethod def get_master_ips(client, cluster_id, network): return Cluster.get_ips_for_role(client, cluster_id, network, consts.NodeRoles.MASTER) @staticmethod def get_worker_ips(client, cluster_id, network): return Cluster.get_ips_for_role(client, cluster_id, network, consts.NodeRoles.WORKER) @staticmethod def get_vips_from_cluster(client, cluster_id): cluster_info = client.cluster_get(cluster_id) return dict(api_vip=cluster_info.api_vip, ingress_vip=cluster_info.ingress_vip) def get_host_disks(self, host, filter=None): hosts = self.get_hosts() selected_host = [h for h in hosts if h["id"] == host["id"]] disks = json.loads(selected_host[0]["inventory"])["disks"] if not filter: return [disk for disk in disks] else: return [disk for disk in disks if 
filter(disk)] def get_inventory_host_ips_data(self, host: dict): nics = self.get_inventory_host_nics_data(host) return [nic["ip"] for nic in nics] # needed for None platform and single node # we need to get ip where api is running def get_kube_api_ip(self, hosts): for host in hosts: for ip in self.get_inventory_host_ips_data(host): if self.is_kubeapi_service_ready(ip): return ip def get_api_vip(self, cluster): cluster = cluster or self.get_details() api_vip = cluster.api_vip if not api_vip and cluster.user_managed_networking: log.info("API VIP is not set, searching for api ip on masters") masters = self.get_hosts_by_role(consts.NodeRoles.MASTER, hosts=cluster.to_dict()["hosts"]) api_vip = self._wait_for_api_vip(masters) log.info("api vip is %s", api_vip) return api_vip def _wait_for_api_vip(self, hosts, timeout=180): """Enable some grace time for waiting for API's availability.""" return waiting.wait( lambda: self.get_kube_api_ip(hosts=hosts), timeout_seconds=timeout, sleep_seconds=5, waiting_for="API's IP" ) def find_matching_node_name(self, host: ClusterHost, nodes: List[Node]) -> Union[str, None]: # Looking for node matches the given host by its mac address (which is unique) for node in nodes: for mac in node.macs: if mac.lower() in host.macs(): return node.name # IPv6 static ips if self._config.is_static_ip: mappings = static_network.get_name_to_mac_addresses_mapping(self.nodes.controller.tf_folder) for mac in host.macs(): for name, macs in mappings.items(): if mac in macs: return name return None @staticmethod def is_kubeapi_service_ready(ip_or_dns): """Validate if kube-api is ready on given address.""" with contextlib.suppress(ValueError): # IPv6 addresses need to be surrounded with square-brackets # to differentiate them from domain names if ipaddress.ip_address(ip_or_dns).version == 6: ip_or_dns = f"[{ip_or_dns}]" try: response = requests.get(f"https://{ip_or_dns}:6443/readyz", verify=False, timeout=1) return response.ok except BaseException: return False 
    def wait_and_kill_installer(self, host):
        """Wait until ``host`` starts installing, then kill its installer to simulate a failure."""
        # Wait for specific host to be in installing in progress
        self.wait_for_specific_host_status(host=host, statuses=[consts.NodesStatus.INSTALLING_IN_PROGRESS])
        # Kill installer to simulate host error
        selected_node = self.nodes.get_node_from_cluster_host(host)
        selected_node.kill_installer()


def get_api_vip_from_cluster(api_client, cluster_info: Union[dict, models.cluster.Cluster], pull_secret):
    """Build a transient Cluster wrapper around ``cluster_info`` and return its API VIP.

    Deprecated helper — see the PendingDeprecationWarning below for context.
    """
    import warnings

    from tests.config import ClusterConfig, InfraEnvConfig

    warnings.warn(
        "Soon get_api_vip_from_cluster will be deprecated. Avoid using or adding new functionality to "
        "this function. The function and solution for that case have not been determined yet. It might be "
        "on another module, or as a classmethod within Cluster class."
        " For more information see https://issues.redhat.com/browse/MGMT-4975",
        PendingDeprecationWarning,
    )
    if isinstance(cluster_info, dict):
        cluster_info = models.cluster.Cluster(**cluster_info)
    cluster = Cluster(
        api_client=api_client,
        infra_env_config=InfraEnvConfig(),
        config=ClusterConfig(
            cluster_name=ClusterName(cluster_info.name),
            pull_secret=pull_secret,
            ssh_public_key=cluster_info.ssh_public_key,
            cluster_id=cluster_info.id,
        ),
        nodes=None,
    )
    return cluster.get_api_vip(cluster=cluster_info)
# -*- coding: utf-8 -*-
"""
 Copyright (c) 2021, Ontario Institute for Cancer Research (OICR).

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.

 Authors:
   Junjun Zhang <junjun.zhang@oicr.on.ca>
"""

import os
import json
import requests
import tempfile
from typing import Set

from .utils import run_cmd, pkg_uri_parser, pkg_asset_download_urls


class Package(object):
    """A released workflow package and its dependency closure.

    A Package is initialized either from a package URI (which downloads
    'pkg-release.json' from the matching GitHub release) or directly from a
    pkg-release.json file / JSON string.
    """

    # populated by _init_by_uri / _init_by_json
    name: str = None
    version: str = None
    main: str = None
    repo_type: str = 'git'  # hardcode for now
    repo_server: str = None
    repo_account: str = None
    repo_name: str = None
    dependencies: Set[str] = set()
    devDependencies: Set[str] = set()
    allDependencies: Set[str] = set()

    def __init__(self, pkg_uri=None, pkg_json=None):
        if pkg_uri and pkg_json:
            raise Exception("Cannot specify both pkg_uri and pkg_json")
        elif pkg_uri:
            self._init_by_uri(pkg_uri)
        elif pkg_json:
            self._init_by_json(pkg_json)
        else:
            raise Exception("Must specify either pkg_uri or pkg_json")

    def _init_by_uri(self, pkg_uri):
        """Populate package fields from a URI by fetching its release metadata."""
        try:
            repo_server, repo_account, repo_name, name, version = pkg_uri_parser(pkg_uri)
        except Exception as ex:
            raise Exception(f"Package uri error: {ex}")

        self.name = name
        self.version = version
        self.repo_server = repo_server
        self.repo_account = repo_account.lower()
        self.repo_name = repo_name

        # download pkg-release.json from github release asset and parse it to get additional info
        pkg_json_str = ''
        download_urls = pkg_asset_download_urls(self.pkg_json_url)
        for download_url in download_urls:
            r = requests.get(download_url)
            if r.status_code == 200:
                pkg_json_str = r.text
                break

        if not pkg_json_str:
            raise Exception("Failed to download 'pkg-release.json'. Looks like this package has "
                            f"not been released: {self.pkg_uri}.")

        self._init_by_json(pkg_json_str=pkg_json_str)

    def _init_by_json(self, pkg_json=None, pkg_json_str=None):
        """Populate package fields from a pkg-release.json file path or JSON string."""
        if pkg_json:
            with open(pkg_json, 'r') as f:
                pkg_dict = json.load(f)
        elif pkg_json_str:
            pkg_dict = json.loads(pkg_json_str)
        else:
            raise Exception("Must specify 'pkg_json' or 'pkg_json_str' when call '_init_by_json'")

        self.name = pkg_dict['name']
        self.version = pkg_dict['version']
        self.main = pkg_dict['main']

        # repository url looks like: https://<server>/<account>/<repo>.git
        _, _, repo_server, repo_account, repo_name = \
            pkg_dict['repository']['url'].split('/')
        self.repo_server = repo_server
        self.repo_account = repo_account.lower()
        self.repo_name = repo_name.split('.')[0]  # strip trailing '.git'

        self._init_deps(
            pkg_dict.get('dependencies', []),
            pkg_dict.get('devDependencies', [])
        )

    @property
    def fullname(self):
        """'<name>@<version>'."""
        return f"{self.name}@{self.version}"

    @property
    def project_fullname(self):
        """'<server>/<account>/<repo>'."""
        return f"{self.repo_server}/{self.repo_account}/{self.repo_name}"

    @property
    def pkg_uri(self):
        return f"{self.project_fullname}/{self.fullname}"

    @property
    def release_tag(self):
        # '<name>@<version>' becomes '<name>.v<version>'
        return self.fullname.replace('@', '.v')

    @property
    def pkg_tar_url(self):
        return f"https://{self.project_fullname}/releases/download/{self.release_tag}/{self.release_tag}.tar.gz"

    @property
    def pkg_json_url(self):
        return f"https://{self.project_fullname}/releases/download/{self.release_tag}/pkg-release.json"

    def install(self, target_project_root, force=False):
        """Install the package under <root>/wfpr_modules/... and return the install path.

        :param target_project_root: root directory of the target project
        :param force: when True, wipe and reinstall an existing installation
        :raises Exception: if already installed (without force) or the removal fails
        """
        target_path = os.path.join(
            target_project_root, 'wfpr_modules', self.repo_server, self.repo_account, self.repo_name, self.fullname
        )
        if os.path.isdir(target_path) and not force:
            # NOTE: f-string rewritten with distinct quotes so it parses on Python < 3.12;
            # message typo 'Pakcage' fixed as well
            relative_path = target_path.replace(os.path.join(os.getcwd(), ''), '')
            raise Exception(f"Package already installed: {relative_path}, "
                            "skip unless force option is specified.")

        if force:
            out, err, ret = run_cmd(f"rm -fr {target_path}")  # remove possible previous installation
            if ret != 0:
                raise Exception(f"Unable to remove previously installed package: {err}")

        return self._download_and_install(target_path)

    def _download_and_install(self, target_path=None):
        """Download the release tarball and unpack it into ``target_path``."""
        success = False
        for download_url in pkg_asset_download_urls(self.pkg_tar_url):
            response = requests.get(download_url, stream=True)
            if response.status_code == 200:
                success = True
                break

        if not success:
            raise Exception(f"Looks like this package has not been released: {self.pkg_uri}")

        with tempfile.TemporaryDirectory() as tmpdirname:
            local_tar_path = os.path.join(tmpdirname, os.path.basename(self.pkg_tar_url))
            with open(local_tar_path, 'wb') as f:
                for chunk in response.raw.stream(1024, decode_content=False):
                    if chunk:
                        f.write(chunk)

            # unpack and link the shared wfpr_modules directories into place
            cmd = f"mkdir -p {target_path} && " \
                  f"tar -xzf {local_tar_path} -C {target_path} && " \
                  f"cd {target_path} && ln -s ../../../../../wfpr_modules . && " \
                  "cd tests && ln -s ../wfpr_modules ."
            out, err, ret = run_cmd(cmd)
            if ret != 0:
                run_cmd(f"rm -fr {target_path}")  # undo partial installation
                raise Exception(f"Package downloaded but installation failed: {err}")

        return target_path  # return the path the package was installed

    def __repr__(self):
        return self.pkg_uri

    def _init_deps(self, dependencies=(), devDependencies=()):
        """Validate dependency lists and record them on the instance.

        Defaults are immutable tuples (not lists) to avoid the shared
        mutable-default-argument pitfall.
        """
        # some basic validation: no duplicates within either list
        if len(dependencies) != len(set(dependencies)):
            raise Exception(f"Duplicated dependencies found: {', '.join(dependencies)}")
        else:
            dependencies = set(dependencies)

        if len(devDependencies) != len(set(devDependencies)):
            raise Exception(f"Duplicated devDependencies found: {', '.join(devDependencies)}")
        else:
            devDependencies = set(devDependencies)

        if dependencies.intersection(devDependencies):
            raise Exception("Dependency duplicated in 'dependencies' and 'devDependencies': "
                            f"{ ', '.join(dependencies.intersection(devDependencies)) }")

        allDependencies = dependencies.union(devDependencies)
        for dep_pkg_uri in allDependencies:
            try:
                # make sure pkg_uri format is valid, although we don't use the return values
                pkg_uri_parser(dep_pkg_uri)
            except Exception as ex:
                raise Exception(f"Invalid dependency: {dep_pkg_uri}. Message: {ex}")

        self.dependencies = dependencies
        self.devDependencies = devDependencies
        # at this stage, let's just treat all dependencies the same way,
        # later may need to handle devDep differently
        self.allDependencies = self.dependencies.union(self.devDependencies)
# -*- coding: utf-8 -*-
"""
 Copyright (c) 2021, Ontario Institute for Cancer Research (OICR).

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.

 Authors:
   Junjun Zhang <junjun.zhang@oicr.on.ca>
"""

import os
import json
import requests
import tempfile
from typing import Set

from .utils import run_cmd, pkg_uri_parser, pkg_asset_download_urls


class Package(object):
    """Represents a single released workflow package.

    Instances are built either from a package URI (release metadata is then
    fetched from the matching GitHub release) or from a pkg-release.json
    file path / JSON string.
    """

    name: str = None
    version: str = None
    main: str = None
    repo_type: str = 'git'  # hardcode for now
    repo_server: str = None
    repo_account: str = None
    repo_name: str = None
    dependencies: Set[str] = set()
    devDependencies: Set[str] = set()
    allDependencies: Set[str] = set()

    def __init__(self, pkg_uri=None, pkg_json=None):
        if pkg_uri and pkg_json:
            raise Exception("Cannot specify both pkg_uri and pkg_json")
        if pkg_uri:
            self._init_by_uri(pkg_uri)
            return
        if pkg_json:
            self._init_by_json(pkg_json)
            return
        raise Exception("Must specify either pkg_uri or pkg_json")

    def _init_by_uri(self, pkg_uri):
        """Fill in the package fields by parsing the URI and fetching release metadata."""
        try:
            server, account, repo, pkg_name, pkg_version = pkg_uri_parser(pkg_uri)
        except Exception as ex:
            raise Exception(f"Package uri error: {ex}")

        self.name = pkg_name
        self.version = pkg_version
        self.repo_server = server
        self.repo_account = account.lower()
        self.repo_name = repo

        # pull pkg-release.json from the GitHub release assets
        meta_text = ''
        for url in pkg_asset_download_urls(self.pkg_json_url):
            resp = requests.get(url)
            if resp.status_code == 200:
                meta_text = resp.text
                break

        if not meta_text:
            raise Exception("Failed to download 'pkg-release.json'. Looks like this package has "
                            f"not been released: {self.pkg_uri}.")

        self._init_by_json(pkg_json_str=meta_text)

    def _init_by_json(self, pkg_json=None, pkg_json_str=None):
        """Fill in the package fields from a pkg-release.json file path or JSON string."""
        if pkg_json:
            with open(pkg_json, 'r') as fh:
                pkg_dict = json.load(fh)
        elif pkg_json_str:
            pkg_dict = json.loads(pkg_json_str)
        else:
            raise Exception("Must specify 'pkg_json' or 'pkg_json_str' when call '_init_by_json'")

        self.name = pkg_dict['name']
        self.version = pkg_dict['version']
        self.main = pkg_dict['main']

        # repository url has the shape https://<server>/<account>/<repo>.git
        _, _, server, account, repo = pkg_dict['repository']['url'].split('/')
        self.repo_server = server
        self.repo_account = account.lower()
        self.repo_name = repo.split('.')[0]  # drop the trailing '.git'

        self._init_deps(
            pkg_dict.get('dependencies', []),
            pkg_dict.get('devDependencies', [])
        )

    @property
    def fullname(self):
        """'<name>@<version>'."""
        return self.name + '@' + self.version

    @property
    def project_fullname(self):
        """'<server>/<account>/<repo>'."""
        return '/'.join([self.repo_server, self.repo_account, self.repo_name])

    @property
    def pkg_uri(self):
        return self.project_fullname + '/' + self.fullname

    @property
    def release_tag(self):
        # '<name>@<version>' -> '<name>.v<version>'
        return self.fullname.replace('@', '.v')

    @property
    def pkg_tar_url(self):
        base = f"https://{self.project_fullname}/releases/download/{self.release_tag}"
        return f"{base}/{self.release_tag}.tar.gz"

    @property
    def pkg_json_url(self):
        base = f"https://{self.project_fullname}/releases/download/{self.release_tag}"
        return f"{base}/pkg-release.json"

    def install(self, target_project_root, force=False):
        """Install this package under the project's wfpr_modules tree; returns the path."""
        target_path = os.path.join(
            target_project_root, 'wfpr_modules', self.repo_server, self.repo_account, self.repo_name, self.fullname
        )

        if os.path.isdir(target_path) and not force:
            raise Exception(f"Pakcage already installed: {target_path.replace(os.path.join(os.getcwd(), ''), '')}, "
                            "skip unless force option is specified.")

        if force:
            # wipe any previous installation first
            out, err, ret = run_cmd(f"rm -fr {target_path}")
            if ret != 0:
                raise Exception(f"Unable to remove previously installed package: {err}")

        return self._download_and_install(target_path)

    def _download_and_install(self, target_path=None):
        """Fetch the release tarball and unpack it into ``target_path``."""
        response = None
        for url in pkg_asset_download_urls(self.pkg_tar_url):
            candidate = requests.get(url, stream=True)
            if candidate.status_code == 200:
                response = candidate
                break

        if response is None:
            raise Exception(f"Looks like this package has not been released: {self.pkg_uri}")

        with tempfile.TemporaryDirectory() as workdir:
            tarball = os.path.join(workdir, os.path.basename(self.pkg_tar_url))
            with open(tarball, 'wb') as out_fh:
                for chunk in response.raw.stream(1024, decode_content=False):
                    if chunk:
                        out_fh.write(chunk)

            # unpack, then symlink the shared wfpr_modules directories into place
            install_cmd = f"mkdir -p {target_path} && " \
                          f"tar -xzf {tarball} -C {target_path} && " \
                          f"cd {target_path} && ln -s ../../../../../wfpr_modules . && " \
                          "cd tests && ln -s ../wfpr_modules ."
            out, err, ret = run_cmd(install_cmd)
            if ret != 0:
                run_cmd(f"rm -fr {target_path}")  # undo partial installation
                raise Exception(f"Package downloaded but installation failed: {err}")

        return target_path  # path the package was installed into

    def __repr__(self):
        return self.pkg_uri

    def _init_deps(self, dependencies=[], devDependencies=[]):
        """Validate the declared dependency lists and record them on the instance."""
        # reject duplicates within each list
        if len(set(dependencies)) != len(dependencies):
            raise Exception(f"Duplicated dependencies found: {', '.join(dependencies)}")
        dependencies = set(dependencies)

        if len(set(devDependencies)) != len(devDependencies):
            raise Exception(f"Duplicated devDependencies found: {', '.join(devDependencies)}")
        devDependencies = set(devDependencies)

        # a package must not appear in both lists
        if dependencies.intersection(devDependencies):
            raise Exception("Dependency duplicated in 'dependencies' and 'devDependencies': "
                            f"{ ', '.join(dependencies.intersection(devDependencies)) }")

        combined = dependencies.union(devDependencies)
        for dep_uri in combined:
            try:
                # only validating the URI format; parsed values are unused here
                pkg_uri_parser(dep_uri)
            except Exception as ex:
                raise Exception(f"Invalid dependency: {dep_uri}. Message: {ex}")

        self.dependencies = dependencies
        self.devDependencies = devDependencies
        # treat runtime and dev dependencies uniformly for now;
        # devDeps may need special handling later
        self.allDependencies = self.dependencies.union(self.devDependencies)
import os
import sys
import re
import logging
from packaging import version
from configparser import ConfigParser
from tempfile import mkstemp
from shutil import move, copymode
from os import fdopen, remove
from pathlib import Path
from typing import Tuple
from git import Repo
from datetime import datetime
from rich import print

from mlf_core.create.github_support import is_git_repo
from mlf_core.lint.template_linter import TemplateLinter
from mlf_core.custom_cli.questionary import mlf_core_questionary_or_dot_mlf_core

log = logging.getLogger(__name__)


class VersionBumper:
    """
    Responsible for bumping the version across a mlf-core project
    """

    def __init__(self, project_dir, downgrade):
        self.parser = ConfigParser()
        self.parser.read(f'{project_dir}/mlf_core.cfg')
        self.CURRENT_VERSION = self.parser.get('bumpversion', 'current_version')
        self.downgrade_mode = downgrade
        self.top_level_dir = project_dir

    def bump_template_version(self, new_version: str, project_dir: Path) -> None:
        """
        Update the version number for all files that are whitelisted in the config file.

        INFO on valid versions: All versions must match the format like 1.0.0 or 1.1.0-SNAPSHOT; these are the only valid
        version formats mlf-core allows. A valid version therefore contains three digits (in the range from 0 to however
        large it will grow) separated by two dots. Optional is the -SNAPSHOT at the end (for JVM templates especially).
        NOTE that versions like 1.2.3.4 or 1.2 WILL NOT be recognized as valid versions as well as no substring of them
        will be recognized.

        :param new_version: The new version number that should replace the old one in a mlf-core project
        :param project_dir: The default value is the current working directory, so we´re initially assuming the user
                            bumps the version from the projects top level directory. If this is not the case this parameter
                            shows the path where the projects top level directory is and bumps the version there
        """
        log.debug(f'Current version: {self.CURRENT_VERSION} --- New version: {new_version}')
        sections = ['bumpversion_files_whitelisted', 'bumpversion_files_blacklisted']

        # if project_dir was given as handle use cwd since we need it for git add
        ct_cfg_path = f'{str(project_dir)}/mlf_core.cfg' if str(project_dir).startswith(str(Path.cwd())) else \
            f'{str(Path.cwd())}/{project_dir}/mlf_core.cfg'
        # path to CHANGELOG.rst file
        changelog_path = f'{str(project_dir)}/CHANGELOG.rst' if str(project_dir).startswith(str(Path.cwd())) else \
            f'{str(Path.cwd())}/{project_dir}/CHANGELOG.rst'
        # keep path of all files that were changed during bump version
        changed_files = [ct_cfg_path, changelog_path]

        print(f'[bold blue]Changing version number.\nCurrent version is {self.CURRENT_VERSION}.'
              f'\nNew version will be {new_version}\n')

        # for each section (whitelisted and blacklisted files) bump the version (if allowed)
        for section in sections:
            log.debug(f'Bumping files of section: {section}.')
            # '_file' (unused) avoids shadowing the builtin 'file' name
            for _file, path in self.parser.items(section):
                not_changed, file_path = self.replace(f'{project_dir}/{path}', new_version, section)
                # only add file if the version(s) in the file were bumped
                if not not_changed:
                    path_changed = file_path if file_path.startswith(str(Path.cwd())) else \
                        f'{str(Path.cwd())}/{file_path}'
                    changed_files.append(path_changed)

        # update new version in mlf_core.cfg file
        log.debug('Updating version in mlf_core.cfg file.')
        self.parser.set('bumpversion', 'current_version', new_version)
        with open(f'{project_dir}/mlf_core.cfg', 'w') as configfile:
            self.parser.write(configfile)

        # add a new changelog section when downgrade mode is disabled
        self.add_changelog_section(new_version)

        # check if a project is a git repository and if so, commit bumped version changes
        if is_git_repo(project_dir):
            repo = Repo(project_dir)
            # git add
            print('[bold blue]Staging template')
            repo.git.add(changed_files)
            # git commit
            print('[bold blue]Committing changes to local git repository.')
            repo.index.commit(f'Bump version from {self.CURRENT_VERSION} to {new_version}')

    def replace(self, file_path: str, subst: str, section: str) -> Tuple[bool, str]:
        """
        Replace a version with the new version unless the line is explicitly excluded (marked with <<MLF-CORE_NO_BUMP>>).
        Or, in case of blacklisted files, it ignores all lines with version numbers unless they´re explicitly marked
        for bump with tag <<MLF-CORE_FORCE_BUMP>>.

        :param file_path: The path of the file where the version should be updated
        :param subst: The new version that replaces the old one
        :param section: The current section (whitelisted or blacklisted files)

        :return: Whether the file was left unchanged, and the path of the changed file ('' if unchanged)
        """
        # flag that indicates whether no changes were made inside a file
        file_is_unchanged = True
        path_changed = ''

        # write the edited copy to a temp file, then move it over the original
        fh, abs_path = mkstemp()
        with fdopen(fh, 'w') as new_file:
            with open(file_path) as old_file:
                for line in old_file:
                    # update version if tags were found (and were in the right section)
                    if ('<<MLF-CORE_NO_BUMP>>' not in line and not section == 'bumpversion_files_blacklisted') or '<<MLF-CORE_FORCE_BUMP>>' in line:
                        # for info on this regex, see bump_template docstring above
                        tmp = re.sub(r'(?<!\.)\d+(?:\.\d+){2}(?:-SNAPSHOT)?(?!\.)', subst, line)
                        new_file.write(tmp)
                        if tmp != line:
                            if file_is_unchanged:
                                print(f'[bold blue]Updating version number in {file_path}')
                                file_is_unchanged = False
                                path_changed = file_path
                            # double quotes on the f-strings so the single-quoted
                            # arguments inside {} parse on Python < 3.12
                            print(f"[bold red]- {line.strip().replace('<!-- <<MLF-CORE_FORCE_BUMP>> -->', '')}\n"
                                  + f"[bold green]+ {tmp.strip().replace('<!-- <<MLF-CORE_FORCE_BUMP>> -->', '')}")
                            print()
                    else:
                        new_file.write(line)

        # Copy the file permissions from the old file to the new file
        copymode(file_path, abs_path)
        # Remove original file
        remove(file_path)
        # Move new file
        move(abs_path, file_path)

        return file_is_unchanged, path_changed

    def can_run_bump_version(self, new_version: str, project_dir: str) -> bool:
        """
        Ensure that all requirements are met, so that the bump version command can be run successfully.
        This includes the following requirements:
        1.) The new version number matches the format like 1.1.0 or 1.1.0-SNAPSHOT required by mlf-core versions
        2.) The new version is greater than the current one
        3.) The project is a mlf-core project

        :param new_version: The new version
        :param project_dir: The directory of the project
        :return: True if bump version can be run, false otherwise.
        """
        # ensure that the entered version number matches correct format like 1.1.0 or 1.1.0-SNAPSHOT but not 1.2 or 1.2.3.4
        if not re.match(r'(?<!\.)\d+(?:\.\d+){2}((?!.)|-SNAPSHOT)(?!.)', new_version):
            print('[bold red]Invalid version specified!\nEnsure your version number has the form '
                  'of 0.0.0 or 15.100.239-SNAPSHOT')
            return False
        # ensure the version is bumped within a project created by mlf-core
        elif not Path(f'{project_dir}/mlf_core.cfg').is_file():
            print('[bold red]Did not find a mlf_core.cfg file. Make sure you are in the right directory '
                  'or specify the path to your projects bump_version.cfg file')
            return False
        # equal versions won't be accepted for bump-version
        elif new_version == self.CURRENT_VERSION:
            print(f'[bold red]The new version {new_version} cannot be equal to the current version {self.CURRENT_VERSION}.')
            return False
        # only allow bump from a SNAPSHOT version to its correspondence with -SNAPSHOT removed (like 1.0.0-SNAPSHOT to 1.0.0 but not 2.0.0)
        elif self.CURRENT_VERSION.endswith('-SNAPSHOT') and not self.CURRENT_VERSION.split('-')[0] == new_version:
            print(f'[bold red]Cannot bump {self.CURRENT_VERSION} to {new_version}.'
                  + f'[blue]\n{self.CURRENT_VERSION} as a SNAPSHOT version can only be bumped to its non-snapshot equivalent '
                  f"{self.CURRENT_VERSION.split('-')[0]}.")
            return False
        # ensure the new version is greater than the current one, if not the user wants to explicitly downgrade it
        elif not self.downgrade_mode:
            current_version_r = self.CURRENT_VERSION.replace('-SNAPSHOT', '')
            new_version_r = new_version.replace('-SNAPSHOT', '')
            # bump from x.x.x to x.x.x-SNAPSHOT should be only allowed when using the downgrade flag
            if new_version.endswith('-SNAPSHOT') and self.CURRENT_VERSION == new_version.split('-')[0]:
                print(f'[bold red]Cannot downgrade {self.CURRENT_VERSION} to its version SNAPSHOT {new_version}.'
                      + f'[blue]\nUse the -d flag if you want to downgrade {self.CURRENT_VERSION} to its SNAPSHOT version.')
                return False
            # when the current version and the new version are equal, but one is a -SNAPSHOT version return true
            elif version.parse(current_version_r) == version.parse(new_version_r) and \
                    ('-SNAPSHOT' in self.CURRENT_VERSION or '-SNAPSHOT' in new_version):
                return True
            # else check if the new version is greater than the current version
            elif version.parse(current_version_r) < version.parse(new_version_r):
                return True

            # the new version is not greater than the current one
            print(f'[bold red]The new version {new_version} is not greater than the current version {self.CURRENT_VERSION}.'
                  f'\nThe new version must be greater than the old one.')
            return False

        return True

    def check_bump_range(self, current_version: str, new_version: str) -> bool:
        """
        Check if the new version seems to be a reasonable bump or not (ignored when using the downgrade flag).
        This should not break the bump-version process, but it requires confirmation of the user.

        :param current_version: The current version
        :param new_version: The new version
        :return: If it´s a reasonable bump
        """
        cur_v_split = current_version.split('.')
        new_v_split = new_version.split('.')

        # major update like bumping from 1.8.3 to 2.0.0
        if new_v_split[0] != cur_v_split[0]:
            log.debug('Identified major version bump')
            return new_v_split[1] == '0' and new_v_split[2] == '0' and (int(new_v_split[0]) - int(cur_v_split[0]) == 1)
        # minor update like bumping from 1.8.5 to 1.9.0
        elif new_v_split[1] != cur_v_split[1]:
            log.debug('Identified minor version bump')
            return new_v_split[0] == cur_v_split[0] and new_v_split[2] == '0' and (int(new_v_split[1]) - int(cur_v_split[1]) == 1)
        # x-minor update like bumping from 1.8.5 to 1.8.6
        elif new_v_split[2] != cur_v_split[2]:
            log.debug('Identified patch version bump')
            return new_v_split[0] == cur_v_split[0] and new_v_split[1] == cur_v_split[1] and (int(new_v_split[2]) - int(cur_v_split[2]) == 1)

        # case when we bumping like 3.0.0-SNAPSHOT to 3.0.0
        log.debug('Identified SNAPSHOT version bump')
        return True

    def lint_before_bump(self) -> None:
        """
        Lint the changelog prior to bumping. Linting consists of two major points (beside checking if a CHANGELOG.rst
        file even exists at top level directory).

        1. Lint CHANGELOG.rst to ensure that bump-version can safely add a new section
        2. Check, whether all versions are consistent over the project
        """
        changelog_linter = TemplateLinter(path=self.top_level_dir)
        changelog_path = os.path.join(self.top_level_dir, 'CHANGELOG.rst')

        # ensure changelog exists, else abort
        if not os.path.exists(changelog_path):
            print(f'[bold red]No file named CHANGELOG.rst found at {self.top_level_dir}. Aborting!')
            sys.exit(1)

        # lint changelog and check version consistency
        log.debug('Linting changelog')
        changelog_linter.lint_changelog()
        log.debug('Linting version consistent')
        changelog_linter.check_version_consistent()
        print()
        changelog_linter._print_results()
        print()

        # if any failed linting tests, ask user for confirmation of proceeding with bump (which results in undefined behavior)
        if len(changelog_linter.failed) > 0 or len(changelog_linter.warned) > 0:
            # ask for confirmation if the user really wants to proceed bumping when linting failed
            print('[bold red]Changelog linting and/or version check failed!\nYou can fix them and try bumping again. Proceeding bump will result in '
                  'undefined behavior!')
            if not mlf_core_questionary_or_dot_mlf_core(function='confirm',
                                                        question='Do you really want to continue?',
                                                        default='n'):
                sys.exit(1)

    def add_changelog_section(self, new_version: str) -> None:
        """
        Each version bump will add a new section template to the CHANGELOG.rst

        :param new_version: The new version
        """
        log.debug('Adding new changelog section.')
        if self.downgrade_mode:
            print('[bold yellow]WARNING: Running bump-version in downgrade mode will not add a new changelog section currently!')
        else:
            date = datetime.today().strftime("%Y-%m-%d")
            # replace the SNAPSHOT SECTION header with its non-snapshot correlate
            if self.CURRENT_VERSION.endswith('-SNAPSHOT'):
                self.replace_snapshot_header(f'{self.top_level_dir}/CHANGELOG.rst', new_version, date)
            else:
                # build the template for a new changelog section: an underlined
                # '<version> (<date>)' header followed by the four category stubs
                header = f'{new_version} ({date})'
                categories = '\n\n'.join(['**Added**', '**Fixed**', '**Dependencies**', '**Deprecated**'])
                section = f'{header}\n{"-" * len(header)}\n\n{categories}'
                self.insert_latest_version_section(old_changelog_file=f'{self.top_level_dir}/CHANGELOG.rst', section=section)

    def replace_snapshot_header(self, source_file_path: str, new_version: str, date: str) -> None:
        """
        Replace the SNAPSHOT header section in CHANGELOG.
        The pattern (currently) cannot include any newline characters, therefore no multiline support!

        :param source_file_path: Path to source file (the path where CHANGELOG lies)
        :param new_version: The new version
        :param date: Current date
        """
        log.debug('Replacing the changelog header in the changelog file.')
        # fdopen wraps and later closes the fd returned by mkstemp (it used to leak)
        fh, target_file_path = mkstemp()
        with fdopen(fh, 'w') as target_file:
            with open(source_file_path, 'r') as source_file:
                for line in source_file:
                    pattern, subst = '', ''
                    # check if the line is a header section with SNAPSHOT version
                    if re.match(r'^(?<!\.)\d+(?:\.\d+){2}(?!\.)-SNAPSHOT \(\d\d\d\d-\d\d-\d\d\)$', line):
                        dotted_snapshot_line = source_file.readline()
                        next_new_line = source_file.readline()  # noqa: F841 necessary to omit an additional newline
                        snapshot_date = line.split('(')[1][:-2]  # extract date of SNAPSHOT version adding
                        pattern = f'{self.CURRENT_VERSION} ({snapshot_date})'
                        subst = f'{new_version} ({date})\n' + '-' * (len(new_version) + len(date) + 3)
                        # replace -SNAPSHOT in the header and adjust the dotted line below to the new header length
                        target_file.write(line.replace(pattern, subst))
                        target_file.write(dotted_snapshot_line.replace('-', ''))
                    else:
                        # else just write the line to the new file
                        target_file.write(line.replace(pattern, subst))

        # remove old file
        remove(source_file_path)
        # move new file to replace old file
        move(target_file_path, source_file_path)

    def insert_latest_version_section(self, old_changelog_file: str, section: str) -> None:
        """
        Insert the new changelog section as the latest section right after the header

        :param old_changelog_file: path to the current CHANGELOG.rst file
        :param section: the new section template block for changelog
        """
        log.debug('Inserting latest version section into the changelog.')
        # fdopen wraps and later closes the fd returned by mkstemp (it used to leak)
        fh, target_file_path = mkstemp()
        with fdopen(fh, 'w') as target_file:
            with open(old_changelog_file, 'r') as source_file:
                for line in source_file:
                    # check if the line is the header section with the latest version
                    if re.match(rf'^{self.CURRENT_VERSION} \(\d\d\d\d-\d\d-\d\d\)$', line):
                        target_file.write(f'{section}\n\n\n')
                    target_file.write(line)

        # remove old file
        remove(old_changelog_file)
        # move new file to replace old file
        move(target_file_path, old_changelog_file)
import os import sys import re import logging from packaging import version from configparser import ConfigParser from tempfile import mkstemp from shutil import move, copymode from os import fdopen, remove from pathlib import Path from git import Repo from datetime import datetime from rich import print from mlf_core.create.github_support import is_git_repo from mlf_core.lint.template_linter import TemplateLinter from mlf_core.custom_cli.questionary import mlf_core_questionary_or_dot_mlf_core log = logging.getLogger(__name__) class VersionBumper: """ Responsible for bumping the version across a mlf-core project """ def __init__(self, project_dir, downgrade): self.parser = ConfigParser() self.parser.read(f'{project_dir}/mlf_core.cfg') self.CURRENT_VERSION = self.parser.get('bumpversion', 'current_version') self.downgrade_mode = downgrade self.top_level_dir = project_dir def bump_template_version(self, new_version: str, project_dir: Path) -> None: """ Update the version number for all files that are whitelisted in the config file. INFO on valid versions: All versions must match the format like 1.0.0 or 1.1.0-SNAPSHOT; these are the only valid version formats mlf-core allows. A valid version therefore contains a three digits (in the range from 0 to however large it will grow) separated by two dots. Optional is the -SNAPSHOT at the end (for JVM templates especially). NOTE that versions like 1.2.3.4 or 1.2 WILL NOT be recognized as valid versions as well as no substring of them will be recognized. :param new_version: The new version number that should replace the old one in a mlf-core project :param project_dir: The default value is the current working directory, so we´re initially assuming the user bumps the version from the projects top level directory. 
If this is not the case this parameter shows the path where the projects top level directory is and bumps the version there """ log.debug(f'Current version: {self.CURRENT_VERSION} --- New version: {new_version}') sections = ['bumpversion_files_whitelisted', 'bumpversion_files_blacklisted'] # if project_dir was given as handle use cwd since we need it for git add ct_cfg_path = f'{str(project_dir)}/mlf_core.cfg' if str(project_dir).startswith(str(Path.cwd())) else \ f'{str(Path.cwd())}/{project_dir}/mlf_core.cfg' # path to CHANGELOG.rst file changelog_path = f'{str(project_dir)}/CHANGELOG.rst' if str(project_dir).startswith(str(Path.cwd())) else \ f'{str(Path.cwd())}/{project_dir}/CHANGELOG.rst' # keep path of all files that were changed during bump version changed_files = [ct_cfg_path, changelog_path] print(f'[bold blue]Changing version number.\nCurrent version is {self.CURRENT_VERSION}.' f'\nNew version will be {new_version}\n') # for each section (whitelisted and blacklisted files) bump the version (if allowed) for section in sections: log.debug(f'Bumping files of section: {section}.') for file, path in self.parser.items(section): not_changed, file_path = self.replace(f'{project_dir}/{path}', new_version, section) # only add file if the version(s) in the file were bumped if not not_changed: path_changed = file_path if file_path.startswith(str(Path.cwd())) else f'{str(Path.cwd())}/{file_path}' changed_files.append(path_changed) # update new version in mlf_core.cfg file log.debug('Updating version in mlf_core.cfg file.') self.parser.set('bumpversion', 'current_version', new_version) with open(f'{project_dir}/mlf_core.cfg', 'w') as configfile: self.parser.write(configfile) # add a new changelog section when downgrade mode is disabled self.add_changelog_section(new_version) # check if a project is a git repository and if so, commit bumped version changes if is_git_repo(project_dir): repo = Repo(project_dir) # git add print('[bold blue]Staging template') 
repo.git.add(changed_files) # git commit print('[bold blue]Committing changes to local git repository.') repo.index.commit(f'Bump version from {self.CURRENT_VERSION} to {new_version}') def replace(self, file_path: str, subst: str, section: str) -> (bool, str): """ Replace a version with the new version unless the line is explicitly excluded (marked with <<MLF-CORE_NO_BUMP>>). Or, in case of blacklisted files, it ignores all lines with version numbers unless they´re explicitly marked for bump with tag <<MLF-CORE_FORCE_BUMP>>. :param file_path: The path of the file where the version should be updated :param subst: The new version that replaces the old one :param section: The current section (whitelisted or blacklisted files) :return: Whether a file has been changed during bumped and the path of changed file """ # flag that indicates whether no changes were made inside a file file_is_unchanged = True path_changed = '' # Create temp file fh, abs_path = mkstemp() with fdopen(fh, 'w') as new_file: with open(file_path) as old_file: for line in old_file: # update version if tags were found (and were in the right section) if ('<<MLF-CORE_NO_BUMP>>' not in line and not section == 'bumpversion_files_blacklisted') or '<<MLF-CORE_FORCE_BUMP>>' in line: # for info on this regex, see bump_template docstring above tmp = re.sub(r'(?<!\.)\d+(?:\.\d+){2}(?:-SNAPSHOT)?(?!\.)', subst, line) new_file.write(tmp) if tmp != line: if file_is_unchanged: print(f'[bold blue]Updating version number in {file_path}') file_is_unchanged = False path_changed = file_path print(f'[bold red]- {line.strip().replace("<!-- <<MLF-CORE_FORCE_BUMP>> -->", "")}\n' + f'[bold green]+ {tmp.strip().replace("<!-- <<MLF-CORE_FORCE_BUMP>> -->", "")}') print() else: new_file.write(line) # Copy the file permissions from the old file to the new file copymode(file_path, abs_path) # Remove original file remove(file_path) # Move new file move(abs_path, file_path) return file_is_unchanged, path_changed def 
can_run_bump_version(self, new_version: str, project_dir: str) -> bool: """ Ensure that all requirements are met, so that the bump version command can be run successfully. This included the following requirements: 1.) The new version number matches the format like 1.1.0 or 1.1.0-SNAPSHOT required by mlf-core versions 2.) The new version is greater than the current one 3.) The project is a mlf-core project :param new_version: The new version :param project_dir: The directory of the project :return: True if bump version can be run, false otherwise. """ # ensure that the entered version number matches correct format like 1.1.0 or 1.1.0-SNAPSHOT but not 1.2 or 1.2.3.4 if not re.match(r'(?<!\.)\d+(?:\.\d+){2}((?!.)|-SNAPSHOT)(?!.)', new_version): print('[bold red]Invalid version specified!\nEnsure your version number has the form ' 'of 0.0.0 or 15.100.239-SNAPSHOT') return False # ensure the version is bumped within a project created by mlf-core elif not Path(f'{project_dir}/mlf_core.cfg').is_file(): print('[bold red]Did not find a mlf_core.cfg file. Make sure you are in the right directory ' 'or specify the path to your projects bump_version.cfg file') return False # equal versions won't be accepted for bump-version elif new_version == self.CURRENT_VERSION: print(f'[bold red]The new version {new_version} cannot be equal to the current version {self.CURRENT_VERSION}.') return False # only allow bump from a SNAPSHOT version to its correspondence with -SNAPSHOT removed (like 1.0.0-SNAPSHOT to 1.0.0 but not 2.0.0) elif self.CURRENT_VERSION.endswith('-SNAPSHOT') and not self.CURRENT_VERSION.split('-')[0] == new_version: print(f'[bold red]Cannot bump {self.CURRENT_VERSION} to {new_version}.' 
+ f'[blue]\n{self.CURRENT_VERSION} as a SNAPSHOT version can only be bumped to its non-snapshot equivalent ' f'{self.CURRENT_VERSION.split("-")[0]}.') return False # ensure the new version is greater than the current one, if not the user wants to explicitly downgrade it elif not self.downgrade_mode: current_version_r = self.CURRENT_VERSION.replace('-SNAPSHOT', '') new_version_r = new_version.replace('-SNAPSHOT', '') # bump from x.x.x to x.x.x-SNAPSHOT should be only allowed when using the downgrade flag if new_version.endswith('-SNAPSHOT') and self.CURRENT_VERSION == new_version.split('-')[0]: print(f'[bold red]Cannot downgrade {self.CURRENT_VERSION} to its version SNAPSHOT {new_version}.' + f'[blue]\nUse the -d flag if you want to downgrade {self.CURRENT_VERSION} to its SNAPSHOT version.') return False # when the current version and the new version are equal, but one is a -SNAPSHOT version return true elif version.parse(current_version_r) == version.parse(new_version_r) and ('-SNAPSHOT' in self.CURRENT_VERSION or '-SNAPSHOT' in new_version): return True # else check if the new version is greater than the current version elif version.parse(current_version_r) < version.parse(new_version_r): return True # the new version is not greater than the current one print(f'[bold red]The new version {new_version} is not greater than the current version {self.CURRENT_VERSION}.' f'\nThe new version must be greater than the old one.') return False return True def check_bump_range(self, current_version: str, new_version: str) -> bool: """ Check if the new version seems to be a reasonable bump or not (ignored when using the downgrade flag). This should not break the bump-version process, but it requires confirmation of the user. 
:param current_version: The current version :param new_version: The new version :return: If it´s a reasonable bump """ cur_v_split = current_version.split('.') new_v_split = new_version.split('.') # major update like bumping from 1.8.3 to 2.0.0 if new_v_split[0] != cur_v_split[0]: log.debug('Identified major version bump') return new_v_split[1] == '0' and new_v_split[2] == '0' and (int(new_v_split[0]) - int(cur_v_split[0]) == 1) # minor update like bumping from 1.8.5 to 1.9.0 elif new_v_split[1] != cur_v_split[1]: log.debug('Identified minor version bump') return new_v_split[0] == cur_v_split[0] and new_v_split[2] == '0' and (int(new_v_split[1]) - int(cur_v_split[1]) == 1) # x-minor update like bumping from 1.8.5 to 1.8.6 elif new_v_split[2] != cur_v_split[2]: log.debug('Identified patch version bump') return new_v_split[0] == cur_v_split[0] and new_v_split[1] == cur_v_split[1] and (int(new_v_split[2]) - int(cur_v_split[2]) == 1) # case when we bumping like 3.0.0-SNAPSHOT to 3.0.0 log.debug('Identified SNAPSHOT version bump') return True def lint_before_bump(self) -> None: """ Lint the changelog prior to bumping. Linting consists of two major points (beside checking if a CHANGELOG.rst file even exists at top level directory). 1. Lint CHANGELOG.rst to ensure that bump-version can safely add a new section 2. Check, whether all versions are consistent over the project """ changelog_linter = TemplateLinter(path=self.top_level_dir) changelog_path = os.path.join(self.top_level_dir, 'CHANGELOG.rst') # ensure changelog exists, else abort if not os.path.exists(changelog_path): print(f'[bold red]No file named CHANGELOG.rst found at {self.top_level_dir}. 
Aborting!') sys.exit(1) # lint changelog and check version consistency log.debug('Linting changelog') changelog_linter.lint_changelog() log.debug('Linting version consistent') changelog_linter.check_version_consistent() print() changelog_linter._print_results() print() # if any failed linting tests, ask user for confirmation of proceeding with bump (which results in undefined behavior) if len(changelog_linter.failed) > 0 or len(changelog_linter.warned) > 0: # ask for confirmation if the user really wants to proceed bumping when linting failed print('[bold red]Changelog linting and/or version check failed!\nYou can fix them and try bumping again. Proceeding bump will result in ' 'undefined behavior!') if not mlf_core_questionary_or_dot_mlf_core(function='confirm', question='Do you really want to continue?', default='n'): sys.exit(1) def add_changelog_section(self, new_version: str) -> None: """ Each version bump will add a new section template to the CHANGELOG.rst :param new_version: The new version """ log.debug('Adding new changelog section.') if self.downgrade_mode: print('[bold yellow]WARNING: Running bump-version in downgrade mode will not add a new changelog section currently!') else: date = datetime.today().strftime("%Y-%m-%d") # replace the SNAPSHOT SECTION header with its non-snapshot correlate if self.CURRENT_VERSION.endswith('-SNAPSHOT'): self.replace_snapshot_header(f'{self.top_level_dir}/CHANGELOG.rst', new_version, date) else: # the section template for a new changelog section nl = '\n' section = f'{new_version} ({date}){nl}{"-" * (len(new_version) + len(date) + 3)}{nl}{nl}' \ f'{f"**{nl}{nl}".join(["**Added", "**Fixed", "**Dependencies", "**Deprecated**"])}' self.insert_latest_version_section(old_changelog_file=f'{self.top_level_dir}/CHANGELOG.rst', section=section) def replace_snapshot_header(self, source_file_path: str, new_version: str, date: str) -> None: """ Replace the SNAPSHOT header section in CHANGELOG. 
The pattern (currently) cannot include any newline characters, therefore no multiline support! :param source_file_path: Path to source file (the path where CHANGELOG lies) :param new_version: The new version :param date: Current date """ log.debug('Replacing the changelog header in the changelog file.') # create a temp file (requires to be explicitly deleted later) fh, target_file_path = mkstemp() # read from old file (the source file) and write into new file (the target file) with open(target_file_path, 'w') as target_file: with open(source_file_path, 'r') as source_file: for line in source_file: pattern, subst = '', '' # check if the line is a header section with SNAPSHOT version if re.match(r'^(?<!\.)\d+(?:\.\d+){2}(?!\.)-SNAPSHOT \(\d\d\d\d-\d\d-\d\d\)$', line): dotted_snapshot_line = source_file.readline() next_new_line = source_file.readline() # noqa: F841 necessary to omit an additional newline snapshot_date = line.split('(')[1][:-2] # extract date of SNAPSHOT version adding pattern = f'{self.CURRENT_VERSION} ({snapshot_date})' subst = f'{new_version} ({date})\n{(len(new_version) + len(date) + 3) * "-"}' # replace -SNASPHOT in the header and adjust the dotted line below to the new header length target_file.write(line.replace(pattern, subst)) target_file.write(dotted_snapshot_line.replace('-', '')) else: # else just write the line to the new file target_file.write(line.replace(pattern, subst)) # remove old file remove(source_file_path) # move new file to replace old file move(target_file_path, source_file_path) def insert_latest_version_section(self, old_changelog_file: str, section: str) -> None: """ Insert the new changelog section as the latest section right after the header :param old_changelog_file: path to the current CHANGELOG.rst file :param section: the new section template block for changelog """ log.debug('Inserting latest version section into the changelog.') # create a temp file (requires to be explicitly deleted later) fh, target_file_path = 
mkstemp() # read from old file (the source file) and write into new file (the target file) with open(target_file_path, 'w') as target_file: with open(old_changelog_file, 'r') as source_file: for line in source_file: # check if the line is the header section with the latest version if re.match(rf'^{self.CURRENT_VERSION} \(\d\d\d\d-\d\d-\d\d\)$', line): target_file.write(f'{section}\n\n\n') target_file.write(line) # remove old file remove(old_changelog_file) # move new file to replace old file move(target_file_path, old_changelog_file)
import pytest import vcr from app import APP, cli RUNNER = APP.test_cli_runner() # https://flask.palletsprojects.com/en/1.1.x/testing/#testing-cli-commands @vcr.use_cassette('tests/fixtures/vcr_cassettes/import_data.yml') def _test_import_data(): result = RUNNER.invoke(cli.import_data, ['--date', APP.config.get('START_DATE')]) assert f"imported for {APP.config.get("START_DATE")}" in result.output @vcr.use_cassette('tests/fixtures/vcr_cassettes/import_data.yml') def _test_reimport_data(): result = RUNNER.invoke(cli.reimport_data) assert f"completed queueing" in result.output
import pytest import vcr from app import APP, cli RUNNER = APP.test_cli_runner() # https://flask.palletsprojects.com/en/1.1.x/testing/#testing-cli-commands @vcr.use_cassette('tests/fixtures/vcr_cassettes/import_data.yml') def _test_import_data(): result = RUNNER.invoke(cli.import_data, ['--date', APP.config.get('START_DATE')]) assert f"imported for {APP.config.get('START_DATE')}" in result.output @vcr.use_cassette('tests/fixtures/vcr_cassettes/import_data.yml') def _test_reimport_data(): result = RUNNER.invoke(cli.reimport_data) assert f"completed queueing" in result.output
"""Utilities for submitting and starting Work Queue workers. """ import argparse import os import re import subprocess from shadho.configuration import ShadhoConfig def parse_args(args=None): p = argparse.ArgumentParser( description='Start a Work Queue worker and connect to SHADHO.') p.add_argument('-M', '--master', type=str, default='', help='name of the Work Queue master to connect to') p.add_argument('-u', '--user', type=str, default=os.environ['USER'], help='name of the user running the Work Queue master') p.add_argument('-t', '--timeout', type=int, default=900, help='amount of time worker idles before exiting') p.add_argument('--cores', type=int, default=1, help='the number of cores for the worker to use;' + ' pass 0 to use all available cores') p.add_argument('--feature', type=str, nargs='*', default=[], help='user specified feature to advertise, e.g. GPU model name') return p.parse_args(args) def shadho_wq_worker(args=None, config=None): """Start a Work Queue worker.""" if config is None: config = ShadhoConfig() if args is None: cmd_args = '' else: cmd_args = f'{'-M' if args.master else ''} {args.master} --cores {args.cores} --timeout {args.timeout}' for feature in args.feature: cmd_args += f' --feature {feature}' if not re.search(r'(^|[\s])-M([\s]|$)', cmd_args): cmd_args = ' '.join([cmd_args, '-M', config.workqueue.name]).strip() if not re.search(r'[\s]*-M[\s][\S]*' + args.user + r'.*[\s]*', cmd_args): print('Replacing') cmd_args = re.sub(r'(^|[\s]*)(.*-M[\s])([\S]+)([\s]*.*$)', r'\1\2\3-' + args.user + r'\4', cmd_args) executable = os.path.join(config.shadho_dir, 'bin', 'work_queue_worker') print(cmd_args) subprocess.run([executable] + cmd_args.split(), stderr=subprocess.STDOUT) def main(): args = parse_args() shadho_wq_worker(args=args) if __name__ == '__main__': main()
"""Utilities for submitting and starting Work Queue workers. """ import argparse import os import re import subprocess from shadho.configuration import ShadhoConfig def parse_args(args=None): p = argparse.ArgumentParser( description='Start a Work Queue worker and connect to SHADHO.') p.add_argument('-M', '--master', type=str, default='', help='name of the Work Queue master to connect to') p.add_argument('-u', '--user', type=str, default=os.environ['USER'], help='name of the user running the Work Queue master') p.add_argument('-t', '--timeout', type=int, default=900, help='amount of time worker idles before exiting') p.add_argument('--cores', type=int, default=1, help='the number of cores for the worker to use;' + ' pass 0 to use all available cores') p.add_argument('--feature', type=str, nargs='*', default=[], help='user specified feature to advertise, e.g. GPU model name') return p.parse_args(args) def shadho_wq_worker(args=None, config=None): """Start a Work Queue worker.""" if config is None: config = ShadhoConfig() if args is None: cmd_args = '' else: cmd_args = f'{"-M" if args.master else ""} {args.master} --cores {args.cores} --timeout {args.timeout}' for feature in args.feature: cmd_args += f' --feature {feature}' if not re.search(r'(^|[\s])-M([\s]|$)', cmd_args): cmd_args = ' '.join([cmd_args, '-M', config.workqueue.name]).strip() if not re.search(r'[\s]*-M[\s][\S]*' + args.user + r'.*[\s]*', cmd_args): print('Replacing') cmd_args = re.sub(r'(^|[\s]*)(.*-M[\s])([\S]+)([\s]*.*$)', r'\1\2\3-' + args.user + r'\4', cmd_args) executable = os.path.join(config.shadho_dir, 'bin', 'work_queue_worker') print(cmd_args) subprocess.run([executable] + cmd_args.split(), stderr=subprocess.STDOUT) def main(): args = parse_args() shadho_wq_worker(args=args) if __name__ == '__main__': main()
from Funcoes.banco import conexao from Model.Veiculo import Veiculo from Model.Vendas_Header import Vendas_Header from Model.Cliente import Cliente class Pendencias: def __init__(self, pend_id="", cliente: Cliente = "", venda: Vendas_Header = "", veiculo: Veiculo = "", datahora="", valor=""): self.id = pend_id self.cliente = cliente self.venda = venda self.veiculo = veiculo self.datahora = datahora self.valor = valor def update(self): conn = conexao() cur = conn.cursor() cur.execute(f""" UPDATE pendencias SET pend_clie_id = {self.cliente.id}, pend_veic_placa = \'{self.veiculo.placa}\', pend_valor = {self.valor} WHERE pend_venda_id = {self.venda.id} """) conn.commit() cur.close() conn.close() def inserir(self): conn = conexao() cur = conn.cursor() cur.execute( f"INSERT INTO pendencias (pend_clie_id, pend_venda_id, pend_veic_placa, pend_datahora, pend_valor) " f"VALUES " f"({self.cliente.id}, {self.venda.id}, \"{self.veiculo.placa if self.veiculo.placa else "null"}\', " f"\'{self.datahora}\', {self.valor})") conn.commit() cur.close() conn.close() def delete(self): conn = conexao() cur = conn.cursor() cur.execute(f"DELETE FROM pendencias WHERE pend_venda_id = {self.venda.id}") conn.commit() cur.close() conn.close() @staticmethod def todas_pendencias(): conn = conexao() cur = conn.cursor() cur.execute(f"SELECT pend_id, pend_venda_id, clie_nome, ROUND(pend_valor::numeric, 2) FROM pendencias " f"INNER JOIN cliente ON clie_id = pend_clie_id") row = cur.fetchall() conn.commit() cur.close() conn.close() return row def busca_pendencias_by_cliente(self, op): conn = conexao() cur = conn.cursor() cur.execute(f"SELECT pend_id, pend_venda_id, clie_nome, ROUND(pend_valor::numeric, 2) FROM pendencias " f"INNER JOIN cliente ON clie_id = pend_clie_id" f"AND clie_id {op} {self.cliente.id}") conn.commit() cur.close() conn.close() def busca_pendencias_by_id(self): conn = conexao() cur = conn.cursor() cur.execute(f"SELECT pend_id, pend_venda_id, clie_nome, ROUND(pend_valor::numeric, 2) 
FROM pendencias " f"INNER JOIN cliente ON clie_id = pend_clie_id" f"AND pend_id = {self.id}") conn.commit() cur.close() conn.close()
from Funcoes.banco import conexao from Model.Veiculo import Veiculo from Model.Vendas_Header import Vendas_Header from Model.Cliente import Cliente class Pendencias: def __init__(self, pend_id="", cliente: Cliente = "", venda: Vendas_Header = "", veiculo: Veiculo = "", datahora="", valor=""): self.id = pend_id self.cliente = cliente self.venda = venda self.veiculo = veiculo self.datahora = datahora self.valor = valor def update(self): conn = conexao() cur = conn.cursor() cur.execute(f""" UPDATE pendencias SET pend_clie_id = {self.cliente.id}, pend_veic_placa = \'{self.veiculo.placa}\', pend_valor = {self.valor} WHERE pend_venda_id = {self.venda.id} """) conn.commit() cur.close() conn.close() def inserir(self): conn = conexao() cur = conn.cursor() cur.execute( f"INSERT INTO pendencias (pend_clie_id, pend_venda_id, pend_veic_placa, pend_datahora, pend_valor) " f"VALUES " f"({self.cliente.id}, {self.venda.id}, \'{self.veiculo.placa if self.veiculo.placa else 'null'}\', " f"\'{self.datahora}\', {self.valor})") conn.commit() cur.close() conn.close() def delete(self): conn = conexao() cur = conn.cursor() cur.execute(f"DELETE FROM pendencias WHERE pend_venda_id = {self.venda.id}") conn.commit() cur.close() conn.close() @staticmethod def todas_pendencias(): conn = conexao() cur = conn.cursor() cur.execute(f"SELECT pend_id, pend_venda_id, clie_nome, ROUND(pend_valor::numeric, 2) FROM pendencias " f"INNER JOIN cliente ON clie_id = pend_clie_id") row = cur.fetchall() conn.commit() cur.close() conn.close() return row def busca_pendencias_by_cliente(self, op): conn = conexao() cur = conn.cursor() cur.execute(f"SELECT pend_id, pend_venda_id, clie_nome, ROUND(pend_valor::numeric, 2) FROM pendencias " f"INNER JOIN cliente ON clie_id = pend_clie_id" f"AND clie_id {op} {self.cliente.id}") conn.commit() cur.close() conn.close() def busca_pendencias_by_id(self): conn = conexao() cur = conn.cursor() cur.execute(f"SELECT pend_id, pend_venda_id, clie_nome, ROUND(pend_valor::numeric, 2) 
FROM pendencias " f"INNER JOIN cliente ON clie_id = pend_clie_id" f"AND pend_id = {self.id}") conn.commit() cur.close() conn.close()
import logging from .common import extract_typed_props, PropertyMeta from ..builders.external_doc import ExternalDocBuilder from ..specification import Tag logger = logging.getLogger(__name__) class TagBuilder: _external_doc_builder: ExternalDocBuilder def __init__(self, external_doc_builder: ExternalDocBuilder) -> None: self._external_doc_builder = external_doc_builder def build_list(self, data_list: list) -> list[Tag]: return [self._build_tag(item) for item in data_list] def _build_tag(self, data: dict) -> Tag: logger.debug(f"Tag building [{data["name"]}]") attrs_map = { "name": PropertyMeta(name="name", cast=str), "description": PropertyMeta(name="description", cast=str), "external_docs": PropertyMeta(name="externalDocs", cast=self._external_doc_builder.build), } attrs = extract_typed_props(data, attrs_map) return Tag(**attrs)
import logging from .common import extract_typed_props, PropertyMeta from ..builders.external_doc import ExternalDocBuilder from ..specification import Tag logger = logging.getLogger(__name__) class TagBuilder: _external_doc_builder: ExternalDocBuilder def __init__(self, external_doc_builder: ExternalDocBuilder) -> None: self._external_doc_builder = external_doc_builder def build_list(self, data_list: list) -> list[Tag]: return [self._build_tag(item) for item in data_list] def _build_tag(self, data: dict) -> Tag: logger.debug(f"Tag building [{data['name']}]") attrs_map = { "name": PropertyMeta(name="name", cast=str), "description": PropertyMeta(name="description", cast=str), "external_docs": PropertyMeta(name="externalDocs", cast=self._external_doc_builder.build), } attrs = extract_typed_props(data, attrs_map) return Tag(**attrs)
######## #GRADED: 24/25 # Homework 3 # # MAKE SURE YOU ARE RUNNING THIS WITH PYTHON 3! # Python 2 will give a "Non-ASCII character" error # # Either use workon/mkvirtualenv to create an # environment or use the python3 command # ######## ######## # # Here is a programmer! # ######## programmer = { 'name': 'Christine', 'fish': 100, 'languages': ['C++', 'Ruby', 'Java', 'Python' ] } # 1. What kind of data structure (a.k.a. type) is programmer? Print it. print(type(programmer)) # 2. What keys does programmer have? Print it. print(programmer.keys()) # 3. Print the programmer's name. print(programmer['name']) # 4. If the programmer has more than 30 fish, print "The programmer owns a lot of fish." #If the programmer has 0 fish, say "the programmer has no fish." #If the programmer has between 1 and 30 fish, print "the programmer has a few fish." fish = programmer['fish'] if fish >= 1 and fish <= 30: print("The programmer has a few fish") if fish > 30: print("The programmer owns a lot of fish") elif fish == 0: print("The programmer has no fish") # 5. Print the sentence, "{programmer's name} knows {number of languages} languages") print(programmer['name'], "knows", len(programmer['languages']), "languages" ) # 6. Use a loop to print each language the programmer knows languages = programmer['languages'] for language in languages: print(language) ######## # # Here is a bunch of workers! # ######## company = { 'name': 'ACME Product Production Program', 'coders': [ { 'name': 'Lady Macbeth', 'languages': ['C++', 'Ruby', 'Java', 'Python' ] }, { 'name': 'Lothario', 'languages': ['C++'] }, { 'name': 'Ophelia', 'languages': [ 'Ruby', 'Erlang', 'Python' ] }, { 'name': 'Mercutio', 'languages': ['ASM', 'Python' ] } ], 'managers': [ { 'name': 'Alpha' }, { 'name': 'Beta' }, { 'name': 'Gamma' }, { 'name': 'Delta' } ] } # 7. What type is the company variable? What are its keys? print(type(company)) # 8. What data structure (a.k.a. type) is the 'coders' part of company? 
# 8. What data structure holds the coders?
print(company.keys())
# TA-STEPHAN: This just gives you the keys of company, not the data structure
# of coders. Try this: print(type(company['coders']))
print(type(company['coders']))  # TA's suggested fix: show the actual structure type

# 9. How many coders does the company have?
print(len(company['coders']))

# 10. Print the name of each manager.
managers = company['managers']
for manager in managers:
    print(manager['name'])

# 11. Print the number of languages each coder knows.
coders = company['coders']
for coder in coders:
    print(coder['name'], "knows", len(coder['languages']), "languages")

########
#
# Search results from Spotify for an artist named "Kendrick"
# https://api.spotify.com/v1/search?query=kendrick&limit=20&type=artist
#
########

# Raw JSON response from the Spotify search endpoint, pasted as a Python dict.
# Top level: {'artists': {...}} with paging fields ('offset', 'limit', 'next',
# 'previous', 'total') and the matching artists under 'items'.
artist_search_result = {'artists': {'offset': 0, 'next': 'https://api.spotify.com/v1/search?query=kendrick&offset=20&limit=20&type=artist', 'limit': 20, 'href': 'https://api.spotify.com/v1/search?query=kendrick&offset=0&limit=20&type=artist', 'previous': None, 'items': [{'images': [{'height': 1000, 'url': 'https://i.scdn.co/image/b1947120c60a8f2886c98faf52a61895821c7cf0', 'width': 1000}, {'height': 640, 'url': 'https://i.scdn.co/image/c50721f32900d561d44f38006208ab69717fe1f9', 'width': 640}, {'height': 200, 'url': 'https://i.scdn.co/image/762628b9c2bf991e6f9325522dab32c0cf7c06a2', 'width': 200}, {'height': 64, 'url': 'https://i.scdn.co/image/876101e8b1a981d5d6f9257f0f6ddd15087bdfd5', 'width': 64}], 'genres': ['alternative hip hop'], 'href': 'https://api.spotify.com/v1/artists/2YZyLoL8N0Wb9xBt1NhZWg', 'external_urls': {'spotify': 'https://open.spotify.com/artist/2YZyLoL8N0Wb9xBt1NhZWg'}, 'popularity': 84, 'type': 'artist', 'followers': {'href': None, 'total': 2454724}, 'name': 'Kendrick Lamar', 'uri': 'spotify:artist:2YZyLoL8N0Wb9xBt1NhZWg', 'id': '2YZyLoL8N0Wb9xBt1NhZWg'}, {'images': [{'height': 640, 'url': 'https://i.scdn.co/image/b6e825eb7039bb792a65b484b3d56064fb629ec8', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/1229558513a6881b2635c4b2954f8bd709415ae5', 'width': 300}, {'height': 64,
'url': 'https://i.scdn.co/image/1301ae674a679c1865b2ffc0702be296d86224fc', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/6xfqnpe2HnLVUaYXs2F8YS', 'external_urls': {'spotify': 'https://open.spotify.com/artist/6xfqnpe2HnLVUaYXs2F8YS'}, 'popularity': 57, 'type': 'artist', 'followers': {'href': None, 'total': 84080}, 'name': 'Anna Kendrick', 'uri': 'spotify:artist:6xfqnpe2HnLVUaYXs2F8YS', 'id': '6xfqnpe2HnLVUaYXs2F8YS'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/1iApxRdcW8Uok4htrDrvdY', 'external_urls': {'spotify': 'https://open.spotify.com/artist/1iApxRdcW8Uok4htrDrvdY'}, 'popularity': 45, 'type': 'artist', 'followers': {'href': None, 'total': 1764}, 'name': 'Tech N9ne feat. Kendrick Lamar, ¡Mayday!, Kendall Morgan', 'uri': 'spotify:artist:1iApxRdcW8Uok4htrDrvdY', 'id': '1iApxRdcW8Uok4htrDrvdY'}, {'images': [{'height': 640, 'url': 'https://i.scdn.co/image/3130aee8b99f3fd47e32c704f146eeafc2ad01fc', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/4547ade74391dcd3b3ca38afe820e5f44a5bddc7', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/5ae90745618ea45fe0e0e832feebecaab3dc2d14', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/1cjrBtunBfOLbXQ0OK1yEY', 'external_urls': {'spotify': 'https://open.spotify.com/artist/1cjrBtunBfOLbXQ0OK1yEY'}, 'popularity': 41, 'type': 'artist', 'followers': {'href': None, 'total': 7}, 'name': 'Edgar Kendricks', 'uri': 'spotify:artist:1cjrBtunBfOLbXQ0OK1yEY', 'id': '1cjrBtunBfOLbXQ0OK1yEY'}, {'images': [{'height': 1280, 'url': 'https://i.scdn.co/image/664f1a004773bd74a4ff5104818e4f383ef95a5e', 'width': 676}, {'height': 1212, 'url': 'https://i.scdn.co/image/d5eff2f40af987b8794a43b6df78a47f41e4dc8f', 'width': 640}, {'height': 379, 'url': 'https://i.scdn.co/image/04eacbd2e9a333aff1deb625512fef76cd60c754', 'width': 200}, {'height': 121, 'url': 'https://i.scdn.co/image/745fd75c3bb492e40f93835e233f3e80d4ab513a', 'width': 64}], 'genres':
['motown'], 'href': 'https://api.spotify.com/v1/artists/2Uuon75BhnuuxdKLYn4wHn', 'external_urls': {'spotify': 'https://open.spotify.com/artist/2Uuon75BhnuuxdKLYn4wHn'}, 'popularity': 39, 'type': 'artist', 'followers': {'href': None, 'total': 5310}, 'name': 'Eddie Kendricks', 'uri': 'spotify:artist:2Uuon75BhnuuxdKLYn4wHn', 'id': '2Uuon75BhnuuxdKLYn4wHn'}, {'images': [{'height': 640, 'url': 'https://i.scdn.co/image/14bfe97f0b355da905a49255991be8d72c96d49c', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/6c8c92a391746de3ac3f630180c74c7e363d0c97', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/d17dc6566044566cee5ad0b529df6320a0dcb065', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/2cKOuZYoNGwJ91GSVhUV9g', 'external_urls': {'spotify': 'https://open.spotify.com/artist/2cKOuZYoNGwJ91GSVhUV9g'}, 'popularity': 29, 'type': 'artist', 'followers': {'href': None, 'total': 81}, 'name': 'Kendrick', 'uri': 'spotify:artist:2cKOuZYoNGwJ91GSVhUV9g', 'id': '2cKOuZYoNGwJ91GSVhUV9g'}, {'images': [{'height': 635, 'url': 'https://i.scdn.co/image/70292a01a38948fa70e00b175e8d60ee33a40bc3', 'width': 950}, {'height': 428, 'url': 'https://i.scdn.co/image/b9537b37f129c1be5e8f3ba4efe3cac5b25f7636', 'width': 640}, {'height': 134, 'url': 'https://i.scdn.co/image/e73e70c68dc9d40799fa1a7865c7c2b56a56ae32', 'width': 200}, {'height': 43, 'url': 'https://i.scdn.co/image/3dccb1ab6811e5d59dd71ef664621e7f0aacd0b2', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/3xidVCWg60r8Wdm6g9VCux', 'external_urls': {'spotify': 'https://open.spotify.com/artist/3xidVCWg60r8Wdm6g9VCux'}, 'popularity': 36, 'type': 'artist', 'followers': {'href': None, 'total': 1031}, 'name': 'Kendrick Scott', 'uri': 'spotify:artist:3xidVCWg60r8Wdm6g9VCux', 'id': '3xidVCWg60r8Wdm6g9VCux'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/7Bin9s9lePTNx57vB5rHW8', 'external_urls': {'spotify':
'https://open.spotify.com/artist/7Bin9s9lePTNx57vB5rHW8'}, 'popularity': 24, 'type': 'artist', 'followers': {'href': None, 'total': 1}, 'name': 'Kendrick Small', 'uri': 'spotify:artist:7Bin9s9lePTNx57vB5rHW8', 'id': '7Bin9s9lePTNx57vB5rHW8'}, {'images': [{'height': 290, 'url': 'https://i.scdn.co/image/af863b35263ff14eb78218f371bef8a0e76f1de5', 'width': 1000}, {'height': 186, 'url': 'https://i.scdn.co/image/7f86627d478319b749db28e5029e8ef08f330759', 'width': 640}, {'height': 58, 'url': 'https://i.scdn.co/image/7f751ccee89a1f1b84d1a0cd1d37437c41bde338', 'width': 200}, {'height': 19, 'url': 'https://i.scdn.co/image/510ff1b14b3ae7688012ed56a4d201e2a6333e8e', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/1srLlKy0yVmQorLl9PhXbS', 'external_urls': {'spotify': 'https://open.spotify.com/artist/1srLlKy0yVmQorLl9PhXbS'}, 'popularity': 30, 'type': 'artist', 'followers': {'href': None, 'total': 3222}, 'name': 'Graham Kendrick', 'uri': 'spotify:artist:1srLlKy0yVmQorLl9PhXbS', 'id': '1srLlKy0yVmQorLl9PhXbS'}, {'images': [{'height': 640, 'url': 'https://i.scdn.co/image/801f01bc6446f7b97d656ee3a86702c642633c4f', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/ec3937428234fca86329588823b68b0e81aa2251', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/567b432948b028ed45d637d972c0058f2bf1bb91', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/5DOCFpRL15EQCkZDU3RcP8', 'external_urls': {'spotify': 'https://open.spotify.com/artist/5DOCFpRL15EQCkZDU3RcP8'}, 'popularity': 23, 'type': 'artist', 'followers': {'href': None, 'total': 69}, 'name': 'Temps & Eddie Kendricks', 'uri': 'spotify:artist:5DOCFpRL15EQCkZDU3RcP8', 'id': '5DOCFpRL15EQCkZDU3RcP8'}, {'images': [{'height': 667, 'url': 'https://i.scdn.co/image/555a8e287d0b50921f43773779ccc99f4eb14bd8', 'width': 1000}, {'height': 427, 'url': 'https://i.scdn.co/image/3cf168aefd1633f40f7021e44d2106d4a3c34f8c', 'width': 640}, {'height': 133, 'url':
'https://i.scdn.co/image/9d51baf1e5d9bd05e7e20117f6f0bbac4ede8ad2', 'width': 200}, {'height': 43, 'url': 'https://i.scdn.co/image/030909e0a8dc938af89c7a030e0b204fbd46f11d', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/0IyuDlCVbMa3TAoVaDKEeL', 'external_urls': {'spotify': 'https://open.spotify.com/artist/0IyuDlCVbMa3TAoVaDKEeL'}, 'popularity': 19, 'type': 'artist', 'followers': {'href': None, 'total': 1471}, 'name': 'Kendrick Scott Oracle', 'uri': 'spotify:artist:0IyuDlCVbMa3TAoVaDKEeL', 'id': '0IyuDlCVbMa3TAoVaDKEeL'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/6FnUiliI9F1f2V9THnXxpu', 'external_urls': {'spotify': 'https://open.spotify.com/artist/6FnUiliI9F1f2V9THnXxpu'}, 'popularity': 10, 'type': 'artist', 'followers': {'href': None, 'total': 93}, 'name': 'Solange feat. Kendrick Lamar', 'uri': 'spotify:artist:6FnUiliI9F1f2V9THnXxpu', 'id': '6FnUiliI9F1f2V9THnXxpu'}, {'images': [{'height': 640, 'url': 'https://i.scdn.co/image/0724aac29dcf4876b54f01a2813365b92343ed5a', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/c6e8964165d08bc9cb2fc05d68439930db61c890', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/d26bc9756a0c7679f095c9e0e8e13dc9b39febde', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/7by6up72jjsUGwmmMitGr1', 'external_urls': {'spotify': 'https://open.spotify.com/artist/7by6up72jjsUGwmmMitGr1'}, 'popularity': 9, 'type': 'artist', 'followers': {'href': None, 'total': 32}, 'name': 'The Kendricks', 'uri': 'spotify:artist:7by6up72jjsUGwmmMitGr1', 'id': '7by6up72jjsUGwmmMitGr1'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/1WscexgNxCyVt7Bx5pmsUg', 'external_urls': {'spotify': 'https://open.spotify.com/artist/1WscexgNxCyVt7Bx5pmsUg'}, 'popularity': 11, 'type': 'artist', 'followers': {'href': None, 'total': 39}, 'name': 'Richard Kendrick', 'uri': 'spotify:artist:1WscexgNxCyVt7Bx5pmsUg', 'id': '1WscexgNxCyVt7Bx5pmsUg'},
{'images': [{'height': 640, 'url': 'https://i.scdn.co/image/12a0f2aa81ccde7f63fb02417f44c8de99df1087', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/5a75162ef05c05e950b42d863ca7a811386a97b0', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/64f475b3531667c58f1ef02f1774ec7697b1ac81', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/4UnJ85AumWoUuGnOpLEnl7', 'external_urls': {'spotify': 'https://open.spotify.com/artist/4UnJ85AumWoUuGnOpLEnl7'}, 'popularity': 10, 'type': 'artist', 'followers': {'href': None, 'total': 58}, 'name': 'Darnell Kendricks', 'uri': 'spotify:artist:4UnJ85AumWoUuGnOpLEnl7', 'id': '4UnJ85AumWoUuGnOpLEnl7'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/7kj1cdDalpyc3rURdJx8b9', 'external_urls': {'spotify': 'https://open.spotify.com/artist/7kj1cdDalpyc3rURdJx8b9'}, 'popularity': 7, 'type': 'artist', 'followers': {'href': None, 'total': 54}, 'name': 'Eddie Kendrick', 'uri': 'spotify:artist:7kj1cdDalpyc3rURdJx8b9', 'id': '7kj1cdDalpyc3rURdJx8b9'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/2cUIfFEsQgmPlYt75T2Lvy', 'external_urls': {'spotify': 'https://open.spotify.com/artist/2cUIfFEsQgmPlYt75T2Lvy'}, 'popularity': 9, 'type': 'artist', 'followers': {'href': None, 'total': 14}, 'name': 'Alex Kendrick', 'uri': 'spotify:artist:2cUIfFEsQgmPlYt75T2Lvy', 'id': '2cUIfFEsQgmPlYt75T2Lvy'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/3M8jTGG0AItu4XYotTlC6M', 'external_urls': {'spotify': 'https://open.spotify.com/artist/3M8jTGG0AItu4XYotTlC6M'}, 'popularity': 5, 'type': 'artist', 'followers': {'href': None, 'total': 451}, 'name': 'Kendrick Lamar & Jay Rock', 'uri': 'spotify:artist:3M8jTGG0AItu4XYotTlC6M', 'id': '3M8jTGG0AItu4XYotTlC6M'}, {'images': [{'height': 600, 'url': 'https://i.scdn.co/image/b388ac8f9a6fef30800af948440f81020bec6ea6', 'width': 411}, {'height': 292, 'url':
'https://i.scdn.co/image/9ede141bfd29ffac1984c61331191e39b7d92e12', 'width': 200}, {'height': 93, 'url': 'https://i.scdn.co/image/eff9add3f6d333717bd051d75721e518596c2719', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/46uaPpO9BqSSlLvSXFwyJs', 'external_urls': {'spotify': 'https://open.spotify.com/artist/46uaPpO9BqSSlLvSXFwyJs'}, 'popularity': 5, 'type': 'artist', 'followers': {'href': None, 'total': 66}, 'name': 'Charlotte Kendrick', 'uri': 'spotify:artist:46uaPpO9BqSSlLvSXFwyJs', 'id': '46uaPpO9BqSSlLvSXFwyJs'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/63rMSwRscrbMp9u0MhfDzK', 'external_urls': {'spotify': 'https://open.spotify.com/artist/63rMSwRscrbMp9u0MhfDzK'}, 'popularity': 5, 'type': 'artist', 'followers': {'href': None, 'total': 1}, 'name': 'Will Kendrick', 'uri': 'spotify:artist:63rMSwRscrbMp9u0MhfDzK', 'id': '63rMSwRscrbMp9u0MhfDzK'}], 'total': 160}}

# 12. What is the data type of the search result? Print it.
print(type(artist_search_result))

# 13. What are all of the keys that the search result has?
print(artist_search_result.keys())

# 14. Take a look at 'artists' - what keys does it have?
print(artist_search_result['artists'].keys())

# 15. Using len() with something-or-other would show me how many results I
# CURRENTLY have, but I want to know the TOTAL number of results Spotify has
# for my search result. From looking at the names of the keys under 'artists',
# how many total results are there?
# ('total' is Spotify's overall match count; 'items' only holds this page.)
artists = artist_search_result['artists']
print(artists['total'])

# 16. How popular is Kendrick Lamar vs. Anna Kendrick? Use a for loop to list
# the names and popularity of every artist.
#####JUST CHECKING#### print(artists['items']) --> found name and popularity here artists_info = (artists['items']) for artist in artists_info: print(artist['name'],"has a popularity of", artist['popularity'] ) Kendrick_Lamar = (artists_info[0]['name']) Kendrick_Lamar_popularity = (artists_info[0]['popularity']) Anna_Kendrick = (artists_info[1]['name']) Anna_Kendrick_popularity= (artists_info[1]['popularity']) print(Kendrick_Lamar, "has a popularity of", Kendrick_Lamar_popularity, "and", Anna_Kendrick, "has a popularity of", Anna_Kendrick_popularity) ######## # # Search results from Spotify for a playlist including the term "90s" # https://api.spotify.com/v1/search?query=90s&limit=20&type=playlist # ######## playlist_search_result = {'playlists': {'offset': 0, 'next': 'https://api.spotify.com/v1/search?query=90s&offset=20&limit=20&type=playlist', 'limit': 20, 'href': 'https://api.spotify.com/v1/search?query=90s&offset=0&limit=20&type=playlist', 'previous': None, 'items': [{'public': None, 'snapshot_id': 'X2zFSzFviruyjSFdaByIuiT9se/LKmkQFWbqY+NzH+TQ1Sj4rH0Q/0WxQlKrNvKw', 'id': '5TcHWbnN6SIhvPY1MXMDrb', 'tracks': {'href': 'https://api.spotify.com/v1/users/sam85uk/playlists/5TcHWbnN6SIhvPY1MXMDrb/tracks', 'total': 937}, 'external_urls': {'spotify': 'http://open.spotify.com/user/sam85uk/playlist/5TcHWbnN6SIhvPY1MXMDrb'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/sam85uk'}, 'type': 'user', 'uri': 'spotify:user:sam85uk', 'href': 'https://api.spotify.com/v1/users/sam85uk', 'id': 'sam85uk'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/08d18e9d8c7d49cbb1eb609998fff55741cf54f61ebd9af955a76278cbfc1959ed78ab4a604123186abcdf6587a26b505a0816d7b3ff9fba4be070344bc3fa03ea66c557fa889ee9684ddc8b2ed94a5e', 'width': 640}, {'height': 300, 'url': 
'https://mosaic.scdn.co/300/08d18e9d8c7d49cbb1eb609998fff55741cf54f61ebd9af955a76278cbfc1959ed78ab4a604123186abcdf6587a26b505a0816d7b3ff9fba4be070344bc3fa03ea66c557fa889ee9684ddc8b2ed94a5e', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/08d18e9d8c7d49cbb1eb609998fff55741cf54f61ebd9af955a76278cbfc1959ed78ab4a604123186abcdf6587a26b505a0816d7b3ff9fba4be070344bc3fa03ea66c557fa889ee9684ddc8b2ed94a5e', 'width': 60}], 'uri': 'spotify:user:sam85uk:playlist:5TcHWbnN6SIhvPY1MXMDrb', 'name': '90s', 'href': 'https://api.spotify.com/v1/users/sam85uk/playlists/5TcHWbnN6SIhvPY1MXMDrb'}, {'public': None, 'snapshot_id': 'vs/L+N+xSmk4giRCCNAsNK4hljW++tS/RHSq0HJbYqWCXo65jGmGpT9ssHu5GgEh', 'id': '6OugflVBHYjm6HIjCObsz4', 'tracks': {'href': 'https://api.spotify.com/v1/users/1220462882/playlists/6OugflVBHYjm6HIjCObsz4/tracks', 'total': 924}, 'external_urls': {'spotify': 'http://open.spotify.com/user/1220462882/playlist/6OugflVBHYjm6HIjCObsz4'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/1220462882'}, 'type': 'user', 'uri': 'spotify:user:1220462882', 'href': 'https://api.spotify.com/v1/users/1220462882', 'id': '1220462882'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/81987cf55db3b188d996f89b41a724b9d82311c37fb148a7b28456487198e98997eb7a7462ca8c8c214a89248160db7677ea145da83b29bf01c189fb85c86129145f848525b858bc3557161db5654ded', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/81987cf55db3b188d996f89b41a724b9d82311c37fb148a7b28456487198e98997eb7a7462ca8c8c214a89248160db7677ea145da83b29bf01c189fb85c86129145f848525b858bc3557161db5654ded', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/81987cf55db3b188d996f89b41a724b9d82311c37fb148a7b28456487198e98997eb7a7462ca8c8c214a89248160db7677ea145da83b29bf01c189fb85c86129145f848525b858bc3557161db5654ded', 'width': 60}], 'uri': 'spotify:user:1220462882:playlist:6OugflVBHYjm6HIjCObsz4', 'name': '90s country', 
'href': 'https://api.spotify.com/v1/users/1220462882/playlists/6OugflVBHYjm6HIjCObsz4'}, {'public': None, 'snapshot_id': 'ZMcQcP9SEc7jKFwoEO97LcQzLn0iBMcC2NjnFKEcbpKLXkbc7f6n1yjdXitIg32A', 'id': '5e1bpazQUEHijFhcJobkAp', 'tracks': {'href': 'https://api.spotify.com/v1/users/luccyyy/playlists/5e1bpazQUEHijFhcJobkAp/tracks', 'total': 164}, 'external_urls': {'spotify': 'http://open.spotify.com/user/luccyyy/playlist/5e1bpazQUEHijFhcJobkAp'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/luccyyy'}, 'type': 'user', 'uri': 'spotify:user:luccyyy', 'href': 'https://api.spotify.com/v1/users/luccyyy', 'id': 'luccyyy'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/7cc30bbf3935c30f795c99b19c1f7ad8f0427cff5ed7a60280fe4adabb6404581d36c8eac93ea0f90d69e33e5e05c63fe4a9d3146771ec7e6f052dceec4f7a63269a7997b385763abc036f483ac46212', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/7cc30bbf3935c30f795c99b19c1f7ad8f0427cff5ed7a60280fe4adabb6404581d36c8eac93ea0f90d69e33e5e05c63fe4a9d3146771ec7e6f052dceec4f7a63269a7997b385763abc036f483ac46212', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/7cc30bbf3935c30f795c99b19c1f7ad8f0427cff5ed7a60280fe4adabb6404581d36c8eac93ea0f90d69e33e5e05c63fe4a9d3146771ec7e6f052dceec4f7a63269a7997b385763abc036f483ac46212', 'width': 60}], 'uri': 'spotify:user:luccyyy:playlist:5e1bpazQUEHijFhcJobkAp', 'name': ''90s Rock', 'href': 'https://api.spotify.com/v1/users/luccyyy/playlists/5e1bpazQUEHijFhcJobkAp'}, {'public': None, 'snapshot_id': '5JWrVBSsfjaBGPt2OGlZ8ZJgkI9xB39TC5ISrrK/yYuFvgzOIji3eeHWfCenGpbI', 'id': '5v6cPhjJxSgNn6Aluh0lqV', 'tracks': {'href': 'https://api.spotify.com/v1/users/crockstarltd/playlists/5v6cPhjJxSgNn6Aluh0lqV/tracks', 'total': 137}, 'external_urls': {'spotify': 'http://open.spotify.com/user/crockstarltd/playlist/5v6cPhjJxSgNn6Aluh0lqV'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 
'http://open.spotify.com/user/crockstarltd'}, 'type': 'user', 'uri': 'spotify:user:crockstarltd', 'href': 'https://api.spotify.com/v1/users/crockstarltd', 'id': 'crockstarltd'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/d8f6b63b7df6e7cccf92044d706a377e9803d9d73d35d425a1f8b25cada6937470e7e258d7835dacae29a6ee0fc15e797f94e7f30588cdcfaf7febb0e15a74a0c972959dbb9d597acf98868cb95b0664', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/d8f6b63b7df6e7cccf92044d706a377e9803d9d73d35d425a1f8b25cada6937470e7e258d7835dacae29a6ee0fc15e797f94e7f30588cdcfaf7febb0e15a74a0c972959dbb9d597acf98868cb95b0664', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/d8f6b63b7df6e7cccf92044d706a377e9803d9d73d35d425a1f8b25cada6937470e7e258d7835dacae29a6ee0fc15e797f94e7f30588cdcfaf7febb0e15a74a0c972959dbb9d597acf98868cb95b0664', 'width': 60}], 'uri': 'spotify:user:crockstarltd:playlist:5v6cPhjJxSgNn6Aluh0lqV', 'name': '90s alternative', 'href': 'https://api.spotify.com/v1/users/crockstarltd/playlists/5v6cPhjJxSgNn6Aluh0lqV'}, {'public': None, 'snapshot_id': 'NNzGf/01IMRYYmlNZhEehdkG8FONeb5xdyiUoVUVUsk5hovm++HAO4+UYckAlz7F', 'id': '3wneNouatgyKcqjPJtBP2L', 'tracks': {'href': 'https://api.spotify.com/v1/users/macaulaymeudt92309/playlists/3wneNouatgyKcqjPJtBP2L/tracks', 'total': 815}, 'external_urls': {'spotify': 'http://open.spotify.com/user/macaulaymeudt92309/playlist/3wneNouatgyKcqjPJtBP2L'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/macaulaymeudt92309'}, 'type': 'user', 'uri': 'spotify:user:macaulaymeudt92309', 'href': 'https://api.spotify.com/v1/users/macaulaymeudt92309', 'id': 'macaulaymeudt92309'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/4a589f44ef1c9988ef3441b8b40fa1ea66b7b7300acfcd6f1b85b23a79fc4d678a9368f27c110d0238a9cb53d4ebe717af8921e8d86cbfacdb88b9ea0db70a83d7a6f026c20efd426fa3dea6f369d385', 'width': 640}, {'height': 300, 
'url': 'https://mosaic.scdn.co/300/4a589f44ef1c9988ef3441b8b40fa1ea66b7b7300acfcd6f1b85b23a79fc4d678a9368f27c110d0238a9cb53d4ebe717af8921e8d86cbfacdb88b9ea0db70a83d7a6f026c20efd426fa3dea6f369d385', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/4a589f44ef1c9988ef3441b8b40fa1ea66b7b7300acfcd6f1b85b23a79fc4d678a9368f27c110d0238a9cb53d4ebe717af8921e8d86cbfacdb88b9ea0db70a83d7a6f026c20efd426fa3dea6f369d385', 'width': 60}], 'uri': 'spotify:user:macaulaymeudt92309:playlist:3wneNouatgyKcqjPJtBP2L', 'name': ''90s Hits ', 'href': 'https://api.spotify.com/v1/users/macaulaymeudt92309/playlists/3wneNouatgyKcqjPJtBP2L'}, {'public': None, 'snapshot_id': 'iACKRBrA4eZtFBpk0oCrYEJvBNaSOr2utdXOi2mwuGXMmMv/utD0w/hyLhEkHxDI', 'id': '29jIRPHN6igNaKMwsMQudX', 'tracks': {'href': 'https://api.spotify.com/v1/users/12445745/playlists/29jIRPHN6igNaKMwsMQudX/tracks', 'total': 185}, 'external_urls': {'spotify': 'http://open.spotify.com/user/12445745/playlist/29jIRPHN6igNaKMwsMQudX'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/12445745'}, 'type': 'user', 'uri': 'spotify:user:12445745', 'href': 'https://api.spotify.com/v1/users/12445745', 'id': '12445745'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/0cd7ff9cc859ff202ed95892ed85747591aea1e54a33a0da119619c814576f290b8a0a56a4683df0976c2e58bf940880b7a083e3cd343068975e63b47433313c5921731df38907118b14b7101c15dec7', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/0cd7ff9cc859ff202ed95892ed85747591aea1e54a33a0da119619c814576f290b8a0a56a4683df0976c2e58bf940880b7a083e3cd343068975e63b47433313c5921731df38907118b14b7101c15dec7', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/0cd7ff9cc859ff202ed95892ed85747591aea1e54a33a0da119619c814576f290b8a0a56a4683df0976c2e58bf940880b7a083e3cd343068975e63b47433313c5921731df38907118b14b7101c15dec7', 'width': 60}], 'uri': 'spotify:user:12445745:playlist:29jIRPHN6igNaKMwsMQudX', 'name': 
'90s RnB', 'href': 'https://api.spotify.com/v1/users/12445745/playlists/29jIRPHN6igNaKMwsMQudX'}, {'public': None, 'snapshot_id': 'MiF9QytElWJwgY1nXZlaw8HJMF/mPV+UYdk35yEs93gqHPpFW8YAxZs90asJ3yBd', 'id': '3QmFUqc1Io68CVD2NL2iLM', 'tracks': {'href': 'https://api.spotify.com/v1/users/1297361113/playlists/3QmFUqc1Io68CVD2NL2iLM/tracks', 'total': 281}, 'external_urls': {'spotify': 'http://open.spotify.com/user/1297361113/playlist/3QmFUqc1Io68CVD2NL2iLM'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/1297361113'}, 'type': 'user', 'uri': 'spotify:user:1297361113', 'href': 'https://api.spotify.com/v1/users/1297361113', 'id': '1297361113'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/28961f2d68c3477b4c86d2fee6e00178f32851eb676a05f833234b5273fd35b908a4cc57105fd744d6b045d91a974b3f77b5724a93c0f5c5188e616bdcab2d136873cfbdb5feac7539897d6774bf83d3', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/28961f2d68c3477b4c86d2fee6e00178f32851eb676a05f833234b5273fd35b908a4cc57105fd744d6b045d91a974b3f77b5724a93c0f5c5188e616bdcab2d136873cfbdb5feac7539897d6774bf83d3', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/28961f2d68c3477b4c86d2fee6e00178f32851eb676a05f833234b5273fd35b908a4cc57105fd744d6b045d91a974b3f77b5724a93c0f5c5188e616bdcab2d136873cfbdb5feac7539897d6774bf83d3', 'width': 60}], 'uri': 'spotify:user:1297361113:playlist:3QmFUqc1Io68CVD2NL2iLM', 'name': 'ROCK 90S INGLES', 'href': 'https://api.spotify.com/v1/users/1297361113/playlists/3QmFUqc1Io68CVD2NL2iLM'}, {'public': None, 'snapshot_id': 'n6rtZGQ+Y7BG/Y+w7awcPmbn2Tus4ahRSOzDF67JzqtTqYjSMV8x58WQUUySFRls', 'id': '5772HGqmp2E99GQo5tfmcJ', 'tracks': {'href': 'https://api.spotify.com/v1/users/19jconnell79/playlists/5772HGqmp2E99GQo5tfmcJ/tracks', 'total': 211}, 'external_urls': {'spotify': 'http://open.spotify.com/user/19jconnell79/playlist/5772HGqmp2E99GQo5tfmcJ'}, 'type': 'playlist', 'owner': {'external_urls': 
{'spotify': 'http://open.spotify.com/user/19jconnell79'}, 'type': 'user', 'uri': 'spotify:user:19jconnell79', 'href': 'https://api.spotify.com/v1/users/19jconnell79', 'id': '19jconnell79'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/21ffd457485a3a428e38d3b00b399cbd917a0535f36a53eb6401ff8f827518170143df42f4e7e82282cbb8cfabbd840774f1d865415d293227982b2730b5feda36f583b4772709babe690eee17fc99ec', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/21ffd457485a3a428e38d3b00b399cbd917a0535f36a53eb6401ff8f827518170143df42f4e7e82282cbb8cfabbd840774f1d865415d293227982b2730b5feda36f583b4772709babe690eee17fc99ec', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/21ffd457485a3a428e38d3b00b399cbd917a0535f36a53eb6401ff8f827518170143df42f4e7e82282cbb8cfabbd840774f1d865415d293227982b2730b5feda36f583b4772709babe690eee17fc99ec', 'width': 60}], 'uri': 'spotify:user:19jconnell79:playlist:5772HGqmp2E99GQo5tfmcJ', 'name': '90s rap/hip hop', 'href': 'https://api.spotify.com/v1/users/19jconnell79/playlists/5772HGqmp2E99GQo5tfmcJ'}, {'public': None, 'snapshot_id': 'SXYTRsNxGol+X5UGoW5s8NxYHyPD9aRb4l+WyIIFDXvBqF0BHVnoghcUzvM4qELA', 'id': '6PtCrShmNnLSP1Vr4PAyVm', 'tracks': {'href': 'https://api.spotify.com/v1/users/spotifyaustralia/playlists/6PtCrShmNnLSP1Vr4PAyVm/tracks', 'total': 50}, 'external_urls': {'spotify': 'http://open.spotify.com/user/spotifyaustralia/playlist/6PtCrShmNnLSP1Vr4PAyVm'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/spotifyaustralia'}, 'type': 'user', 'uri': 'spotify:user:spotifyaustralia', 'href': 'https://api.spotify.com/v1/users/spotifyaustralia', 'id': 'spotifyaustralia'}, 'collaborative': False, 'images': [{'height': None, 'url': 'https://u.scdn.co/images/pl/default/2bb39ac6d272a86f9fb377d26249c942b6138a97', 'width': None}], 'uri': 'spotify:user:spotifyaustralia:playlist:6PtCrShmNnLSP1Vr4PAyVm', 'name': '#ThrowbackThursday', 'href': 
'https://api.spotify.com/v1/users/spotifyaustralia/playlists/6PtCrShmNnLSP1Vr4PAyVm'}, {'public': None, 'snapshot_id': 'uUQscWhcfXEt0hICBieJ3bq5hST6l4P+cAEFMDNlZPhRmU7CxbXNg75BMxph1fxD', 'id': '5TvJd5fV2RMgv87mdd516L', 'tracks': {'href': 'https://api.spotify.com/v1/users/jordancstone/playlists/5TvJd5fV2RMgv87mdd516L/tracks', 'total': 310}, 'external_urls': {'spotify': 'http://open.spotify.com/user/jordancstone/playlist/5TvJd5fV2RMgv87mdd516L'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/jordancstone'}, 'type': 'user', 'uri': 'spotify:user:jordancstone', 'href': 'https://api.spotify.com/v1/users/jordancstone', 'id': 'jordancstone'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/46363292538df1e91a597dd30584a4ca41cb6bd9bf3b966b83f90362e502ca3fa0a1a7919759ac9650eef0041184673da90f5a2818f302e810cb1dac75638ccc8f57667af2d620defa755f6ffdd795fb', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/46363292538df1e91a597dd30584a4ca41cb6bd9bf3b966b83f90362e502ca3fa0a1a7919759ac9650eef0041184673da90f5a2818f302e810cb1dac75638ccc8f57667af2d620defa755f6ffdd795fb', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/46363292538df1e91a597dd30584a4ca41cb6bd9bf3b966b83f90362e502ca3fa0a1a7919759ac9650eef0041184673da90f5a2818f302e810cb1dac75638ccc8f57667af2d620defa755f6ffdd795fb', 'width': 60}], 'uri': 'spotify:user:jordancstone:playlist:5TvJd5fV2RMgv87mdd516L', 'name': '90s ROCK (300 tracks)', 'href': 'https://api.spotify.com/v1/users/jordancstone/playlists/5TvJd5fV2RMgv87mdd516L'}, {'public': None, 'snapshot_id': 'CUhnTrVMGslFOFW2i9ncLzbo39Qu/vNIAuRdXEea1NP3SPMlu9PHd2PnrYX3BYHR', 'id': '35iftafjBbC2wWKIgelOf6', 'tracks': {'href': 'https://api.spotify.com/v1/users/spotify_uk_/playlists/35iftafjBbC2wWKIgelOf6/tracks', 'total': 70}, 'external_urls': {'spotify': 'http://open.spotify.com/user/spotify_uk_/playlist/35iftafjBbC2wWKIgelOf6'}, 'type': 'playlist', 'owner': 
{'external_urls': {'spotify': 'http://open.spotify.com/user/spotify_uk_'}, 'type': 'user', 'uri': 'spotify:user:spotify_uk_', 'href': 'https://api.spotify.com/v1/users/spotify_uk_', 'id': 'spotify_uk_'}, 'collaborative': False, 'images': [{'height': None, 'url': 'https://u.scdn.co/images/pl/default/f6148a7d6511c45c3099dfbaa9a0c7af4c1de77b', 'width': None}], 'uri': 'spotify:user:spotify_uk_:playlist:35iftafjBbC2wWKIgelOf6', 'name': '90s R&B Anthems', 'href': 'https://api.spotify.com/v1/users/spotify_uk_/playlists/35iftafjBbC2wWKIgelOf6'}, {'public': None, 'snapshot_id': 'EIuQGXw7ea1XtV0NeUT/RhLnudL0cW2ORvZl9fBh+pJHIgR1gsm8eH0N4Oy/QeLP', 'id': '5yGuoOwRQF3o8NVRRlvCj7', 'tracks': {'href': 'https://api.spotify.com/v1/users/truckasaurus1/playlists/5yGuoOwRQF3o8NVRRlvCj7/tracks', 'total': 98}, 'external_urls': {'spotify': 'http://open.spotify.com/user/truckasaurus1/playlist/5yGuoOwRQF3o8NVRRlvCj7'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/truckasaurus1'}, 'type': 'user', 'uri': 'spotify:user:truckasaurus1', 'href': 'https://api.spotify.com/v1/users/truckasaurus1', 'id': 'truckasaurus1'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/9c37cfe8125864774ccb36f076cba3fcfcaa2db30477a78fbbb6623f43dffcd56eb12d7a5562bec1d62bff10f0ef84fd3cf34a73172e52de6816871daab64767f8c06174e7842db40be0a9b8ab1f519e', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/9c37cfe8125864774ccb36f076cba3fcfcaa2db30477a78fbbb6623f43dffcd56eb12d7a5562bec1d62bff10f0ef84fd3cf34a73172e52de6816871daab64767f8c06174e7842db40be0a9b8ab1f519e', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/9c37cfe8125864774ccb36f076cba3fcfcaa2db30477a78fbbb6623f43dffcd56eb12d7a5562bec1d62bff10f0ef84fd3cf34a73172e52de6816871daab64767f8c06174e7842db40be0a9b8ab1f519e', 'width': 60}], 'uri': 'spotify:user:truckasaurus1:playlist:5yGuoOwRQF3o8NVRRlvCj7', 'name': '90s Hiphop / Gangsta Rap', 'href': 
'https://api.spotify.com/v1/users/truckasaurus1/playlists/5yGuoOwRQF3o8NVRRlvCj7'}, {'public': None, 'snapshot_id': 'lSl+DalM6hI5UJfkejtCwkcTyKjlQbDOhTrvI9niH7m+zX+hR5jswQegrji0+DjU', 'id': '6jpj4xWiZe6891Ba3TADOO', 'tracks': {'href': 'https://api.spotify.com/v1/users/jec904/playlists/6jpj4xWiZe6891Ba3TADOO/tracks', 'total': 219}, 'external_urls': {'spotify': 'http://open.spotify.com/user/jec904/playlist/6jpj4xWiZe6891Ba3TADOO'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/jec904'}, 'type': 'user', 'uri': 'spotify:user:jec904', 'href': 'https://api.spotify.com/v1/users/jec904', 'id': 'jec904'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/5fc4b1d3fa12a473b72599a2d950ad5696b206329589ba01e4797b8d98859e79e7f70147f83e86d93b3bba0931af7fdff4a5a197ae9ccd50cc2b4dfacc178aa97cc7e64e9672a8eef88b631484d02c84', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/5fc4b1d3fa12a473b72599a2d950ad5696b206329589ba01e4797b8d98859e79e7f70147f83e86d93b3bba0931af7fdff4a5a197ae9ccd50cc2b4dfacc178aa97cc7e64e9672a8eef88b631484d02c84', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/5fc4b1d3fa12a473b72599a2d950ad5696b206329589ba01e4797b8d98859e79e7f70147f83e86d93b3bba0931af7fdff4a5a197ae9ccd50cc2b4dfacc178aa97cc7e64e9672a8eef88b631484d02c84', 'width': 60}], 'uri': 'spotify:user:jec904:playlist:6jpj4xWiZe6891Ba3TADOO', 'name': '90s Country', 'href': 'https://api.spotify.com/v1/users/jec904/playlists/6jpj4xWiZe6891Ba3TADOO'}, {'public': None, 'snapshot_id': 'NAzpFjabibSDSRIlNQM4LtPGVLyP14W+PtFemfOZ3WIDa1t+tkBeZIE3ubESfD8z', 'id': '0QEvLnkCV2hQx8AVs9VSCq', 'tracks': {'href': 'https://api.spotify.com/v1/users/12145536328/playlists/0QEvLnkCV2hQx8AVs9VSCq/tracks', 'total': 70}, 'external_urls': {'spotify': 'http://open.spotify.com/user/12145536328/playlist/0QEvLnkCV2hQx8AVs9VSCq'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/12145536328'}, 
'type': 'user', 'uri': 'spotify:user:12145536328', 'href': 'https://api.spotify.com/v1/users/12145536328', 'id': '12145536328'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/29f1d179c8ab0184f21bf015002de2214754e9d24cfe43a6693027f8b2188806e62c1831fd2cd68b4087e6a8c34321039140c1e13d69614bb1c8f20091142eab23c3273fa73e490f2d04464c69423f75', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/29f1d179c8ab0184f21bf015002de2214754e9d24cfe43a6693027f8b2188806e62c1831fd2cd68b4087e6a8c34321039140c1e13d69614bb1c8f20091142eab23c3273fa73e490f2d04464c69423f75', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/29f1d179c8ab0184f21bf015002de2214754e9d24cfe43a6693027f8b2188806e62c1831fd2cd68b4087e6a8c34321039140c1e13d69614bb1c8f20091142eab23c3273fa73e490f2d04464c69423f75', 'width': 60}], 'uri': 'spotify:user:12145536328:playlist:0QEvLnkCV2hQx8AVs9VSCq', 'name': 'Boybands 90s', 'href': 'https://api.spotify.com/v1/users/12145536328/playlists/0QEvLnkCV2hQx8AVs9VSCq'}, {'public': None, 'snapshot_id': 'xFuRVW3gHlQ2duHjA8Y+n+5dEsl5aewbp0tveVWMXXpWImllg5+tXPpuoNSh+c2Q', 'id': '3XctgNePp5NFKiaIYB8z2F', 'tracks': {'href': 'https://api.spotify.com/v1/users/gunnerwaddell/playlists/3XctgNePp5NFKiaIYB8z2F/tracks', 'total': 119}, 'external_urls': {'spotify': 'http://open.spotify.com/user/gunnerwaddell/playlist/3XctgNePp5NFKiaIYB8z2F'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/gunnerwaddell'}, 'type': 'user', 'uri': 'spotify:user:gunnerwaddell', 'href': 'https://api.spotify.com/v1/users/gunnerwaddell', 'id': 'gunnerwaddell'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/f0264d0abd5d237a49f797cfe1752a69d28a9d1e0b0e75514b3dca50bd0175290bdf10c77adaaeb620f6ec66d8b8bad42954ddaf3798af90b9b8b434a45505776adb49c1f554ea3d272513621bd6284f', 'width': 640}, {'height': 300, 'url': 
'https://mosaic.scdn.co/300/f0264d0abd5d237a49f797cfe1752a69d28a9d1e0b0e75514b3dca50bd0175290bdf10c77adaaeb620f6ec66d8b8bad42954ddaf3798af90b9b8b434a45505776adb49c1f554ea3d272513621bd6284f', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/f0264d0abd5d237a49f797cfe1752a69d28a9d1e0b0e75514b3dca50bd0175290bdf10c77adaaeb620f6ec66d8b8bad42954ddaf3798af90b9b8b434a45505776adb49c1f554ea3d272513621bd6284f', 'width': 60}], 'uri': 'spotify:user:gunnerwaddell:playlist:3XctgNePp5NFKiaIYB8z2F', 'name': '90s early 2000 country', 'href': 'https://api.spotify.com/v1/users/gunnerwaddell/playlists/3XctgNePp5NFKiaIYB8z2F'}, {'public': None, 'snapshot_id': 'aSYk2sFcfUQitS99gpCYw+eohvnlA+o8kNcfMHDb7eB4+bvyV1hLAW7M+lMNSaYv', 'id': '7zK2WjuX5otv9au92VXsKc', 'tracks': {'href': 'https://api.spotify.com/v1/users/1220108378/playlists/7zK2WjuX5otv9au92VXsKc/tracks', 'total': 662}, 'external_urls': {'spotify': 'http://open.spotify.com/user/1220108378/playlist/7zK2WjuX5otv9au92VXsKc'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/1220108378'}, 'type': 'user', 'uri': 'spotify:user:1220108378', 'href': 'https://api.spotify.com/v1/users/1220108378', 'id': '1220108378'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/53e089d368f4ffe69ac66ef7acd03cf273db9f234bc738a988570b3ad00ff8f756632b8ad90fc58813f894c1300cbdda027ff948ee963640c5126d2968eeccd1b4822115c647069e0de419b86545fbb1', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/53e089d368f4ffe69ac66ef7acd03cf273db9f234bc738a988570b3ad00ff8f756632b8ad90fc58813f894c1300cbdda027ff948ee963640c5126d2968eeccd1b4822115c647069e0de419b86545fbb1', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/53e089d368f4ffe69ac66ef7acd03cf273db9f234bc738a988570b3ad00ff8f756632b8ad90fc58813f894c1300cbdda027ff948ee963640c5126d2968eeccd1b4822115c647069e0de419b86545fbb1', 'width': 60}], 'uri': 
'spotify:user:1220108378:playlist:7zK2WjuX5otv9au92VXsKc', 'name': 'Ultimate 90s Playlist', 'href': 'https://api.spotify.com/v1/users/1220108378/playlists/7zK2WjuX5otv9au92VXsKc'}, {'public': None, 'snapshot_id': 'bSeyYo2va/u8/fZWTX9PJ7pfdEzUhv605BWH56JACU1w3PNSztzJFfQji4fJ7pVR', 'id': '6k8WK2AGWnwD37I9kh2zvq', 'tracks': {'href': 'https://api.spotify.com/v1/users/stephaniegeorgopulos/playlists/6k8WK2AGWnwD37I9kh2zvq/tracks', 'total': 209}, 'external_urls': {'spotify': 'http://open.spotify.com/user/stephaniegeorgopulos/playlist/6k8WK2AGWnwD37I9kh2zvq'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/stephaniegeorgopulos'}, 'type': 'user', 'uri': 'spotify:user:stephaniegeorgopulos', 'href': 'https://api.spotify.com/v1/users/stephaniegeorgopulos', 'id': 'stephaniegeorgopulos'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/9032d34a555405e96cf941b46f2f2a4b7aa75a33804bbef29d9f13121c0b58777c7efff77c2011229a63bb59f7044e1fa98e63093726844d26d20dd2fb49306a2f7b5d603361e1e45750711413d52484', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/9032d34a555405e96cf941b46f2f2a4b7aa75a33804bbef29d9f13121c0b58777c7efff77c2011229a63bb59f7044e1fa98e63093726844d26d20dd2fb49306a2f7b5d603361e1e45750711413d52484', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/9032d34a555405e96cf941b46f2f2a4b7aa75a33804bbef29d9f13121c0b58777c7efff77c2011229a63bb59f7044e1fa98e63093726844d26d20dd2fb49306a2f7b5d603361e1e45750711413d52484', 'width': 60}], 'uri': 'spotify:user:stephaniegeorgopulos:playlist:6k8WK2AGWnwD37I9kh2zvq', 'name': '90s R&B', 'href': 'https://api.spotify.com/v1/users/stephaniegeorgopulos/playlists/6k8WK2AGWnwD37I9kh2zvq'}, {'public': None, 'snapshot_id': '+vMVeKIqmjtjzAoZGwbi0v8AZLbPFrqUgmAqBYblyJQyi8p3l43TNdqqPSwNKPPd', 'id': '5DygagPVN6KmHre6MJNAJ4', 'tracks': {'href': 'https://api.spotify.com/v1/users/bigju85/playlists/5DygagPVN6KmHre6MJNAJ4/tracks', 'total': 822}, 
'external_urls': {'spotify': 'http://open.spotify.com/user/bigju85/playlist/5DygagPVN6KmHre6MJNAJ4'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/bigju85'}, 'type': 'user', 'uri': 'spotify:user:bigju85', 'href': 'https://api.spotify.com/v1/users/bigju85', 'id': 'bigju85'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/467e12151d2e11c6813fa7804ee8d96ebd1d7ac5708eab87ad4662c94a67d17547766902d49c2d927681dea378e04e46d2f22cc9b722700436f685eca15e922378d2dca9c7d683665e472643b860a41f', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/467e12151d2e11c6813fa7804ee8d96ebd1d7ac5708eab87ad4662c94a67d17547766902d49c2d927681dea378e04e46d2f22cc9b722700436f685eca15e922378d2dca9c7d683665e472643b860a41f', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/467e12151d2e11c6813fa7804ee8d96ebd1d7ac5708eab87ad4662c94a67d17547766902d49c2d927681dea378e04e46d2f22cc9b722700436f685eca15e922378d2dca9c7d683665e472643b860a41f', 'width': 60}], 'uri': 'spotify:user:bigju85:playlist:5DygagPVN6KmHre6MJNAJ4', 'name': '90s Rap', 'href': 'https://api.spotify.com/v1/users/bigju85/playlists/5DygagPVN6KmHre6MJNAJ4'}, {'public': None, 'snapshot_id': 'b5YyIRMEU/0WE+OYt2CGO7uoRjSRccLWKzcJzc04gE35qSICKr+KDxO1mZE+v+/H', 'id': '6ZblzG0k7kzs0Ex7EdFWY6', 'tracks': {'href': 'https://api.spotify.com/v1/users/1232709737/playlists/6ZblzG0k7kzs0Ex7EdFWY6/tracks', 'total': 136}, 'external_urls': {'spotify': 'http://open.spotify.com/user/1232709737/playlist/6ZblzG0k7kzs0Ex7EdFWY6'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/1232709737'}, 'type': 'user', 'uri': 'spotify:user:1232709737', 'href': 'https://api.spotify.com/v1/users/1232709737', 'id': '1232709737'}, 'collaborative': False, 'images': [{'height': 640, 'url': 
'https://mosaic.scdn.co/640/4bc3fa03ea66c557fa889ee9684ddc8b2ed94a5e3a1ec3bdc44d89ed93bf99d12138139469731b143a5443882440686191df60791974c312a14eeeb1443f7f3365178fb5a4050a516ab3221388daaf64', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/4bc3fa03ea66c557fa889ee9684ddc8b2ed94a5e3a1ec3bdc44d89ed93bf99d12138139469731b143a5443882440686191df60791974c312a14eeeb1443f7f3365178fb5a4050a516ab3221388daaf64', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/4bc3fa03ea66c557fa889ee9684ddc8b2ed94a5e3a1ec3bdc44d89ed93bf99d12138139469731b143a5443882440686191df60791974c312a14eeeb1443f7f3365178fb5a4050a516ab3221388daaf64', 'width': 60}], 'uri': 'spotify:user:1232709737:playlist:6ZblzG0k7kzs0Ex7EdFWY6', 'name': '90s PARTY', 'href': 'https://api.spotify.com/v1/users/1232709737/playlists/6ZblzG0k7kzs0Ex7EdFWY6'}, {'public': None, 'snapshot_id': 'sqjT9Ws60MZTay5r3UzHelKm9qD/mgeZ9kYFyFw9Og4nX4mVvgz3WhA+Red3MfTF', 'id': '5t30PswiZDYfZAIrTwGr2V', 'tracks': {'href': 'https://api.spotify.com/v1/users/12143691853/playlists/5t30PswiZDYfZAIrTwGr2V/tracks', 'total': 88}, 'external_urls': {'spotify': 'http://open.spotify.com/user/12143691853/playlist/5t30PswiZDYfZAIrTwGr2V'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/12143691853'}, 'type': 'user', 'uri': 'spotify:user:12143691853', 'href': 'https://api.spotify.com/v1/users/12143691853', 'id': '12143691853'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/eb646ba4b6c6db5fcc180d091512bdcf497ed3f063e47b9e1420e2effd4d4537c464c27e6e598f2ed1fec99e6c187e2f67bc28d5367d5db9664b1835f7c286b445154de89205dcfd05e3db6647555d87', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/eb646ba4b6c6db5fcc180d091512bdcf497ed3f063e47b9e1420e2effd4d4537c464c27e6e598f2ed1fec99e6c187e2f67bc28d5367d5db9664b1835f7c286b445154de89205dcfd05e3db6647555d87', 'width': 300}, {'height': 60, 'url': 
'https://mosaic.scdn.co/60/eb646ba4b6c6db5fcc180d091512bdcf497ed3f063e47b9e1420e2effd4d4537c464c27e6e598f2ed1fec99e6c187e2f67bc28d5367d5db9664b1835f7c286b445154de89205dcfd05e3db6647555d87', 'width': 60}], 'uri': 'spotify:user:12143691853:playlist:5t30PswiZDYfZAIrTwGr2V', 'name': 'lovesongs 80s to 90s', 'href': 'https://api.spotify.com/v1/users/12143691853/playlists/5t30PswiZDYfZAIrTwGr2V'}], 'total': 2538}} # 17. What is the data type of the search result? Print it. print(type(playlist_search_result)) # 18. What are all of the keys that the search result has? print(playlist_search_result.keys()) # 19. Take a look at 'playlists' - what keys does it have? print(playlist_search_result['playlists'].keys()) # 20. Save the list of playlists into a variable called 'playlists' playlist_info = playlist_search_result['playlists'] playlists = playlist_info['items'] # 21. Print the title of every playlist for every_item in playlists: print(every_item['name']) # 22. Loop through every playlist, printing its keys for every_item in playlists: print(every_item.keys()) # 23. What is the data type of a playlist's 'tracks'? #print(type(every_item['tracks'])) print(type(playlists[0]['tracks'])) # 24. Print the name and number of tracks for every playlist print(playlists[0]['tracks'].keys()) #for every_playlist in playlists: #print(every_item['name']) print(playlists[0]['tracks']['total']) for every_playlist in playlists: print(every_playlist['tracks']['total']) print(every_playlist['name']) # 25. We like curation! Loop through the playlists again, but only display those with fewer than 200 tracks. for every_playlist in playlists: if every_playlist['tracks']['total']< 200: print(every_playlist['name'])
########
#GRADED: 24/25
# Homework 3
#
# MAKE SURE YOU ARE RUNNING THIS WITH PYTHON 3!
# Python 2 will give a "Non-ASCII character" error
#
# Either use workon/mkvirtualenv to create an
# environment or use the python3 command
#
########

########
#
# Here is a programmer!
#
########

programmer = {
    'name': 'Christine',
    'fish': 100,
    'languages': ['C++', 'Ruby', 'Java', 'Python'],
}

# 1. What kind of data structure (a.k.a. type) is programmer? Print it.
print(type(programmer))

# 2. What keys does programmer have? Print it.
print(programmer.keys())

# 3. Print the programmer's name.
print(programmer['name'])

# 4. If the programmer has more than 30 fish, print "The programmer owns a lot of fish."
# If the programmer has 0 fish, say "the programmer has no fish."
# If the programmer has between 1 and 30 fish, print "the programmer has a few fish."
# Exactly one branch can fire, so a single if/elif chain is equivalent to the
# original separate statements (a negative count still prints nothing).
fish = programmer['fish']
if fish > 30:
    print("The programmer owns a lot of fish")
elif fish == 0:
    print("The programmer has no fish")
elif 1 <= fish <= 30:
    print("The programmer has a few fish")

# 5. Print the sentence, "{programmer's name} knows {number of languages} languages")
print(programmer['name'], "knows", len(programmer['languages']), "languages")

# 6. Use a loop to print each language the programmer knows
languages = programmer['languages']
for known_language in languages:
    print(known_language)

########
#
# Here is a bunch of workers!
#
########

company = {
    'name': 'ACME Product Production Program',
    'coders': [
        {'name': 'Lady Macbeth', 'languages': ['C++', 'Ruby', 'Java', 'Python']},
        {'name': 'Lothario', 'languages': ['C++']},
        {'name': 'Ophelia', 'languages': ['Ruby', 'Erlang', 'Python']},
        {'name': 'Mercutio', 'languages': ['ASM', 'Python']},
    ],
    'managers': [
        {'name': 'Alpha'},
        {'name': 'Beta'},
        {'name': 'Gamma'},
        {'name': 'Delta'},
    ],
}

# 7. What type is the company variable? What are its keys?
print(type(company))

# 8. What data structure (a.k.a. type) is the 'coders' part of company?
# 8. (answer) The original answer below prints company's keys, which answers
# the wrong question — the TA's note gives the correct call, applied after it.
print(company.keys())
#TA-STEPHAN: This just gives you the keys of company, not the data structure of
#coders. Try this:
#print(type(company['coders']))
# Applying the TA's correction: 'coders' is a list of dicts, so this prints <class 'list'>.
print(type(company['coders']))

# 9. How many coders does the company have?
print(len(company['coders']))

# 10. Print the name of each manager.
managers = company['managers']
for manager in managers:
    print(manager['name'])

# 11. Print the number of languages each coder knows.
coders = company['coders']
for coder in coders:
    print(coder['name'], "knows", (len(coder['languages'])), "languages")

########
#
# Search results from Spotify for an artist named "Kendrick"
# https://api.spotify.com/v1/search?query=kendrick&limit=20&type=artist
#
########

# Canned API response kept verbatim (one paging dict under 'artists' holding
# 20 artist records); the exercises below only read it, never modify it.
artist_search_result = {'artists': {'offset': 0, 'next': 'https://api.spotify.com/v1/search?query=kendrick&offset=20&limit=20&type=artist', 'limit': 20, 'href': 'https://api.spotify.com/v1/search?query=kendrick&offset=0&limit=20&type=artist', 'previous': None, 'items': [{'images': [{'height': 1000, 'url': 'https://i.scdn.co/image/b1947120c60a8f2886c98faf52a61895821c7cf0', 'width': 1000}, {'height': 640, 'url': 'https://i.scdn.co/image/c50721f32900d561d44f38006208ab69717fe1f9', 'width': 640}, {'height': 200, 'url': 'https://i.scdn.co/image/762628b9c2bf991e6f9325522dab32c0cf7c06a2', 'width': 200}, {'height': 64, 'url': 'https://i.scdn.co/image/876101e8b1a981d5d6f9257f0f6ddd15087bdfd5', 'width': 64}], 'genres': ['alternative hip hop'], 'href': 'https://api.spotify.com/v1/artists/2YZyLoL8N0Wb9xBt1NhZWg', 'external_urls': {'spotify': 'https://open.spotify.com/artist/2YZyLoL8N0Wb9xBt1NhZWg'}, 'popularity': 84, 'type': 'artist', 'followers': {'href': None, 'total': 2454724}, 'name': 'Kendrick Lamar', 'uri': 'spotify:artist:2YZyLoL8N0Wb9xBt1NhZWg', 'id': '2YZyLoL8N0Wb9xBt1NhZWg'}, {'images': [{'height': 640, 'url': 'https://i.scdn.co/image/b6e825eb7039bb792a65b484b3d56064fb629ec8', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/1229558513a6881b2635c4b2954f8bd709415ae5', 'width': 300}, {'height': 64,
'url': 'https://i.scdn.co/image/1301ae674a679c1865b2ffc0702be296d86224fc', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/6xfqnpe2HnLVUaYXs2F8YS', 'external_urls': {'spotify': 'https://open.spotify.com/artist/6xfqnpe2HnLVUaYXs2F8YS'}, 'popularity': 57, 'type': 'artist', 'followers': {'href': None, 'total': 84080}, 'name': 'Anna Kendrick', 'uri': 'spotify:artist:6xfqnpe2HnLVUaYXs2F8YS', 'id': '6xfqnpe2HnLVUaYXs2F8YS'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/1iApxRdcW8Uok4htrDrvdY', 'external_urls': {'spotify': 'https://open.spotify.com/artist/1iApxRdcW8Uok4htrDrvdY'}, 'popularity': 45, 'type': 'artist', 'followers': {'href': None, 'total': 1764}, 'name': 'Tech N9ne feat. Kendrick Lamar, ¡Mayday!, Kendall Morgan', 'uri': 'spotify:artist:1iApxRdcW8Uok4htrDrvdY', 'id': '1iApxRdcW8Uok4htrDrvdY'}, {'images': [{'height': 640, 'url': 'https://i.scdn.co/image/3130aee8b99f3fd47e32c704f146eeafc2ad01fc', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/4547ade74391dcd3b3ca38afe820e5f44a5bddc7', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/5ae90745618ea45fe0e0e832feebecaab3dc2d14', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/1cjrBtunBfOLbXQ0OK1yEY', 'external_urls': {'spotify': 'https://open.spotify.com/artist/1cjrBtunBfOLbXQ0OK1yEY'}, 'popularity': 41, 'type': 'artist', 'followers': {'href': None, 'total': 7}, 'name': 'Edgar Kendricks', 'uri': 'spotify:artist:1cjrBtunBfOLbXQ0OK1yEY', 'id': '1cjrBtunBfOLbXQ0OK1yEY'}, {'images': [{'height': 1280, 'url': 'https://i.scdn.co/image/664f1a004773bd74a4ff5104818e4f383ef95a5e', 'width': 676}, {'height': 1212, 'url': 'https://i.scdn.co/image/d5eff2f40af987b8794a43b6df78a47f41e4dc8f', 'width': 640}, {'height': 379, 'url': 'https://i.scdn.co/image/04eacbd2e9a333aff1deb625512fef76cd60c754', 'width': 200}, {'height': 121, 'url': 'https://i.scdn.co/image/745fd75c3bb492e40f93835e233f3e80d4ab513a', 'width': 64}], 'genres':
['motown'], 'href': 'https://api.spotify.com/v1/artists/2Uuon75BhnuuxdKLYn4wHn', 'external_urls': {'spotify': 'https://open.spotify.com/artist/2Uuon75BhnuuxdKLYn4wHn'}, 'popularity': 39, 'type': 'artist', 'followers': {'href': None, 'total': 5310}, 'name': 'Eddie Kendricks', 'uri': 'spotify:artist:2Uuon75BhnuuxdKLYn4wHn', 'id': '2Uuon75BhnuuxdKLYn4wHn'}, {'images': [{'height': 640, 'url': 'https://i.scdn.co/image/14bfe97f0b355da905a49255991be8d72c96d49c', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/6c8c92a391746de3ac3f630180c74c7e363d0c97', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/d17dc6566044566cee5ad0b529df6320a0dcb065', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/2cKOuZYoNGwJ91GSVhUV9g', 'external_urls': {'spotify': 'https://open.spotify.com/artist/2cKOuZYoNGwJ91GSVhUV9g'}, 'popularity': 29, 'type': 'artist', 'followers': {'href': None, 'total': 81}, 'name': 'Kendrick', 'uri': 'spotify:artist:2cKOuZYoNGwJ91GSVhUV9g', 'id': '2cKOuZYoNGwJ91GSVhUV9g'}, {'images': [{'height': 635, 'url': 'https://i.scdn.co/image/70292a01a38948fa70e00b175e8d60ee33a40bc3', 'width': 950}, {'height': 428, 'url': 'https://i.scdn.co/image/b9537b37f129c1be5e8f3ba4efe3cac5b25f7636', 'width': 640}, {'height': 134, 'url': 'https://i.scdn.co/image/e73e70c68dc9d40799fa1a7865c7c2b56a56ae32', 'width': 200}, {'height': 43, 'url': 'https://i.scdn.co/image/3dccb1ab6811e5d59dd71ef664621e7f0aacd0b2', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/3xidVCWg60r8Wdm6g9VCux', 'external_urls': {'spotify': 'https://open.spotify.com/artist/3xidVCWg60r8Wdm6g9VCux'}, 'popularity': 36, 'type': 'artist', 'followers': {'href': None, 'total': 1031}, 'name': 'Kendrick Scott', 'uri': 'spotify:artist:3xidVCWg60r8Wdm6g9VCux', 'id': '3xidVCWg60r8Wdm6g9VCux'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/7Bin9s9lePTNx57vB5rHW8', 'external_urls': {'spotify':
'https://open.spotify.com/artist/7Bin9s9lePTNx57vB5rHW8'}, 'popularity': 24, 'type': 'artist', 'followers': {'href': None, 'total': 1}, 'name': 'Kendrick Small', 'uri': 'spotify:artist:7Bin9s9lePTNx57vB5rHW8', 'id': '7Bin9s9lePTNx57vB5rHW8'}, {'images': [{'height': 290, 'url': 'https://i.scdn.co/image/af863b35263ff14eb78218f371bef8a0e76f1de5', 'width': 1000}, {'height': 186, 'url': 'https://i.scdn.co/image/7f86627d478319b749db28e5029e8ef08f330759', 'width': 640}, {'height': 58, 'url': 'https://i.scdn.co/image/7f751ccee89a1f1b84d1a0cd1d37437c41bde338', 'width': 200}, {'height': 19, 'url': 'https://i.scdn.co/image/510ff1b14b3ae7688012ed56a4d201e2a6333e8e', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/1srLlKy0yVmQorLl9PhXbS', 'external_urls': {'spotify': 'https://open.spotify.com/artist/1srLlKy0yVmQorLl9PhXbS'}, 'popularity': 30, 'type': 'artist', 'followers': {'href': None, 'total': 3222}, 'name': 'Graham Kendrick', 'uri': 'spotify:artist:1srLlKy0yVmQorLl9PhXbS', 'id': '1srLlKy0yVmQorLl9PhXbS'}, {'images': [{'height': 640, 'url': 'https://i.scdn.co/image/801f01bc6446f7b97d656ee3a86702c642633c4f', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/ec3937428234fca86329588823b68b0e81aa2251', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/567b432948b028ed45d637d972c0058f2bf1bb91', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/5DOCFpRL15EQCkZDU3RcP8', 'external_urls': {'spotify': 'https://open.spotify.com/artist/5DOCFpRL15EQCkZDU3RcP8'}, 'popularity': 23, 'type': 'artist', 'followers': {'href': None, 'total': 69}, 'name': 'Temps & Eddie Kendricks', 'uri': 'spotify:artist:5DOCFpRL15EQCkZDU3RcP8', 'id': '5DOCFpRL15EQCkZDU3RcP8'}, {'images': [{'height': 667, 'url': 'https://i.scdn.co/image/555a8e287d0b50921f43773779ccc99f4eb14bd8', 'width': 1000}, {'height': 427, 'url': 'https://i.scdn.co/image/3cf168aefd1633f40f7021e44d2106d4a3c34f8c', 'width': 640}, {'height': 133, 'url':
'https://i.scdn.co/image/9d51baf1e5d9bd05e7e20117f6f0bbac4ede8ad2', 'width': 200}, {'height': 43, 'url': 'https://i.scdn.co/image/030909e0a8dc938af89c7a030e0b204fbd46f11d', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/0IyuDlCVbMa3TAoVaDKEeL', 'external_urls': {'spotify': 'https://open.spotify.com/artist/0IyuDlCVbMa3TAoVaDKEeL'}, 'popularity': 19, 'type': 'artist', 'followers': {'href': None, 'total': 1471}, 'name': 'Kendrick Scott Oracle', 'uri': 'spotify:artist:0IyuDlCVbMa3TAoVaDKEeL', 'id': '0IyuDlCVbMa3TAoVaDKEeL'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/6FnUiliI9F1f2V9THnXxpu', 'external_urls': {'spotify': 'https://open.spotify.com/artist/6FnUiliI9F1f2V9THnXxpu'}, 'popularity': 10, 'type': 'artist', 'followers': {'href': None, 'total': 93}, 'name': 'Solange feat. Kendrick Lamar', 'uri': 'spotify:artist:6FnUiliI9F1f2V9THnXxpu', 'id': '6FnUiliI9F1f2V9THnXxpu'}, {'images': [{'height': 640, 'url': 'https://i.scdn.co/image/0724aac29dcf4876b54f01a2813365b92343ed5a', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/c6e8964165d08bc9cb2fc05d68439930db61c890', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/d26bc9756a0c7679f095c9e0e8e13dc9b39febde', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/7by6up72jjsUGwmmMitGr1', 'external_urls': {'spotify': 'https://open.spotify.com/artist/7by6up72jjsUGwmmMitGr1'}, 'popularity': 9, 'type': 'artist', 'followers': {'href': None, 'total': 32}, 'name': 'The Kendricks', 'uri': 'spotify:artist:7by6up72jjsUGwmmMitGr1', 'id': '7by6up72jjsUGwmmMitGr1'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/1WscexgNxCyVt7Bx5pmsUg', 'external_urls': {'spotify': 'https://open.spotify.com/artist/1WscexgNxCyVt7Bx5pmsUg'}, 'popularity': 11, 'type': 'artist', 'followers': {'href': None, 'total': 39}, 'name': 'Richard Kendrick', 'uri': 'spotify:artist:1WscexgNxCyVt7Bx5pmsUg', 'id': '1WscexgNxCyVt7Bx5pmsUg'},
{'images': [{'height': 640, 'url': 'https://i.scdn.co/image/12a0f2aa81ccde7f63fb02417f44c8de99df1087', 'width': 640}, {'height': 300, 'url': 'https://i.scdn.co/image/5a75162ef05c05e950b42d863ca7a811386a97b0', 'width': 300}, {'height': 64, 'url': 'https://i.scdn.co/image/64f475b3531667c58f1ef02f1774ec7697b1ac81', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/4UnJ85AumWoUuGnOpLEnl7', 'external_urls': {'spotify': 'https://open.spotify.com/artist/4UnJ85AumWoUuGnOpLEnl7'}, 'popularity': 10, 'type': 'artist', 'followers': {'href': None, 'total': 58}, 'name': 'Darnell Kendricks', 'uri': 'spotify:artist:4UnJ85AumWoUuGnOpLEnl7', 'id': '4UnJ85AumWoUuGnOpLEnl7'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/7kj1cdDalpyc3rURdJx8b9', 'external_urls': {'spotify': 'https://open.spotify.com/artist/7kj1cdDalpyc3rURdJx8b9'}, 'popularity': 7, 'type': 'artist', 'followers': {'href': None, 'total': 54}, 'name': 'Eddie Kendrick', 'uri': 'spotify:artist:7kj1cdDalpyc3rURdJx8b9', 'id': '7kj1cdDalpyc3rURdJx8b9'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/2cUIfFEsQgmPlYt75T2Lvy', 'external_urls': {'spotify': 'https://open.spotify.com/artist/2cUIfFEsQgmPlYt75T2Lvy'}, 'popularity': 9, 'type': 'artist', 'followers': {'href': None, 'total': 14}, 'name': 'Alex Kendrick', 'uri': 'spotify:artist:2cUIfFEsQgmPlYt75T2Lvy', 'id': '2cUIfFEsQgmPlYt75T2Lvy'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/3M8jTGG0AItu4XYotTlC6M', 'external_urls': {'spotify': 'https://open.spotify.com/artist/3M8jTGG0AItu4XYotTlC6M'}, 'popularity': 5, 'type': 'artist', 'followers': {'href': None, 'total': 451}, 'name': 'Kendrick Lamar & Jay Rock', 'uri': 'spotify:artist:3M8jTGG0AItu4XYotTlC6M', 'id': '3M8jTGG0AItu4XYotTlC6M'}, {'images': [{'height': 600, 'url': 'https://i.scdn.co/image/b388ac8f9a6fef30800af948440f81020bec6ea6', 'width': 411}, {'height': 292, 'url':
'https://i.scdn.co/image/9ede141bfd29ffac1984c61331191e39b7d92e12', 'width': 200}, {'height': 93, 'url': 'https://i.scdn.co/image/eff9add3f6d333717bd051d75721e518596c2719', 'width': 64}], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/46uaPpO9BqSSlLvSXFwyJs', 'external_urls': {'spotify': 'https://open.spotify.com/artist/46uaPpO9BqSSlLvSXFwyJs'}, 'popularity': 5, 'type': 'artist', 'followers': {'href': None, 'total': 66}, 'name': 'Charlotte Kendrick', 'uri': 'spotify:artist:46uaPpO9BqSSlLvSXFwyJs', 'id': '46uaPpO9BqSSlLvSXFwyJs'}, {'images': [], 'genres': [], 'href': 'https://api.spotify.com/v1/artists/63rMSwRscrbMp9u0MhfDzK', 'external_urls': {'spotify': 'https://open.spotify.com/artist/63rMSwRscrbMp9u0MhfDzK'}, 'popularity': 5, 'type': 'artist', 'followers': {'href': None, 'total': 1}, 'name': 'Will Kendrick', 'uri': 'spotify:artist:63rMSwRscrbMp9u0MhfDzK', 'id': '63rMSwRscrbMp9u0MhfDzK'}], 'total': 160}}

# 12. What is the data type of the search result? Print it.
print(type(artist_search_result))

# 13. What are all of the keys that the search result has?
print(artist_search_result.keys())

# 14. Take a look at 'artists' - what keys does it have?
print(artist_search_result['artists'].keys())

# 15. Using len() with something-or-other would show me how many results I CURRENTLY have,
# but I want to know the TOTAL number of results Spotify has for my search result.
# From looking at the names of the keys under 'artists', how many total results are there?
artists = artist_search_result['artists']
print(artists['total'])

# 16. How popular is Kendrick Lamar vs. Anna Kendrick? Use a for loop to list the names and
#popularity of every artist.
#####JUST CHECKING#### print(artists['items']) --> found name and popularity here artists_info = (artists['items']) for artist in artists_info: print(artist['name'],"has a popularity of", artist['popularity'] ) Kendrick_Lamar = (artists_info[0]['name']) Kendrick_Lamar_popularity = (artists_info[0]['popularity']) Anna_Kendrick = (artists_info[1]['name']) Anna_Kendrick_popularity= (artists_info[1]['popularity']) print(Kendrick_Lamar, "has a popularity of", Kendrick_Lamar_popularity, "and", Anna_Kendrick, "has a popularity of", Anna_Kendrick_popularity) ######## # # Search results from Spotify for a playlist including the term "90s" # https://api.spotify.com/v1/search?query=90s&limit=20&type=playlist # ######## playlist_search_result = {'playlists': {'offset': 0, 'next': 'https://api.spotify.com/v1/search?query=90s&offset=20&limit=20&type=playlist', 'limit': 20, 'href': 'https://api.spotify.com/v1/search?query=90s&offset=0&limit=20&type=playlist', 'previous': None, 'items': [{'public': None, 'snapshot_id': 'X2zFSzFviruyjSFdaByIuiT9se/LKmkQFWbqY+NzH+TQ1Sj4rH0Q/0WxQlKrNvKw', 'id': '5TcHWbnN6SIhvPY1MXMDrb', 'tracks': {'href': 'https://api.spotify.com/v1/users/sam85uk/playlists/5TcHWbnN6SIhvPY1MXMDrb/tracks', 'total': 937}, 'external_urls': {'spotify': 'http://open.spotify.com/user/sam85uk/playlist/5TcHWbnN6SIhvPY1MXMDrb'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/sam85uk'}, 'type': 'user', 'uri': 'spotify:user:sam85uk', 'href': 'https://api.spotify.com/v1/users/sam85uk', 'id': 'sam85uk'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/08d18e9d8c7d49cbb1eb609998fff55741cf54f61ebd9af955a76278cbfc1959ed78ab4a604123186abcdf6587a26b505a0816d7b3ff9fba4be070344bc3fa03ea66c557fa889ee9684ddc8b2ed94a5e', 'width': 640}, {'height': 300, 'url': 
'https://mosaic.scdn.co/300/08d18e9d8c7d49cbb1eb609998fff55741cf54f61ebd9af955a76278cbfc1959ed78ab4a604123186abcdf6587a26b505a0816d7b3ff9fba4be070344bc3fa03ea66c557fa889ee9684ddc8b2ed94a5e', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/08d18e9d8c7d49cbb1eb609998fff55741cf54f61ebd9af955a76278cbfc1959ed78ab4a604123186abcdf6587a26b505a0816d7b3ff9fba4be070344bc3fa03ea66c557fa889ee9684ddc8b2ed94a5e', 'width': 60}], 'uri': 'spotify:user:sam85uk:playlist:5TcHWbnN6SIhvPY1MXMDrb', 'name': '90s', 'href': 'https://api.spotify.com/v1/users/sam85uk/playlists/5TcHWbnN6SIhvPY1MXMDrb'}, {'public': None, 'snapshot_id': 'vs/L+N+xSmk4giRCCNAsNK4hljW++tS/RHSq0HJbYqWCXo65jGmGpT9ssHu5GgEh', 'id': '6OugflVBHYjm6HIjCObsz4', 'tracks': {'href': 'https://api.spotify.com/v1/users/1220462882/playlists/6OugflVBHYjm6HIjCObsz4/tracks', 'total': 924}, 'external_urls': {'spotify': 'http://open.spotify.com/user/1220462882/playlist/6OugflVBHYjm6HIjCObsz4'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/1220462882'}, 'type': 'user', 'uri': 'spotify:user:1220462882', 'href': 'https://api.spotify.com/v1/users/1220462882', 'id': '1220462882'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/81987cf55db3b188d996f89b41a724b9d82311c37fb148a7b28456487198e98997eb7a7462ca8c8c214a89248160db7677ea145da83b29bf01c189fb85c86129145f848525b858bc3557161db5654ded', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/81987cf55db3b188d996f89b41a724b9d82311c37fb148a7b28456487198e98997eb7a7462ca8c8c214a89248160db7677ea145da83b29bf01c189fb85c86129145f848525b858bc3557161db5654ded', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/81987cf55db3b188d996f89b41a724b9d82311c37fb148a7b28456487198e98997eb7a7462ca8c8c214a89248160db7677ea145da83b29bf01c189fb85c86129145f848525b858bc3557161db5654ded', 'width': 60}], 'uri': 'spotify:user:1220462882:playlist:6OugflVBHYjm6HIjCObsz4', 'name': '90s country', 
'href': 'https://api.spotify.com/v1/users/1220462882/playlists/6OugflVBHYjm6HIjCObsz4'}, {'public': None, 'snapshot_id': 'ZMcQcP9SEc7jKFwoEO97LcQzLn0iBMcC2NjnFKEcbpKLXkbc7f6n1yjdXitIg32A', 'id': '5e1bpazQUEHijFhcJobkAp', 'tracks': {'href': 'https://api.spotify.com/v1/users/luccyyy/playlists/5e1bpazQUEHijFhcJobkAp/tracks', 'total': 164}, 'external_urls': {'spotify': 'http://open.spotify.com/user/luccyyy/playlist/5e1bpazQUEHijFhcJobkAp'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/luccyyy'}, 'type': 'user', 'uri': 'spotify:user:luccyyy', 'href': 'https://api.spotify.com/v1/users/luccyyy', 'id': 'luccyyy'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/7cc30bbf3935c30f795c99b19c1f7ad8f0427cff5ed7a60280fe4adabb6404581d36c8eac93ea0f90d69e33e5e05c63fe4a9d3146771ec7e6f052dceec4f7a63269a7997b385763abc036f483ac46212', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/7cc30bbf3935c30f795c99b19c1f7ad8f0427cff5ed7a60280fe4adabb6404581d36c8eac93ea0f90d69e33e5e05c63fe4a9d3146771ec7e6f052dceec4f7a63269a7997b385763abc036f483ac46212', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/7cc30bbf3935c30f795c99b19c1f7ad8f0427cff5ed7a60280fe4adabb6404581d36c8eac93ea0f90d69e33e5e05c63fe4a9d3146771ec7e6f052dceec4f7a63269a7997b385763abc036f483ac46212', 'width': 60}], 'uri': 'spotify:user:luccyyy:playlist:5e1bpazQUEHijFhcJobkAp', 'name': "'90s Rock", 'href': 'https://api.spotify.com/v1/users/luccyyy/playlists/5e1bpazQUEHijFhcJobkAp'}, {'public': None, 'snapshot_id': '5JWrVBSsfjaBGPt2OGlZ8ZJgkI9xB39TC5ISrrK/yYuFvgzOIji3eeHWfCenGpbI', 'id': '5v6cPhjJxSgNn6Aluh0lqV', 'tracks': {'href': 'https://api.spotify.com/v1/users/crockstarltd/playlists/5v6cPhjJxSgNn6Aluh0lqV/tracks', 'total': 137}, 'external_urls': {'spotify': 'http://open.spotify.com/user/crockstarltd/playlist/5v6cPhjJxSgNn6Aluh0lqV'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 
'http://open.spotify.com/user/crockstarltd'}, 'type': 'user', 'uri': 'spotify:user:crockstarltd', 'href': 'https://api.spotify.com/v1/users/crockstarltd', 'id': 'crockstarltd'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/d8f6b63b7df6e7cccf92044d706a377e9803d9d73d35d425a1f8b25cada6937470e7e258d7835dacae29a6ee0fc15e797f94e7f30588cdcfaf7febb0e15a74a0c972959dbb9d597acf98868cb95b0664', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/d8f6b63b7df6e7cccf92044d706a377e9803d9d73d35d425a1f8b25cada6937470e7e258d7835dacae29a6ee0fc15e797f94e7f30588cdcfaf7febb0e15a74a0c972959dbb9d597acf98868cb95b0664', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/d8f6b63b7df6e7cccf92044d706a377e9803d9d73d35d425a1f8b25cada6937470e7e258d7835dacae29a6ee0fc15e797f94e7f30588cdcfaf7febb0e15a74a0c972959dbb9d597acf98868cb95b0664', 'width': 60}], 'uri': 'spotify:user:crockstarltd:playlist:5v6cPhjJxSgNn6Aluh0lqV', 'name': '90s alternative', 'href': 'https://api.spotify.com/v1/users/crockstarltd/playlists/5v6cPhjJxSgNn6Aluh0lqV'}, {'public': None, 'snapshot_id': 'NNzGf/01IMRYYmlNZhEehdkG8FONeb5xdyiUoVUVUsk5hovm++HAO4+UYckAlz7F', 'id': '3wneNouatgyKcqjPJtBP2L', 'tracks': {'href': 'https://api.spotify.com/v1/users/macaulaymeudt92309/playlists/3wneNouatgyKcqjPJtBP2L/tracks', 'total': 815}, 'external_urls': {'spotify': 'http://open.spotify.com/user/macaulaymeudt92309/playlist/3wneNouatgyKcqjPJtBP2L'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/macaulaymeudt92309'}, 'type': 'user', 'uri': 'spotify:user:macaulaymeudt92309', 'href': 'https://api.spotify.com/v1/users/macaulaymeudt92309', 'id': 'macaulaymeudt92309'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/4a589f44ef1c9988ef3441b8b40fa1ea66b7b7300acfcd6f1b85b23a79fc4d678a9368f27c110d0238a9cb53d4ebe717af8921e8d86cbfacdb88b9ea0db70a83d7a6f026c20efd426fa3dea6f369d385', 'width': 640}, {'height': 300, 
'url': 'https://mosaic.scdn.co/300/4a589f44ef1c9988ef3441b8b40fa1ea66b7b7300acfcd6f1b85b23a79fc4d678a9368f27c110d0238a9cb53d4ebe717af8921e8d86cbfacdb88b9ea0db70a83d7a6f026c20efd426fa3dea6f369d385', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/4a589f44ef1c9988ef3441b8b40fa1ea66b7b7300acfcd6f1b85b23a79fc4d678a9368f27c110d0238a9cb53d4ebe717af8921e8d86cbfacdb88b9ea0db70a83d7a6f026c20efd426fa3dea6f369d385', 'width': 60}], 'uri': 'spotify:user:macaulaymeudt92309:playlist:3wneNouatgyKcqjPJtBP2L', 'name': "'90s Hits ", 'href': 'https://api.spotify.com/v1/users/macaulaymeudt92309/playlists/3wneNouatgyKcqjPJtBP2L'}, {'public': None, 'snapshot_id': 'iACKRBrA4eZtFBpk0oCrYEJvBNaSOr2utdXOi2mwuGXMmMv/utD0w/hyLhEkHxDI', 'id': '29jIRPHN6igNaKMwsMQudX', 'tracks': {'href': 'https://api.spotify.com/v1/users/12445745/playlists/29jIRPHN6igNaKMwsMQudX/tracks', 'total': 185}, 'external_urls': {'spotify': 'http://open.spotify.com/user/12445745/playlist/29jIRPHN6igNaKMwsMQudX'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/12445745'}, 'type': 'user', 'uri': 'spotify:user:12445745', 'href': 'https://api.spotify.com/v1/users/12445745', 'id': '12445745'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/0cd7ff9cc859ff202ed95892ed85747591aea1e54a33a0da119619c814576f290b8a0a56a4683df0976c2e58bf940880b7a083e3cd343068975e63b47433313c5921731df38907118b14b7101c15dec7', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/0cd7ff9cc859ff202ed95892ed85747591aea1e54a33a0da119619c814576f290b8a0a56a4683df0976c2e58bf940880b7a083e3cd343068975e63b47433313c5921731df38907118b14b7101c15dec7', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/0cd7ff9cc859ff202ed95892ed85747591aea1e54a33a0da119619c814576f290b8a0a56a4683df0976c2e58bf940880b7a083e3cd343068975e63b47433313c5921731df38907118b14b7101c15dec7', 'width': 60}], 'uri': 'spotify:user:12445745:playlist:29jIRPHN6igNaKMwsMQudX', 'name': 
'90s RnB', 'href': 'https://api.spotify.com/v1/users/12445745/playlists/29jIRPHN6igNaKMwsMQudX'}, {'public': None, 'snapshot_id': 'MiF9QytElWJwgY1nXZlaw8HJMF/mPV+UYdk35yEs93gqHPpFW8YAxZs90asJ3yBd', 'id': '3QmFUqc1Io68CVD2NL2iLM', 'tracks': {'href': 'https://api.spotify.com/v1/users/1297361113/playlists/3QmFUqc1Io68CVD2NL2iLM/tracks', 'total': 281}, 'external_urls': {'spotify': 'http://open.spotify.com/user/1297361113/playlist/3QmFUqc1Io68CVD2NL2iLM'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/1297361113'}, 'type': 'user', 'uri': 'spotify:user:1297361113', 'href': 'https://api.spotify.com/v1/users/1297361113', 'id': '1297361113'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/28961f2d68c3477b4c86d2fee6e00178f32851eb676a05f833234b5273fd35b908a4cc57105fd744d6b045d91a974b3f77b5724a93c0f5c5188e616bdcab2d136873cfbdb5feac7539897d6774bf83d3', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/28961f2d68c3477b4c86d2fee6e00178f32851eb676a05f833234b5273fd35b908a4cc57105fd744d6b045d91a974b3f77b5724a93c0f5c5188e616bdcab2d136873cfbdb5feac7539897d6774bf83d3', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/28961f2d68c3477b4c86d2fee6e00178f32851eb676a05f833234b5273fd35b908a4cc57105fd744d6b045d91a974b3f77b5724a93c0f5c5188e616bdcab2d136873cfbdb5feac7539897d6774bf83d3', 'width': 60}], 'uri': 'spotify:user:1297361113:playlist:3QmFUqc1Io68CVD2NL2iLM', 'name': 'ROCK 90S INGLES', 'href': 'https://api.spotify.com/v1/users/1297361113/playlists/3QmFUqc1Io68CVD2NL2iLM'}, {'public': None, 'snapshot_id': 'n6rtZGQ+Y7BG/Y+w7awcPmbn2Tus4ahRSOzDF67JzqtTqYjSMV8x58WQUUySFRls', 'id': '5772HGqmp2E99GQo5tfmcJ', 'tracks': {'href': 'https://api.spotify.com/v1/users/19jconnell79/playlists/5772HGqmp2E99GQo5tfmcJ/tracks', 'total': 211}, 'external_urls': {'spotify': 'http://open.spotify.com/user/19jconnell79/playlist/5772HGqmp2E99GQo5tfmcJ'}, 'type': 'playlist', 'owner': {'external_urls': 
{'spotify': 'http://open.spotify.com/user/19jconnell79'}, 'type': 'user', 'uri': 'spotify:user:19jconnell79', 'href': 'https://api.spotify.com/v1/users/19jconnell79', 'id': '19jconnell79'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/21ffd457485a3a428e38d3b00b399cbd917a0535f36a53eb6401ff8f827518170143df42f4e7e82282cbb8cfabbd840774f1d865415d293227982b2730b5feda36f583b4772709babe690eee17fc99ec', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/21ffd457485a3a428e38d3b00b399cbd917a0535f36a53eb6401ff8f827518170143df42f4e7e82282cbb8cfabbd840774f1d865415d293227982b2730b5feda36f583b4772709babe690eee17fc99ec', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/21ffd457485a3a428e38d3b00b399cbd917a0535f36a53eb6401ff8f827518170143df42f4e7e82282cbb8cfabbd840774f1d865415d293227982b2730b5feda36f583b4772709babe690eee17fc99ec', 'width': 60}], 'uri': 'spotify:user:19jconnell79:playlist:5772HGqmp2E99GQo5tfmcJ', 'name': '90s rap/hip hop', 'href': 'https://api.spotify.com/v1/users/19jconnell79/playlists/5772HGqmp2E99GQo5tfmcJ'}, {'public': None, 'snapshot_id': 'SXYTRsNxGol+X5UGoW5s8NxYHyPD9aRb4l+WyIIFDXvBqF0BHVnoghcUzvM4qELA', 'id': '6PtCrShmNnLSP1Vr4PAyVm', 'tracks': {'href': 'https://api.spotify.com/v1/users/spotifyaustralia/playlists/6PtCrShmNnLSP1Vr4PAyVm/tracks', 'total': 50}, 'external_urls': {'spotify': 'http://open.spotify.com/user/spotifyaustralia/playlist/6PtCrShmNnLSP1Vr4PAyVm'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/spotifyaustralia'}, 'type': 'user', 'uri': 'spotify:user:spotifyaustralia', 'href': 'https://api.spotify.com/v1/users/spotifyaustralia', 'id': 'spotifyaustralia'}, 'collaborative': False, 'images': [{'height': None, 'url': 'https://u.scdn.co/images/pl/default/2bb39ac6d272a86f9fb377d26249c942b6138a97', 'width': None}], 'uri': 'spotify:user:spotifyaustralia:playlist:6PtCrShmNnLSP1Vr4PAyVm', 'name': '#ThrowbackThursday', 'href': 
'https://api.spotify.com/v1/users/spotifyaustralia/playlists/6PtCrShmNnLSP1Vr4PAyVm'}, {'public': None, 'snapshot_id': 'uUQscWhcfXEt0hICBieJ3bq5hST6l4P+cAEFMDNlZPhRmU7CxbXNg75BMxph1fxD', 'id': '5TvJd5fV2RMgv87mdd516L', 'tracks': {'href': 'https://api.spotify.com/v1/users/jordancstone/playlists/5TvJd5fV2RMgv87mdd516L/tracks', 'total': 310}, 'external_urls': {'spotify': 'http://open.spotify.com/user/jordancstone/playlist/5TvJd5fV2RMgv87mdd516L'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/jordancstone'}, 'type': 'user', 'uri': 'spotify:user:jordancstone', 'href': 'https://api.spotify.com/v1/users/jordancstone', 'id': 'jordancstone'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/46363292538df1e91a597dd30584a4ca41cb6bd9bf3b966b83f90362e502ca3fa0a1a7919759ac9650eef0041184673da90f5a2818f302e810cb1dac75638ccc8f57667af2d620defa755f6ffdd795fb', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/46363292538df1e91a597dd30584a4ca41cb6bd9bf3b966b83f90362e502ca3fa0a1a7919759ac9650eef0041184673da90f5a2818f302e810cb1dac75638ccc8f57667af2d620defa755f6ffdd795fb', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/46363292538df1e91a597dd30584a4ca41cb6bd9bf3b966b83f90362e502ca3fa0a1a7919759ac9650eef0041184673da90f5a2818f302e810cb1dac75638ccc8f57667af2d620defa755f6ffdd795fb', 'width': 60}], 'uri': 'spotify:user:jordancstone:playlist:5TvJd5fV2RMgv87mdd516L', 'name': '90s ROCK (300 tracks)', 'href': 'https://api.spotify.com/v1/users/jordancstone/playlists/5TvJd5fV2RMgv87mdd516L'}, {'public': None, 'snapshot_id': 'CUhnTrVMGslFOFW2i9ncLzbo39Qu/vNIAuRdXEea1NP3SPMlu9PHd2PnrYX3BYHR', 'id': '35iftafjBbC2wWKIgelOf6', 'tracks': {'href': 'https://api.spotify.com/v1/users/spotify_uk_/playlists/35iftafjBbC2wWKIgelOf6/tracks', 'total': 70}, 'external_urls': {'spotify': 'http://open.spotify.com/user/spotify_uk_/playlist/35iftafjBbC2wWKIgelOf6'}, 'type': 'playlist', 'owner': 
{'external_urls': {'spotify': 'http://open.spotify.com/user/spotify_uk_'}, 'type': 'user', 'uri': 'spotify:user:spotify_uk_', 'href': 'https://api.spotify.com/v1/users/spotify_uk_', 'id': 'spotify_uk_'}, 'collaborative': False, 'images': [{'height': None, 'url': 'https://u.scdn.co/images/pl/default/f6148a7d6511c45c3099dfbaa9a0c7af4c1de77b', 'width': None}], 'uri': 'spotify:user:spotify_uk_:playlist:35iftafjBbC2wWKIgelOf6', 'name': '90s R&B Anthems', 'href': 'https://api.spotify.com/v1/users/spotify_uk_/playlists/35iftafjBbC2wWKIgelOf6'}, {'public': None, 'snapshot_id': 'EIuQGXw7ea1XtV0NeUT/RhLnudL0cW2ORvZl9fBh+pJHIgR1gsm8eH0N4Oy/QeLP', 'id': '5yGuoOwRQF3o8NVRRlvCj7', 'tracks': {'href': 'https://api.spotify.com/v1/users/truckasaurus1/playlists/5yGuoOwRQF3o8NVRRlvCj7/tracks', 'total': 98}, 'external_urls': {'spotify': 'http://open.spotify.com/user/truckasaurus1/playlist/5yGuoOwRQF3o8NVRRlvCj7'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/truckasaurus1'}, 'type': 'user', 'uri': 'spotify:user:truckasaurus1', 'href': 'https://api.spotify.com/v1/users/truckasaurus1', 'id': 'truckasaurus1'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/9c37cfe8125864774ccb36f076cba3fcfcaa2db30477a78fbbb6623f43dffcd56eb12d7a5562bec1d62bff10f0ef84fd3cf34a73172e52de6816871daab64767f8c06174e7842db40be0a9b8ab1f519e', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/9c37cfe8125864774ccb36f076cba3fcfcaa2db30477a78fbbb6623f43dffcd56eb12d7a5562bec1d62bff10f0ef84fd3cf34a73172e52de6816871daab64767f8c06174e7842db40be0a9b8ab1f519e', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/9c37cfe8125864774ccb36f076cba3fcfcaa2db30477a78fbbb6623f43dffcd56eb12d7a5562bec1d62bff10f0ef84fd3cf34a73172e52de6816871daab64767f8c06174e7842db40be0a9b8ab1f519e', 'width': 60}], 'uri': 'spotify:user:truckasaurus1:playlist:5yGuoOwRQF3o8NVRRlvCj7', 'name': '90s Hiphop / Gangsta Rap', 'href': 
'https://api.spotify.com/v1/users/truckasaurus1/playlists/5yGuoOwRQF3o8NVRRlvCj7'}, {'public': None, 'snapshot_id': 'lSl+DalM6hI5UJfkejtCwkcTyKjlQbDOhTrvI9niH7m+zX+hR5jswQegrji0+DjU', 'id': '6jpj4xWiZe6891Ba3TADOO', 'tracks': {'href': 'https://api.spotify.com/v1/users/jec904/playlists/6jpj4xWiZe6891Ba3TADOO/tracks', 'total': 219}, 'external_urls': {'spotify': 'http://open.spotify.com/user/jec904/playlist/6jpj4xWiZe6891Ba3TADOO'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/jec904'}, 'type': 'user', 'uri': 'spotify:user:jec904', 'href': 'https://api.spotify.com/v1/users/jec904', 'id': 'jec904'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/5fc4b1d3fa12a473b72599a2d950ad5696b206329589ba01e4797b8d98859e79e7f70147f83e86d93b3bba0931af7fdff4a5a197ae9ccd50cc2b4dfacc178aa97cc7e64e9672a8eef88b631484d02c84', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/5fc4b1d3fa12a473b72599a2d950ad5696b206329589ba01e4797b8d98859e79e7f70147f83e86d93b3bba0931af7fdff4a5a197ae9ccd50cc2b4dfacc178aa97cc7e64e9672a8eef88b631484d02c84', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/5fc4b1d3fa12a473b72599a2d950ad5696b206329589ba01e4797b8d98859e79e7f70147f83e86d93b3bba0931af7fdff4a5a197ae9ccd50cc2b4dfacc178aa97cc7e64e9672a8eef88b631484d02c84', 'width': 60}], 'uri': 'spotify:user:jec904:playlist:6jpj4xWiZe6891Ba3TADOO', 'name': '90s Country', 'href': 'https://api.spotify.com/v1/users/jec904/playlists/6jpj4xWiZe6891Ba3TADOO'}, {'public': None, 'snapshot_id': 'NAzpFjabibSDSRIlNQM4LtPGVLyP14W+PtFemfOZ3WIDa1t+tkBeZIE3ubESfD8z', 'id': '0QEvLnkCV2hQx8AVs9VSCq', 'tracks': {'href': 'https://api.spotify.com/v1/users/12145536328/playlists/0QEvLnkCV2hQx8AVs9VSCq/tracks', 'total': 70}, 'external_urls': {'spotify': 'http://open.spotify.com/user/12145536328/playlist/0QEvLnkCV2hQx8AVs9VSCq'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/12145536328'}, 
'type': 'user', 'uri': 'spotify:user:12145536328', 'href': 'https://api.spotify.com/v1/users/12145536328', 'id': '12145536328'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/29f1d179c8ab0184f21bf015002de2214754e9d24cfe43a6693027f8b2188806e62c1831fd2cd68b4087e6a8c34321039140c1e13d69614bb1c8f20091142eab23c3273fa73e490f2d04464c69423f75', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/29f1d179c8ab0184f21bf015002de2214754e9d24cfe43a6693027f8b2188806e62c1831fd2cd68b4087e6a8c34321039140c1e13d69614bb1c8f20091142eab23c3273fa73e490f2d04464c69423f75', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/29f1d179c8ab0184f21bf015002de2214754e9d24cfe43a6693027f8b2188806e62c1831fd2cd68b4087e6a8c34321039140c1e13d69614bb1c8f20091142eab23c3273fa73e490f2d04464c69423f75', 'width': 60}], 'uri': 'spotify:user:12145536328:playlist:0QEvLnkCV2hQx8AVs9VSCq', 'name': 'Boybands 90s', 'href': 'https://api.spotify.com/v1/users/12145536328/playlists/0QEvLnkCV2hQx8AVs9VSCq'}, {'public': None, 'snapshot_id': 'xFuRVW3gHlQ2duHjA8Y+n+5dEsl5aewbp0tveVWMXXpWImllg5+tXPpuoNSh+c2Q', 'id': '3XctgNePp5NFKiaIYB8z2F', 'tracks': {'href': 'https://api.spotify.com/v1/users/gunnerwaddell/playlists/3XctgNePp5NFKiaIYB8z2F/tracks', 'total': 119}, 'external_urls': {'spotify': 'http://open.spotify.com/user/gunnerwaddell/playlist/3XctgNePp5NFKiaIYB8z2F'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/gunnerwaddell'}, 'type': 'user', 'uri': 'spotify:user:gunnerwaddell', 'href': 'https://api.spotify.com/v1/users/gunnerwaddell', 'id': 'gunnerwaddell'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/f0264d0abd5d237a49f797cfe1752a69d28a9d1e0b0e75514b3dca50bd0175290bdf10c77adaaeb620f6ec66d8b8bad42954ddaf3798af90b9b8b434a45505776adb49c1f554ea3d272513621bd6284f', 'width': 640}, {'height': 300, 'url': 
'https://mosaic.scdn.co/300/f0264d0abd5d237a49f797cfe1752a69d28a9d1e0b0e75514b3dca50bd0175290bdf10c77adaaeb620f6ec66d8b8bad42954ddaf3798af90b9b8b434a45505776adb49c1f554ea3d272513621bd6284f', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/f0264d0abd5d237a49f797cfe1752a69d28a9d1e0b0e75514b3dca50bd0175290bdf10c77adaaeb620f6ec66d8b8bad42954ddaf3798af90b9b8b434a45505776adb49c1f554ea3d272513621bd6284f', 'width': 60}], 'uri': 'spotify:user:gunnerwaddell:playlist:3XctgNePp5NFKiaIYB8z2F', 'name': '90s early 2000 country', 'href': 'https://api.spotify.com/v1/users/gunnerwaddell/playlists/3XctgNePp5NFKiaIYB8z2F'}, {'public': None, 'snapshot_id': 'aSYk2sFcfUQitS99gpCYw+eohvnlA+o8kNcfMHDb7eB4+bvyV1hLAW7M+lMNSaYv', 'id': '7zK2WjuX5otv9au92VXsKc', 'tracks': {'href': 'https://api.spotify.com/v1/users/1220108378/playlists/7zK2WjuX5otv9au92VXsKc/tracks', 'total': 662}, 'external_urls': {'spotify': 'http://open.spotify.com/user/1220108378/playlist/7zK2WjuX5otv9au92VXsKc'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/1220108378'}, 'type': 'user', 'uri': 'spotify:user:1220108378', 'href': 'https://api.spotify.com/v1/users/1220108378', 'id': '1220108378'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/53e089d368f4ffe69ac66ef7acd03cf273db9f234bc738a988570b3ad00ff8f756632b8ad90fc58813f894c1300cbdda027ff948ee963640c5126d2968eeccd1b4822115c647069e0de419b86545fbb1', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/53e089d368f4ffe69ac66ef7acd03cf273db9f234bc738a988570b3ad00ff8f756632b8ad90fc58813f894c1300cbdda027ff948ee963640c5126d2968eeccd1b4822115c647069e0de419b86545fbb1', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/53e089d368f4ffe69ac66ef7acd03cf273db9f234bc738a988570b3ad00ff8f756632b8ad90fc58813f894c1300cbdda027ff948ee963640c5126d2968eeccd1b4822115c647069e0de419b86545fbb1', 'width': 60}], 'uri': 
'spotify:user:1220108378:playlist:7zK2WjuX5otv9au92VXsKc', 'name': 'Ultimate 90s Playlist', 'href': 'https://api.spotify.com/v1/users/1220108378/playlists/7zK2WjuX5otv9au92VXsKc'}, {'public': None, 'snapshot_id': 'bSeyYo2va/u8/fZWTX9PJ7pfdEzUhv605BWH56JACU1w3PNSztzJFfQji4fJ7pVR', 'id': '6k8WK2AGWnwD37I9kh2zvq', 'tracks': {'href': 'https://api.spotify.com/v1/users/stephaniegeorgopulos/playlists/6k8WK2AGWnwD37I9kh2zvq/tracks', 'total': 209}, 'external_urls': {'spotify': 'http://open.spotify.com/user/stephaniegeorgopulos/playlist/6k8WK2AGWnwD37I9kh2zvq'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/stephaniegeorgopulos'}, 'type': 'user', 'uri': 'spotify:user:stephaniegeorgopulos', 'href': 'https://api.spotify.com/v1/users/stephaniegeorgopulos', 'id': 'stephaniegeorgopulos'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/9032d34a555405e96cf941b46f2f2a4b7aa75a33804bbef29d9f13121c0b58777c7efff77c2011229a63bb59f7044e1fa98e63093726844d26d20dd2fb49306a2f7b5d603361e1e45750711413d52484', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/9032d34a555405e96cf941b46f2f2a4b7aa75a33804bbef29d9f13121c0b58777c7efff77c2011229a63bb59f7044e1fa98e63093726844d26d20dd2fb49306a2f7b5d603361e1e45750711413d52484', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/9032d34a555405e96cf941b46f2f2a4b7aa75a33804bbef29d9f13121c0b58777c7efff77c2011229a63bb59f7044e1fa98e63093726844d26d20dd2fb49306a2f7b5d603361e1e45750711413d52484', 'width': 60}], 'uri': 'spotify:user:stephaniegeorgopulos:playlist:6k8WK2AGWnwD37I9kh2zvq', 'name': '90s R&B', 'href': 'https://api.spotify.com/v1/users/stephaniegeorgopulos/playlists/6k8WK2AGWnwD37I9kh2zvq'}, {'public': None, 'snapshot_id': '+vMVeKIqmjtjzAoZGwbi0v8AZLbPFrqUgmAqBYblyJQyi8p3l43TNdqqPSwNKPPd', 'id': '5DygagPVN6KmHre6MJNAJ4', 'tracks': {'href': 'https://api.spotify.com/v1/users/bigju85/playlists/5DygagPVN6KmHre6MJNAJ4/tracks', 'total': 822}, 
'external_urls': {'spotify': 'http://open.spotify.com/user/bigju85/playlist/5DygagPVN6KmHre6MJNAJ4'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/bigju85'}, 'type': 'user', 'uri': 'spotify:user:bigju85', 'href': 'https://api.spotify.com/v1/users/bigju85', 'id': 'bigju85'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/467e12151d2e11c6813fa7804ee8d96ebd1d7ac5708eab87ad4662c94a67d17547766902d49c2d927681dea378e04e46d2f22cc9b722700436f685eca15e922378d2dca9c7d683665e472643b860a41f', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/467e12151d2e11c6813fa7804ee8d96ebd1d7ac5708eab87ad4662c94a67d17547766902d49c2d927681dea378e04e46d2f22cc9b722700436f685eca15e922378d2dca9c7d683665e472643b860a41f', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/467e12151d2e11c6813fa7804ee8d96ebd1d7ac5708eab87ad4662c94a67d17547766902d49c2d927681dea378e04e46d2f22cc9b722700436f685eca15e922378d2dca9c7d683665e472643b860a41f', 'width': 60}], 'uri': 'spotify:user:bigju85:playlist:5DygagPVN6KmHre6MJNAJ4', 'name': '90s Rap', 'href': 'https://api.spotify.com/v1/users/bigju85/playlists/5DygagPVN6KmHre6MJNAJ4'}, {'public': None, 'snapshot_id': 'b5YyIRMEU/0WE+OYt2CGO7uoRjSRccLWKzcJzc04gE35qSICKr+KDxO1mZE+v+/H', 'id': '6ZblzG0k7kzs0Ex7EdFWY6', 'tracks': {'href': 'https://api.spotify.com/v1/users/1232709737/playlists/6ZblzG0k7kzs0Ex7EdFWY6/tracks', 'total': 136}, 'external_urls': {'spotify': 'http://open.spotify.com/user/1232709737/playlist/6ZblzG0k7kzs0Ex7EdFWY6'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/1232709737'}, 'type': 'user', 'uri': 'spotify:user:1232709737', 'href': 'https://api.spotify.com/v1/users/1232709737', 'id': '1232709737'}, 'collaborative': False, 'images': [{'height': 640, 'url': 
'https://mosaic.scdn.co/640/4bc3fa03ea66c557fa889ee9684ddc8b2ed94a5e3a1ec3bdc44d89ed93bf99d12138139469731b143a5443882440686191df60791974c312a14eeeb1443f7f3365178fb5a4050a516ab3221388daaf64', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/4bc3fa03ea66c557fa889ee9684ddc8b2ed94a5e3a1ec3bdc44d89ed93bf99d12138139469731b143a5443882440686191df60791974c312a14eeeb1443f7f3365178fb5a4050a516ab3221388daaf64', 'width': 300}, {'height': 60, 'url': 'https://mosaic.scdn.co/60/4bc3fa03ea66c557fa889ee9684ddc8b2ed94a5e3a1ec3bdc44d89ed93bf99d12138139469731b143a5443882440686191df60791974c312a14eeeb1443f7f3365178fb5a4050a516ab3221388daaf64', 'width': 60}], 'uri': 'spotify:user:1232709737:playlist:6ZblzG0k7kzs0Ex7EdFWY6', 'name': '90s PARTY', 'href': 'https://api.spotify.com/v1/users/1232709737/playlists/6ZblzG0k7kzs0Ex7EdFWY6'}, {'public': None, 'snapshot_id': 'sqjT9Ws60MZTay5r3UzHelKm9qD/mgeZ9kYFyFw9Og4nX4mVvgz3WhA+Red3MfTF', 'id': '5t30PswiZDYfZAIrTwGr2V', 'tracks': {'href': 'https://api.spotify.com/v1/users/12143691853/playlists/5t30PswiZDYfZAIrTwGr2V/tracks', 'total': 88}, 'external_urls': {'spotify': 'http://open.spotify.com/user/12143691853/playlist/5t30PswiZDYfZAIrTwGr2V'}, 'type': 'playlist', 'owner': {'external_urls': {'spotify': 'http://open.spotify.com/user/12143691853'}, 'type': 'user', 'uri': 'spotify:user:12143691853', 'href': 'https://api.spotify.com/v1/users/12143691853', 'id': '12143691853'}, 'collaborative': False, 'images': [{'height': 640, 'url': 'https://mosaic.scdn.co/640/eb646ba4b6c6db5fcc180d091512bdcf497ed3f063e47b9e1420e2effd4d4537c464c27e6e598f2ed1fec99e6c187e2f67bc28d5367d5db9664b1835f7c286b445154de89205dcfd05e3db6647555d87', 'width': 640}, {'height': 300, 'url': 'https://mosaic.scdn.co/300/eb646ba4b6c6db5fcc180d091512bdcf497ed3f063e47b9e1420e2effd4d4537c464c27e6e598f2ed1fec99e6c187e2f67bc28d5367d5db9664b1835f7c286b445154de89205dcfd05e3db6647555d87', 'width': 300}, {'height': 60, 'url': 
'https://mosaic.scdn.co/60/eb646ba4b6c6db5fcc180d091512bdcf497ed3f063e47b9e1420e2effd4d4537c464c27e6e598f2ed1fec99e6c187e2f67bc28d5367d5db9664b1835f7c286b445154de89205dcfd05e3db6647555d87', 'width': 60}], 'uri': 'spotify:user:12143691853:playlist:5t30PswiZDYfZAIrTwGr2V', 'name': 'lovesongs 80s to 90s', 'href': 'https://api.spotify.com/v1/users/12143691853/playlists/5t30PswiZDYfZAIrTwGr2V'}], 'total': 2538}} # 17. What is the data type of the search result? Print it. print(type(playlist_search_result)) # 18. What are all of the keys that the search result has? print(playlist_search_result.keys()) # 19. Take a look at 'playlists' - what keys does it have? print(playlist_search_result['playlists'].keys()) # 20. Save the list of playlists into a variable called 'playlists' playlist_info = playlist_search_result['playlists'] playlists = playlist_info['items'] # 21. Print the title of every playlist for every_item in playlists: print(every_item['name']) # 22. Loop through every playlist, printing its keys for every_item in playlists: print(every_item.keys()) # 23. What is the data type of a playlist's 'tracks'? #print(type(every_item['tracks'])) print(type(playlists[0]['tracks'])) # 24. Print the name and number of tracks for every playlist print(playlists[0]['tracks'].keys()) #for every_playlist in playlists: #print(every_item['name']) print(playlists[0]['tracks']['total']) for every_playlist in playlists: print(every_playlist['tracks']['total']) print(every_playlist['name']) # 25. We like curation! Loop through the playlists again, but only display those with fewer than 200 tracks. for every_playlist in playlists: if every_playlist['tracks']['total']< 200: print(every_playlist['name'])
import numpy as np
from sklearn.metrics import average_precision_score
from sklearn.metrics import roc_auc_score


def _fast_hist(label_true, label_pred, n_class):
    """Accumulate a confusion matrix between two flat label arrays.

    Samples whose true label falls outside ``[0, n_class)`` are ignored.

    Parameters
    ----------
    label_true, label_pred : 1-D integer arrays of equal length.
    n_class : number of classes.

    Returns
    -------
    (n_class, n_class) ndarray where entry [i, j] counts samples with
    true label i that were predicted as j.
    """
    mask = (label_true >= 0) & (label_true < n_class)
    hist = np.bincount(
        n_class * label_true[mask].astype(int) + label_pred[mask], minlength=n_class ** 2
    ).reshape(n_class, n_class)
    return hist


class Metrics:
    """Accumulates per-sample classification/confidence statistics.

    ``update`` is fed one batch of torch tensors at a time; ``get_scores``
    flattens the accumulated buffers and computes the metrics whose names
    appear in ``metrics``.
    """

    def __init__(self, metrics, len_dataset, n_classes):
        self.metrics = metrics          # iterable of metric names to compute
        self.len_dataset = len_dataset  # total sample count (denominator for accuracy)
        self.n_classes = n_classes
        # Per-sample buffers: correctness flag, error flag, confidence score.
        self.accurate, self.errors, self.proba_pred = [], [], []
        self.accuracy = 0
        self.current_miou = 0
        self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))

    def update(self, pred, target, confidence):
        """Accumulate one batch.

        Parameters
        ----------
        pred, target : torch tensors of predicted / ground-truth labels.
        confidence : torch tensor of per-sample confidence scores.
        """
        self.accurate.extend(pred.eq(target.view_as(pred)).detach().to("cpu").numpy())
        self.accuracy += pred.eq(target.view_as(pred)).sum().item()
        self.errors.extend((pred != target.view_as(pred)).detach().to("cpu").numpy())
        self.proba_pred.extend(confidence.detach().to("cpu").numpy())

        if "mean_iou" in self.metrics:
            # Reuse the module-level helper instead of the previous inline
            # copy of the same bincount/reshape logic (DRY fix; identical
            # result: mask on true labels, then joint bincount).
            self.confusion_matrix += _fast_hist(
                target.cpu().numpy().flatten(),
                pred.cpu().numpy().flatten(),
                self.n_classes,
            )

    def get_scores(self, split="train"):
        """Compute all requested metrics from the accumulated buffers.

        Returns a dict mapping ``"<split>/<metric>"`` to
        ``{"value": <float>, "string": <formatted percentage>}``.

        NOTE(review): the buffers are flattened in place here, so this is
        intended to be called once at the end of an evaluation pass.
        """
        self.accurate = np.reshape(self.accurate, newshape=(len(self.accurate), -1)).flatten()
        self.errors = np.reshape(self.errors, newshape=(len(self.errors), -1)).flatten()
        self.proba_pred = np.reshape(self.proba_pred, newshape=(len(self.proba_pred), -1)).flatten()

        scores = {}
        if "accuracy" in self.metrics:
            accuracy = self.accuracy / self.len_dataset
            scores[f"{split}/accuracy"] = {"value": accuracy, "string": f"{accuracy:05.2%}"}
        if "auc" in self.metrics:
            # roc_auc_score is undefined when only one class is present;
            # report a perfect AUC in that degenerate case.
            if len(np.unique(self.accurate)) == 1:
                auc = 1
            else:
                auc = roc_auc_score(self.accurate, self.proba_pred)
            scores[f"{split}/auc"] = {"value": auc, "string": f"{auc:05.2%}"}
        if "ap_success" in self.metrics:
            ap_success = average_precision_score(self.accurate, self.proba_pred)
            scores[f"{split}/ap_success"] = {"value": ap_success, "string": f"{ap_success:05.2%}"}
        if "accuracy_success" in self.metrics:
            # Mean rounded confidence over correctly-classified samples.
            accuracy_success = np.round(self.proba_pred[self.accurate == 1]).mean()
            scores[f"{split}/accuracy_success"] = {
                "value": accuracy_success,
                "string": f"{accuracy_success:05.2%}",
            }
        if "ap_errors" in self.metrics:
            # Confidence is negated so that LOW confidence ranks errors first.
            ap_errors = average_precision_score(self.errors, -self.proba_pred)
            scores[f"{split}/ap_errors"] = {"value": ap_errors, "string": f"{ap_errors:05.2%}"}
        if "accuracy_errors" in self.metrics:
            # NOTE(review): mean of an empty slice yields NaN when there are
            # no errors — presumably callers guarantee at least one; confirm.
            accuracy_errors = 1.0 - np.round(self.proba_pred[self.errors == 1]).mean()
            scores[f"{split}/accuracy_errors"] = {
                "value": accuracy_errors,
                "string": f"{accuracy_errors:05.2%}",
            }
        if "fpr_at_95tpr" in self.metrics:
            # Sweep ~10000 thresholds from min to max confidence and report
            # the FPR at the first threshold whose TPR lands in
            # [94.95%, 95.05%].  NOTE(review): if no threshold ever hits
            # that band (or all confidences are equal, making the step 0)
            # the metric is silently absent from the result.
            for i, delta in enumerate(np.arange(
                self.proba_pred.min(),
                self.proba_pred.max(),
                (self.proba_pred.max() - self.proba_pred.min()) / 10000,
            )):
                tpr = len(self.proba_pred[(self.accurate == 1) & (self.proba_pred >= delta)]) / len(
                    self.proba_pred[(self.accurate == 1)]
                )
                if i % 100 == 0:
                    print(f"Threshold:\t {delta:.6f}")
                    print(f"TPR: \t\t {tpr:.4%}")
                    print("------")
                if 0.9505 >= tpr >= 0.9495:
                    print(f"Nearest threshold 95% TPR value: {tpr:.6f}")
                    print(f"Threshold 95% TPR value: {delta:.6f}")
                    fpr = len(
                        self.proba_pred[(self.errors == 1) & (self.proba_pred >= delta)]
                    ) / len(self.proba_pred[(self.errors == 1)])
                    scores[f"{split}/fpr_at_95tpr"] = {"value": fpr, "string": f"{fpr:05.2%}"}
                    break
        if "mean_iou" in self.metrics:
            # Per-class IoU = diag / (row-sum + col-sum - diag); classes never
            # seen produce NaN, which nanmean skips.
            iou = np.diag(self.confusion_matrix) / (
                self.confusion_matrix.sum(axis=1)
                + self.confusion_matrix.sum(axis=0)
                - np.diag(self.confusion_matrix)
            )
            mean_iou = np.nanmean(iou)
            scores[f"{split}/mean_iou"] = {"value": mean_iou, "string": f"{mean_iou:05.2%}"}

        return scores
import numpy as np
from sklearn.metrics import average_precision_score
from sklearn.metrics import roc_auc_score


def _fast_hist(label_true, label_pred, n_class):
    """Accumulate a confusion matrix between two flat label arrays.

    Samples whose true label falls outside ``[0, n_class)`` are ignored.

    Parameters
    ----------
    label_true, label_pred : 1-D integer arrays of equal length.
    n_class : number of classes.

    Returns
    -------
    (n_class, n_class) ndarray where entry [i, j] counts samples with
    true label i that were predicted as j.
    """
    mask = (label_true >= 0) & (label_true < n_class)
    hist = np.bincount(
        n_class * label_true[mask].astype(int) + label_pred[mask], minlength=n_class ** 2
    ).reshape(n_class, n_class)
    return hist


class Metrics:
    """Accumulates per-sample classification/confidence statistics.

    ``update`` is fed one batch of torch tensors at a time; ``get_scores``
    flattens the accumulated buffers and computes the metrics whose names
    appear in ``metrics``.
    """

    def __init__(self, metrics, len_dataset, n_classes):
        self.metrics = metrics          # iterable of metric names to compute
        self.len_dataset = len_dataset  # total sample count (denominator for accuracy)
        self.n_classes = n_classes
        # Per-sample buffers: correctness flag, error flag, confidence score.
        self.accurate, self.errors, self.proba_pred = [], [], []
        self.accuracy = 0
        self.current_miou = 0
        self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))

    def update(self, pred, target, confidence):
        """Accumulate one batch.

        Parameters
        ----------
        pred, target : torch tensors of predicted / ground-truth labels.
        confidence : torch tensor of per-sample confidence scores.
        """
        self.accurate.extend(pred.eq(target.view_as(pred)).detach().to("cpu").numpy())
        self.accuracy += pred.eq(target.view_as(pred)).sum().item()
        self.errors.extend((pred != target.view_as(pred)).detach().to("cpu").numpy())
        self.proba_pred.extend(confidence.detach().to("cpu").numpy())

        if "mean_iou" in self.metrics:
            # Reuse the module-level helper instead of the previous inline
            # copy of the same bincount/reshape logic (DRY fix; identical
            # result: mask on true labels, then joint bincount).
            self.confusion_matrix += _fast_hist(
                target.cpu().numpy().flatten(),
                pred.cpu().numpy().flatten(),
                self.n_classes,
            )

    def get_scores(self, split="train"):
        """Compute all requested metrics from the accumulated buffers.

        Returns a dict mapping ``"<split>/<metric>"`` to
        ``{"value": <float>, "string": <formatted percentage>}``.

        NOTE(review): the buffers are flattened in place here, so this is
        intended to be called once at the end of an evaluation pass.
        """
        self.accurate = np.reshape(self.accurate, newshape=(len(self.accurate), -1)).flatten()
        self.errors = np.reshape(self.errors, newshape=(len(self.errors), -1)).flatten()
        self.proba_pred = np.reshape(self.proba_pred, newshape=(len(self.proba_pred), -1)).flatten()

        scores = {}
        if "accuracy" in self.metrics:
            accuracy = self.accuracy / self.len_dataset
            scores[f"{split}/accuracy"] = {"value": accuracy, "string": f"{accuracy:05.2%}"}
        if "auc" in self.metrics:
            # roc_auc_score is undefined when only one class is present;
            # report a perfect AUC in that degenerate case.
            if len(np.unique(self.accurate)) == 1:
                auc = 1
            else:
                auc = roc_auc_score(self.accurate, self.proba_pred)
            scores[f"{split}/auc"] = {"value": auc, "string": f"{auc:05.2%}"}
        if "ap_success" in self.metrics:
            ap_success = average_precision_score(self.accurate, self.proba_pred)
            scores[f"{split}/ap_success"] = {"value": ap_success, "string": f"{ap_success:05.2%}"}
        if "accuracy_success" in self.metrics:
            # Mean rounded confidence over correctly-classified samples.
            accuracy_success = np.round(self.proba_pred[self.accurate == 1]).mean()
            scores[f"{split}/accuracy_success"] = {
                "value": accuracy_success,
                "string": f"{accuracy_success:05.2%}",
            }
        if "ap_errors" in self.metrics:
            # Confidence is negated so that LOW confidence ranks errors first.
            ap_errors = average_precision_score(self.errors, -self.proba_pred)
            scores[f"{split}/ap_errors"] = {"value": ap_errors, "string": f"{ap_errors:05.2%}"}
        if "accuracy_errors" in self.metrics:
            # NOTE(review): mean of an empty slice yields NaN when there are
            # no errors — presumably callers guarantee at least one; confirm.
            accuracy_errors = 1.0 - np.round(self.proba_pred[self.errors == 1]).mean()
            scores[f"{split}/accuracy_errors"] = {
                "value": accuracy_errors,
                "string": f"{accuracy_errors:05.2%}",
            }
        if "fpr_at_95tpr" in self.metrics:
            # Sweep ~10000 thresholds from min to max confidence and report
            # the FPR at the first threshold whose TPR lands in
            # [94.95%, 95.05%].  NOTE(review): if no threshold ever hits
            # that band (or all confidences are equal, making the step 0)
            # the metric is silently absent from the result.
            for i, delta in enumerate(np.arange(
                self.proba_pred.min(),
                self.proba_pred.max(),
                (self.proba_pred.max() - self.proba_pred.min()) / 10000,
            )):
                tpr = len(self.proba_pred[(self.accurate == 1) & (self.proba_pred >= delta)]) / len(
                    self.proba_pred[(self.accurate == 1)]
                )
                if i % 100 == 0:
                    print(f"Threshold:\t {delta:.6f}")
                    print(f"TPR: \t\t {tpr:.4%}")
                    print("------")
                if 0.9505 >= tpr >= 0.9495:
                    print(f"Nearest threshold 95% TPR value: {tpr:.6f}")
                    print(f"Threshold 95% TPR value: {delta:.6f}")
                    fpr = len(
                        self.proba_pred[(self.errors == 1) & (self.proba_pred >= delta)]
                    ) / len(self.proba_pred[(self.errors == 1)])
                    scores[f"{split}/fpr_at_95tpr"] = {"value": fpr, "string": f"{fpr:05.2%}"}
                    break
        if "mean_iou" in self.metrics:
            # Per-class IoU = diag / (row-sum + col-sum - diag); classes never
            # seen produce NaN, which nanmean skips.
            iou = np.diag(self.confusion_matrix) / (
                self.confusion_matrix.sum(axis=1)
                + self.confusion_matrix.sum(axis=0)
                - np.diag(self.confusion_matrix)
            )
            mean_iou = np.nanmean(iou)
            scores[f"{split}/mean_iou"] = {"value": mean_iou, "string": f"{mean_iou:05.2%}"}

        return scores
""" cartesian grid """ from __future__ import annotations import logging import typing as T import numpy as np from .. import read from ..coord import geog2geomag, geomag2geog from .uniform import altitude_grid, grid1d def cart3d(p: dict[str, T.Any]) -> dict[str, T.Any]: """make cartesian grid Parameters ----------- p: dict simulation parameters Returns ------- xg: dict simulation grid """ # %%create altitude grid # original Matlab params # p.alt_min = 80e3; # p.alt_max = 1000e3; # p.alt_scale = [10e3, 8e3, 500e3, 150e3]; if {"alt_min", "alt_max", "alt_scale", "Bincl"} <= p.keys(): # https://docs.python.org/3/library/stdtypes.html#frozenset.issubset z = altitude_grid(p["alt_min"], p["alt_max"], p["Bincl"], p["alt_scale"]) elif "eq_dir" in p and p["eq_dir"].is_file(): logging.info(f"reusing grid from {p["eq_dir"]}") xeq = read.grid(p["eq_dir"]) z = xeq["x1"] del xeq elif {"alt_min", "alt_max", "lzp"} <= p.keys(): logging.info("make uniform altitude grid") z = np.linspace(p["alt_min"], p["alt_max"], p["lzp"]) dz = z[1] - z[0] z = np.concatenate((z[0] - 2 * dz, z[0] - dz, z, z[-1] + dz, z[-1] + 2 * dz)) else: raise ValueError("must specify altitude grid parameters or grid file to reuse") # %% TRANSVERSE GRID (BASED ON SIZE OF CURRENT REGION SPECIFIED ABOVE) # EAST if "x2parms" in p: x = grid1d(p["xdist"], p["lxp"], p["x2parms"]) else: x = grid1d(p["xdist"], p["lxp"]) # NORTH if "x3parms" in p: y = grid1d(p["ydist"], p["lyp"], p["x3parms"]) else: y = grid1d(p["ydist"], p["lyp"]) # %% COMPUTE CELL WALL LOCATIONS lx2 = x.size xi = np.empty(lx2 + 1) xi[1:-1] = 1 / 2 * (x[1:] + x[:-1]) xi[0] = x[0] - 1 / 2 * (x[1] - x[0]) xi[-1] = x[-1] + 1 / 2 * (x[-1] - x[-2]) lx3 = y.size yi = np.empty(lx3 + 1) yi[1:-1] = 1 / 2 * (y[1:] + y[:-1]) yi[0] = y[0] - 1 / 2 * (y[1] - y[0]) yi[-1] = y[-1] + 1 / 2 * (y[-1] - y[-2]) lx1 = z.size zi = np.empty(lx1 + 1) zi[1:-1] = 1 / 2 * (z[1:] + z[:-1]) zi[0] = z[0] - 1 / 2 * (z[1] - z[0]) zi[-1] = z[-1] + 1 / 2 * (z[-1] - z[-2]) # %% 
GRAVITATIONAL FIELD COMPONENTS IN DIPOLE SYSTEM Re = 6370e3 G = 6.67428e-11 Me = 5.9722e24 r = z + Re g = G * Me / r ** 2 gz = np.broadcast_to(-g[:, None, None], (g.size, lx2, lx3)) assert gz.shape == (lx1, lx2, lx3) # DISTANCE EW AND NS (FROM ENU (or UEN in our case - cyclic permuted) COORD. SYSTEM) # #NEED TO BE CONVERTED TO DIPOLE SPHERICAL AND THEN # GLAT/GLONG - BASICALLY HERE WE ARE MAPPING THE CARTESIAN GRID ONTO THE # SURFACE OF A SPHERE THEN CONVERTING TO GEOGRAPHIC. # get the magnetic coordinates of the grid center, based on user input thetactr, phictr = geog2geomag(p["glat"], p["glon"]) # %% Center of earth distance r = Re + z r = np.broadcast_to(r[:, None, None], (r.size, lx2, lx3)) assert r.shape == (lx1, lx2, lx3) # %% Northward angular distance gamma2 = y / Re # must retain the sign of x3 theta = thetactr - gamma2 # minus because distance north is against theta's direction theta = np.broadcast_to(theta[None, None, :], (lx1, lx2, theta.size)) assert theta.shape == (lx1, lx2, lx3) # %% Eastward angular distance # gamma1=x/Re; %must retain the sign of x2 gamma1 = x / Re / np.sin(thetactr) # must retain the sign of x2, just use theta of center of grid phi = phictr + gamma1 phi = np.broadcast_to(phi[None, :, None], (lx1, phi.size, lx3)) assert phi.shape == (lx1, lx2, lx3) # %% COMPUTE THE GEOGRAPHIC COORDINATES OF EACH GRID POINT glatgrid, glongrid = geomag2geog(theta, phi) # %% COMPUTE ECEF CARTESIAN IN CASE THEY ARE NEEDED xECEF = r * np.sin(theta) * np.cos(phi) yECEF = r * np.sin(theta) * np.sin(phi) zECEF = r * np.cos(theta) # %% COMPUTE SPHERICAL ECEF UNIT VECTORS - CARTESIAN-ECEF COMPONENTS er = np.empty((lx1, lx2, lx3, 3)) etheta = np.empty_like(er) ephi = np.empty_like(er) er[:, :, :, 0] = np.sin(theta) * np.cos(phi) # xECEF-component of er er[:, :, :, 1] = np.sin(theta) * np.sin(phi) # yECEF er[:, :, :, 2] = np.cos(theta) # zECEF etheta[:, :, :, 0] = np.cos(theta) * np.cos(phi) etheta[:, :, :, 1] = np.cos(theta) * np.sin(phi) etheta[:, :, :, 2] = 
-np.sin(theta) ephi[:, :, :, 0] = -np.sin(phi) ephi[:, :, :, 1] = np.cos(phi) ephi[:, :, :, 2] = 0 # %% UEN UNIT VECTORS IN ECEF COMPONENTS e1 = er # up is the same direction as from ctr of earth e2 = ephi # e2 is same as ephi e3 = -etheta # etheta is positive south, e3 is pos. north # %% STORE RESULTS IN GRID DATA STRUCTURE xg = { "x1": z, "x2": x, "x3": y, "x1i": zi, "x2i": xi, "x3i": yi, } lx = (xg["x1"].size, xg["x2"].size, xg["x3"].size) xg["lx"] = np.array(lx) xg["dx1f"] = np.append(xg["x1"][1:] - xg["x1"][:-1], xg["x1"][-1] - xg["x1"][-2]) # FWD DIFF xg["dx1b"] = np.insert(xg["x1"][1:] - xg["x1"][:-1], 0, xg["x1"][1] - xg["x1"][0]) # BACK DIFF xg["dx1h"] = xg["x1i"][1:-1] - xg["x1i"][:-2] # MIDPOINT DIFFS xg["dx2f"] = np.append(xg["x2"][1:] - xg["x2"][:-1], xg["x2"][-1] - xg["x2"][-2]) # FWD DIFF xg["dx2b"] = np.insert(xg["x2"][1:] - xg["x2"][:-1], 0, xg["x2"][1] - xg["x2"][0]) # BACK DIFF xg["dx2h"] = xg["x2i"][1:-1] - xg["x2i"][:-2] # MIDPOINT DIFFS xg["dx3f"] = np.append(xg["x3"][1:] - xg["x3"][:-1], xg["x3"][-1] - xg["x3"][-2]) # FWD DIFF xg["dx3b"] = np.insert(xg["x3"][1:] - xg["x3"][:-1], 0, xg["x3"][1] - xg["x3"][0]) # BACK DIFF xg["dx3h"] = xg["x3i"][1:-1] - xg["x3i"][:-2] # MIDPOINT DIFFS xg["h1"] = np.ones(lx) xg["h2"] = np.ones(lx) xg["h3"] = np.ones(lx) xg["h1x1i"] = np.ones((lx[0] + 1, lx[1], lx[2])) xg["h2x1i"] = np.ones((lx[0] + 1, lx[1], lx[2])) xg["h3x1i"] = np.ones((lx[0] + 1, lx[1], lx[2])) xg["h1x2i"] = np.ones((lx[0], lx[1] + 1, lx[2])) xg["h2x2i"] = np.ones((lx[0], lx[1] + 1, lx[2])) xg["h3x2i"] = np.ones((lx[0], lx[1] + 1, lx[2])) xg["h1x3i"] = np.ones((lx[0], lx[1], lx[2] + 1)) xg["h2x3i"] = np.ones((lx[0], lx[1], lx[2] + 1)) xg["h3x3i"] = np.ones((lx[0], lx[1], lx[2] + 1)) # %% Cartesian, ECEF representation of curvilinar coordinates xg["e1"] = e1 xg["e2"] = e2 xg["e3"] = e3 # %% ECEF spherical coordinates xg["r"] = r xg["theta"] = theta xg["phi"] = phi # xg.rx1i=[]; xg.thetax1i=[]; # xg.rx2i=[]; xg.thetax2i=[]; # %% These are 
cartesian representations of the ECEF, spherical unit vectors xg["er"] = er xg["etheta"] = etheta xg["ephi"] = ephi xg["I"] = np.broadcast_to(p["Bincl"], (lx2, lx3)) # %% Cartesian ECEF coordinates xg["x"] = xECEF xg["z"] = zECEF xg["y"] = yECEF xg["alt"] = xg["r"] - Re # since we need a 3D array use xg.r here... xg["gx1"] = gz xg["gx2"] = np.zeros(lx) xg["gx3"] = np.zeros(lx) xg["Bmag"] = np.broadcast_to(-50000e-9, xg["lx"]) # minus for northern hemisphere... xg["glat"] = glatgrid xg["glon"] = glongrid # xg['xp']=x; xg['zp']=z; # xg['inull']=[]; xg["nullpts"] = np.zeros(lx) # %% TRIM DATA STRUCTURE TO BE THE SIZE FORTRAN EXPECTS # note: xgf is xg == True xgf = xg # indices corresponding to non-ghost cells for 1 dimension i1 = slice(2, lx[0] - 2) i2 = slice(2, lx[1] - 2) i3 = slice(2, lx[2] - 2) # any dx variable will not need to first element (backward diff of two ghost cells) idx1 = slice(1, lx[0]) idx2 = slice(1, lx[1]) idx3 = slice(1, lx[2]) # x1-interface variables need only non-ghost cell values (left interface) plus one ix1i = slice(2, lx[0] - 1) ix2i = slice(2, lx[1] - 1) ix3i = slice(2, lx[2] - 1) # remove ghost cells # now that indices have been define we can go ahead and make this change xgf["lx"] = xgf["lx"] - 4 xgf["dx1b"] = xgf["dx1b"][idx1] xgf["dx2b"] = xgf["dx2b"][idx2] xgf["dx3b"] = xgf["dx3b"][idx3] xgf["x1i"] = xgf["x1i"][ix1i] xgf["x2i"] = xgf["x2i"][ix2i] xgf["x3i"] = xgf["x3i"][ix3i] xgf["dx1h"] = xgf["dx1h"][i1] xgf["dx2h"] = xgf["dx2h"][i2] xgf["dx3h"] = xgf["dx3h"][i3] xgf["h1x1i"] = xgf["h1x1i"][ix1i, i2, i3] xgf["h2x1i"] = xgf["h2x1i"][ix1i, i2, i3] xgf["h3x1i"] = xgf["h3x1i"][ix1i, i2, i3] xgf["h1x2i"] = xgf["h1x2i"][i1, ix2i, i3] xgf["h2x2i"] = xgf["h2x2i"][i1, ix2i, i3] xgf["h3x2i"] = xgf["h3x2i"][i1, ix2i, i3] xgf["h1x3i"] = xgf["h1x3i"][i1, i2, ix3i] xgf["h2x3i"] = xgf["h2x3i"][i1, i2, ix3i] xgf["h3x3i"] = xgf["h3x3i"][i1, i2, ix3i] xgf["gx1"] = xgf["gx1"][i1, i2, i3] xgf["gx2"] = xgf["gx2"][i1, i2, i3] xgf["gx3"] = xgf["gx3"][i1, 
i2, i3] xgf["glat"] = xgf["glat"][i1, i2, i3] xgf["glon"] = xgf["glon"][i1, i2, i3] xgf["alt"] = xgf["alt"][i1, i2, i3] xgf["Bmag"] = xgf["Bmag"][i1, i2, i3] xgf["I"] = xgf["I"][i2, i3] xgf["nullpts"] = xgf["nullpts"][i1, i2, i3] xgf["e1"] = xgf["e1"][i1, i2, i3, :] xgf["e2"] = xgf["e2"][i1, i2, i3, :] xgf["e3"] = xgf["e3"][i1, i2, i3, :] xgf["er"] = xgf["er"][i1, i2, i3, :] xgf["etheta"] = xgf["etheta"][i1, i2, i3, :] xgf["ephi"] = xgf["ephi"][i1, i2, i3, :] xgf["r"] = xgf["r"][i1, i2, i3] xgf["theta"] = xgf["theta"][i1, i2, i3] xgf["phi"] = xgf["phi"][i1, i2, i3] xgf["x"] = xgf["x"][i1, i2, i3] xgf["y"] = xgf["y"][i1, i2, i3] xgf["z"] = xgf["z"][i1, i2, i3] xgf["glonctr"] = p["glon"] xgf["glatctr"] = p["glat"] return xgf
""" cartesian grid """ from __future__ import annotations import logging import typing as T import numpy as np from .. import read from ..coord import geog2geomag, geomag2geog from .uniform import altitude_grid, grid1d def cart3d(p: dict[str, T.Any]) -> dict[str, T.Any]: """make cartesian grid Parameters ----------- p: dict simulation parameters Returns ------- xg: dict simulation grid """ # %%create altitude grid # original Matlab params # p.alt_min = 80e3; # p.alt_max = 1000e3; # p.alt_scale = [10e3, 8e3, 500e3, 150e3]; if {"alt_min", "alt_max", "alt_scale", "Bincl"} <= p.keys(): # https://docs.python.org/3/library/stdtypes.html#frozenset.issubset z = altitude_grid(p["alt_min"], p["alt_max"], p["Bincl"], p["alt_scale"]) elif "eq_dir" in p and p["eq_dir"].is_file(): logging.info(f"reusing grid from {p['eq_dir']}") xeq = read.grid(p["eq_dir"]) z = xeq["x1"] del xeq elif {"alt_min", "alt_max", "lzp"} <= p.keys(): logging.info("make uniform altitude grid") z = np.linspace(p["alt_min"], p["alt_max"], p["lzp"]) dz = z[1] - z[0] z = np.concatenate((z[0] - 2 * dz, z[0] - dz, z, z[-1] + dz, z[-1] + 2 * dz)) else: raise ValueError("must specify altitude grid parameters or grid file to reuse") # %% TRANSVERSE GRID (BASED ON SIZE OF CURRENT REGION SPECIFIED ABOVE) # EAST if "x2parms" in p: x = grid1d(p["xdist"], p["lxp"], p["x2parms"]) else: x = grid1d(p["xdist"], p["lxp"]) # NORTH if "x3parms" in p: y = grid1d(p["ydist"], p["lyp"], p["x3parms"]) else: y = grid1d(p["ydist"], p["lyp"]) # %% COMPUTE CELL WALL LOCATIONS lx2 = x.size xi = np.empty(lx2 + 1) xi[1:-1] = 1 / 2 * (x[1:] + x[:-1]) xi[0] = x[0] - 1 / 2 * (x[1] - x[0]) xi[-1] = x[-1] + 1 / 2 * (x[-1] - x[-2]) lx3 = y.size yi = np.empty(lx3 + 1) yi[1:-1] = 1 / 2 * (y[1:] + y[:-1]) yi[0] = y[0] - 1 / 2 * (y[1] - y[0]) yi[-1] = y[-1] + 1 / 2 * (y[-1] - y[-2]) lx1 = z.size zi = np.empty(lx1 + 1) zi[1:-1] = 1 / 2 * (z[1:] + z[:-1]) zi[0] = z[0] - 1 / 2 * (z[1] - z[0]) zi[-1] = z[-1] + 1 / 2 * (z[-1] - z[-2]) # %% 
GRAVITATIONAL FIELD COMPONENTS IN DIPOLE SYSTEM Re = 6370e3 G = 6.67428e-11 Me = 5.9722e24 r = z + Re g = G * Me / r ** 2 gz = np.broadcast_to(-g[:, None, None], (g.size, lx2, lx3)) assert gz.shape == (lx1, lx2, lx3) # DISTANCE EW AND NS (FROM ENU (or UEN in our case - cyclic permuted) COORD. SYSTEM) # #NEED TO BE CONVERTED TO DIPOLE SPHERICAL AND THEN # GLAT/GLONG - BASICALLY HERE WE ARE MAPPING THE CARTESIAN GRID ONTO THE # SURFACE OF A SPHERE THEN CONVERTING TO GEOGRAPHIC. # get the magnetic coordinates of the grid center, based on user input thetactr, phictr = geog2geomag(p["glat"], p["glon"]) # %% Center of earth distance r = Re + z r = np.broadcast_to(r[:, None, None], (r.size, lx2, lx3)) assert r.shape == (lx1, lx2, lx3) # %% Northward angular distance gamma2 = y / Re # must retain the sign of x3 theta = thetactr - gamma2 # minus because distance north is against theta's direction theta = np.broadcast_to(theta[None, None, :], (lx1, lx2, theta.size)) assert theta.shape == (lx1, lx2, lx3) # %% Eastward angular distance # gamma1=x/Re; %must retain the sign of x2 gamma1 = x / Re / np.sin(thetactr) # must retain the sign of x2, just use theta of center of grid phi = phictr + gamma1 phi = np.broadcast_to(phi[None, :, None], (lx1, phi.size, lx3)) assert phi.shape == (lx1, lx2, lx3) # %% COMPUTE THE GEOGRAPHIC COORDINATES OF EACH GRID POINT glatgrid, glongrid = geomag2geog(theta, phi) # %% COMPUTE ECEF CARTESIAN IN CASE THEY ARE NEEDED xECEF = r * np.sin(theta) * np.cos(phi) yECEF = r * np.sin(theta) * np.sin(phi) zECEF = r * np.cos(theta) # %% COMPUTE SPHERICAL ECEF UNIT VECTORS - CARTESIAN-ECEF COMPONENTS er = np.empty((lx1, lx2, lx3, 3)) etheta = np.empty_like(er) ephi = np.empty_like(er) er[:, :, :, 0] = np.sin(theta) * np.cos(phi) # xECEF-component of er er[:, :, :, 1] = np.sin(theta) * np.sin(phi) # yECEF er[:, :, :, 2] = np.cos(theta) # zECEF etheta[:, :, :, 0] = np.cos(theta) * np.cos(phi) etheta[:, :, :, 1] = np.cos(theta) * np.sin(phi) etheta[:, :, :, 2] = 
-np.sin(theta) ephi[:, :, :, 0] = -np.sin(phi) ephi[:, :, :, 1] = np.cos(phi) ephi[:, :, :, 2] = 0 # %% UEN UNIT VECTORS IN ECEF COMPONENTS e1 = er # up is the same direction as from ctr of earth e2 = ephi # e2 is same as ephi e3 = -etheta # etheta is positive south, e3 is pos. north # %% STORE RESULTS IN GRID DATA STRUCTURE xg = { "x1": z, "x2": x, "x3": y, "x1i": zi, "x2i": xi, "x3i": yi, } lx = (xg["x1"].size, xg["x2"].size, xg["x3"].size) xg["lx"] = np.array(lx) xg["dx1f"] = np.append(xg["x1"][1:] - xg["x1"][:-1], xg["x1"][-1] - xg["x1"][-2]) # FWD DIFF xg["dx1b"] = np.insert(xg["x1"][1:] - xg["x1"][:-1], 0, xg["x1"][1] - xg["x1"][0]) # BACK DIFF xg["dx1h"] = xg["x1i"][1:-1] - xg["x1i"][:-2] # MIDPOINT DIFFS xg["dx2f"] = np.append(xg["x2"][1:] - xg["x2"][:-1], xg["x2"][-1] - xg["x2"][-2]) # FWD DIFF xg["dx2b"] = np.insert(xg["x2"][1:] - xg["x2"][:-1], 0, xg["x2"][1] - xg["x2"][0]) # BACK DIFF xg["dx2h"] = xg["x2i"][1:-1] - xg["x2i"][:-2] # MIDPOINT DIFFS xg["dx3f"] = np.append(xg["x3"][1:] - xg["x3"][:-1], xg["x3"][-1] - xg["x3"][-2]) # FWD DIFF xg["dx3b"] = np.insert(xg["x3"][1:] - xg["x3"][:-1], 0, xg["x3"][1] - xg["x3"][0]) # BACK DIFF xg["dx3h"] = xg["x3i"][1:-1] - xg["x3i"][:-2] # MIDPOINT DIFFS xg["h1"] = np.ones(lx) xg["h2"] = np.ones(lx) xg["h3"] = np.ones(lx) xg["h1x1i"] = np.ones((lx[0] + 1, lx[1], lx[2])) xg["h2x1i"] = np.ones((lx[0] + 1, lx[1], lx[2])) xg["h3x1i"] = np.ones((lx[0] + 1, lx[1], lx[2])) xg["h1x2i"] = np.ones((lx[0], lx[1] + 1, lx[2])) xg["h2x2i"] = np.ones((lx[0], lx[1] + 1, lx[2])) xg["h3x2i"] = np.ones((lx[0], lx[1] + 1, lx[2])) xg["h1x3i"] = np.ones((lx[0], lx[1], lx[2] + 1)) xg["h2x3i"] = np.ones((lx[0], lx[1], lx[2] + 1)) xg["h3x3i"] = np.ones((lx[0], lx[1], lx[2] + 1)) # %% Cartesian, ECEF representation of curvilinar coordinates xg["e1"] = e1 xg["e2"] = e2 xg["e3"] = e3 # %% ECEF spherical coordinates xg["r"] = r xg["theta"] = theta xg["phi"] = phi # xg.rx1i=[]; xg.thetax1i=[]; # xg.rx2i=[]; xg.thetax2i=[]; # %% These are 
cartesian representations of the ECEF, spherical unit vectors xg["er"] = er xg["etheta"] = etheta xg["ephi"] = ephi xg["I"] = np.broadcast_to(p["Bincl"], (lx2, lx3)) # %% Cartesian ECEF coordinates xg["x"] = xECEF xg["z"] = zECEF xg["y"] = yECEF xg["alt"] = xg["r"] - Re # since we need a 3D array use xg.r here... xg["gx1"] = gz xg["gx2"] = np.zeros(lx) xg["gx3"] = np.zeros(lx) xg["Bmag"] = np.broadcast_to(-50000e-9, xg["lx"]) # minus for northern hemisphere... xg["glat"] = glatgrid xg["glon"] = glongrid # xg['xp']=x; xg['zp']=z; # xg['inull']=[]; xg["nullpts"] = np.zeros(lx) # %% TRIM DATA STRUCTURE TO BE THE SIZE FORTRAN EXPECTS # note: xgf is xg == True xgf = xg # indices corresponding to non-ghost cells for 1 dimension i1 = slice(2, lx[0] - 2) i2 = slice(2, lx[1] - 2) i3 = slice(2, lx[2] - 2) # any dx variable will not need to first element (backward diff of two ghost cells) idx1 = slice(1, lx[0]) idx2 = slice(1, lx[1]) idx3 = slice(1, lx[2]) # x1-interface variables need only non-ghost cell values (left interface) plus one ix1i = slice(2, lx[0] - 1) ix2i = slice(2, lx[1] - 1) ix3i = slice(2, lx[2] - 1) # remove ghost cells # now that indices have been define we can go ahead and make this change xgf["lx"] = xgf["lx"] - 4 xgf["dx1b"] = xgf["dx1b"][idx1] xgf["dx2b"] = xgf["dx2b"][idx2] xgf["dx3b"] = xgf["dx3b"][idx3] xgf["x1i"] = xgf["x1i"][ix1i] xgf["x2i"] = xgf["x2i"][ix2i] xgf["x3i"] = xgf["x3i"][ix3i] xgf["dx1h"] = xgf["dx1h"][i1] xgf["dx2h"] = xgf["dx2h"][i2] xgf["dx3h"] = xgf["dx3h"][i3] xgf["h1x1i"] = xgf["h1x1i"][ix1i, i2, i3] xgf["h2x1i"] = xgf["h2x1i"][ix1i, i2, i3] xgf["h3x1i"] = xgf["h3x1i"][ix1i, i2, i3] xgf["h1x2i"] = xgf["h1x2i"][i1, ix2i, i3] xgf["h2x2i"] = xgf["h2x2i"][i1, ix2i, i3] xgf["h3x2i"] = xgf["h3x2i"][i1, ix2i, i3] xgf["h1x3i"] = xgf["h1x3i"][i1, i2, ix3i] xgf["h2x3i"] = xgf["h2x3i"][i1, i2, ix3i] xgf["h3x3i"] = xgf["h3x3i"][i1, i2, ix3i] xgf["gx1"] = xgf["gx1"][i1, i2, i3] xgf["gx2"] = xgf["gx2"][i1, i2, i3] xgf["gx3"] = xgf["gx3"][i1, 
i2, i3] xgf["glat"] = xgf["glat"][i1, i2, i3] xgf["glon"] = xgf["glon"][i1, i2, i3] xgf["alt"] = xgf["alt"][i1, i2, i3] xgf["Bmag"] = xgf["Bmag"][i1, i2, i3] xgf["I"] = xgf["I"][i2, i3] xgf["nullpts"] = xgf["nullpts"][i1, i2, i3] xgf["e1"] = xgf["e1"][i1, i2, i3, :] xgf["e2"] = xgf["e2"][i1, i2, i3, :] xgf["e3"] = xgf["e3"][i1, i2, i3, :] xgf["er"] = xgf["er"][i1, i2, i3, :] xgf["etheta"] = xgf["etheta"][i1, i2, i3, :] xgf["ephi"] = xgf["ephi"][i1, i2, i3, :] xgf["r"] = xgf["r"][i1, i2, i3] xgf["theta"] = xgf["theta"][i1, i2, i3] xgf["phi"] = xgf["phi"][i1, i2, i3] xgf["x"] = xgf["x"][i1, i2, i3] xgf["y"] = xgf["y"][i1, i2, i3] xgf["z"] = xgf["z"][i1, i2, i3] xgf["glonctr"] = p["glon"] xgf["glatctr"] = p["glat"] return xgf
""" Plotting helpers ================ """ import os import joblib import logging import sklearn.metrics import pandas as pd from .list_runs import ListRuns import seaborn as sns import matplotlib.pyplot as plt import numpy as np import math logger = logging.getLogger(__name__) def plot_confusion_matrix( run, log_scale, normalize_sum, normalize_test, stylesheet, figsize_x, figsize_y, vmin, vmax, vmin_norm, vmax_norm, plot_formats ): f_path = os.path.join(os.getcwd(), 'output', run) if not os.path.isdir(f_path): raise FileNotFoundError(f'Could not find run directory {f_path}') test_output_file = os.path.join(os.getcwd(), 'output', run, 'test_output.csv') if not os.path.isfile(test_output_file): raise FileNotFoundError(f'No file {test_output_file} found for run {run}. Pass the option `write_test_output: true` when training the model.') if stylesheet: plt.style.use(stylesheet) df = pd.read_csv(test_output_file) labels = sorted(set(df.label).union(set(df.prediction))) cnf_matrix = sklearn.metrics.confusion_matrix(df.label, df.prediction) df = pd.DataFrame(cnf_matrix, columns=labels, index=labels) # Plotting fig, ax = plt.subplots(1, 1, figsize=(figsize_x, figsize_y)) fmt = 'd' f_name = run df_sum = df.sum().sum() vmin = df.min().min() if vmin is None else vmin vmax = df.max().max() if vmax is None else vmax if normalize_sum: df = df.div(df.sum().sum().astype(float)) * 1000 vmin = df.min().min() if vmin_norm is None else vmin_norm vmax = df.max().max() if vmax_norm is None else vmax_norm fmt = '3.0f' f_name += '_normalized_sum' elif normalize_test: df = df.divide(df.sum(axis=1), axis=0) * 1000 vmin = df.min().min() if vmax_norm is None else vmax_norm vmax = df.max().max() if vmin_norm is None else vmin_norm fmt = '3.0f' f_name += '_normalized_test' df_annot = df.copy() if log_scale: vmin = np.log(vmin) if vmin >= 1 else 0.0 vmax = np.log(vmax) df = np.log(df + 1) f_name += '_log_scale' ax = sns.heatmap( df, ax=ax, annot=df_annot, fmt=fmt, annot_kws={"fontsize": 8}, 
vmin=vmin, vmax=vmax) if log_scale: # Set colorbar ticks cbar = ax.collections[0].colorbar ticks = list(range(math.floor(vmin), math.ceil(vmax))) cbar.set_ticks(ticks) exp_0 = lambda x: np.exp(x) if x > 0 else 0.0 cbar.set_ticklabels(np.vectorize(exp_0)(ticks).astype(int)) ax.set(xlabel='predicted label', ylabel='true label') if normalize_sum or normalize_test: ax.set_title(f'Total samples: {df_sum}') save_fig(fig, 'confusion_matrix', f_name, plot_formats=plot_formats) def plot_compare_runs(runs, performance_scores, order_by): df = [] run_dict = {} for run in runs: if ':' in run: run_name, alt_name = run.split(':') run_dict[run_name] = alt_name else: run_dict[run] = run for run, alt_name in run_dict.items(): _df = ListRuns.collect_results(run=run) _df['name'] = alt_name if len(_df) == 0: raise FileNotFoundError(f'Could not find the run "{run}" in ./output/') elif len(_df) > 1: raise ValueError(f'Run name "{run}" is not unique. Found {len(_df):,} matching runs for this pattern.') df.append(_df) df = pd.concat(df) # collect scores and vlines scores = [] vlines = [] for score in performance_scores: if ':' in score: score, _ = score.split(':') vlines.append(score) scores.append(score) scores = list(set(scores)) vlines = list(set(vlines)) # melt df = df[['name', *scores]].melt(id_vars=['name'], var_name='performance', value_name='score') # order hue_order = None order = None if order_by is not None: order = df[df.performance == order_by].sort_values('score').name.tolist() hue_order = df[df.name == order[-1]].sort_values('score').performance.tolist() hue_order.remove(order_by) hue_order.insert(0, order_by) # plot g = sns.catplot(x='score', y='name', hue='performance', kind='bar', orient='h', ci=None, aspect=2, palette='colorblind', data=df, order=order, hue_order=hue_order) for vline_score in vlines: vline_values = df[df.performance == vline_score]['score'].values for v in vline_values: g.ax.axvline(v, ls='--', c='.1', lw=.5) fig = plt.gcf() save_fig(fig, 'compare_runs', 
'-'.join(run_dict.values())) def plot_label_distribution(data_path, mode='test', label='category', merged=True): assert mode in ['train', 'test'] assert label in ['category', 'type'] assert type(merged) == bool config_dir = [label] if merged: config_dir.append('merged') data_dir = os.path.join( data_path, mode, '_'.join(config_dir)) data_dir_unambiguous = os.path.join( data_path, mode, '_'.join(config_dir + ['unambiguous'])) title = f"{label.capitalize()} {mode.capitalize()} " \ f"{"Merged" if merged else ""}" df = pd.read_csv(os.path.join(data_dir, 'all.csv')) df_unambiguous = pd.read_csv(os.path.join(data_dir_unambiguous, 'all.csv')) labels = dict(df.label.value_counts()) labels_unambiguous = dict(df_unambiguous.label.value_counts()) # plotting fig, ax = plt.subplots(1, 1, figsize=(6, 4)) g = sns.barplot( x=list(labels.values()), y=list(labels.keys()), ax=ax, orient='h', label='Full', color=sns.color_palette('muted')[0], edgecolor='w') g.set_xscale('log') g_unambiguous = sns.barplot( x=list(labels_unambiguous.values()), y=list(labels_unambiguous.keys()), ax=ax, orient='h', label='Unambiguous', color=sns.color_palette('bright')[0], edgecolor='w') g_unambiguous.set_xscale('log') ax.legend(loc='lower right') ax.set(title=title, xlabel='Number of samples', ylabel='Label') save_fig(fig, 'label_distribution', data_dir) file_name = '_'.join(config_dir + [mode, 'label-distribution']) pics_dir = os.path.join(data_path, 'pics') if not os.path.isdir(pics_dir): os.mkdir(pics_dir) save_fig(fig, pics_dir, file_name) def save_fig(fig, fig_type, name, plot_formats=['png'], dpi=300): folder = os.path.join(os.getcwd(), 'plots', fig_type) if not os.path.isdir(folder): os.makedirs(folder) def f_name(fmt): f_name = '{}.{}'.format(name, fmt) return os.path.abspath(os.path.join(folder, f_name)) for fmt in plot_formats: f_path = f_name(fmt) logger.info(f'Writing figure file {f_path}') fig.savefig(f_path, bbox_inches='tight', dpi=dpi)
""" Plotting helpers ================ """ import os import joblib import logging import sklearn.metrics import pandas as pd from .list_runs import ListRuns import seaborn as sns import matplotlib.pyplot as plt import numpy as np import math logger = logging.getLogger(__name__) def plot_confusion_matrix( run, log_scale, normalize_sum, normalize_test, stylesheet, figsize_x, figsize_y, vmin, vmax, vmin_norm, vmax_norm, plot_formats ): f_path = os.path.join(os.getcwd(), 'output', run) if not os.path.isdir(f_path): raise FileNotFoundError(f'Could not find run directory {f_path}') test_output_file = os.path.join(os.getcwd(), 'output', run, 'test_output.csv') if not os.path.isfile(test_output_file): raise FileNotFoundError(f'No file {test_output_file} found for run {run}. Pass the option `write_test_output: true` when training the model.') if stylesheet: plt.style.use(stylesheet) df = pd.read_csv(test_output_file) labels = sorted(set(df.label).union(set(df.prediction))) cnf_matrix = sklearn.metrics.confusion_matrix(df.label, df.prediction) df = pd.DataFrame(cnf_matrix, columns=labels, index=labels) # Plotting fig, ax = plt.subplots(1, 1, figsize=(figsize_x, figsize_y)) fmt = 'd' f_name = run df_sum = df.sum().sum() vmin = df.min().min() if vmin is None else vmin vmax = df.max().max() if vmax is None else vmax if normalize_sum: df = df.div(df.sum().sum().astype(float)) * 1000 vmin = df.min().min() if vmin_norm is None else vmin_norm vmax = df.max().max() if vmax_norm is None else vmax_norm fmt = '3.0f' f_name += '_normalized_sum' elif normalize_test: df = df.divide(df.sum(axis=1), axis=0) * 1000 vmin = df.min().min() if vmax_norm is None else vmax_norm vmax = df.max().max() if vmin_norm is None else vmin_norm fmt = '3.0f' f_name += '_normalized_test' df_annot = df.copy() if log_scale: vmin = np.log(vmin) if vmin >= 1 else 0.0 vmax = np.log(vmax) df = np.log(df + 1) f_name += '_log_scale' ax = sns.heatmap( df, ax=ax, annot=df_annot, fmt=fmt, annot_kws={"fontsize": 8}, 
vmin=vmin, vmax=vmax) if log_scale: # Set colorbar ticks cbar = ax.collections[0].colorbar ticks = list(range(math.floor(vmin), math.ceil(vmax))) cbar.set_ticks(ticks) exp_0 = lambda x: np.exp(x) if x > 0 else 0.0 cbar.set_ticklabels(np.vectorize(exp_0)(ticks).astype(int)) ax.set(xlabel='predicted label', ylabel='true label') if normalize_sum or normalize_test: ax.set_title(f'Total samples: {df_sum}') save_fig(fig, 'confusion_matrix', f_name, plot_formats=plot_formats) def plot_compare_runs(runs, performance_scores, order_by): df = [] run_dict = {} for run in runs: if ':' in run: run_name, alt_name = run.split(':') run_dict[run_name] = alt_name else: run_dict[run] = run for run, alt_name in run_dict.items(): _df = ListRuns.collect_results(run=run) _df['name'] = alt_name if len(_df) == 0: raise FileNotFoundError(f'Could not find the run "{run}" in ./output/') elif len(_df) > 1: raise ValueError(f'Run name "{run}" is not unique. Found {len(_df):,} matching runs for this pattern.') df.append(_df) df = pd.concat(df) # collect scores and vlines scores = [] vlines = [] for score in performance_scores: if ':' in score: score, _ = score.split(':') vlines.append(score) scores.append(score) scores = list(set(scores)) vlines = list(set(vlines)) # melt df = df[['name', *scores]].melt(id_vars=['name'], var_name='performance', value_name='score') # order hue_order = None order = None if order_by is not None: order = df[df.performance == order_by].sort_values('score').name.tolist() hue_order = df[df.name == order[-1]].sort_values('score').performance.tolist() hue_order.remove(order_by) hue_order.insert(0, order_by) # plot g = sns.catplot(x='score', y='name', hue='performance', kind='bar', orient='h', ci=None, aspect=2, palette='colorblind', data=df, order=order, hue_order=hue_order) for vline_score in vlines: vline_values = df[df.performance == vline_score]['score'].values for v in vline_values: g.ax.axvline(v, ls='--', c='.1', lw=.5) fig = plt.gcf() save_fig(fig, 'compare_runs', 
'-'.join(run_dict.values())) def plot_label_distribution(data_path, mode='test', label='category', merged=True): assert mode in ['train', 'test'] assert label in ['category', 'type'] assert type(merged) == bool config_dir = [label] if merged: config_dir.append('merged') data_dir = os.path.join( data_path, mode, '_'.join(config_dir)) data_dir_unambiguous = os.path.join( data_path, mode, '_'.join(config_dir + ['unambiguous'])) title = f"{label.capitalize()} {mode.capitalize()} " \ f"{'Merged' if merged else ''}" df = pd.read_csv(os.path.join(data_dir, 'all.csv')) df_unambiguous = pd.read_csv(os.path.join(data_dir_unambiguous, 'all.csv')) labels = dict(df.label.value_counts()) labels_unambiguous = dict(df_unambiguous.label.value_counts()) # plotting fig, ax = plt.subplots(1, 1, figsize=(6, 4)) g = sns.barplot( x=list(labels.values()), y=list(labels.keys()), ax=ax, orient='h', label='Full', color=sns.color_palette('muted')[0], edgecolor='w') g.set_xscale('log') g_unambiguous = sns.barplot( x=list(labels_unambiguous.values()), y=list(labels_unambiguous.keys()), ax=ax, orient='h', label='Unambiguous', color=sns.color_palette('bright')[0], edgecolor='w') g_unambiguous.set_xscale('log') ax.legend(loc='lower right') ax.set(title=title, xlabel='Number of samples', ylabel='Label') save_fig(fig, 'label_distribution', data_dir) file_name = '_'.join(config_dir + [mode, 'label-distribution']) pics_dir = os.path.join(data_path, 'pics') if not os.path.isdir(pics_dir): os.mkdir(pics_dir) save_fig(fig, pics_dir, file_name) def save_fig(fig, fig_type, name, plot_formats=['png'], dpi=300): folder = os.path.join(os.getcwd(), 'plots', fig_type) if not os.path.isdir(folder): os.makedirs(folder) def f_name(fmt): f_name = '{}.{}'.format(name, fmt) return os.path.abspath(os.path.join(folder, f_name)) for fmt in plot_formats: f_path = f_name(fmt) logger.info(f'Writing figure file {f_path}') fig.savefig(f_path, bbox_inches='tight', dpi=dpi)
from __future__ import annotations
import re
from functools import wraps
from typing import Callable, Any, TypeVar

from warepy import format_message, snakefy
from puft.core.db.model_not_found_error import ModelNotFoundError
from puft.tools.log import log
from flask import Flask
import flask_migrate
from flask_sqlalchemy import SQLAlchemy
from flask_sqlalchemy import Model as BaseModel
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declared_attr

from puft.core.sv.sv import Sv

from .db_type_enum import DbTypeEnum

AnyModel = TypeVar('AnyModel', bound='orm.Model')


# TODO: Fix type hinting for decorated functions under this decorator.
def migration_implemented(func: Callable):
    """Guard decorator: only allow the call on a ``Db`` instance whose
    ``migration`` object has already been created (see ``Db.setup``)."""
    @wraps(func)
    def inner(self_instance, *args, **kwargs):
        if type(self_instance) is not Db:
            raise TypeError(
                "Decorator migration_implemented cannot be"
                f" applied to type {type(self_instance)}")
        elif not self_instance.migration:
            error_message = "Migrate object hasn't been implemented yet"
            raise AttributeError(error_message)
        else:
            return func(self_instance, *args, **kwargs)
    return inner


class Mapper(BaseModel):
    """Base orm model responsible of holding model's data and at least
    it's basic CRUD operations.

    Contains create(), get_first(), get_all() and delete_first() methods as
    Create, Retrieve and Delete representatives.

    Update representatives are defined individually at each subclass
    (e.g. `set_something()`), and by default accessed via basic model
    alteration, e.g. `MyModel.name = 'Another name'`.
    """
    # sqlalchemy used instead of `orm` class to avoid reference errors
    # https://flask-sqlalchemy.palletsprojects.com/en/2.x/customizing/
    id = sa.Column(sa.Integer, primary_key=True)

    @declared_attr
    def __tablename__(cls) -> str:
        # table name is the snake_cased class name
        cls_name: str = cls.__name__  # type: ignore
        return snakefy(cls_name)

    @declared_attr
    def __mapper_args__(cls) -> dict[str, Any]:
        args: dict[str, Any] = {}

        if hasattr(cls, 'type'):
            # If classes intended to build an inheritance tree, they must
            # include `type` attr
            args.update({
                'polymorphic_on': 'type',
                'polymorphic_identity': cls.__tablename__
            })

        return args

    @classmethod
    @log.catch(exclude=(NotImplementedError))
    def create(cls: AnyModel, **kwargs) -> AnyModel:
        """Create model and return it.

        Accepts all given kwargs and thus is recommended to be redefined at
        subclasses.

        Model creation intended to be done only through this method.
        """
        return cls(**kwargs)  # type: ignore

    @classmethod
    @log.catch(exclude=(ModelNotFoundError))
    def get_first(
            cls,
            order_by: object | list[object] | None = None,
            **kwargs) -> orm.Model:
        """Filter first ORM mapped model by given kwargs and return it.

        Raise:
            ValueError:
                No such ORM model in db matched given kwargs
        """
        query: Any = cls.query.filter_by(**kwargs)  # type: ignore
        if order_by is not None:
            query = cls._order_query(query, order_by)
        model: orm.Model = query.first()

        if not model:
            raise ModelNotFoundError(model_name=cls.__name__, **kwargs)
        else:
            return model

    @classmethod
    @log.catch(exclude=(ModelNotFoundError))
    def get_all(
            cls,
            order_by: object | list[object] | None = None,
            limit: int | None = None,
            **kwargs) -> list[orm.Model]:
        """Filter all ORM mapped models by given kwargs and return them.

        Raise:
            ValueError:
                No such ORM model in db matched given kwargs
        """
        query: Any = cls.query.filter_by(**kwargs)  # type: ignore
        if order_by is not None:
            query = cls._order_query(query, order_by)
        # BUGFIX: limit was previously applied via a redundant nested
        # if/elif pair; a single check is equivalent and applies the limit
        # regardless of ordering.
        if limit:
            query = query.limit(limit)
        models: list[orm.Model] = query.all()

        if not models:
            raise ModelNotFoundError(model_name=cls.__name__, **kwargs)
        else:
            return models

    @classmethod
    def delete_first(
            cls,
            order_by: object | list[object] | None = None,
            **kwargs) -> None:
        """Delete first accessed by `get_first()` method model."""
        db: Db = Db.instance()
        model: orm.Model = cls.get_first(order_by=order_by, **kwargs)
        db.native_db.session.delete(model)
        db.commit()

    @staticmethod
    def _order_query(query: Any, order_by: object | list[object]) -> object:
        # accept either a single ordering criterion or a list of them
        if type(order_by) is list:
            return query.order_by(*order_by)
        else:
            return query.order_by(order_by)


class orm:
    # Helper references for shorter writing at ORMs.
    # Ignore lines added for a workaround to fix issue:
    # https://github.com/microsoft/pylance-release/issues/187
    native_db = SQLAlchemy(model_class=Mapper)
    Model: Any = native_db.Model
    column = native_db.Column  # type: ignore
    integer = native_db.Integer  # type: ignore
    string = native_db.String  # type: ignore
    text = native_db.Text  # type: ignore
    float = native_db.Float  # type: ignore
    boolean = native_db.Boolean  # type: ignore
    foreign_key = native_db.ForeignKey  # type: ignore
    table = native_db.Table  # type: ignore
    check_constraint = native_db.CheckConstraint  # type: ignore
    relationship = native_db.relationship  # type: ignore
    backref = native_db.backref  # type: ignore
    pickle = native_db.PickleType  # type: ignore
    binary = native_db.LargeBinary  # type: ignore
    datetime = native_db.DateTime  # type: ignore


class Db(Sv):
    """Operates over database processes."""

    def __init__(self, config: dict) -> None:
        super().__init__(config)
        # BUGFIX: inner quotes must differ from the enclosing f-string
        # quotes; reusing double quotes is a SyntaxError on Python < 3.12
        # (PEP 701).
        self.DEFAULT_URI = f"sqlite:///{self.config['root_path']}/sqlite3.db"

        self.native_db = orm.native_db

        # For now sv config propagated to Db domain.
        self._assign_uri_from_config(config)

    def _assign_uri_from_config(self, config: dict) -> None:
        """Resolve ``self.uri`` and ``self.type_enum`` from the raw config
        ``uri`` value, falling back to the default sqlite location."""
        raw_uri = config.get("uri", None)  # type: str

        if not raw_uri:
            log.info("URI for database is not specified, using default")
            raw_uri = self.DEFAULT_URI
        else:
            # Case 1: SQLite Db.
            # Developer can give relative path to the Db (it will be absolutized at ConfigCell.parse()),
            # by setting sqlite Db extension to `.db`, e.g. `./instance/sqlite3.db`,
            # or by setting full absolute path with protocol, e.g. `sqlite:////home/user/project/instance/sqlite3.db`.
            if raw_uri.rfind(".db") != -1 or "sqlite:///" in raw_uri:
                if "sqlite:///" not in raw_uri:
                    # Set absolute path to db.
                    # Ref: https://stackoverflow.com/a/44687471/14748231
                    self.uri = "sqlite:///" + raw_uri
                else:
                    self.uri = raw_uri
                self.type_enum = DbTypeEnum.SQLITE
            # Case 2: PostgreSQL Db.
            elif re.match(r"postgresql(\+\w+)?://", raw_uri):
                # No need to calculate path since psql uri should be given in full form.
                self.uri = raw_uri
                self.type_enum = DbTypeEnum.PSQL
            else:
                raise ValueError(format_message("Unrecognized or yet unsupported type of Db uri: {}", raw_uri))

            # WARNING: Never print full Db uri to config, since it may contain user's password (as in case of
            # psql)
            log.info(f"Set db type: {self.type_enum.value}")

    @migration_implemented
    def init_migration(self, directory: str = "migrations", multidb: bool = False) -> None:
        """Initializes migration support for the application."""
        flask_migrate.init(directory=directory, multidb=multidb)

    @migration_implemented
    def migrate_migration(
            self,
            directory: str = "migrations",
            message=None,
            sql=False,
            head: str = "head",
            splice=False,
            branch_label=None,
            version_path=None,
            rev_id=None
    ) -> None:
        """Autogenerate a new migration revision (wraps flask_migrate.migrate)."""
        flask_migrate.migrate(
            directory=directory,
            message=message,
            sql=sql,
            head=head,
            splice=splice,
            branch_label=branch_label,
            version_path=version_path,
            rev_id=rev_id
        )

    @migration_implemented
    def upgrade_migration(
            self,
            directory: str = "migrations",
            revision: str = "head",
            sql=False,
            tag=None
    ) -> None:
        """Apply migrations up to the given revision (wraps flask_migrate.upgrade)."""
        flask_migrate.upgrade(
            directory=directory,
            revision=revision,
            sql=sql,
            tag=tag
        )

    def setup(self, flask_app: Flask) -> None:
        """Setup Db and migration object with given Flask app."""
        flask_app.config["SQLALCHEMY_DATABASE_URI"] = self.uri
        flask_app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False

        self.native_db.init_app(flask_app)

        # render_as_batch kwarg required only for sqlite3 databases to avoid
        # ALTER TABLE issue on migrations
        # https://blog.miguelgrinberg.com/post/fixing-alter-table-errors-with-flask-migrate-and-sqlite
        is_sqlite_db = self.type_enum is DbTypeEnum.SQLITE
        self.migration = flask_migrate.Migrate(flask_app, self.native_db, render_as_batch=is_sqlite_db)

    def get_native_db(self) -> SQLAlchemy:
        return self.native_db

    @migration_implemented
    def get_migration(self) -> flask_migrate.Migrate:
        """Return migration object.

        Raise:
            AttributeError: If Migrate object hasn't implemented yet.
        """
        return self.migration

    @migration_implemented
    def create_all(self) -> None:
        self.native_db.create_all()

    @migration_implemented
    def drop_all(self):
        "Drop all tables."
        self.native_db.drop_all()

    @migration_implemented
    def _add(self, entity):
        """Place an object in the session."""
        # TODO: Add functionality to accept multiple entities as *args.
        self.native_db.session.add(entity)

    def push(self, entity):
        """Add entity to session and immediately commit the session."""
        self._add(entity)
        self.commit()

    @migration_implemented
    def commit(self):
        """Commit current transaction."""
        self.native_db.session.commit()

    @migration_implemented
    def rollback(self):
        self.native_db.session.rollback()

    @migration_implemented
    def remove(self):
        self.native_db.session.remove()
from __future__ import annotations import re from functools import wraps from typing import Callable, Any, TypeVar from warepy import format_message, snakefy from puft.core.db.model_not_found_error import ModelNotFoundError from puft.tools.log import log from flask import Flask import flask_migrate from flask_sqlalchemy import SQLAlchemy from flask_sqlalchemy import Model as BaseModel import sqlalchemy as sa from sqlalchemy.ext.declarative import declared_attr from puft.core.sv.sv import Sv from .db_type_enum import DbTypeEnum AnyModel = TypeVar('AnyModel', bound='orm.Model') # TODO: Fix type hinting for decorated functions under this decorator. def migration_implemented(func: Callable): @wraps(func) def inner(self_instance, *args, **kwargs): if type(self_instance) is not Db: raise TypeError( "Decorator migration_implemented cannot be" f" applied to type {type(self_instance)}") elif not self_instance.migration: error_message = "Migrate object hasn't been implemented yet" raise AttributeError(error_message) else: return func(self_instance, *args, **kwargs) return inner class Mapper(BaseModel): """Base orm model responsible of holding model's data and at least it's basic CRUD operations. Contains create(), get_first(), get_all() and delete_first() methods as Create, Retrieve and Delete representatives. Update representatives are defined individually at each subclass (e.g. `set_something()`), and by default accessed via basic model alteration, e.g. `MyModel.name = 'Another name'`. 
""" # sqlalchemy used instead of `orm` class to avoid reference errors # https://flask-sqlalchemy.palletsprojects.com/en/2.x/customizing/ id = sa.Column(sa.Integer, primary_key=True) @declared_attr def __tablename__(cls) -> str: cls_name: str = cls.__name__ # type: ignore return snakefy(cls_name) @declared_attr def __mapper_args__(cls) -> dict[str, Any]: args: dict[str, Any] = {} if hasattr(cls, 'type'): # If classes intended to build an inheritance tree, they must # include `type` attr args.update({ 'polymorphic_on': 'type', 'polymorphic_identity': cls.__tablename__ }) return args @classmethod @log.catch(exclude=(NotImplementedError)) def create(cls: AnyModel, **kwargs) -> AnyModel: """Create model and return it. Accepts all given kwargs and thus is recommended to be redefined at subclasses. Model creation intended to be done only through this method. """ return cls(**kwargs) # type: ignore @classmethod @log.catch(exclude=(ModelNotFoundError)) def get_first( cls, order_by: object | list[object] | None = None, **kwargs) -> orm.Model: """Filter first ORM mapped model by given kwargs and return it. Raise: ValueError: No such ORM model in db matched given kwargs """ query: Any = cls.query.filter_by(**kwargs) # type: ignore if order_by is not None: query = cls._order_query(query, order_by) model: orm.Model = query.first() if not model: raise ModelNotFoundError(model_name=cls.__name__, **kwargs) else: return model @classmethod @log.catch(exclude=(ModelNotFoundError)) def get_all( cls, order_by: object | list[object] | None = None, limit: int | None = None, **kwargs) -> list[orm.Model]: """Filter all ORM mapped models by given kwargs and return them. 
Raise: ValueError: No such ORM model in db matched given kwargs """ query: Any = cls.query.filter_by(**kwargs) # type: ignore if order_by is not None: query = cls._order_query(query, order_by) if limit: query = query.limit(limit) elif limit: query = query.limit(limit) models: list[orm.Model] = query.all() if not models: raise ModelNotFoundError(model_name=cls.__name__, **kwargs) else: return models @classmethod def delete_first( cls, order_by: object | list[object] | None = None, **kwargs) -> None: """Delete first accessed by `get_first()` method model.""" db: Db = Db.instance() model: orm.Model = cls.get_first(order_by=order_by, **kwargs) db.native_db.session.delete(model) db.commit() @staticmethod def _order_query(query: Any, order_by: object | list[object]) -> object: if type(order_by) is list: return query.order_by(*order_by) else: return query.order_by(order_by) class orm: # Helper references for shorter writing at ORMs. # Ignore lines added for a workaround to fix issue: # https://github.com/microsoft/pylance-release/issues/187 native_db = SQLAlchemy(model_class=Mapper) Model: Any = native_db.Model column = native_db.Column # type: ignore integer = native_db.Integer # type: ignore string = native_db.String # type: ignore text = native_db.Text # type: ignore float = native_db.Float # type: ignore boolean = native_db.Boolean # type: ignore foreign_key = native_db.ForeignKey # type: ignore table = native_db.Table # type: ignore check_constraint = native_db.CheckConstraint # type: ignore relationship = native_db.relationship # type: ignore backref = native_db.backref # type: ignore pickle = native_db.PickleType # type: ignore binary = native_db.LargeBinary # type: ignore datetime = native_db.DateTime # type: ignore class Db(Sv): """Operates over database processes.""" def __init__(self, config: dict) -> None: super().__init__(config) self.DEFAULT_URI = f"sqlite:///{self.config['root_path']}/sqlite3.db" self.native_db = orm.native_db # For now sv config propagated 
to Db domain. self._assign_uri_from_config(config) def _assign_uri_from_config(self, config: dict) -> None: raw_uri = config.get("uri", None) # type: str if not raw_uri: log.info(f"URI for database is not specified, using default") raw_uri = self.DEFAULT_URI else: # Case 1: SQLite Db. # Developer can give relative path to the Db (it will be absolutized at ConfigCell.parse()), # by setting sqlite Db extension to `.db`, e.g. `./instance/sqlite3.db`, # or by setting full absolute path with protocol, e.g. `sqlite:////home/user/project/instance/sqlite3.db`. if raw_uri.rfind(".db") != -1 or "sqlite:///" in raw_uri: if "sqlite:///" not in raw_uri: # Set absolute path to db. # Ref: https://stackoverflow.com/a/44687471/14748231 self.uri = "sqlite:///" + raw_uri else: self.uri = raw_uri self.type_enum = DbTypeEnum.SQLITE # Case 2: PostgreSQL Db. elif re.match(r"postgresql(\+\w+)?://", raw_uri): # No need to calculate path since psql uri should be given in full form. self.uri = raw_uri self.type_enum = DbTypeEnum.PSQL else: raise ValueError(format_message("Unrecognized or yet unsupported type of Db uri: {}", raw_uri)) # WARNING: Never print full Db uri to config, since it may contain user's password (as in case of # psql) log.info(f"Set db type: {self.type_enum.value}") @migration_implemented def init_migration(self, directory: str = "migrations", multidb: bool = False) -> None: """Initializes migration support for the application.""" flask_migrate.init(directory=directory, multidb=multidb) @migration_implemented def migrate_migration( self, directory: str = "migrations", message = None, sql = False, head: str = "head", splice = False, branch_label = None, version_path = None, rev_id = None ) -> None: flask_migrate.migrate( directory=directory, message=message, sql=sql, head=head, splice=splice, branch_label=branch_label, version_path=version_path, rev_id=rev_id ) @migration_implemented def upgrade_migration( self, directory: str = "migrations", revision: str = "head", sql = 
False, tag = None ) -> None: flask_migrate.upgrade( directory=directory, revision=revision, sql=sql, tag=tag ) def setup(self, flask_app: Flask) -> None: """Setup Db and migration object with given Flask app.""" flask_app.config["SQLALCHEMY_DATABASE_URI"] = self.uri flask_app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False self.native_db.init_app(flask_app) # render_as_batch kwarg required only for sqlite3 databases to avoid # ALTER TABLE issue on migrations # https://blog.miguelgrinberg.com/post/fixing-alter-table-errors-with-flask-migrate-and-sqlite if self.type_enum is DbTypeEnum.SQLITE: is_sqlite_db = True else: is_sqlite_db = False self.migration = flask_migrate.Migrate(flask_app, self.native_db, render_as_batch=is_sqlite_db) def get_native_db(self) -> SQLAlchemy: return self.native_db @migration_implemented def get_migration(self) -> flask_migrate.Migrate: """Return migration object. Raise: AttributeError: If Migrate object hasn't implemented yet. """ return self.migration @migration_implemented def create_all(self) -> None: self.native_db.create_all() @migration_implemented def drop_all(self): "Drop all tables." self.native_db.drop_all() @migration_implemented def _add(self, entity): """Place an object in the session.""" # TODO: Add functionality to accept multiple entities as *args. self.native_db.session.add(entity) def push(self, entity): """Add entity to session and immediately commit the session.""" self._add(entity) self.commit() @migration_implemented def commit(self): """Commit current transaction.""" self.native_db.session.commit() @migration_implemented def rollback(self): self.native_db.session.rollback() @migration_implemented def remove(self): self.native_db.session.remove()
"""Classes to manage credentials."""

import asyncio
import json
import logging

from typing import Mapping, Tuple

from ....cache.base import BaseCache
from ....core.error import BaseError
from ....core.profile import Profile
from ....indy.holder import IndyHolder, IndyHolderError
from ....indy.issuer import IndyIssuer, IndyIssuerRevocationRegistryFullError
from ....ledger.multiple_ledger.ledger_requests_executor import (
    GET_CRED_DEF,
    GET_SCHEMA,
    IndyLedgerRequestsExecutor,
)
from ....messaging.credential_definitions.util import (
    CRED_DEF_TAGS,
    CRED_DEF_SENT_RECORD_TYPE,
)
from ....messaging.responder import BaseResponder
from ....revocation.indy import IndyRevocation
from ....revocation.models.revocation_registry import RevocationRegistry
from ....revocation.models.issuer_rev_reg_record import IssuerRevRegRecord
from ....revocation.util import notify_revocation_reg_event
from ....storage.base import BaseStorage
from ....storage.error import StorageError, StorageNotFoundError
from .messages.credential_ack import CredentialAck
from .messages.credential_issue import CredentialIssue
from .messages.credential_offer import CredentialOffer
from .messages.credential_problem_report import (
    CredentialProblemReport,
    ProblemReportReason,
)
from .messages.credential_proposal import CredentialProposal
from .messages.credential_request import CredentialRequest
from .messages.inner.credential_preview import CredentialPreview
from .models.credential_exchange import (
    V10CredentialExchange,
)

LOGGER = logging.getLogger(__name__)


class CredentialManagerError(BaseError):
    """Credential error."""


class CredentialManager:
    """Class for managing credentials."""

    def __init__(self, profile: Profile):
        """
        Initialize a CredentialManager.

        Args:
            profile: The profile instance for this credential manager
        """
        self._profile = profile

    @property
    def profile(self) -> Profile:
        """
        Accessor for the current profile instance.

        Returns:
            The profile instance for this credential manager

        """
        return self._profile

    async def _match_sent_cred_def_id(self, tag_query: Mapping[str, str]) -> str:
        """Return most recent matching id of cred def that agent sent to ledger."""
        async with self._profile.session() as session:
            storage = session.inject(BaseStorage)
            found = await storage.find_all_records(
                type_filter=CRED_DEF_SENT_RECORD_TYPE, tag_query=tag_query
            )
        if not found:
            raise CredentialManagerError(
                f"Issuer has no operable cred def for proposal spec {tag_query}"
            )
        # "most recent" = the record with the largest epoch tag
        return max(found, key=lambda r: int(r.tags["epoch"])).tags["cred_def_id"]

    async def prepare_send(
        self,
        connection_id: str,
        credential_proposal: CredentialProposal,
        auto_remove: bool = None,
        comment: str = None,
    ) -> Tuple[V10CredentialExchange, CredentialOffer]:
        """
        Set up a new credential exchange for an automated send.

        Args:
            connection_id: Connection to create offer for
            credential_proposal: The credential proposal with preview
            auto_remove: Flag to automatically remove the record on completion
            comment: Optional human-readable comment to include in the offer

        Returns:
            A tuple of the new credential exchange record and credential offer message

        """
        if auto_remove is None:
            auto_remove = not self._profile.settings.get("preserve_exchange_records")
        # auto_issue=True: record proceeds straight to issuance on request
        credential_exchange = V10CredentialExchange(
            connection_id=connection_id,
            initiator=V10CredentialExchange.INITIATOR_SELF,
            role=V10CredentialExchange.ROLE_ISSUER,
            credential_proposal_dict=credential_proposal,
            auto_issue=True,
            auto_remove=auto_remove,
            trace=(credential_proposal._trace is not None),
        )
        (credential_exchange, credential_offer) = await self.create_offer(
            cred_ex_record=credential_exchange,
            counter_proposal=None,
            comment=comment,
        )
        return (credential_exchange, credential_offer)

    async def create_proposal(
        self,
        connection_id: str,
        *,
        auto_offer: bool = None,
        auto_remove: bool = None,
        comment: str = None,
        credential_preview: CredentialPreview = None,
        schema_id: str = None,
        schema_issuer_did: str = None,
        schema_name: str = None,
        schema_version: str = None,
        cred_def_id: str = None,
        issuer_did: str = None,
        trace: bool = False,
    ) -> V10CredentialExchange:
        """
        Create a credential proposal.

        Args:
            connection_id: Connection to create proposal for
            auto_offer: Should this proposal request automatically be handled to
                offer a credential
            auto_remove: Should the record be automatically removed on completion
            comment: Optional human-readable comment to include in proposal
            credential_preview: The credential preview to use to create
                the credential proposal
            schema_id: Schema id for credential proposal
            schema_issuer_did: Schema issuer DID for credential proposal
            schema_name: Schema name for credential proposal
            schema_version: Schema version for credential proposal
            cred_def_id: Credential definition id for credential proposal
            issuer_did: Issuer DID for credential proposal

        Returns:
            Resulting credential exchange record including credential proposal

        """
        credential_proposal_message = CredentialProposal(
            comment=comment,
            credential_proposal=credential_preview,
            schema_id=schema_id,
            schema_issuer_did=schema_issuer_did,
            schema_name=schema_name,
            schema_version=schema_version,
            cred_def_id=cred_def_id,
            issuer_did=issuer_did,
        )
        credential_proposal_message.assign_trace_decorator(
            self._profile.settings, trace
        )

        if auto_remove is None:
            auto_remove = not self._profile.settings.get("preserve_exchange_records")
        cred_ex_record = V10CredentialExchange(
            connection_id=connection_id,
            thread_id=credential_proposal_message._thread_id,
            initiator=V10CredentialExchange.INITIATOR_SELF,
            role=V10CredentialExchange.ROLE_HOLDER,
            state=V10CredentialExchange.STATE_PROPOSAL_SENT,
            credential_proposal_dict=credential_proposal_message,
            auto_offer=auto_offer,
            auto_remove=auto_remove,
            trace=trace,
        )
        async with self._profile.session() as session:
            await cred_ex_record.save(session, reason="create credential proposal")
        return cred_ex_record

    async def receive_proposal(
        self, message: CredentialProposal, connection_id: str
    ) -> V10CredentialExchange:
        """
        Receive a credential proposal.

        Returns:
            The resulting credential exchange record, created

        """
        # at this point, cred def and schema still open to potential negotiation
        cred_ex_record = V10CredentialExchange(
            connection_id=connection_id,
            thread_id=message._thread_id,
            initiator=V10CredentialExchange.INITIATOR_EXTERNAL,
            role=V10CredentialExchange.ROLE_ISSUER,
            state=V10CredentialExchange.STATE_PROPOSAL_RECEIVED,
            credential_proposal_dict=message,
            auto_offer=self._profile.settings.get(
                "debug.auto_respond_credential_proposal"
            ),
            auto_issue=self._profile.settings.get(
                "debug.auto_respond_credential_request"
            ),
            auto_remove=not self._profile.settings.get("preserve_exchange_records"),
            trace=(message._trace is not None),
        )
        async with self._profile.session() as session:
            await cred_ex_record.save(session, reason="receive credential proposal")

        return cred_ex_record

    async def create_offer(
        self,
        cred_ex_record: V10CredentialExchange,
        counter_proposal: CredentialProposal = None,
        comment: str = None,
    ) -> Tuple[V10CredentialExchange, CredentialOffer]:
        """
        Create a credential offer, update credential exchange record.

        Args:
            cred_ex_record: Credential exchange to create offer for
            counter_proposal: optional proposal overriding the record's proposal
            comment: optional human-readable comment to set in offer message

        Returns:
            A tuple (credential exchange record, credential offer message)

        """

        async def _create(cred_def_id):
            issuer = self._profile.inject(IndyIssuer)
            offer_json = await issuer.create_credential_offer(cred_def_id)
            return json.loads(offer_json)

        credential_proposal_message = (
            counter_proposal
            if counter_proposal
            else cred_ex_record.credential_proposal_dict
        )
        credential_proposal_message.assign_trace_decorator(
            self._profile.settings, cred_ex_record.trace
        )
        cred_def_id = await self._match_sent_cred_def_id(
            {
                t: getattr(credential_proposal_message, t)
                for t in CRED_DEF_TAGS
                if getattr(credential_proposal_message, t)
            }
        )

        credential_preview = credential_proposal_message.credential_proposal

        # vet attributes: preview must exactly match the schema's attribute set
        ledger_exec_inst = self._profile.inject(IndyLedgerRequestsExecutor)
        ledger = (
            await ledger_exec_inst.get_ledger_for_identifier(
                cred_def_id,
                txn_record_type=GET_CRED_DEF,
            )
        )[1]
        async with ledger:
            schema_id = await ledger.credential_definition_id2schema_id(cred_def_id)
            schema = await ledger.get_schema(schema_id)
        schema_attrs = {attr for attr in schema["attrNames"]}
        preview_attrs = {attr for attr in credential_preview.attr_dict()}
        if preview_attrs != schema_attrs:
            raise CredentialManagerError(
                f"Preview attributes {preview_attrs} "
                f"mismatch corresponding schema attributes {schema_attrs}"
            )

        credential_offer = None
        cache_key = f"credential_offer::{cred_def_id}"
        cache = self._profile.inject_or(BaseCache)
        if cache:
            async with cache.acquire(cache_key) as entry:
                if entry.result:
                    credential_offer = entry.result
                else:
                    credential_offer = await _create(cred_def_id)
                    await entry.set_result(credential_offer, 3600)
        if not credential_offer:
            credential_offer = await _create(cred_def_id)

        credential_offer_message = CredentialOffer(
            comment=comment,
            credential_preview=credential_preview,
            offers_attach=[CredentialOffer.wrap_indy_offer(credential_offer)],
        )

        credential_offer_message._thread = {"thid": cred_ex_record.thread_id}
        credential_offer_message.assign_trace_decorator(
            self._profile.settings, cred_ex_record.trace
        )

        cred_ex_record.thread_id = credential_offer_message._thread_id
        cred_ex_record.schema_id = credential_offer["schema_id"]
        cred_ex_record.credential_definition_id = credential_offer["cred_def_id"]
        cred_ex_record.state = V10CredentialExchange.STATE_OFFER_SENT
        cred_ex_record.credential_proposal_dict = (  # any counter replaces original
            credential_proposal_message
        )
        cred_ex_record.credential_offer = credential_offer
        cred_ex_record.credential_offer_dict = credential_offer_message

        async with self._profile.session() as session:
            await cred_ex_record.save(session, reason="create credential offer")

        return (cred_ex_record, credential_offer_message)

    async def receive_offer(
        self, message: CredentialOffer, connection_id: str
    ) -> V10CredentialExchange:
        """
        Receive a credential offer.

        Returns:
            The credential exchange record, updated

        """
        credential_preview = message.credential_preview
        indy_offer = message.indy_offer(0)
        schema_id = indy_offer["schema_id"]
        cred_def_id = indy_offer["cred_def_id"]

        credential_proposal_dict = CredentialProposal(
            comment=message.comment,
            credential_proposal=credential_preview,
            schema_id=schema_id,
            cred_def_id=cred_def_id,
        )

        async with self._profile.transaction() as txn:
            # Get credential exchange record (holder sent proposal first)
            # or create it (issuer sent offer first)
            try:
                cred_ex_record = await (
                    V10CredentialExchange.retrieve_by_connection_and_thread(
                        txn, connection_id, message._thread_id, for_update=True
                    )
                )
            except StorageNotFoundError:  # issuer sent this offer free of any proposal
                cred_ex_record = V10CredentialExchange(
                    connection_id=connection_id,
                    thread_id=message._thread_id,
                    initiator=V10CredentialExchange.INITIATOR_EXTERNAL,
                    role=V10CredentialExchange.ROLE_HOLDER,
                    auto_remove=not self._profile.settings.get(
                        "preserve_exchange_records"
                    ),
                    trace=(message._trace is not None),
                )
            else:
                if cred_ex_record.state != V10CredentialExchange.STATE_PROPOSAL_SENT:
                    raise CredentialManagerError(
                        f"Credential exchange {cred_ex_record.credential_exchange_id} "
                        f"in {cred_ex_record.state} state "
                        f"(must be {V10CredentialExchange.STATE_PROPOSAL_SENT})"
                    )
            cred_ex_record.credential_proposal_dict = credential_proposal_dict
            cred_ex_record.credential_offer = indy_offer
            cred_ex_record.state = V10CredentialExchange.STATE_OFFER_RECEIVED
            cred_ex_record.schema_id = schema_id
            cred_ex_record.credential_definition_id = cred_def_id

            await cred_ex_record.save(txn, reason="receive credential offer")
            await txn.commit()

        return cred_ex_record

    async def create_request(
        self, cred_ex_record: V10CredentialExchange, holder_did: str
    ) -> Tuple[V10CredentialExchange, CredentialRequest]:
        """
        Create a credential request.

        Args:
            cred_ex_record: Credential exchange record for which to create request
            holder_did: holder DID

        Returns:
            A tuple (credential exchange record, credential request message)

        """
        if cred_ex_record.state != V10CredentialExchange.STATE_OFFER_RECEIVED:
            raise CredentialManagerError(
                f"Credential exchange {cred_ex_record.credential_exchange_id} "
                f"in {cred_ex_record.state} state "
                f"(must be {V10CredentialExchange.STATE_OFFER_RECEIVED})"
            )

        credential_definition_id = cred_ex_record.credential_definition_id
        cred_offer_ser = cred_ex_record._credential_offer.ser
        cred_req_ser = None
        cred_req_meta = None

        async def _create():
            ledger_exec_inst = self._profile.inject(IndyLedgerRequestsExecutor)
            ledger = (
                await ledger_exec_inst.get_ledger_for_identifier(
                    credential_definition_id,
                    txn_record_type=GET_CRED_DEF,
                )
            )[1]
            async with ledger:
                credential_definition = await ledger.get_credential_definition(
                    credential_definition_id
                )

            holder = self._profile.inject(IndyHolder)
            request_json, metadata_json = await holder.create_credential_request(
                cred_offer_ser,
                credential_definition,
                holder_did,
            )
            return {
                "request": json.loads(request_json),
                "metadata": json.loads(metadata_json),
            }

        if cred_ex_record.credential_request:
            # idempotency guard: reuse the previously created request
            LOGGER.warning(
                "create_request called multiple times for v1.0 credential exchange: %s",
                cred_ex_record.credential_exchange_id,
            )
            cred_req_ser = cred_ex_record._credential_request.ser
            cred_req_meta = cred_ex_record.credential_request_metadata
        else:
            nonce = cred_offer_ser["nonce"]
            cache_key = (
                f"credential_request::{credential_definition_id}::{holder_did}::{nonce}"
            )
            cred_req_result = None
            cache = self._profile.inject_or(BaseCache)
            if cache:
                async with cache.acquire(cache_key) as entry:
                    if entry.result:
                        cred_req_result = entry.result
                    else:
                        cred_req_result = await _create()
                        await entry.set_result(cred_req_result, 3600)
            if not cred_req_result:
                cred_req_result = await _create()

            cred_req_ser = cred_req_result["request"]
            cred_req_meta = cred_req_result["metadata"]

        credential_request_message = CredentialRequest(
            requests_attach=[CredentialRequest.wrap_indy_cred_req(cred_req_ser)]
        )
        credential_request_message._thread = {"thid": cred_ex_record.thread_id}
        credential_request_message.assign_trace_decorator(
            self._profile.settings, cred_ex_record.trace
        )

        # re-fetch the record under lock before mutating state
        async with self._profile.transaction() as txn:
            cred_ex_record = await V10CredentialExchange.retrieve_by_id(
                txn, cred_ex_record.credential_exchange_id, for_update=True
            )
            if cred_ex_record.state != V10CredentialExchange.STATE_OFFER_RECEIVED:
                raise CredentialManagerError(
                    f"Credential exchange {cred_ex_record.credential_exchange_id} "
                    f"in {cred_ex_record.state} state "
                    f"(must be {V10CredentialExchange.STATE_OFFER_RECEIVED})"
                )

            cred_ex_record.credential_request = cred_req_ser
            cred_ex_record.credential_request_metadata = cred_req_meta
            cred_ex_record.state = V10CredentialExchange.STATE_REQUEST_SENT
            await cred_ex_record.save(txn, reason="create credential request")
            await txn.commit()

        return (cred_ex_record, credential_request_message)

    async def receive_request(self, message: CredentialRequest, connection_id: str):
        """
        Receive a credential request.

        Args:
            credential_request_message: Credential request to receive

        Returns:
            credential exchange record, retrieved and updated

        """
        assert len(message.requests_attach or []) == 1
        credential_request = message.indy_cred_req(0)

        async with self._profile.transaction() as txn:
            try:
                cred_ex_record = await (
                    V10CredentialExchange.retrieve_by_connection_and_thread(
                        txn, connection_id, message._thread_id, for_update=True
                    )
                )
            except StorageNotFoundError:
                # fall back: record may have been saved without a connection id
                try:
                    cred_ex_record = await V10CredentialExchange.retrieve_by_tag_filter(
                        txn,
                        {"thread_id": message._thread_id},
                        {"connection_id": None},
                        for_update=True,
                    )
                    cred_ex_record.connection_id = connection_id
                except StorageNotFoundError:
                    raise CredentialManagerError(
                        "Indy issue credential format can't start from credential request"
                    ) from None

            if cred_ex_record.state != V10CredentialExchange.STATE_OFFER_SENT:
                LOGGER.error(
                    "Skipping credential request; exchange state is %s (id=%s)",
                    cred_ex_record.state,
                    cred_ex_record.credential_exchange_id,
                )
                return None

            cred_ex_record.credential_request = credential_request
            cred_ex_record.state = V10CredentialExchange.STATE_REQUEST_RECEIVED
            await cred_ex_record.save(txn, reason="receive credential request")
            await txn.commit()

        return cred_ex_record

    async def issue_credential(
        self,
        cred_ex_record: V10CredentialExchange,
        *,
        comment: str = None,
        retries: int = 5,
    ) -> Tuple[V10CredentialExchange, CredentialIssue]:
        """
        Issue a credential.

        Args:
            cred_ex_record: The credential exchange record
                for which to issue a credential
            comment: optional human-readable comment pertaining to credential issue
            retries: how many times to retry when a revocation registry is
                full or not yet posted (recursive, 2s/1s backoff)

        Returns:
            Tuple: (Updated credential exchange record, credential message)

        """
        if cred_ex_record.state != V10CredentialExchange.STATE_REQUEST_RECEIVED:
            raise CredentialManagerError(
                f"Credential exchange {cred_ex_record.credential_exchange_id} "
                f"in {cred_ex_record.state} state "
                f"(must be {V10CredentialExchange.STATE_REQUEST_RECEIVED})"
            )

        schema_id = cred_ex_record.schema_id
        rev_reg = None
        credential_ser = None

        if cred_ex_record.credential:
            # idempotency guard: reuse the previously issued credential
            LOGGER.warning(
                "issue_credential called multiple times for "
                + "credential exchange record %s - abstaining",
                cred_ex_record.credential_exchange_id,
            )
            credential_ser = cred_ex_record._credential.ser
        else:
            cred_offer_ser = cred_ex_record._credential_offer.ser
            cred_req_ser = cred_ex_record._credential_request.ser
            ledger_exec_inst = self._profile.inject(IndyLedgerRequestsExecutor)
            ledger = (
                await ledger_exec_inst.get_ledger_for_identifier(
                    schema_id,
                    txn_record_type=GET_SCHEMA,
                )
            )[1]
            async with ledger:
                schema = await ledger.get_schema(schema_id)
                credential_definition = await ledger.get_credential_definition(
                    cred_ex_record.credential_definition_id
                )

            tails_path = None
            if credential_definition["value"].get("revocation"):
                # cred def supports revocation: need an active rev reg with tails
                revoc = IndyRevocation(self._profile)
                try:
                    active_rev_reg_rec = await revoc.get_active_issuer_rev_reg_record(
                        cred_ex_record.credential_definition_id
                    )
                    rev_reg = await active_rev_reg_rec.get_registry()
                    cred_ex_record.revoc_reg_id = active_rev_reg_rec.revoc_reg_id
                    tails_path = rev_reg.tails_local_path
                    await rev_reg.get_or_fetch_local_tails_path()

                except StorageNotFoundError:
                    async with self._profile.session() as session:
                        posted_rev_reg_recs = (
                            await IssuerRevRegRecord.query_by_cred_def_id(
                                session,
                                cred_ex_record.credential_definition_id,
                                state=IssuerRevRegRecord.STATE_POSTED,
                            )
                        )
                    if not posted_rev_reg_recs:
                        # Send next 2 rev regs, publish tails files in background
                        async with self._profile.session() as session:
                            old_rev_reg_recs = sorted(
                                await IssuerRevRegRecord.query_by_cred_def_id(
                                    session,
                                    cred_ex_record.credential_definition_id,
                                )
                            )

                        # prefer to reuse prior rev reg size
                        cred_def_id = cred_ex_record.credential_definition_id
                        rev_reg_size = (
                            old_rev_reg_recs[0].max_cred_num
                            if old_rev_reg_recs
                            else None
                        )
                        for _ in range(2):
                            await notify_revocation_reg_event(
                                self.profile,
                                cred_def_id,
                                rev_reg_size,
                                auto_create_rev_reg=True,
                            )

                    if retries > 0:
                        LOGGER.info(
                            "Waiting 2s on posted rev reg for cred def %s, retrying",
                            cred_ex_record.credential_definition_id,
                        )
                        await asyncio.sleep(2)
                        return await self.issue_credential(
                            cred_ex_record=cred_ex_record,
                            comment=comment,
                            retries=retries - 1,
                        )

                    raise CredentialManagerError(
                        f"Cred def id {cred_ex_record.credential_definition_id} "
                        "has no active revocation registry"
                    ) from None

                del revoc

            credential_values = (
                cred_ex_record.credential_proposal_dict.credential_proposal.attr_dict(
                    decode=False
                )
            )
            issuer = self._profile.inject(IndyIssuer)
            try:
                (
                    credential_json,
                    cred_ex_record.revocation_id,
                ) = await issuer.create_credential(
                    schema,
                    cred_offer_ser,
                    cred_req_ser,
                    credential_values,
                    cred_ex_record.credential_exchange_id,
                    cred_ex_record.revoc_reg_id,
                    tails_path,
                )
                credential_ser = json.loads(credential_json)

                # If the rev reg is now full
                if rev_reg and rev_reg.max_creds == int(cred_ex_record.revocation_id):
                    async with self._profile.session() as session:
                        await active_rev_reg_rec.set_state(
                            session,
                            IssuerRevRegRecord.STATE_FULL,
                        )

                    # Send next 1 rev reg, publish tails file in background
                    cred_def_id = cred_ex_record.credential_definition_id
                    rev_reg_size = active_rev_reg_rec.max_cred_num
                    await notify_revocation_reg_event(
                        self.profile,
                        cred_def_id,
                        rev_reg_size,
                        auto_create_rev_reg=True,
                    )

            except IndyIssuerRevocationRegistryFullError:
                # unlucky: duelling instance issued last cred near same time as us
                async with self._profile.session() as session:
                    await active_rev_reg_rec.set_state(
                        session,
                        IssuerRevRegRecord.STATE_FULL,
                    )

                if retries > 0:
                    # use next rev reg; at worst, lucky instance is putting one up
                    LOGGER.info(
                        "Waiting 1s and retrying: revocation registry %s is full",
                        active_rev_reg_rec.revoc_reg_id,
                    )
                    await asyncio.sleep(1)
                    return await self.issue_credential(
                        cred_ex_record=cred_ex_record,
                        comment=comment,
                        retries=retries - 1,
                    )

                raise

        # re-fetch the record under lock before mutating state
        async with self._profile.transaction() as txn:
            cred_ex_record = await V10CredentialExchange.retrieve_by_id(
                txn, cred_ex_record.credential_exchange_id, for_update=True
            )
            if cred_ex_record.state != V10CredentialExchange.STATE_REQUEST_RECEIVED:
                raise CredentialManagerError(
                    f"Credential exchange {cred_ex_record.credential_exchange_id} "
                    f"in {cred_ex_record.state} state "
                    f"(must be {V10CredentialExchange.STATE_REQUEST_RECEIVED})"
                )

            cred_ex_record.state = V10CredentialExchange.STATE_ISSUED
            cred_ex_record.credential = credential_ser
            await cred_ex_record.save(txn, reason="issue credential")
            await txn.commit()

        credential_message = CredentialIssue(
            comment=comment,
            credentials_attach=[
                CredentialIssue.wrap_indy_credential(cred_ex_record._credential.ser)
            ],
        )
        credential_message._thread = {"thid": cred_ex_record.thread_id}
        credential_message.assign_trace_decorator(
            self._profile.settings, cred_ex_record.trace
        )

        return (cred_ex_record, credential_message)

    async def receive_credential(
        self, message: CredentialIssue, connection_id: str
    ) -> V10CredentialExchange:
        """
        Receive a credential from an issuer.

        Hold in storage potentially to be processed by controller before storing.

        Returns:
            Credential exchange record, retrieved and updated

        """
        assert len(message.credentials_attach or []) == 1
        raw_credential = message.indy_credential(0)

        async with self._profile.transaction() as txn:
            try:
                cred_ex_record = await (
                    V10CredentialExchange.retrieve_by_connection_and_thread(
                        txn, connection_id, message._thread_id, for_update=True
                    )
                )
            except StorageNotFoundError:
                raise CredentialManagerError(
                    "No credential exchange record found for received credential"
                ) from None

            if cred_ex_record.state != V10CredentialExchange.STATE_REQUEST_SENT:
                raise CredentialManagerError(
                    f"Credential exchange {cred_ex_record.credential_exchange_id} "
                    f"in {cred_ex_record.state} state "
                    f"(must be {V10CredentialExchange.STATE_REQUEST_SENT})"
                )

            cred_ex_record.raw_credential = raw_credential
            cred_ex_record.state = V10CredentialExchange.STATE_CREDENTIAL_RECEIVED

            await cred_ex_record.save(txn, reason="receive credential")
            await txn.commit()

        return cred_ex_record

    async def store_credential(
        self, cred_ex_record: V10CredentialExchange, credential_id: str = None
    ) -> V10CredentialExchange:
        """
        Store a credential in holder wallet; send ack to issuer.

        Args:
            cred_ex_record: credential exchange record
                with credential to store and ack
            credential_id: optional credential identifier to override default on storage

        Returns:
            Updated credential exchange record

        """
        if cred_ex_record.state != V10CredentialExchange.STATE_CREDENTIAL_RECEIVED:
            raise CredentialManagerError(
                f"Credential exchange {cred_ex_record.credential_exchange_id} "
                f"in {cred_ex_record.state} state "
                f"(must be {V10CredentialExchange.STATE_CREDENTIAL_RECEIVED})"
            )

        raw_cred_serde = cred_ex_record._raw_credential
        revoc_reg_def = None
        ledger_exec_inst = self._profile.inject(IndyLedgerRequestsExecutor)
        ledger = (
            await ledger_exec_inst.get_ledger_for_identifier(
                raw_cred_serde.de.cred_def_id,
                txn_record_type=GET_CRED_DEF,
            )
        )[1]
        async with ledger:
            credential_definition = await ledger.get_credential_definition(
                raw_cred_serde.de.cred_def_id
            )
            if raw_cred_serde.de.rev_reg_id:
                revoc_reg_def = await ledger.get_revoc_reg_def(
                    raw_cred_serde.de.rev_reg_id
                )

        holder = self._profile.inject(IndyHolder)
        if (
            cred_ex_record.credential_proposal_dict
            and cred_ex_record.credential_proposal_dict.credential_proposal
        ):
            mime_types = (
                cred_ex_record.credential_proposal_dict.credential_proposal.mime_types()
            )
        else:
            mime_types = None

        if revoc_reg_def:
            # make sure tails file is available before storing a revocable cred
            revoc_reg = RevocationRegistry.from_definition(revoc_reg_def, True)
            await revoc_reg.get_or_fetch_local_tails_path()
        try:
            credential_id = await holder.store_credential(
                credential_definition,
                raw_cred_serde.ser,
                cred_ex_record.credential_request_metadata,
                mime_types,
                credential_id=credential_id,
                rev_reg_def=revoc_reg_def,
            )
        except IndyHolderError as e:
            LOGGER.error("Error storing credential: %s: %s", e.error_code, e.message)
            raise e

        credential_json = await holder.get_credential(credential_id)
        credential = json.loads(credential_json)

        async with self._profile.transaction() as txn:
            cred_ex_record = await V10CredentialExchange.retrieve_by_id(
                txn, cred_ex_record.credential_exchange_id, for_update=True
            )
            if cred_ex_record.state != V10CredentialExchange.STATE_CREDENTIAL_RECEIVED:
                raise CredentialManagerError(
                    f"Credential exchange {cred_ex_record.credential_exchange_id} "
                    f"in {cred_ex_record.state} state "
                    f"(must be {V10CredentialExchange.STATE_CREDENTIAL_RECEIVED})"
                )

            cred_ex_record.credential_id = credential_id
            cred_ex_record.credential = credential
            cred_ex_record.revoc_reg_id = credential.get("rev_reg_id", None)
            cred_ex_record.revocation_id = credential.get("cred_rev_id", None)
            await cred_ex_record.save(txn, reason="store credential")
            await txn.commit()

        return cred_ex_record

    async def send_credential_ack(
        self,
        cred_ex_record: V10CredentialExchange,
    ) -> Tuple[V10CredentialExchange, CredentialAck]:
        """
        Create, send, and return ack message for input credential exchange record.

        Delete credential exchange record if set to auto-remove.

        Returns:
            a tuple of the updated credential exchange record
            and the credential ack message for tracing

        """
        credential_ack_message = CredentialAck()
        credential_ack_message.assign_thread_id(
            cred_ex_record.thread_id, cred_ex_record.parent_thread_id
        )
        credential_ack_message.assign_trace_decorator(
            self._profile.settings, cred_ex_record.trace
        )

        try:
            async with self._profile.transaction() as txn:
                try:
                    cred_ex_record = await V10CredentialExchange.retrieve_by_id(
                        txn, cred_ex_record.credential_exchange_id, for_update=True
                    )
                except StorageNotFoundError:
                    LOGGER.warning(
                        "Skipping credential exchange ack, record not found: '%s'",
                        cred_ex_record.credential_exchange_id,
                    )
                    return (cred_ex_record, None)

                if (
                    cred_ex_record.state
                    != V10CredentialExchange.STATE_CREDENTIAL_RECEIVED
                ):
                    LOGGER.warning(
                        "Skipping credential exchange ack, state is '%s' for record '%s'",
                        cred_ex_record.state,
                        cred_ex_record.credential_exchange_id,
                    )
                    return (cred_ex_record, None)

                cred_ex_record.state = V10CredentialExchange.STATE_ACKED
                await cred_ex_record.save(txn, reason="ack credential")
                await txn.commit()

            if cred_ex_record.auto_remove:
                async with self._profile.session() as session:
                    await cred_ex_record.delete_record(session)  # all done: delete

        except StorageError:
            # best-effort: the ack is still owed to the issuer
            LOGGER.exception(
                "Error updating credential exchange"
            )  # holder still owes an ack: carry on

        responder = self._profile.inject_or(BaseResponder)
        if responder:
            await responder.send_reply(
                credential_ack_message,
                connection_id=cred_ex_record.connection_id,
            )
        else:
            LOGGER.warning(
                "Configuration has no BaseResponder: cannot ack credential on %s",
                cred_ex_record.thread_id,
            )

        return (cred_ex_record, credential_ack_message)

    async def receive_credential_ack(
        self, message: CredentialAck, connection_id: str
    ) -> V10CredentialExchange:
        """
        Receive credential ack from holder.

        Returns:
            credential exchange record, retrieved and updated

        """
        async with self._profile.transaction() as txn:
            try:
                cred_ex_record = await (
                    V10CredentialExchange.retrieve_by_connection_and_thread(
                        txn, connection_id, message._thread_id, for_update=True
                    )
                )
            except StorageNotFoundError:
                LOGGER.warning(
                    "Skip ack message on credential exchange, record not found %s",
                    message._thread_id,
                )
                return None

            if cred_ex_record.state == V10CredentialExchange.STATE_ACKED:
                # already acked: idempotent no-op
                return None

            cred_ex_record.state = V10CredentialExchange.STATE_ACKED
            await cred_ex_record.save(txn, reason="credential acked")
            await txn.commit()

        if cred_ex_record.auto_remove:
            async with self._profile.session() as session:
                await cred_ex_record.delete_record(session)  # all done: delete

        return cred_ex_record

    async def receive_problem_report(
        self, message: CredentialProblemReport, connection_id: str
    ):
        """
        Receive problem report.
Returns: credential exchange record, retrieved and updated """ async with self._profile.transaction() as txn: try: cred_ex_record = await ( V10CredentialExchange.retrieve_by_connection_and_thread( txn, connection_id, message._thread_id, for_update=True ) ) except StorageNotFoundError: LOGGER.warning( "Skip problem report on credential exchange, record not found %s", message._thread_id, ) return None cred_ex_record.state = V10CredentialExchange.STATE_ABANDONED code = message.description.get( "code", ProblemReportReason.ISSUANCE_ABANDONED.value, ) cred_ex_record.error_msg = f"{code}: {message.description.get("en", code)}" await cred_ex_record.save(txn, reason="received problem report") await txn.commit() return cred_ex_record
"""Classes to manage credentials."""

import asyncio
import json
import logging

from typing import Mapping, Tuple

from ....cache.base import BaseCache
from ....core.error import BaseError
from ....core.profile import Profile
from ....indy.holder import IndyHolder, IndyHolderError
from ....indy.issuer import IndyIssuer, IndyIssuerRevocationRegistryFullError
from ....ledger.multiple_ledger.ledger_requests_executor import (
    GET_CRED_DEF,
    GET_SCHEMA,
    IndyLedgerRequestsExecutor,
)
from ....messaging.credential_definitions.util import (
    CRED_DEF_TAGS,
    CRED_DEF_SENT_RECORD_TYPE,
)
from ....messaging.responder import BaseResponder
from ....revocation.indy import IndyRevocation
from ....revocation.models.revocation_registry import RevocationRegistry
from ....revocation.models.issuer_rev_reg_record import IssuerRevRegRecord
from ....revocation.util import notify_revocation_reg_event
from ....storage.base import BaseStorage
from ....storage.error import StorageError, StorageNotFoundError

from .messages.credential_ack import CredentialAck
from .messages.credential_issue import CredentialIssue
from .messages.credential_offer import CredentialOffer
from .messages.credential_problem_report import (
    CredentialProblemReport,
    ProblemReportReason,
)
from .messages.credential_proposal import CredentialProposal
from .messages.credential_request import CredentialRequest
from .messages.inner.credential_preview import CredentialPreview
from .models.credential_exchange import (
    V10CredentialExchange,
)

LOGGER = logging.getLogger(__name__)


class CredentialManagerError(BaseError):
    """Credential error."""


class CredentialManager:
    """Class for managing credentials."""

    def __init__(self, profile: Profile):
        """
        Initialize a CredentialManager.

        Args:
            profile: The profile instance for this credential manager

        """
        self._profile = profile

    @property
    def profile(self) -> Profile:
        """
        Accessor for the current profile instance.

        Returns:
            The profile instance for this credential manager

        """
        return self._profile

    async def _match_sent_cred_def_id(self, tag_query: Mapping[str, str]) -> str:
        """Return most recent matching id of cred def that agent sent to ledger."""

        async with self._profile.session() as session:
            storage = session.inject(BaseStorage)
            found = await storage.find_all_records(
                type_filter=CRED_DEF_SENT_RECORD_TYPE, tag_query=tag_query
            )
        if not found:
            raise CredentialManagerError(
                f"Issuer has no operable cred def for proposal spec {tag_query}"
            )
        # several records may match the query; the one with the newest epoch wins
        return max(found, key=lambda r: int(r.tags["epoch"])).tags["cred_def_id"]

    async def prepare_send(
        self,
        connection_id: str,
        credential_proposal: CredentialProposal,
        auto_remove: bool = None,
        comment: str = None,
    ) -> Tuple[V10CredentialExchange, CredentialOffer]:
        """
        Set up a new credential exchange for an automated send.

        Args:
            connection_id: Connection to create offer for
            credential_proposal: The credential proposal with preview
            auto_remove: Flag to automatically remove the record on completion

        Returns:
            A tuple of the new credential exchange record and credential offer message

        """
        # default auto-remove from the profile setting unless caller overrides
        if auto_remove is None:
            auto_remove = not self._profile.settings.get("preserve_exchange_records")
        # auto_issue=True: the issuer side of this automated exchange will
        # respond to the resulting credential request without operator action
        credential_exchange = V10CredentialExchange(
            connection_id=connection_id,
            initiator=V10CredentialExchange.INITIATOR_SELF,
            role=V10CredentialExchange.ROLE_ISSUER,
            credential_proposal_dict=credential_proposal,
            auto_issue=True,
            auto_remove=auto_remove,
            trace=(credential_proposal._trace is not None),
        )
        (credential_exchange, credential_offer) = await self.create_offer(
            cred_ex_record=credential_exchange,
            counter_proposal=None,
            comment=comment,
        )
        return (credential_exchange, credential_offer)

    async def create_proposal(
        self,
        connection_id: str,
        *,
        auto_offer: bool = None,
        auto_remove: bool = None,
        comment: str = None,
        credential_preview: CredentialPreview = None,
        schema_id: str = None,
        schema_issuer_did: str = None,
        schema_name: str = None,
        schema_version: str = None,
        cred_def_id: str = None,
        issuer_did: str = None,
        trace: bool = False,
    ) -> V10CredentialExchange:
        """
        Create a credential proposal.

        Args:
            connection_id: Connection to create proposal for
            auto_offer: Should this proposal request automatically be handled to
                offer a credential
            auto_remove: Should the record be automatically removed on completion
            comment: Optional human-readable comment to include in proposal
            credential_preview: The credential preview to use to create
                the credential proposal
            schema_id: Schema id for credential proposal
            schema_issuer_did: Schema issuer DID for credential proposal
            schema_name: Schema name for credential proposal
            schema_version: Schema version for credential proposal
            cred_def_id: Credential definition id for credential proposal
            issuer_did: Issuer DID for credential proposal
            trace: Whether to apply the trace decorator to the proposal message

        Returns:
            Resulting credential exchange record including credential proposal

        """
        credential_proposal_message = CredentialProposal(
            comment=comment,
            credential_proposal=credential_preview,
            schema_id=schema_id,
            schema_issuer_did=schema_issuer_did,
            schema_name=schema_name,
            schema_version=schema_version,
            cred_def_id=cred_def_id,
            issuer_did=issuer_did,
        )
        credential_proposal_message.assign_trace_decorator(
            self._profile.settings, trace
        )

        if auto_remove is None:
            auto_remove = not self._profile.settings.get("preserve_exchange_records")
        cred_ex_record = V10CredentialExchange(
            connection_id=connection_id,
            thread_id=credential_proposal_message._thread_id,
            initiator=V10CredentialExchange.INITIATOR_SELF,
            role=V10CredentialExchange.ROLE_HOLDER,
            state=V10CredentialExchange.STATE_PROPOSAL_SENT,
            credential_proposal_dict=credential_proposal_message,
            auto_offer=auto_offer,
            auto_remove=auto_remove,
            trace=trace,
        )
        async with self._profile.session() as session:
            await cred_ex_record.save(session, reason="create credential proposal")

        return cred_ex_record

    async def receive_proposal(
        self, message: CredentialProposal, connection_id: str
    ) -> V10CredentialExchange:
        """
        Receive a credential proposal.

        Args:
            message: Credential proposal received
            connection_id: Connection identifier of the sender

        Returns:
            The resulting credential exchange record, created

        """
        # at this point, cred def and schema still open to potential negotiation
        cred_ex_record = V10CredentialExchange(
            connection_id=connection_id,
            thread_id=message._thread_id,
            initiator=V10CredentialExchange.INITIATOR_EXTERNAL,
            role=V10CredentialExchange.ROLE_ISSUER,
            state=V10CredentialExchange.STATE_PROPOSAL_RECEIVED,
            credential_proposal_dict=message,
            # debug settings may flip on automated responses at each step
            auto_offer=self._profile.settings.get(
                "debug.auto_respond_credential_proposal"
            ),
            auto_issue=self._profile.settings.get(
                "debug.auto_respond_credential_request"
            ),
            auto_remove=not self._profile.settings.get("preserve_exchange_records"),
            trace=(message._trace is not None),
        )
        async with self._profile.session() as session:
            await cred_ex_record.save(session, reason="receive credential proposal")

        return cred_ex_record

    async def create_offer(
        self,
        cred_ex_record: V10CredentialExchange,
        counter_proposal: CredentialProposal = None,
        comment: str = None,
    ) -> Tuple[V10CredentialExchange, CredentialOffer]:
        """
        Create a credential offer, update credential exchange record.

        Args:
            cred_ex_record: Credential exchange to create offer for
            counter_proposal: optional proposal replacing the record's original
            comment: optional human-readable comment to set in offer message

        Returns:
            A tuple (credential exchange record, credential offer message)

        """

        async def _create(cred_def_id):
            # build a fresh indy offer via the issuer for this cred def
            issuer = self._profile.inject(IndyIssuer)
            offer_json = await issuer.create_credential_offer(cred_def_id)
            return json.loads(offer_json)

        credential_proposal_message = (
            counter_proposal
            if counter_proposal
            else cred_ex_record.credential_proposal_dict
        )
        credential_proposal_message.assign_trace_decorator(
            self._profile.settings, cred_ex_record.trace
        )
        # resolve the cred def this agent sent to the ledger that matches
        # whichever cred-def filter tags the proposal populated
        cred_def_id = await self._match_sent_cred_def_id(
            {
                t: getattr(credential_proposal_message, t)
                for t in CRED_DEF_TAGS
                if getattr(credential_proposal_message, t)
            }
        )

        credential_preview = credential_proposal_message.credential_proposal

        # vet attributes: preview must name exactly the schema's attributes
        ledger_exec_inst = self._profile.inject(IndyLedgerRequestsExecutor)
        ledger = (
            await ledger_exec_inst.get_ledger_for_identifier(
                cred_def_id,
                txn_record_type=GET_CRED_DEF,
            )
        )[1]
        async with ledger:
            schema_id = await ledger.credential_definition_id2schema_id(cred_def_id)
            schema = await ledger.get_schema(schema_id)
        schema_attrs = {attr for attr in schema["attrNames"]}
        preview_attrs = {attr for attr in credential_preview.attr_dict()}
        if preview_attrs != schema_attrs:
            raise CredentialManagerError(
                f"Preview attributes {preview_attrs} "
                f"mismatch corresponding schema attributes {schema_attrs}"
            )

        credential_offer = None
        cache_key = f"credential_offer::{cred_def_id}"
        cache = self._profile.inject_or(BaseCache)
        if cache:
            # reuse a cached offer for this cred def (TTL 3600s) when available
            async with cache.acquire(cache_key) as entry:
                if entry.result:
                    credential_offer = entry.result
                else:
                    credential_offer = await _create(cred_def_id)
                    await entry.set_result(credential_offer, 3600)
        if not credential_offer:
            # no cache configured: create the offer directly
            credential_offer = await _create(cred_def_id)

        credential_offer_message = CredentialOffer(
            comment=comment,
            credential_preview=credential_preview,
            offers_attach=[CredentialOffer.wrap_indy_offer(credential_offer)],
        )

        credential_offer_message._thread = {"thid": cred_ex_record.thread_id}
        credential_offer_message.assign_trace_decorator(
            self._profile.settings, cred_ex_record.trace
        )

        cred_ex_record.thread_id = credential_offer_message._thread_id
        cred_ex_record.schema_id = credential_offer["schema_id"]
        cred_ex_record.credential_definition_id = credential_offer["cred_def_id"]
        cred_ex_record.state = V10CredentialExchange.STATE_OFFER_SENT
        cred_ex_record.credential_proposal_dict = (  # any counter replaces original
            credential_proposal_message
        )
        cred_ex_record.credential_offer = credential_offer
        cred_ex_record.credential_offer_dict = credential_offer_message

        async with self._profile.session() as session:
            await cred_ex_record.save(session, reason="create credential offer")

        return (cred_ex_record, credential_offer_message)

    async def receive_offer(
        self, message: CredentialOffer, connection_id: str
    ) -> V10CredentialExchange:
        """
        Receive a credential offer.

        Args:
            message: Credential offer received
            connection_id: Connection identifier of the sender

        Returns:
            The credential exchange record, updated

        """
        credential_preview = message.credential_preview
        indy_offer = message.indy_offer(0)
        schema_id = indy_offer["schema_id"]
        cred_def_id = indy_offer["cred_def_id"]

        credential_proposal_dict = CredentialProposal(
            comment=message.comment,
            credential_proposal=credential_preview,
            schema_id=schema_id,
            cred_def_id=cred_def_id,
        )

        async with self._profile.transaction() as txn:
            # Get credential exchange record (holder sent proposal first)
            # or create it (issuer sent offer first)
            try:
                cred_ex_record = await (
                    V10CredentialExchange.retrieve_by_connection_and_thread(
                        txn, connection_id, message._thread_id, for_update=True
                    )
                )
            except StorageNotFoundError:  # issuer sent this offer free of any proposal
                cred_ex_record = V10CredentialExchange(
                    connection_id=connection_id,
                    thread_id=message._thread_id,
                    initiator=V10CredentialExchange.INITIATOR_EXTERNAL,
                    role=V10CredentialExchange.ROLE_HOLDER,
                    auto_remove=not self._profile.settings.get(
                        "preserve_exchange_records"
                    ),
                    trace=(message._trace is not None),
                )
            else:
                # record pre-existed: it must still await an offer on our proposal
                if cred_ex_record.state != V10CredentialExchange.STATE_PROPOSAL_SENT:
                    raise CredentialManagerError(
                        f"Credential exchange {cred_ex_record.credential_exchange_id} "
                        f"in {cred_ex_record.state} state "
                        f"(must be {V10CredentialExchange.STATE_PROPOSAL_SENT})"
                    )

            cred_ex_record.credential_proposal_dict = credential_proposal_dict
            cred_ex_record.credential_offer = indy_offer
            cred_ex_record.state = V10CredentialExchange.STATE_OFFER_RECEIVED
            cred_ex_record.schema_id = schema_id
            cred_ex_record.credential_definition_id = cred_def_id

            await cred_ex_record.save(txn, reason="receive credential offer")
            await txn.commit()

        return cred_ex_record

    async def create_request(
        self, cred_ex_record: V10CredentialExchange, holder_did: str
    ) -> Tuple[V10CredentialExchange, CredentialRequest]:
        """
        Create a credential request.

        Args:
            cred_ex_record: Credential exchange record for which to create request
            holder_did: holder DID

        Returns:
            A tuple (credential exchange record, credential request message)

        """
        if cred_ex_record.state != V10CredentialExchange.STATE_OFFER_RECEIVED:
            raise CredentialManagerError(
                f"Credential exchange {cred_ex_record.credential_exchange_id} "
                f"in {cred_ex_record.state} state "
                f"(must be {V10CredentialExchange.STATE_OFFER_RECEIVED})"
            )

        credential_definition_id = cred_ex_record.credential_definition_id
        cred_offer_ser = cred_ex_record._credential_offer.ser
        cred_req_ser = None
        cred_req_meta = None

        async def _create():
            # fetch the cred def from the appropriate ledger, then have the
            # holder build the request (and its metadata) against the offer
            ledger_exec_inst = self._profile.inject(IndyLedgerRequestsExecutor)
            ledger = (
                await ledger_exec_inst.get_ledger_for_identifier(
                    credential_definition_id,
                    txn_record_type=GET_CRED_DEF,
                )
            )[1]
            async with ledger:
                credential_definition = await ledger.get_credential_definition(
                    credential_definition_id
                )

            holder = self._profile.inject(IndyHolder)
            request_json, metadata_json = await holder.create_credential_request(
                cred_offer_ser,
                credential_definition,
                holder_did,
            )
            return {
                "request": json.loads(request_json),
                "metadata": json.loads(metadata_json),
            }

        if cred_ex_record.credential_request:
            # duplicate call: warn and reuse the request already on the record
            LOGGER.warning(
                "create_request called multiple times for v1.0 credential exchange: %s",
                cred_ex_record.credential_exchange_id,
            )
            cred_req_ser = cred_ex_record._credential_request.ser
            cred_req_meta = cred_ex_record.credential_request_metadata
        else:
            # key the cache on cred def, holder DID and the offer's nonce so a
            # retransmitted offer reuses the identical request (TTL 3600s)
            nonce = cred_offer_ser["nonce"]
            cache_key = (
                f"credential_request::{credential_definition_id}::{holder_did}::{nonce}"
            )
            cred_req_result = None
            cache = self._profile.inject_or(BaseCache)
            if cache:
                async with cache.acquire(cache_key) as entry:
                    if entry.result:
                        cred_req_result = entry.result
                    else:
                        cred_req_result = await _create()
                        await entry.set_result(cred_req_result, 3600)
            if not cred_req_result:
                cred_req_result = await _create()

            cred_req_ser = cred_req_result["request"]
            cred_req_meta = cred_req_result["metadata"]

        credential_request_message = CredentialRequest(
            requests_attach=[CredentialRequest.wrap_indy_cred_req(cred_req_ser)]
        )
        credential_request_message._thread = {"thid": cred_ex_record.thread_id}
        credential_request_message.assign_trace_decorator(
            self._profile.settings, cred_ex_record.trace
        )

        # re-fetch and re-verify the record under a row lock before updating,
        # to guard against a concurrent state transition
        async with self._profile.transaction() as txn:
            cred_ex_record = await V10CredentialExchange.retrieve_by_id(
                txn, cred_ex_record.credential_exchange_id, for_update=True
            )
            if cred_ex_record.state != V10CredentialExchange.STATE_OFFER_RECEIVED:
                raise CredentialManagerError(
                    f"Credential exchange {cred_ex_record.credential_exchange_id} "
                    f"in {cred_ex_record.state} state "
                    f"(must be {V10CredentialExchange.STATE_OFFER_RECEIVED})"
                )

            cred_ex_record.credential_request = cred_req_ser
            cred_ex_record.credential_request_metadata = cred_req_meta
            cred_ex_record.state = V10CredentialExchange.STATE_REQUEST_SENT
            await cred_ex_record.save(txn, reason="create credential request")
            await txn.commit()

        return (cred_ex_record, credential_request_message)

    async def receive_request(self, message: CredentialRequest, connection_id: str):
        """
        Receive a credential request.

        Args:
            message: Credential request to receive
            connection_id: Connection identifier of the sender

        Returns:
            credential exchange record, retrieved and updated

        """
        assert len(message.requests_attach or []) == 1
        credential_request = message.indy_cred_req(0)

        async with self._profile.transaction() as txn:
            try:
                cred_ex_record = await (
                    V10CredentialExchange.retrieve_by_connection_and_thread(
                        txn, connection_id, message._thread_id, for_update=True
                    )
                )
            except StorageNotFoundError:
                # fall back to a connection-less record (e.g. out-of-band offer)
                # and bind it to this connection now
                try:
                    cred_ex_record = await V10CredentialExchange.retrieve_by_tag_filter(
                        txn,
                        {"thread_id": message._thread_id},
                        {"connection_id": None},
                        for_update=True,
                    )
                    cred_ex_record.connection_id = connection_id
                except StorageNotFoundError:
                    raise CredentialManagerError(
                        "Indy issue credential format can't start from credential request"
                    ) from None

            if cred_ex_record.state != V10CredentialExchange.STATE_OFFER_SENT:
                # tolerate out-of-order/duplicate requests: log and drop
                LOGGER.error(
                    "Skipping credential request; exchange state is %s (id=%s)",
                    cred_ex_record.state,
                    cred_ex_record.credential_exchange_id,
                )
                return None

            cred_ex_record.credential_request = credential_request
            cred_ex_record.state = V10CredentialExchange.STATE_REQUEST_RECEIVED
            await cred_ex_record.save(txn, reason="receive credential request")
            await txn.commit()

        return cred_ex_record

    async def issue_credential(
        self,
        cred_ex_record: V10CredentialExchange,
        *,
        comment: str = None,
        retries: int = 5,
    ) -> Tuple[V10CredentialExchange, CredentialIssue]:
        """
        Issue a credential.

        Args:
            cred_ex_record: The credential exchange record
                for which to issue a credential
            comment: optional human-readable comment pertaining to credential issue
            retries: how many more times to retry while awaiting an operable
                revocation registry

        Returns:
            Tuple: (Updated credential exchange record, credential message)

        """
        if cred_ex_record.state != V10CredentialExchange.STATE_REQUEST_RECEIVED:
            raise CredentialManagerError(
                f"Credential exchange {cred_ex_record.credential_exchange_id} "
                f"in {cred_ex_record.state} state "
                f"(must be {V10CredentialExchange.STATE_REQUEST_RECEIVED})"
            )

        schema_id = cred_ex_record.schema_id
        rev_reg = None
        credential_ser = None

        if cred_ex_record.credential:
            # duplicate call: reuse the credential already on the record
            LOGGER.warning(
                "issue_credential called multiple times for "
                + "credential exchange record %s - abstaining",
                cred_ex_record.credential_exchange_id,
            )
            credential_ser = cred_ex_record._credential.ser
        else:
            cred_offer_ser = cred_ex_record._credential_offer.ser
            cred_req_ser = cred_ex_record._credential_request.ser
            ledger_exec_inst = self._profile.inject(IndyLedgerRequestsExecutor)
            ledger = (
                await ledger_exec_inst.get_ledger_for_identifier(
                    schema_id,
                    txn_record_type=GET_SCHEMA,
                )
            )[1]
            async with ledger:
                schema = await ledger.get_schema(schema_id)
                credential_definition = await ledger.get_credential_definition(
                    cred_ex_record.credential_definition_id
                )

            tails_path = None
            if credential_definition["value"].get("revocation"):
                # cred def supports revocation: an active registry (and its
                # local tails file) is required before issuing
                revoc = IndyRevocation(self._profile)
                try:
                    active_rev_reg_rec = await revoc.get_active_issuer_rev_reg_record(
                        cred_ex_record.credential_definition_id
                    )
                    rev_reg = await active_rev_reg_rec.get_registry()
                    cred_ex_record.revoc_reg_id = active_rev_reg_rec.revoc_reg_id
                    tails_path = rev_reg.tails_local_path
                    await rev_reg.get_or_fetch_local_tails_path()

                except StorageNotFoundError:
                    # no active registry yet: check whether one is at least posted
                    async with self._profile.session() as session:
                        posted_rev_reg_recs = (
                            await IssuerRevRegRecord.query_by_cred_def_id(
                                session,
                                cred_ex_record.credential_definition_id,
                                state=IssuerRevRegRecord.STATE_POSTED,
                            )
                        )
                    if not posted_rev_reg_recs:
                        # Send next 2 rev regs, publish tails files in background
                        async with self._profile.session() as session:
                            old_rev_reg_recs = sorted(
                                await IssuerRevRegRecord.query_by_cred_def_id(
                                    session,
                                    cred_ex_record.credential_definition_id,
                                )
                            )  # prefer to reuse prior rev reg size
                        cred_def_id = cred_ex_record.credential_definition_id
                        rev_reg_size = (
                            old_rev_reg_recs[0].max_cred_num
                            if old_rev_reg_recs
                            else None
                        )
                        for _ in range(2):
                            await notify_revocation_reg_event(
                                self.profile,
                                cred_def_id,
                                rev_reg_size,
                                auto_create_rev_reg=True,
                            )

                    if retries > 0:
                        # wait and recurse with one fewer retry remaining
                        LOGGER.info(
                            "Waiting 2s on posted rev reg for cred def %s, retrying",
                            cred_ex_record.credential_definition_id,
                        )
                        await asyncio.sleep(2)
                        return await self.issue_credential(
                            cred_ex_record=cred_ex_record,
                            comment=comment,
                            retries=retries - 1,
                        )

                    raise CredentialManagerError(
                        f"Cred def id {cred_ex_record.credential_definition_id} "
                        "has no active revocation registry"
                    ) from None

                del revoc  # helper no longer needed below

            credential_values = (
                cred_ex_record.credential_proposal_dict.credential_proposal.attr_dict(
                    decode=False
                )
            )
            issuer = self._profile.inject(IndyIssuer)
            try:
                (
                    credential_json,
                    cred_ex_record.revocation_id,
                ) = await issuer.create_credential(
                    schema,
                    cred_offer_ser,
                    cred_req_ser,
                    credential_values,
                    cred_ex_record.credential_exchange_id,
                    cred_ex_record.revoc_reg_id,
                    tails_path,
                )
                credential_ser = json.loads(credential_json)

                # If the rev reg is now full
                if rev_reg and rev_reg.max_creds == int(cred_ex_record.revocation_id):
                    async with self._profile.session() as session:
                        await active_rev_reg_rec.set_state(
                            session,
                            IssuerRevRegRecord.STATE_FULL,
                        )

                    # Send next 1 rev reg, publish tails file in background
                    cred_def_id = cred_ex_record.credential_definition_id
                    rev_reg_size = active_rev_reg_rec.max_cred_num
                    await notify_revocation_reg_event(
                        self.profile,
                        cred_def_id,
                        rev_reg_size,
                        auto_create_rev_reg=True,
                    )

            except IndyIssuerRevocationRegistryFullError:
                # unlucky: duelling instance issued last cred near same time as us
                async with self._profile.session() as session:
                    await active_rev_reg_rec.set_state(
                        session,
                        IssuerRevRegRecord.STATE_FULL,
                    )

                if retries > 0:
                    # use next rev reg; at worst, lucky instance is putting one up
                    LOGGER.info(
                        "Waiting 1s and retrying: revocation registry %s is full",
                        active_rev_reg_rec.revoc_reg_id,
                    )
                    await asyncio.sleep(1)
                    return await self.issue_credential(
                        cred_ex_record=cred_ex_record,
                        comment=comment,
                        retries=retries - 1,
                    )

                raise

        # re-verify state under a row lock before marking the record issued
        async with self._profile.transaction() as txn:
            cred_ex_record = await V10CredentialExchange.retrieve_by_id(
                txn, cred_ex_record.credential_exchange_id, for_update=True
            )
            if cred_ex_record.state != V10CredentialExchange.STATE_REQUEST_RECEIVED:
                raise CredentialManagerError(
                    f"Credential exchange {cred_ex_record.credential_exchange_id} "
                    f"in {cred_ex_record.state} state "
                    f"(must be {V10CredentialExchange.STATE_REQUEST_RECEIVED})"
                )

            cred_ex_record.state = V10CredentialExchange.STATE_ISSUED
            cred_ex_record.credential = credential_ser
            await cred_ex_record.save(txn, reason="issue credential")
            await txn.commit()

        credential_message = CredentialIssue(
            comment=comment,
            credentials_attach=[
                CredentialIssue.wrap_indy_credential(cred_ex_record._credential.ser)
            ],
        )
        credential_message._thread = {"thid": cred_ex_record.thread_id}
        credential_message.assign_trace_decorator(
            self._profile.settings, cred_ex_record.trace
        )

        return (cred_ex_record, credential_message)

    async def receive_credential(
        self, message: CredentialIssue, connection_id: str
    ) -> V10CredentialExchange:
        """
        Receive a credential from an issuer.

        Hold in storage potentially to be processed by controller before storing.

        Returns:
            Credential exchange record, retrieved and updated

        """
        assert len(message.credentials_attach or []) == 1
        raw_credential = message.indy_credential(0)

        async with self._profile.transaction() as txn:
            try:
                cred_ex_record = await (
                    V10CredentialExchange.retrieve_by_connection_and_thread(
                        txn, connection_id, message._thread_id, for_update=True
                    )
                )
            except StorageNotFoundError:
                raise CredentialManagerError(
                    "No credential exchange record found for received credential"
                ) from None

            if cred_ex_record.state != V10CredentialExchange.STATE_REQUEST_SENT:
                raise CredentialManagerError(
                    f"Credential exchange {cred_ex_record.credential_exchange_id} "
                    f"in {cred_ex_record.state} state "
                    f"(must be {V10CredentialExchange.STATE_REQUEST_SENT})"
                )

            cred_ex_record.raw_credential = raw_credential
            cred_ex_record.state = V10CredentialExchange.STATE_CREDENTIAL_RECEIVED

            await cred_ex_record.save(txn, reason="receive credential")
            await txn.commit()

        return cred_ex_record

    async def store_credential(
        self, cred_ex_record: V10CredentialExchange, credential_id: str = None
    ) -> V10CredentialExchange:
        """
        Store a credential in holder wallet; send ack to issuer.

        Args:
            cred_ex_record: credential exchange record
                with credential to store and ack
            credential_id: optional credential identifier to override default on storage

        Returns:
            Updated credential exchange record

        """
        if cred_ex_record.state != V10CredentialExchange.STATE_CREDENTIAL_RECEIVED:
            raise CredentialManagerError(
                f"Credential exchange {cred_ex_record.credential_exchange_id} "
                f"in {cred_ex_record.state} state "
                f"(must be {V10CredentialExchange.STATE_CREDENTIAL_RECEIVED})"
            )

        raw_cred_serde = cred_ex_record._raw_credential
        revoc_reg_def = None
        # fetch cred def (and rev reg def, if the credential is revocable)
        # from the ledger that holds this cred def id
        ledger_exec_inst = self._profile.inject(IndyLedgerRequestsExecutor)
        ledger = (
            await ledger_exec_inst.get_ledger_for_identifier(
                raw_cred_serde.de.cred_def_id,
                txn_record_type=GET_CRED_DEF,
            )
        )[1]
        async with ledger:
            credential_definition = await ledger.get_credential_definition(
                raw_cred_serde.de.cred_def_id
            )
            if raw_cred_serde.de.rev_reg_id:
                revoc_reg_def = await ledger.get_revoc_reg_def(
                    raw_cred_serde.de.rev_reg_id
                )

        holder = self._profile.inject(IndyHolder)
        if (
            cred_ex_record.credential_proposal_dict
            and cred_ex_record.credential_proposal_dict.credential_proposal
        ):
            mime_types = (
                cred_ex_record.credential_proposal_dict.credential_proposal.mime_types()
            )
        else:
            mime_types = None

        if revoc_reg_def:
            # ensure the tails file is available locally before storing
            revoc_reg = RevocationRegistry.from_definition(revoc_reg_def, True)
            await revoc_reg.get_or_fetch_local_tails_path()
        try:
            credential_id = await holder.store_credential(
                credential_definition,
                raw_cred_serde.ser,
                cred_ex_record.credential_request_metadata,
                mime_types,
                credential_id=credential_id,
                rev_reg_def=revoc_reg_def,
            )
        except IndyHolderError as e:
            LOGGER.error("Error storing credential: %s: %s", e.error_code, e.message)
            raise e

        # read the stored credential back to capture its revocation identifiers
        credential_json = await holder.get_credential(credential_id)
        credential = json.loads(credential_json)

        async with self._profile.transaction() as txn:
            cred_ex_record = await V10CredentialExchange.retrieve_by_id(
                txn, cred_ex_record.credential_exchange_id, for_update=True
            )
            if cred_ex_record.state != V10CredentialExchange.STATE_CREDENTIAL_RECEIVED:
                raise CredentialManagerError(
                    f"Credential exchange {cred_ex_record.credential_exchange_id} "
                    f"in {cred_ex_record.state} state "
                    f"(must be {V10CredentialExchange.STATE_CREDENTIAL_RECEIVED})"
                )

            cred_ex_record.credential_id = credential_id
            cred_ex_record.credential = credential
            cred_ex_record.revoc_reg_id = credential.get("rev_reg_id", None)
            cred_ex_record.revocation_id = credential.get("cred_rev_id", None)

            await cred_ex_record.save(txn, reason="store credential")
            await txn.commit()

        return cred_ex_record

    async def send_credential_ack(
        self,
        cred_ex_record: V10CredentialExchange,
    ) -> Tuple[V10CredentialExchange, CredentialAck]:
        """
        Create, send, and return ack message for input credential exchange record.

        Delete credential exchange record if set to auto-remove.

        Returns:
            a tuple of the updated credential exchange record
            and the credential ack message for tracing

        """
        credential_ack_message = CredentialAck()
        credential_ack_message.assign_thread_id(
            cred_ex_record.thread_id, cred_ex_record.parent_thread_id
        )
        credential_ack_message.assign_trace_decorator(
            self._profile.settings, cred_ex_record.trace
        )

        try:
            async with self._profile.transaction() as txn:
                try:
                    cred_ex_record = await V10CredentialExchange.retrieve_by_id(
                        txn, cred_ex_record.credential_exchange_id, for_update=True
                    )
                except StorageNotFoundError:
                    LOGGER.warning(
                        "Skipping credential exchange ack, record not found: '%s'",
                        cred_ex_record.credential_exchange_id,
                    )
                    return (cred_ex_record, None)

                if (
                    cred_ex_record.state
                    != V10CredentialExchange.STATE_CREDENTIAL_RECEIVED
                ):
                    LOGGER.warning(
                        "Skipping credential exchange ack, state is '%s' for record '%s'",
                        cred_ex_record.state,
                        cred_ex_record.credential_exchange_id,
                    )
                    return (cred_ex_record, None)

                cred_ex_record.state = V10CredentialExchange.STATE_ACKED
                await cred_ex_record.save(txn, reason="ack credential")
                await txn.commit()

            if cred_ex_record.auto_remove:
                async with self._profile.session() as session:
                    await cred_ex_record.delete_record(session)  # all done: delete

        except StorageError:
            LOGGER.exception(
                "Error updating credential exchange"
            )  # holder still owes an ack: carry on

        responder = self._profile.inject_or(BaseResponder)
        if responder:
            await responder.send_reply(
                credential_ack_message,
                connection_id=cred_ex_record.connection_id,
            )
        else:
            LOGGER.warning(
                "Configuration has no BaseResponder: cannot ack credential on %s",
                cred_ex_record.thread_id,
            )

        return (cred_ex_record, credential_ack_message)

    async def receive_credential_ack(
        self, message: CredentialAck, connection_id: str
    ) -> V10CredentialExchange:
        """
        Receive credential ack from holder.

        Returns:
            credential exchange record, retrieved and updated

        """
        async with self._profile.transaction() as txn:
            try:
                cred_ex_record = await (
                    V10CredentialExchange.retrieve_by_connection_and_thread(
                        txn, connection_id, message._thread_id, for_update=True
                    )
                )
            except StorageNotFoundError:
                LOGGER.warning(
                    "Skip ack message on credential exchange, record not found %s",
                    message._thread_id,
                )
                return None

            if cred_ex_record.state == V10CredentialExchange.STATE_ACKED:
                # idempotent: duplicate ack is a no-op
                return None

            cred_ex_record.state = V10CredentialExchange.STATE_ACKED
            await cred_ex_record.save(txn, reason="credential acked")
            await txn.commit()

        if cred_ex_record.auto_remove:
            async with self._profile.session() as session:
                await cred_ex_record.delete_record(session)  # all done: delete

        return cred_ex_record

    async def receive_problem_report(
        self, message: CredentialProblemReport, connection_id: str
    ):
        """
        Receive problem report.

        Returns:
            credential exchange record, retrieved and updated

        """
        async with self._profile.transaction() as txn:
            try:
                cred_ex_record = await (
                    V10CredentialExchange.retrieve_by_connection_and_thread(
                        txn, connection_id, message._thread_id, for_update=True
                    )
                )
            except StorageNotFoundError:
                LOGGER.warning(
                    "Skip problem report on credential exchange, record not found %s",
                    message._thread_id,
                )
                return None

            cred_ex_record.state = V10CredentialExchange.STATE_ABANDONED
            # default to the generic issuance-abandoned reason when the
            # report carries no explicit code
            code = message.description.get(
                "code",
                ProblemReportReason.ISSUANCE_ABANDONED.value,
            )
            cred_ex_record.error_msg = f"{code}: {message.description.get('en', code)}"
            await cred_ex_record.save(txn, reason="received problem report")
            await txn.commit()

        return cred_ex_record
""" Manager and Serializer for libraries. """ import logging from typing import ( Any, Dict, List, Optional, ) from sqlalchemy import and_, false, not_, or_, true from sqlalchemy.orm.exc import MultipleResultsFound from sqlalchemy.orm.exc import NoResultFound from galaxy import ( exceptions, util, ) from galaxy.managers import ( folders, roles, ) from galaxy.managers.context import ProvidesAppContext from galaxy.schema.fields import EncodedDatabaseIdField from galaxy.util import ( pretty_print_time_interval, unicodify, ) log = logging.getLogger(__name__) # ============================================================================= class LibraryManager: """ Interface/service object for interacting with libraries. """ def get(self, trans, decoded_library_id, check_accessible=True): """ Get the library from the DB. :param decoded_library_id: decoded library id :type decoded_library_id: int :param check_accessible: flag whether to check that user can access item :type check_accessible: bool :returns: the requested library :rtype: galaxy.model.Library """ try: library = trans.sa_session.query(trans.app.model.Library).filter(trans.app.model.Library.table.c.id == decoded_library_id).one() except MultipleResultsFound: raise exceptions.InconsistentDatabase('Multiple libraries found with the same id.') except NoResultFound: raise exceptions.RequestParameterInvalidException('No library found with the id provided.') except Exception as e: raise exceptions.InternalServerError('Error loading from the database.' + unicodify(e)) library = self.secure(trans, library, check_accessible) return library def create(self, trans, name, description='', synopsis=''): """ Create a new library. 
""" if not trans.user_is_admin: raise exceptions.ItemAccessibilityException('Only administrators can create libraries.') else: library = trans.app.model.Library(name=name, description=description, synopsis=synopsis) root_folder = trans.app.model.LibraryFolder(name=name, description='') library.root_folder = root_folder trans.sa_session.add_all((library, root_folder)) trans.sa_session.flush() return library def update(self, trans, library, name=None, description=None, synopsis=None): """ Update the given library """ changed = False if not trans.user_is_admin: raise exceptions.ItemAccessibilityException('Only administrators can update libraries.') if library.deleted: raise exceptions.RequestParameterInvalidException('You cannot modify a deleted library. Undelete it first.') if name is not None: library.name = name changed = True # When library is renamed the root folder has to be renamed too. folder_manager = folders.FolderManager() folder_manager.update(trans, library.root_folder, name=name) if description is not None: library.description = description changed = True if synopsis is not None: library.synopsis = synopsis changed = True if changed: trans.sa_session.add(library) trans.sa_session.flush() return library def delete(self, trans, library, undelete=False): """ Mark given library deleted/undeleted based on the flag. """ if not trans.user_is_admin: raise exceptions.ItemAccessibilityException('Only administrators can delete and undelete libraries.') if undelete: library.deleted = False else: library.deleted = True trans.sa_session.add(library) trans.sa_session.flush() return library def list(self, trans, deleted: Optional[bool] = False): """ Return a list of libraries from the DB. 
:param deleted: if True, show only ``deleted`` libraries, if False show only ``non-deleted`` :type deleted: boolean (optional) :returns: query that will emit all accessible libraries :rtype: sqlalchemy query :returns: dict of 3 sets with available actions for user's accessible libraries and a set of ids of all public libraries. These are used for limiting the number of queries when dictifying the libraries later on. :rtype: dict """ is_admin = trans.user_is_admin query = trans.sa_session.query(trans.app.model.Library) library_access_action = trans.app.security_agent.permitted_actions.LIBRARY_ACCESS.action restricted_library_ids = {lp.library_id for lp in ( trans.sa_session.query(trans.model.LibraryPermissions).filter( trans.model.LibraryPermissions.table.c.action == library_access_action ).distinct())} prefetched_ids = {'restricted_library_ids': restricted_library_ids} if is_admin: if deleted is None: # Flag is not specified, do not filter on it. pass elif deleted: query = query.filter(trans.app.model.Library.table.c.deleted == true()) else: query = query.filter(trans.app.model.Library.table.c.deleted == false()) else: # Nonadmins can't see deleted libraries query = query.filter(trans.app.model.Library.table.c.deleted == false()) current_user_role_ids = [role.id for role in trans.get_current_user_roles()] all_actions = trans.sa_session.query(trans.model.LibraryPermissions).filter(trans.model.LibraryPermissions.table.c.role_id.in_(current_user_role_ids)) library_add_action = trans.app.security_agent.permitted_actions.LIBRARY_ADD.action library_modify_action = trans.app.security_agent.permitted_actions.LIBRARY_MODIFY.action library_manage_action = trans.app.security_agent.permitted_actions.LIBRARY_MANAGE.action accessible_restricted_library_ids = set() allowed_library_add_ids = set() allowed_library_modify_ids = set() allowed_library_manage_ids = set() for action in all_actions: if action.action == library_access_action: 
accessible_restricted_library_ids.add(action.library_id) if action.action == library_add_action: allowed_library_add_ids.add(action.library_id) if action.action == library_modify_action: allowed_library_modify_ids.add(action.library_id) if action.action == library_manage_action: allowed_library_manage_ids.add(action.library_id) query = query.filter(or_( not_(trans.model.Library.table.c.id.in_(restricted_library_ids)), trans.model.Library.table.c.id.in_(accessible_restricted_library_ids) )) prefetched_ids['allowed_library_add_ids'] = allowed_library_add_ids prefetched_ids['allowed_library_modify_ids'] = allowed_library_modify_ids prefetched_ids['allowed_library_manage_ids'] = allowed_library_manage_ids return query, prefetched_ids def secure(self, trans, library, check_accessible=True): """ Check if library is accessible to user. :param library: library :type library: galaxy.model.Library :param check_accessible: flag whether to check that user can access library :type check_accessible: bool :returns: the original library :rtype: galaxy.model.Library """ # all libraries are accessible to an admin if trans.user_is_admin: return library if check_accessible: library = self.check_accessible(trans, library) return library def check_accessible(self, trans, library): """ Check whether the library is accessible to current user. """ if not trans.app.security_agent.can_access_library(trans.get_current_user_roles(), library): raise exceptions.ObjectNotFound('Library with the id provided was not found.') elif library.deleted: raise exceptions.ObjectNotFound('Library with the id provided is deleted.') else: return library def get_library_dict(self, trans, library, prefetched_ids=None): """ Return library data in the form of a dictionary. :param library: library :type library: galaxy.model.Library :param prefetched_ids: dict of 3 sets with available actions for user's accessible libraries and a set of ids of all public libraries. 
These are used for limiting the number of queries when dictifying a set of libraries. :type prefetched_ids: dict :returns: dict with data about the library :rtype: dictionary """ restricted_library_ids = prefetched_ids.get('restricted_library_ids', None) if prefetched_ids else None allowed_library_add_ids = prefetched_ids.get('allowed_library_add_ids', None) if prefetched_ids else None allowed_library_modify_ids = prefetched_ids.get('allowed_library_modify_ids', None) if prefetched_ids else None allowed_library_manage_ids = prefetched_ids.get('allowed_library_manage_ids', None) if prefetched_ids else None library_dict = library.to_dict(view='element', value_mapper={'id': trans.security.encode_id, 'root_folder_id': trans.security.encode_id}) library_dict['public'] = False if (restricted_library_ids and library.id in restricted_library_ids) else True library_dict['create_time_pretty'] = pretty_print_time_interval(library.create_time, precise=True) if not trans.user_is_admin: if prefetched_ids: library_dict['can_user_add'] = True if (allowed_library_add_ids and library.id in allowed_library_add_ids) else False library_dict['can_user_modify'] = True if (allowed_library_modify_ids and library.id in allowed_library_modify_ids) else False library_dict['can_user_manage'] = True if (allowed_library_manage_ids and library.id in allowed_library_manage_ids) else False else: current_user_roles = trans.get_current_user_roles() library_dict['can_user_add'] = trans.app.security_agent.can_add_library_item(current_user_roles, library) library_dict['can_user_modify'] = trans.app.security_agent.can_modify_library_item(current_user_roles, library) library_dict['can_user_manage'] = trans.app.security_agent.can_manage_library_item(current_user_roles, library) else: library_dict['can_user_add'] = True library_dict['can_user_modify'] = True library_dict['can_user_manage'] = True return library_dict def get_current_roles(self, trans, library): """ Load all permissions currently related to 
the given library. :param library: the model object :type library: galaxy.model.Library :rtype: dictionary :returns: dict of current roles for all available permission types """ access_library_role_list = [(access_role.name, trans.security.encode_id(access_role.id)) for access_role in self.get_access_roles(trans, library)] modify_library_role_list = [(modify_role.name, trans.security.encode_id(modify_role.id)) for modify_role in self.get_modify_roles(trans, library)] manage_library_role_list = [(manage_role.name, trans.security.encode_id(manage_role.id)) for manage_role in self.get_manage_roles(trans, library)] add_library_item_role_list = [(add_role.name, trans.security.encode_id(add_role.id)) for add_role in self.get_add_roles(trans, library)] return dict(access_library_role_list=access_library_role_list, modify_library_role_list=modify_library_role_list, manage_library_role_list=manage_library_role_list, add_library_item_role_list=add_library_item_role_list) def get_access_roles(self, trans, library): """ Load access roles for all library permissions """ return set(library.get_access_roles(trans.app.security_agent)) def get_modify_roles(self, trans, library): """ Load modify roles for all library permissions """ return set(trans.app.security_agent.get_roles_for_action(library, trans.app.security_agent.permitted_actions.LIBRARY_MODIFY)) def get_manage_roles(self, trans, library): """ Load manage roles for all library permissions """ return set(trans.app.security_agent.get_roles_for_action(library, trans.app.security_agent.permitted_actions.LIBRARY_MANAGE)) def get_add_roles(self, trans, library): """ Load add roles for all library permissions """ return set(trans.app.security_agent.get_roles_for_action(library, trans.app.security_agent.permitted_actions.LIBRARY_ADD)) def set_permission_roles(self, trans, library, access_roles, modify_roles, manage_roles, add_roles): """ Set permissions on the given library. 
""" def make_public(self, trans, library): """ Makes the given library public (removes all access roles) """ trans.app.security_agent.make_library_public(library) return self.is_public(trans, library) def is_public(self, trans, library): """ Return true if lib is public. """ return trans.app.security_agent.library_is_public(library) def get_containing_library_from_library_dataset(trans, library_dataset): """Given a library_dataset, get the containing library""" folder = library_dataset.folder while folder.parent: folder = folder.parent # We have folder set to the library's root folder, which has the same name as the library for library in trans.sa_session.query(trans.model.Library).filter( and_(trans.model.Library.table.c.deleted == false(), trans.model.Library.table.c.name == folder.name)): # Just to double-check if library.root_folder == folder: return library return None class LibrariesManager: """ Interface/service object for sharing logic between controllers. """ def __init__( self, folder_manager: folders.FolderManager, library_manager: LibraryManager, role_manager: roles.RoleManager, ): self.folder_manager = folder_manager self.library_manager = library_manager self.role_manager = role_manager def index(self, trans: ProvidesAppContext, deleted: Optional[bool] = False) -> List[Any]: """Returns a list of summary data for all libraries. :param deleted: if True, show only ``deleted`` libraries, if False show only ``non-deleted`` :type deleted: boolean (optional) :returns: list of dictionaries containing library information :rtype: list .. seealso:: :attr:`galaxy.model.Library.dict_collection_visible_keys` """ query, prefetched_ids = self.library_manager.list(trans, deleted) libraries = [] for library in query: libraries.append(self.library_manager.get_library_dict(trans, library, prefetched_ids)) return libraries def show(self, trans, id: EncodedDatabaseIdField): """ Returns detailed information about a library. 
:param id: the encoded id of the library :type id: an encoded id string :param deleted: if True, allow information on a ``deleted`` library :type deleted: boolean :returns: detailed library information :rtype: dict .. seealso:: :attr:`galaxy.model.Library.dict_element_visible_keys` :raises: MalformedId, ObjectNotFound """ library = self.library_manager.get(trans, self.__decode_id(trans, id, 'library')) library_dict = self.library_manager.get_library_dict(trans, library) return library_dict def create(self, trans, payload: Dict[str, str]): """Creates a new library. .. note:: Currently, only admin users can create libraries. :param payload: dictionary structure containing:: :param name: (required) the new library's name :type name: str :param description: the new library's description :type description: str :param synopsis: the new library's synopsis :type synopsis: str :type payload: dict :returns: detailed library information :rtype: dict :raises: RequestParameterMissingException """ name = payload.get('name', None) if not name: raise exceptions.RequestParameterMissingException("Missing required parameter 'name'.") description = payload.get('description', '') synopsis = payload.get('synopsis', '') if synopsis in ['None', None]: synopsis = '' library = self.library_manager.create(trans, name, description, synopsis) library_dict = self.library_manager.get_library_dict(trans, library) return library_dict def update(self, trans, id: EncodedDatabaseIdField, payload: Dict[str, str]): """Updates the library defined by an ``encoded_id`` with the data in the payload. .. note:: Currently, only admin users can update libraries. Also the library must not be `deleted`. 
:param id: the encoded id of the library :type id: an encoded id string :param payload: dictionary structure containing:: :param name: new library's name, cannot be empty :type name: str :param description: new library's description :type description: str :param synopsis: new library's synopsis :type synopsis: str :type payload: dict :returns: detailed library information :rtype: dict :raises: RequestParameterMissingException """ library = self.library_manager.get(trans, self.__decode_id(trans, id, 'library')) name = payload.get('name', None) if name == '': raise exceptions.RequestParameterMissingException("Parameter 'name' of library is required. You cannot remove it.") description = payload.get('description', None) synopsis = payload.get('synopsis', None) updated_library = self.library_manager.update(trans, library, name, description, synopsis) library_dict = self.library_manager.get_library_dict(trans, updated_library) return library_dict def delete(self, trans, id: EncodedDatabaseIdField, undelete: Optional[bool] = False): """Marks the library with the given ``id`` as `deleted` (or removes the `deleted` mark if the `undelete` param is true) .. note:: Currently, only admin users can un/delete libraries. :param id: the encoded id of the library to un/delete :type id: an encoded id string :param undelete: (optional) flag specifying whether the item should be deleted or undeleted, defaults to false: :type undelete: bool :returns: detailed library information :rtype: dictionary .. 
seealso:: :attr:`galaxy.model.Library.dict_element_visible_keys` """ library = self.library_manager.get(trans, self.__decode_id(trans, id, 'library')) library = self.library_manager.delete(trans, library, undelete) library_dict = self.library_manager.get_library_dict(trans, library) return library_dict def get_permissions( self, trans, id: EncodedDatabaseIdField, scope: Optional[str] = 'current', is_library_access: Optional[bool] = False, page: Optional[int] = 1, page_limit: Optional[int] = 10, query: Optional[str] = None, ): """Load all permissions for the given library id and return it. :param id: the encoded id of the library :type id: an encoded id string :param scope: either 'current' or 'available' :type scope: string :param is_library_access: indicates whether the roles available for the library access are requested :type is_library_access: bool :returns: dictionary with all applicable permissions' values :rtype: dictionary :raises: InsufficientPermissionsException """ current_user_roles = trans.get_current_user_roles() is_admin = trans.user_is_admin library = self.library_manager.get(trans, self.__decode_id(trans, id, 'library')) if not (is_admin or trans.app.security_agent.can_manage_library_item(current_user_roles, library)): raise exceptions.InsufficientPermissionsException('You do not have proper permission to access permissions of this library.') if scope == 'current' or scope is None: roles = self.library_manager.get_current_roles(trans, library) return roles # Return roles that are available to select. 
elif scope == 'available': roles, total_roles = trans.app.security_agent.get_valid_roles(trans, library, query, page, page_limit, is_library_access) return_roles = [] for role in roles: role_id = trans.security.encode_id(role.id) return_roles.append(dict(id=role_id, name=role.name, type=role.type)) return dict(roles=return_roles, page=page, page_limit=page_limit, total=total_roles) else: raise exceptions.RequestParameterInvalidException("The value of 'scope' parameter is invalid. Alllowed values: current, available") def set_permissions(self, trans, id: EncodedDatabaseIdField, payload: Dict[str, Any]): """Set permissions of the given library to the given role ids. :param id: the encoded id of the library to set the permissions of :type id: an encoded id string :param payload: dictionary structure containing: :param action: (required) describes what action should be performed available actions: remove_restrictions, set_permissions :type action: str :param access_ids[]: list of Role.id defining roles that should have access permission on the library :type access_ids[]: string or list :param add_ids[]: list of Role.id defining roles that should have add item permission on the library :type add_ids[]: string or list :param manage_ids[]: list of Role.id defining roles that should have manage permission on the library :type manage_ids[]: string or list :param modify_ids[]: list of Role.id defining roles that should have modify permission on the library :type modify_ids[]: string or list :type: dictionary :returns: dict of current roles for all available permission types :rtype: dictionary :raises: RequestParameterInvalidException, InsufficientPermissionsException, InternalServerError RequestParameterMissingException """ is_admin = trans.user_is_admin current_user_roles = trans.get_current_user_roles() library = self.library_manager.get(trans, self.__decode_id(trans, id, 'library')) if not (is_admin or trans.app.security_agent.can_manage_library_item(current_user_roles, 
library)): raise exceptions.InsufficientPermissionsException('You do not have proper permission to modify permissions of this library.') new_access_roles_ids = util.listify(payload.get('access_ids[]', None)) new_add_roles_ids = util.listify(payload.get('add_ids[]', None)) new_manage_roles_ids = util.listify(payload.get('manage_ids[]', None)) new_modify_roles_ids = util.listify(payload.get('modify_ids[]', None)) action = payload.get('action', None) if action is None: if payload is not None: return self.set_permissions_old(trans, library, payload) else: raise exceptions.RequestParameterMissingException('The mandatory parameter "action" is missing.') elif action == 'remove_restrictions': is_public = self.library_manager.make_public(trans, library) if not is_public: raise exceptions.InternalServerError('An error occurred while making library public.') elif action == 'set_permissions': # ACCESS LIBRARY ROLES valid_access_roles = [] invalid_access_roles_names = [] for role_id in new_access_roles_ids: role = self.role_manager.get(trans, self.__decode_id(trans, role_id, 'role')) valid_roles, total_roles = trans.app.security_agent.get_valid_roles(trans, library, is_library_access=True) if role in valid_roles: valid_access_roles.append(role) else: invalid_access_roles_names.append(role_id) if len(invalid_access_roles_names) > 0: log.warning("The following roles could not be added to the library access permission: " + str(invalid_access_roles_names)) # ADD TO LIBRARY ROLES valid_add_roles = [] invalid_add_roles_names = [] for role_id in new_add_roles_ids: role = self.role_manager.get(trans, self.__decode_id(trans, role_id, 'role')) valid_roles, total_roles = trans.app.security_agent.get_valid_roles(trans, library) if role in valid_roles: valid_add_roles.append(role) else: invalid_add_roles_names.append(role_id) if len(invalid_add_roles_names) > 0: log.warning("The following roles could not be added to the add library item permission: " + str(invalid_add_roles_names)) # MANAGE 
LIBRARY ROLES valid_manage_roles = [] invalid_manage_roles_names = [] for role_id in new_manage_roles_ids: role = self.role_manager.get(trans, self.__decode_id(trans, role_id, 'role')) valid_roles, total_roles = trans.app.security_agent.get_valid_roles(trans, library) if role in valid_roles: valid_manage_roles.append(role) else: invalid_manage_roles_names.append(role_id) if len(invalid_manage_roles_names) > 0: log.warning("The following roles could not be added to the manage library permission: " + str(invalid_manage_roles_names)) # MODIFY LIBRARY ROLES valid_modify_roles = [] invalid_modify_roles_names = [] for role_id in new_modify_roles_ids: role = self.role_manager.get(trans, self.__decode_id(trans, role_id, 'role')) valid_roles, total_roles = trans.app.security_agent.get_valid_roles(trans, library) if role in valid_roles: valid_modify_roles.append(role) else: invalid_modify_roles_names.append(role_id) if len(invalid_modify_roles_names) > 0: log.warning("The following roles could not be added to the modify library permission: " + str(invalid_modify_roles_names)) permissions = {trans.app.security_agent.permitted_actions.LIBRARY_ACCESS: valid_access_roles} permissions.update({trans.app.security_agent.permitted_actions.LIBRARY_ADD: valid_add_roles}) permissions.update({trans.app.security_agent.permitted_actions.LIBRARY_MANAGE: valid_manage_roles}) permissions.update({trans.app.security_agent.permitted_actions.LIBRARY_MODIFY: valid_modify_roles}) trans.app.security_agent.set_all_library_permissions(trans, library, permissions) trans.sa_session.refresh(library) # Copy the permissions to the root folder trans.app.security_agent.copy_library_permissions(trans, library, library.root_folder) else: raise exceptions.RequestParameterInvalidException('The mandatory parameter "action" has an invalid value.' 
'Allowed values are: "remove_restrictions", set_permissions"') roles = self.library_manager.get_current_roles(trans, library) return roles def set_permissions_old(self, trans, library, payload): """ *** old implementation for backward compatibility *** Updates the library permissions. """ params = util.Params(payload) permissions = {} for k, v in trans.app.model.Library.permitted_actions.items(): role_params = params.get(k + '_in', []) in_roles = [trans.sa_session.query(trans.app.model.Role).get(trans.security.decode_id(x)) for x in util.listify(role_params)] permissions[trans.app.security_agent.get_action(v.action)] = in_roles trans.app.security_agent.set_all_library_permissions(trans, library, permissions) trans.sa_session.refresh(library) # Copy the permissions to the root folder trans.app.security_agent.copy_library_permissions(trans, library, library.root_folder) item = library.to_dict(view='element', value_mapper={'id': trans.security.encode_id, 'root_folder_id': trans.security.encode_id}) return item def __decode_id( self, trans: ProvidesAppContext, encoded_id, object_name: Optional[str] = None, ): """ Try to decode the id. :param object_name: Name of the object the id belongs to. (optional) :type object_name: str """ try: return trans.security.decode_id(encoded_id) except TypeError: raise exceptions.MalformedId(f"Malformed {object_name if object_name is not None else ""} id specified, unable to decode.") except ValueError: raise exceptions.MalformedId(f"Wrong {object_name if object_name is not None else ""} id specified, unable to decode.")
""" Manager and Serializer for libraries. """ import logging from typing import ( Any, Dict, List, Optional, ) from sqlalchemy import and_, false, not_, or_, true from sqlalchemy.orm.exc import MultipleResultsFound from sqlalchemy.orm.exc import NoResultFound from galaxy import ( exceptions, util, ) from galaxy.managers import ( folders, roles, ) from galaxy.managers.context import ProvidesAppContext from galaxy.schema.fields import EncodedDatabaseIdField from galaxy.util import ( pretty_print_time_interval, unicodify, ) log = logging.getLogger(__name__) # ============================================================================= class LibraryManager: """ Interface/service object for interacting with libraries. """ def get(self, trans, decoded_library_id, check_accessible=True): """ Get the library from the DB. :param decoded_library_id: decoded library id :type decoded_library_id: int :param check_accessible: flag whether to check that user can access item :type check_accessible: bool :returns: the requested library :rtype: galaxy.model.Library """ try: library = trans.sa_session.query(trans.app.model.Library).filter(trans.app.model.Library.table.c.id == decoded_library_id).one() except MultipleResultsFound: raise exceptions.InconsistentDatabase('Multiple libraries found with the same id.') except NoResultFound: raise exceptions.RequestParameterInvalidException('No library found with the id provided.') except Exception as e: raise exceptions.InternalServerError('Error loading from the database.' + unicodify(e)) library = self.secure(trans, library, check_accessible) return library def create(self, trans, name, description='', synopsis=''): """ Create a new library. 
""" if not trans.user_is_admin: raise exceptions.ItemAccessibilityException('Only administrators can create libraries.') else: library = trans.app.model.Library(name=name, description=description, synopsis=synopsis) root_folder = trans.app.model.LibraryFolder(name=name, description='') library.root_folder = root_folder trans.sa_session.add_all((library, root_folder)) trans.sa_session.flush() return library def update(self, trans, library, name=None, description=None, synopsis=None): """ Update the given library """ changed = False if not trans.user_is_admin: raise exceptions.ItemAccessibilityException('Only administrators can update libraries.') if library.deleted: raise exceptions.RequestParameterInvalidException('You cannot modify a deleted library. Undelete it first.') if name is not None: library.name = name changed = True # When library is renamed the root folder has to be renamed too. folder_manager = folders.FolderManager() folder_manager.update(trans, library.root_folder, name=name) if description is not None: library.description = description changed = True if synopsis is not None: library.synopsis = synopsis changed = True if changed: trans.sa_session.add(library) trans.sa_session.flush() return library def delete(self, trans, library, undelete=False): """ Mark given library deleted/undeleted based on the flag. """ if not trans.user_is_admin: raise exceptions.ItemAccessibilityException('Only administrators can delete and undelete libraries.') if undelete: library.deleted = False else: library.deleted = True trans.sa_session.add(library) trans.sa_session.flush() return library def list(self, trans, deleted: Optional[bool] = False): """ Return a list of libraries from the DB. 
:param deleted: if True, show only ``deleted`` libraries, if False show only ``non-deleted`` :type deleted: boolean (optional) :returns: query that will emit all accessible libraries :rtype: sqlalchemy query :returns: dict of 3 sets with available actions for user's accessible libraries and a set of ids of all public libraries. These are used for limiting the number of queries when dictifying the libraries later on. :rtype: dict """ is_admin = trans.user_is_admin query = trans.sa_session.query(trans.app.model.Library) library_access_action = trans.app.security_agent.permitted_actions.LIBRARY_ACCESS.action restricted_library_ids = {lp.library_id for lp in ( trans.sa_session.query(trans.model.LibraryPermissions).filter( trans.model.LibraryPermissions.table.c.action == library_access_action ).distinct())} prefetched_ids = {'restricted_library_ids': restricted_library_ids} if is_admin: if deleted is None: # Flag is not specified, do not filter on it. pass elif deleted: query = query.filter(trans.app.model.Library.table.c.deleted == true()) else: query = query.filter(trans.app.model.Library.table.c.deleted == false()) else: # Nonadmins can't see deleted libraries query = query.filter(trans.app.model.Library.table.c.deleted == false()) current_user_role_ids = [role.id for role in trans.get_current_user_roles()] all_actions = trans.sa_session.query(trans.model.LibraryPermissions).filter(trans.model.LibraryPermissions.table.c.role_id.in_(current_user_role_ids)) library_add_action = trans.app.security_agent.permitted_actions.LIBRARY_ADD.action library_modify_action = trans.app.security_agent.permitted_actions.LIBRARY_MODIFY.action library_manage_action = trans.app.security_agent.permitted_actions.LIBRARY_MANAGE.action accessible_restricted_library_ids = set() allowed_library_add_ids = set() allowed_library_modify_ids = set() allowed_library_manage_ids = set() for action in all_actions: if action.action == library_access_action: 
accessible_restricted_library_ids.add(action.library_id) if action.action == library_add_action: allowed_library_add_ids.add(action.library_id) if action.action == library_modify_action: allowed_library_modify_ids.add(action.library_id) if action.action == library_manage_action: allowed_library_manage_ids.add(action.library_id) query = query.filter(or_( not_(trans.model.Library.table.c.id.in_(restricted_library_ids)), trans.model.Library.table.c.id.in_(accessible_restricted_library_ids) )) prefetched_ids['allowed_library_add_ids'] = allowed_library_add_ids prefetched_ids['allowed_library_modify_ids'] = allowed_library_modify_ids prefetched_ids['allowed_library_manage_ids'] = allowed_library_manage_ids return query, prefetched_ids def secure(self, trans, library, check_accessible=True): """ Check if library is accessible to user. :param library: library :type library: galaxy.model.Library :param check_accessible: flag whether to check that user can access library :type check_accessible: bool :returns: the original library :rtype: galaxy.model.Library """ # all libraries are accessible to an admin if trans.user_is_admin: return library if check_accessible: library = self.check_accessible(trans, library) return library def check_accessible(self, trans, library): """ Check whether the library is accessible to current user. """ if not trans.app.security_agent.can_access_library(trans.get_current_user_roles(), library): raise exceptions.ObjectNotFound('Library with the id provided was not found.') elif library.deleted: raise exceptions.ObjectNotFound('Library with the id provided is deleted.') else: return library def get_library_dict(self, trans, library, prefetched_ids=None): """ Return library data in the form of a dictionary. :param library: library :type library: galaxy.model.Library :param prefetched_ids: dict of 3 sets with available actions for user's accessible libraries and a set of ids of all public libraries. 
 These are used for limiting the number of queries when dictifying a set of libraries.
        :type   prefetched_ids: dict

        :returns:   dict with data about the library
        :rtype:     dictionary
        """
        restricted_library_ids = prefetched_ids.get('restricted_library_ids', None) if prefetched_ids else None
        allowed_library_add_ids = prefetched_ids.get('allowed_library_add_ids', None) if prefetched_ids else None
        allowed_library_modify_ids = prefetched_ids.get('allowed_library_modify_ids', None) if prefetched_ids else None
        allowed_library_manage_ids = prefetched_ids.get('allowed_library_manage_ids', None) if prefetched_ids else None
        library_dict = library.to_dict(view='element', value_mapper={'id': trans.security.encode_id, 'root_folder_id': trans.security.encode_id})
        # Public unless the library appears in the prefetched restricted set.
        library_dict['public'] = False if (restricted_library_ids and library.id in restricted_library_ids) else True
        library_dict['create_time_pretty'] = pretty_print_time_interval(library.create_time, precise=True)
        if not trans.user_is_admin:
            if prefetched_ids:
                # Fast path: answer from the sets prefetched by list().
                library_dict['can_user_add'] = True if (allowed_library_add_ids and library.id in allowed_library_add_ids) else False
                library_dict['can_user_modify'] = True if (allowed_library_modify_ids and library.id in allowed_library_modify_ids) else False
                library_dict['can_user_manage'] = True if (allowed_library_manage_ids and library.id in allowed_library_manage_ids) else False
            else:
                # Slow path: ask the security agent per permission type.
                current_user_roles = trans.get_current_user_roles()
                library_dict['can_user_add'] = trans.app.security_agent.can_add_library_item(current_user_roles, library)
                library_dict['can_user_modify'] = trans.app.security_agent.can_modify_library_item(current_user_roles, library)
                library_dict['can_user_manage'] = trans.app.security_agent.can_manage_library_item(current_user_roles, library)
        else:
            # Admins can do everything.
            library_dict['can_user_add'] = True
            library_dict['can_user_modify'] = True
            library_dict['can_user_manage'] = True
        return library_dict

    def get_current_roles(self, trans, library):
        """
        Load all permissions currently related to the given library.

        :param  library:      the model object
        :type   library:      galaxy.model.Library

        :rtype:     dictionary
        :returns:   dict of current roles for all available permission types
        """
        access_library_role_list = [(access_role.name, trans.security.encode_id(access_role.id)) for access_role in self.get_access_roles(trans, library)]
        modify_library_role_list = [(modify_role.name, trans.security.encode_id(modify_role.id)) for modify_role in self.get_modify_roles(trans, library)]
        manage_library_role_list = [(manage_role.name, trans.security.encode_id(manage_role.id)) for manage_role in self.get_manage_roles(trans, library)]
        add_library_item_role_list = [(add_role.name, trans.security.encode_id(add_role.id)) for add_role in self.get_add_roles(trans, library)]
        return dict(access_library_role_list=access_library_role_list,
                    modify_library_role_list=modify_library_role_list,
                    manage_library_role_list=manage_library_role_list,
                    add_library_item_role_list=add_library_item_role_list)

    def get_access_roles(self, trans, library):
        """
        Load access roles for all library permissions
        """
        return set(library.get_access_roles(trans.app.security_agent))

    def get_modify_roles(self, trans, library):
        """
        Load modify roles for all library permissions
        """
        return set(trans.app.security_agent.get_roles_for_action(library, trans.app.security_agent.permitted_actions.LIBRARY_MODIFY))

    def get_manage_roles(self, trans, library):
        """
        Load manage roles for all library permissions
        """
        return set(trans.app.security_agent.get_roles_for_action(library, trans.app.security_agent.permitted_actions.LIBRARY_MANAGE))

    def get_add_roles(self, trans, library):
        """
        Load add roles for all library permissions
        """
        return set(trans.app.security_agent.get_roles_for_action(library, trans.app.security_agent.permitted_actions.LIBRARY_ADD))

    def set_permission_roles(self, trans, library, access_roles, modify_roles, manage_roles, add_roles):
        """
        Set permissions on the given library.
""" def make_public(self, trans, library): """ Makes the given library public (removes all access roles) """ trans.app.security_agent.make_library_public(library) return self.is_public(trans, library) def is_public(self, trans, library): """ Return true if lib is public. """ return trans.app.security_agent.library_is_public(library) def get_containing_library_from_library_dataset(trans, library_dataset): """Given a library_dataset, get the containing library""" folder = library_dataset.folder while folder.parent: folder = folder.parent # We have folder set to the library's root folder, which has the same name as the library for library in trans.sa_session.query(trans.model.Library).filter( and_(trans.model.Library.table.c.deleted == false(), trans.model.Library.table.c.name == folder.name)): # Just to double-check if library.root_folder == folder: return library return None class LibrariesManager: """ Interface/service object for sharing logic between controllers. """ def __init__( self, folder_manager: folders.FolderManager, library_manager: LibraryManager, role_manager: roles.RoleManager, ): self.folder_manager = folder_manager self.library_manager = library_manager self.role_manager = role_manager def index(self, trans: ProvidesAppContext, deleted: Optional[bool] = False) -> List[Any]: """Returns a list of summary data for all libraries. :param deleted: if True, show only ``deleted`` libraries, if False show only ``non-deleted`` :type deleted: boolean (optional) :returns: list of dictionaries containing library information :rtype: list .. seealso:: :attr:`galaxy.model.Library.dict_collection_visible_keys` """ query, prefetched_ids = self.library_manager.list(trans, deleted) libraries = [] for library in query: libraries.append(self.library_manager.get_library_dict(trans, library, prefetched_ids)) return libraries def show(self, trans, id: EncodedDatabaseIdField): """ Returns detailed information about a library. 
:param id: the encoded id of the library :type id: an encoded id string :param deleted: if True, allow information on a ``deleted`` library :type deleted: boolean :returns: detailed library information :rtype: dict .. seealso:: :attr:`galaxy.model.Library.dict_element_visible_keys` :raises: MalformedId, ObjectNotFound """ library = self.library_manager.get(trans, self.__decode_id(trans, id, 'library')) library_dict = self.library_manager.get_library_dict(trans, library) return library_dict def create(self, trans, payload: Dict[str, str]): """Creates a new library. .. note:: Currently, only admin users can create libraries. :param payload: dictionary structure containing:: :param name: (required) the new library's name :type name: str :param description: the new library's description :type description: str :param synopsis: the new library's synopsis :type synopsis: str :type payload: dict :returns: detailed library information :rtype: dict :raises: RequestParameterMissingException """ name = payload.get('name', None) if not name: raise exceptions.RequestParameterMissingException("Missing required parameter 'name'.") description = payload.get('description', '') synopsis = payload.get('synopsis', '') if synopsis in ['None', None]: synopsis = '' library = self.library_manager.create(trans, name, description, synopsis) library_dict = self.library_manager.get_library_dict(trans, library) return library_dict def update(self, trans, id: EncodedDatabaseIdField, payload: Dict[str, str]): """Updates the library defined by an ``encoded_id`` with the data in the payload. .. note:: Currently, only admin users can update libraries. Also the library must not be `deleted`. 
        :param  id: the encoded id of the library
        :type   id: an encoded id string
        :param  payload: dictionary structure containing::
            :param name:        new library's name, cannot be empty
            :type  name:        str
            :param description: new library's description
            :type  description: str
            :param synopsis:    new library's synopsis
            :type  synopsis:    str
        :type   payload: dict

        :returns:   detailed library information
        :rtype:     dict

        :raises: RequestParameterMissingException
        """
        library = self.library_manager.get(trans, self.__decode_id(trans, id, 'library'))
        name = payload.get('name', None)
        # Absent name means "leave unchanged"; an explicit empty string is an error.
        if name == '':
            raise exceptions.RequestParameterMissingException("Parameter 'name' of library is required. You cannot remove it.")
        description = payload.get('description', None)
        synopsis = payload.get('synopsis', None)
        updated_library = self.library_manager.update(trans, library, name, description, synopsis)
        library_dict = self.library_manager.get_library_dict(trans, updated_library)
        return library_dict

    def delete(self, trans, id: EncodedDatabaseIdField, undelete: Optional[bool] = False):
        """Marks the library with the given ``id`` as `deleted` (or removes the `deleted` mark if the `undelete` param is true)

        .. note:: Currently, only admin users can un/delete libraries.

        :param  id:     the encoded id of the library to un/delete
        :type   id:     an encoded id string
        :param  undelete:    (optional) flag specifying whether the item should be deleted or undeleted, defaults to false:
        :type   undelete:    bool

        :returns:   detailed library information
        :rtype:     dictionary

        .. seealso:: :attr:`galaxy.model.Library.dict_element_visible_keys`
        """
        library = self.library_manager.get(trans, self.__decode_id(trans, id, 'library'))
        library = self.library_manager.delete(trans, library, undelete)
        library_dict = self.library_manager.get_library_dict(trans, library)
        return library_dict

    def get_permissions(
        self,
        trans,
        id: EncodedDatabaseIdField,
        scope: Optional[str] = 'current',
        is_library_access: Optional[bool] = False,
        page: Optional[int] = 1,
        page_limit: Optional[int] = 10,
        query: Optional[str] = None,
    ):
        """Load all permissions for the given library id and return it.

        :param  id: the encoded id of the library
        :type   id: an encoded id string
        :param  scope: either 'current' or 'available'
        :type   scope: string
        :param  is_library_access: indicates whether the roles available for the library access are requested
        :type   is_library_access: bool

        :returns:   dictionary with all applicable permissions' values
        :rtype:     dictionary

        :raises: InsufficientPermissionsException
        """
        current_user_roles = trans.get_current_user_roles()
        is_admin = trans.user_is_admin
        library = self.library_manager.get(trans, self.__decode_id(trans, id, 'library'))
        # Only admins or users with manage permission may inspect permissions.
        if not (is_admin or trans.app.security_agent.can_manage_library_item(current_user_roles, library)):
            raise exceptions.InsufficientPermissionsException('You do not have proper permission to access permissions of this library.')
        if scope == 'current' or scope is None:
            roles = self.library_manager.get_current_roles(trans, library)
            return roles
        # Return roles that are available to select.
elif scope == 'available': roles, total_roles = trans.app.security_agent.get_valid_roles(trans, library, query, page, page_limit, is_library_access) return_roles = [] for role in roles: role_id = trans.security.encode_id(role.id) return_roles.append(dict(id=role_id, name=role.name, type=role.type)) return dict(roles=return_roles, page=page, page_limit=page_limit, total=total_roles) else: raise exceptions.RequestParameterInvalidException("The value of 'scope' parameter is invalid. Alllowed values: current, available") def set_permissions(self, trans, id: EncodedDatabaseIdField, payload: Dict[str, Any]): """Set permissions of the given library to the given role ids. :param id: the encoded id of the library to set the permissions of :type id: an encoded id string :param payload: dictionary structure containing: :param action: (required) describes what action should be performed available actions: remove_restrictions, set_permissions :type action: str :param access_ids[]: list of Role.id defining roles that should have access permission on the library :type access_ids[]: string or list :param add_ids[]: list of Role.id defining roles that should have add item permission on the library :type add_ids[]: string or list :param manage_ids[]: list of Role.id defining roles that should have manage permission on the library :type manage_ids[]: string or list :param modify_ids[]: list of Role.id defining roles that should have modify permission on the library :type modify_ids[]: string or list :type: dictionary :returns: dict of current roles for all available permission types :rtype: dictionary :raises: RequestParameterInvalidException, InsufficientPermissionsException, InternalServerError RequestParameterMissingException """ is_admin = trans.user_is_admin current_user_roles = trans.get_current_user_roles() library = self.library_manager.get(trans, self.__decode_id(trans, id, 'library')) if not (is_admin or trans.app.security_agent.can_manage_library_item(current_user_roles, 
library)): raise exceptions.InsufficientPermissionsException('You do not have proper permission to modify permissions of this library.') new_access_roles_ids = util.listify(payload.get('access_ids[]', None)) new_add_roles_ids = util.listify(payload.get('add_ids[]', None)) new_manage_roles_ids = util.listify(payload.get('manage_ids[]', None)) new_modify_roles_ids = util.listify(payload.get('modify_ids[]', None)) action = payload.get('action', None) if action is None: if payload is not None: return self.set_permissions_old(trans, library, payload) else: raise exceptions.RequestParameterMissingException('The mandatory parameter "action" is missing.') elif action == 'remove_restrictions': is_public = self.library_manager.make_public(trans, library) if not is_public: raise exceptions.InternalServerError('An error occurred while making library public.') elif action == 'set_permissions': # ACCESS LIBRARY ROLES valid_access_roles = [] invalid_access_roles_names = [] for role_id in new_access_roles_ids: role = self.role_manager.get(trans, self.__decode_id(trans, role_id, 'role')) valid_roles, total_roles = trans.app.security_agent.get_valid_roles(trans, library, is_library_access=True) if role in valid_roles: valid_access_roles.append(role) else: invalid_access_roles_names.append(role_id) if len(invalid_access_roles_names) > 0: log.warning("The following roles could not be added to the library access permission: " + str(invalid_access_roles_names)) # ADD TO LIBRARY ROLES valid_add_roles = [] invalid_add_roles_names = [] for role_id in new_add_roles_ids: role = self.role_manager.get(trans, self.__decode_id(trans, role_id, 'role')) valid_roles, total_roles = trans.app.security_agent.get_valid_roles(trans, library) if role in valid_roles: valid_add_roles.append(role) else: invalid_add_roles_names.append(role_id) if len(invalid_add_roles_names) > 0: log.warning("The following roles could not be added to the add library item permission: " + str(invalid_add_roles_names)) # MANAGE 
LIBRARY ROLES valid_manage_roles = [] invalid_manage_roles_names = [] for role_id in new_manage_roles_ids: role = self.role_manager.get(trans, self.__decode_id(trans, role_id, 'role')) valid_roles, total_roles = trans.app.security_agent.get_valid_roles(trans, library) if role in valid_roles: valid_manage_roles.append(role) else: invalid_manage_roles_names.append(role_id) if len(invalid_manage_roles_names) > 0: log.warning("The following roles could not be added to the manage library permission: " + str(invalid_manage_roles_names)) # MODIFY LIBRARY ROLES valid_modify_roles = [] invalid_modify_roles_names = [] for role_id in new_modify_roles_ids: role = self.role_manager.get(trans, self.__decode_id(trans, role_id, 'role')) valid_roles, total_roles = trans.app.security_agent.get_valid_roles(trans, library) if role in valid_roles: valid_modify_roles.append(role) else: invalid_modify_roles_names.append(role_id) if len(invalid_modify_roles_names) > 0: log.warning("The following roles could not be added to the modify library permission: " + str(invalid_modify_roles_names)) permissions = {trans.app.security_agent.permitted_actions.LIBRARY_ACCESS: valid_access_roles} permissions.update({trans.app.security_agent.permitted_actions.LIBRARY_ADD: valid_add_roles}) permissions.update({trans.app.security_agent.permitted_actions.LIBRARY_MANAGE: valid_manage_roles}) permissions.update({trans.app.security_agent.permitted_actions.LIBRARY_MODIFY: valid_modify_roles}) trans.app.security_agent.set_all_library_permissions(trans, library, permissions) trans.sa_session.refresh(library) # Copy the permissions to the root folder trans.app.security_agent.copy_library_permissions(trans, library, library.root_folder) else: raise exceptions.RequestParameterInvalidException('The mandatory parameter "action" has an invalid value.' 
'Allowed values are: "remove_restrictions", set_permissions"') roles = self.library_manager.get_current_roles(trans, library) return roles def set_permissions_old(self, trans, library, payload): """ *** old implementation for backward compatibility *** Updates the library permissions. """ params = util.Params(payload) permissions = {} for k, v in trans.app.model.Library.permitted_actions.items(): role_params = params.get(k + '_in', []) in_roles = [trans.sa_session.query(trans.app.model.Role).get(trans.security.decode_id(x)) for x in util.listify(role_params)] permissions[trans.app.security_agent.get_action(v.action)] = in_roles trans.app.security_agent.set_all_library_permissions(trans, library, permissions) trans.sa_session.refresh(library) # Copy the permissions to the root folder trans.app.security_agent.copy_library_permissions(trans, library, library.root_folder) item = library.to_dict(view='element', value_mapper={'id': trans.security.encode_id, 'root_folder_id': trans.security.encode_id}) return item def __decode_id( self, trans: ProvidesAppContext, encoded_id, object_name: Optional[str] = None, ): """ Try to decode the id. :param object_name: Name of the object the id belongs to. (optional) :type object_name: str """ try: return trans.security.decode_id(encoded_id) except TypeError: raise exceptions.MalformedId(f"Malformed {object_name if object_name is not None else ''} id specified, unable to decode.") except ValueError: raise exceptions.MalformedId(f"Wrong {object_name if object_name is not None else ''} id specified, unable to decode.")
import curses
import sys
import time
import subprocess
import select
import collections
import datetime
import apache_log_parser
from apache_log_parser import LineDoesntMatchException
from src import logreporter


class HttpLogParser:

    def c_main(stdscr: 'curses._CursesWindow', log_file, alert_window, alert_threshold, stat_window):
        """Curses main loop: tail *log_file*, maintain rolling stats, and render.

        c_main is a naming convention indicating this is a curses main.

        :param stdscr: curses screen handle supplied by curses.wrapper
        :param log_file: path of the access log to follow
        :param alert_window: seconds of history used for alert evaluation
        :param alert_threshold: requests/second that triggers an alert
        :param stat_window: seconds per statistics reporting window
        """
        # --- tail subprocess and log parser setup ---
        # Using `tail -F` limits this application to *nix systems that ship tail.
        tail_sub_process = subprocess.Popen(['tail', '-F', log_file], stdout=subprocess.PIPE)
        polling = select.poll()
        polling.register(tail_sub_process.stdout)
        # TODO: refactor log parser into LogReporter
        log_parser = apache_log_parser.make_parser('%h %u %l %t "%r" %s %B')

        # Retain alert_window + stat_window seconds of logs: always enough data
        # for both queries, without unbounded memory growth.
        reporter = logreporter.LogReporter(alert_window + stat_window)
        stats = {}  # initialize stats reporting object

        # --- curses display setup ---
        size_of_top_data = 8
        # -1 keeps the terminal's bottom line free and prevents a curses exception
        number_of_logs_to_show = curses.LINES - size_of_top_data - 1
        display_log_queue = collections.deque([], number_of_logs_to_show)
        current_state = 'Nominal'
        alert_string = ''
        recovery_string = ''
        current_window = ''
        stdscr.nodelay(True)  # prevent blocking input

        # --- main process loop ---
        while True:
            now = datetime.datetime.now()
            timestamp = now.strftime('%H:%M:%S')
            if now.second % stat_window == 0:  # only process new stats per time window
                current_window = timestamp + ' - ' + (now - datetime.timedelta(seconds=stat_window)).strftime('%H:%M:%S')
                stats = reporter.getStatsForWindow(stat_window)
                # TODO: explore option of alert reporting in every loop for
                # faster notification of alert
                # alertstate is either [False, 0] or [True, requests/second]
                alertstate = reporter.isInAlertState(alert_window, alert_threshold)
                if alertstate[0]:
                    if current_state != 'ALERT':
                        # BUGFIX: the original nested double quotes inside a
                        # double-quoted f-string ({now.strftime("%H:%M:%S")}),
                        # a SyntaxError on every Python before 3.12.
                        alert_string = f" High traffic generated an alert - hits = {alertstate[1]}, triggered at {timestamp}"
                        current_state = 'ALERT'
                        recovery_string = ''
                elif current_state == 'ALERT':
                    # Alert condition just cleared; show RECOVERED for one window.
                    current_state = "RECOVERED"
                    recovery_string = f" recovered at {timestamp}"
                    alert_string = ''
                else:
                    current_state = 'Nominal'
            # NOTE(review): placement of prune_logs relative to the stats
            # window block reconstructed from flattened source -- confirm.
            reporter.prune_logs()

            # --- render data ---
            stdscr.clear()
            title = "HTTP Log monitor - press any key to exit"
            title_location = int((curses.COLS / 2) - (len(title) / 2))
            stdscr.addstr(0, title_location, title)
            stdscr.addstr(1, 0, f"Stats for window: {current_window}")
            stdscr.addstr(2, 0, f"STATUS: {current_state}")
            if current_state == 'ALERT':
                stdscr.addstr(2, 14, alert_string)
            if current_state == 'RECOVERED':
                stdscr.addstr(2, 18, recovery_string)
            line_index = 3
            for key in stats:
                name = key.replace("_", " ")
                display_string = f"{name}: {stats[key]}"
                stdscr.addstr(line_index, 0, display_string[:curses.COLS])
                line_index += 1
            for idx, log in enumerate(display_log_queue):
                stdscr.addstr(idx + size_of_top_data, 0, log[:curses.COLS])
            stdscr.refresh()

            # --- ingest new data ---
            while polling.poll(1):
                try:
                    # BUGFIX: readline() yields bytes; the original passed
                    # str(bytes) to the parser, which prepends "b'...'" to the
                    # line. Decode instead.
                    log_line = tail_sub_process.stdout.readline().decode('utf-8', errors='replace')
                    display_log_queue.append(log_line)
                    reporter.add_log(log_parser(log_line))
                except LineDoesntMatchException:
                    print(f"log found that did not match parsing: {log_line}", file=sys.stderr)

            # Sleep reduces system impact and also prevents the stats branch
            # from firing more than once within the same second.
            time.sleep(1)
            if stdscr.getch() != curses.ERR:
                # user input detected, exit
                break
import curses
import sys
import time
import subprocess
import select
import collections
import datetime
import apache_log_parser
from apache_log_parser import LineDoesntMatchException
from src import logreporter


class HttpLogParser:

    # NOTE(review): c_main takes no self/cls -- presumably invoked as a plain
    # static entry point (e.g. via curses.wrapper); confirm against callers.
    def c_main(stdscr: 'curses._CursesWindow', log_file, alert_window, alert_threshold, stat_window):
        """Curses main loop: tail *log_file*, maintain rolling stats, and render.

        c_main is a naming convention indicating this is a curses main.
        """
        # --- tail subprocess and log parser setup ---
        # The use of tail will limit this application to only working on *nix
        # systems with the tail command.
        tail_sub_process = subprocess.Popen(['tail', '-F', log_file], stdout=subprocess.PIPE)
        polling = select.poll()
        polling.register(tail_sub_process.stdout)
        # TODO: refactor log parser into LogReporter
        log_parser = apache_log_parser.make_parser('%h %u %l %t "%r" %s %B')

        # Retain alert_window + stat_window seconds of logs to ensure we always
        # have the data we need, without introducing a memory leak.
        reporter = logreporter.LogReporter(alert_window + stat_window)
        stats = {}  # initialize stats reporting object

        # --- curses display setup ---
        size_of_top_data = 8
        # -1 required to keep the bottom line of the terminal free and prevent
        # a curses exception
        number_of_logs_to_show = curses.LINES - size_of_top_data - 1
        display_log_queue = collections.deque([], number_of_logs_to_show)
        current_state = 'Nominal'
        alert_string = ''
        recovery_string = ''
        current_window = ''
        stdscr.nodelay(True)  # prevent blocking input

        # --- main process loop ---
        while True:
            now = datetime.datetime.now()
            if now.second % stat_window == 0:  # only process new stats per time window
                current_window = now.strftime("%H:%M:%S") + ' - ' + (now - datetime.timedelta(seconds=stat_window)).strftime('%H:%M:%S')
                stats = reporter.getStatsForWindow(stat_window)
                # TODO: explore option of alert reporting in every loop for
                # faster notification of alert
                # alertstate will be either [False, 0] or [True, requests/second]
                alertstate = reporter.isInAlertState(alert_window, alert_threshold)
                if alertstate[0]:
                    if current_state != 'ALERT':
                        alert_string = f" High traffic generated an alert - hits = {alertstate[1]}, triggered at {now.strftime('%H:%M:%S')}"
                        current_state = 'ALERT'
                        recovery_string = ''
                elif current_state == 'ALERT':
                    # Alert condition cleared; RECOVERED is shown for one window
                    # before falling back to Nominal.
                    current_state = "RECOVERED"
                    recovery_string = f" recovered at {now.strftime('%H:%M:%S')}"
                    alert_string = ''
                else:
                    current_state = 'Nominal'
            # NOTE(review): placement relative to the stats-window block is
            # reconstructed from flattened source -- confirm.
            reporter.prune_logs()

            # --- render data ---
            stdscr.clear()
            title = f"HTTP Log monitor - press any key to exit"
            title_location = int((curses.COLS / 2) - (len(title) / 2))
            stdscr.addstr(0, title_location, title)
            stdscr.addstr(1, 0, f"Stats for window: {current_window}")
            stdscr.addstr(2, 0, f"STATUS: {current_state}")
            if current_state == 'ALERT':
                stdscr.addstr(2, 14, alert_string)
            if current_state == 'RECOVERED':
                stdscr.addstr(2, 18, recovery_string)
            line_index = 3
            for key in stats:
                name = key.replace("_", " ")
                display_string = f"{name}: {stats[key]}"
                stdscr.addstr(line_index, 0, display_string[:curses.COLS])
                line_index += 1
            for idx, log in enumerate(display_log_queue):
                stdscr.addstr(idx + size_of_top_data, 0, log[:curses.COLS])
            stdscr.refresh()

            # --- ingest new data ---
            while polling.poll(1):
                try:
                    # NOTE(review): readline() returns bytes and str(log_line)
                    # yields "b'...'"; confirm the parser tolerates the prefix.
                    log_line = tail_sub_process.stdout.readline()
                    display_log_queue.append(log_line)
                    reporter.add_log(log_parser(str(log_line)))
                except LineDoesntMatchException:
                    print(f"log found that did not match parsing: {log_line}", file=sys.stderr)
                    pass

            # Sleep not only reduces system impact but also prevents stats
            # processing from happening more than once within 1s.
            time.sleep(1)
            if stdscr.getch() != curses.ERR:
                # user input detected, exit
                break
""" Container, Well, WellGroup objects and associated functions :copyright: 2020 by The Autoprotocol Development Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details """ import json import warnings from .constants import SBS_FORMAT_SHAPES from .unit import Unit SEAL_TYPES = ["ultra-clear", "foil", "breathable"] COVER_TYPES = ["standard", "low_evaporation", "universal"] class Well(object): """ A Well object describes a single location within a container. Do not construct a Well directly -- retrieve it from the related Container object. Parameters ---------- container : Container The Container this well belongs to. index : int The index of this well within the container. """ def __init__(self, container, index): self.container = container self.index = index self.volume = None self.name = None self.properties = {} @staticmethod def validate_properties(properties): if not isinstance(properties, dict): raise TypeError( f"Aliquot properties {properties} are of type " f"{type(properties)}, they should be a `dict`." ) for key, value in properties.items(): if not isinstance(key, str): raise TypeError( f"Aliquot property {key} : {value} has a key of type " f"{type(key)}, it should be a 'str'." ) try: json.dumps(value) except TypeError: raise TypeError( f"Aliquot property {key} : {value} has a value of type " f"{type(value)}, that isn't JSON serializable." ) def set_properties(self, properties): """ Set properties for a Well. Existing property dictionary will be completely overwritten with the new dictionary. Parameters ---------- properties : dict Custom properties for a Well in dictionary form. Returns ------- Well Well with modified properties """ self.validate_properties(properties) self.properties = properties.copy() return self def add_properties(self, properties): """ Add properties to the properties attribute of a Well. 
If any property with the same key already exists for the Well then: - if both old and new properties are lists then append the new property - otherwise overwrite the old property with the new one Parameters ---------- properties : dict Dictionary of properties to add to a Well. Returns ------- Well Well with modified properties """ self.validate_properties(properties) for key, value in properties.items(): if key in self.properties: values_are_lists = all( isinstance(_, list) for _ in [value, self.properties[key]] ) if values_are_lists: self.properties[key].extend(value) else: message = f"Overwriting existing property {key} for {self}" warnings.warn(message=message) self.properties[key] = value else: self.properties[key] = value return self def set_volume(self, vol): """ Set the theoretical volume of liquid in a Well. Parameters ---------- vol : str, Unit Theoretical volume to indicate for a Well. Returns ------- Well Well with modified volume Raises ------ TypeError Incorrect input-type given ValueError Volume set exceeds maximum well volume """ if not isinstance(vol, str) and not isinstance(vol, Unit): raise TypeError( f"Volume {vol} is of type {type(vol)}, it should be either " f"'str' or 'Unit'." ) v = Unit(vol) max_vol = self.container.container_type.true_max_vol_ul if v > max_vol: raise ValueError( f"Theoretical volume {v} to be set exceeds maximum well " f"volume {max_vol}." ) self.volume = v return self def set_name(self, name): """ Set a name for this well for it to be included in a protocol's "outs" section Parameters ---------- name : str Well name. Returns ------- Well Well with modified name """ self.name = name return self def humanize(self): """ Return the human readable representation of the integer well index given based on the ContainerType of the Well. Uses the humanize function from the ContainerType class. Refer to `ContainerType.humanize()` for more information. 
        Returns
        -------
        str
            Index of well in Container (in human readable form)
        """
        return self.container.humanize(self.index)

    def available_volume(self):
        """
        Returns the available volume of a Well.
        This is calculated as nominal volume - container_type dead volume

        Returns
        -------
        Unit(volume)
            Volume in well

        Raises
        ------
        RuntimeError
            Well has no volume
        """
        if self.volume is None:
            raise RuntimeError(f"well {self} has no volume")
        return self.volume - self.container.container_type.dead_volume_ul

    def __repr__(self):
        """
        Return a string representation of a Well.
        """
        # Adjacent f-strings concatenate into a single literal.
        return f"Well({str(self.container)}, {str(self.index)}, " f"{str(self.volume)})"


class WellGroup(object):
    """
    A logical grouping of Wells.

    Wells in a WellGroup do not necessarily need to be in the same container.

    Parameters
    ----------
    wells : list
        List of Well objects contained in this WellGroup.

    Raises
    ------
    TypeError
        Wells is not of the right input type
    """

    def __init__(self, wells):
        # Normalize the accepted input shapes to a plain list of Wells.
        if isinstance(wells, Well):
            wells = [wells]
        elif isinstance(wells, WellGroup):
            wells = wells.wells
        elif isinstance(wells, list):
            if not all(isinstance(well, Well) for well in wells):
                raise TypeError("All elements in list must be wells")
        else:
            raise TypeError("Wells must be Well, list of wells, WellGroup.")
        self.wells = wells
        self.name = None

    # NOTE(review): defining __eq__ without __hash__ makes WellGroup
    # unhashable; confirm WellGroups are never used as dict keys / in sets.
    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        else:
            return False

    def set_properties(self, properties):
        """
        Set the same properties for each Well in a WellGroup.

        Parameters
        ----------
        properties : dict
            Dictionary of properties to set on Well(s).

        Returns
        -------
        WellGroup
            WellGroup with modified properties
        """
        for w in self.wells:
            w.set_properties(properties)
        return self

    def add_properties(self, properties):
        """
        Add the same properties for each Well in a WellGroup.

        Parameters
        ----------
        properties : dict
            Dictionary of properties to set on Well(s).

        Returns
        -------
        WellGroup
            WellGroup with modified properties
        """
        for w in self.wells:
            w.add_properties(properties)
        return self

    def set_volume(self, vol):
        """
        Set the volume of every well in the group to vol.

        Parameters
        ----------
        vol : Unit, str
            Theoretical volume of each well in the WellGroup.

        Returns
        -------
        WellGroup
            WellGroup with modified volume
        """
        for w in self.wells:
            w.set_volume(vol)
        return self

    def indices(self):
        """
        Return the indices of the wells in the group in human-readable form,
        given that all of the wells belong to the same container.

        Returns
        -------
        list(str)
            List of humanized indices from this WellGroup
        """
        indices = []
        for w in self.wells:
            # NOTE(review): assert is stripped under `python -O`; confirm this
            # same-container invariant is enforced elsewhere too.
            assert w.container == self.wells[0].container, (
                "All wells in WellGroup must belong to the same container to "
                "get their indices."
            )
            indices.append(w.humanize())
        return indices

    def append(self, other):
        """
        Append another well to this WellGroup.

        Parameters
        ----------
        other : Well
            Well to append to this WellGroup.

        Returns
        -------
        WellGroup
            WellGroup with appended well

        Raises
        ------
        TypeError
            other is not of type Well
        """
        if not isinstance(other, Well):
            raise TypeError("Input given is not of type 'Well'.")
        else:
            # NOTE(review): list.append returns None, so this returns None
            # rather than the WellGroup the docstring promises -- confirm.
            return self.wells.append(other)

    def extend(self, other):
        """
        Extend this WellGroup with another WellGroup.

        Parameters
        ----------
        other : WellGroup or list of Wells
            WellGroup to extend this WellGroup.

        Returns
        -------
        WellGroup
            WellGroup extended with specified WellGroup

        Raises
        ------
        TypeError
            Input WellGroup is not of the right type
        """
        if not isinstance(other, (WellGroup, list)):
            raise TypeError("Input given is not of type 'WellGroup' or " "'list'.")
        else:
            if not all(isinstance(well, Well) for well in other):
                raise TypeError("Input given is not of type 'Well'.")
            # NOTE(review): list.extend returns None (see append above).
            return self.wells.extend(WellGroup(other).wells)

    def set_group_name(self, name):
        """
        Assigns a name to a WellGroup.
Parameters ---------- name: str WellGroup name Returns ------- str Name of wellgroup """ self.name = name return self def wells_with(self, prop, val=None): """ Returns a wellgroup of wells with the specified property and value Parameters ---------- prop: str the property you are searching for val: str, optional the value assigned to the property Returns ------- WellGroup WellGroup with modified properties Raises ------ TypeError property or value defined does not have right input type """ if not isinstance(prop, str): raise TypeError(f"property is not a string: {prop!r}") if val is not None: return WellGroup( [ w for w in self.wells if prop in w.properties and w.properties[prop] is val ] ) else: return WellGroup([w for w in self.wells if prop in w.properties]) def pop(self, index=-1): """ Removes and returns the last well in the wellgroup, unless an index is specified. If index is specified, the well at that index is removed from the wellgroup and returned. Parameters ---------- index: int, optional the index of the well you want to remove and return Returns ------- Well Well with selected index from WellGroup """ return self.wells.pop(index) def insert(self, i, well): """ Insert a well at a given position. Parameters ---------- i : int index to insert the well at well : Well insert this well at the index Returns ------- WellGroup WellGroup with inserted wells Raises ------ TypeError index or well defined does not have right input type """ if not isinstance(i, int): raise TypeError("Input given is not of type 'Int'") if not isinstance(well, Well): raise TypeError("Input given is not of type 'Well'") if i >= len(self.wells): return self.wells.append(well) else: self.wells = self.wells[:i] + [well] + self.wells[i:] return self.wells def __setitem__(self, key, item): """ Set a specific Well in a WellGroup. Parameters ---------- key : int Position in a WellGroup in robotized form. 
item: Well Well or WellGroup to be added Raises ------ TypeError Item specified is not of type `Well` """ if not isinstance(item, Well): raise TypeError("Input given is not of type 'Well'.") self.wells[key] = item def __getitem__(self, key): """ Return a specific Well from a WellGroup. Parameters ---------- key : int Position in a WellGroup in robotized form. Returns ------- Well Specified well from given key """ return self.wells[key] def __len__(self): """ Return the number of Wells in a WellGroup. """ return len(self.wells) def __repr__(self): """ Return a string representation of a WellGroup. """ return "WellGroup(%s)" % (str(self.wells)) def __add__(self, other): """ Append a Well or Wells from another WellGroup to this WellGroup. Parameters ---------- other : Well, WellGroup. Returns ------- WellGroup WellGroup with appended wells Raises ------ TypeError Input given is not of type Well or WellGroup """ if not isinstance(other, (Well, WellGroup)): raise TypeError("You can only add a Well or WellGroups " "together.") if isinstance(other, Well): return WellGroup(self.wells + [other]) else: return WellGroup(self.wells + other.wells) # pylint: disable=redefined-builtin class Container(object): """ A reference to a specific physical container (e.g. a tube or 96-well microplate). Every Container has an associated ContainerType, which defines the well count and arrangement, amongst other properties. There are several methods on Container which present a convenient interface for defining subsets of wells on which to operate. These methods return a WellGroup. Containers are usually declared using the Protocol.ref method. Parameters ---------- id : str, optional Alphanumerical identifier for a Container. container_type : ContainerType ContainerType associated with a Container. name : str, optional name of the container/ref being created. storage : str, optional name of the storage condition. cover : str, optional name of the cover on the container. 
Raises ------ AttributeError Invalid cover-type given """ def __init__(self, id, container_type, name=None, storage=None, cover=None): self.name = name self.id = id self.container_type = container_type self.storage = storage self.cover = cover self._wells = [Well(self, idx) for idx in range(container_type.well_count)] if self.cover and not (self.is_covered() or self.is_sealed()): raise AttributeError(f"{cover} is not a valid seal or cover type.") def well(self, i): """ Return a Well object representing the well at the index specified of this Container. Parameters ---------- i : int, str Well reference in the form of an integer (ex: 0) or human-readable string (ex: "A1"). Returns ------- Well Well for given reference Raises ------ TypeError index given is not of the right type """ if not isinstance(i, (int, str)): raise TypeError("Well reference given is not of type 'int' or " "'str'.") return self._wells[self.robotize(i)] def well_from_coordinates(self, row, column): """ Gets the well at 0-indexed position (row, column) within the container. The origin is in the top left corner. Parameters ---------- row : int The 0-indexed row index of the well to be fetched column : int The 0-indexed column index of the well to be fetched Returns ------- Well The well at position (row, column) """ return self.well( self.container_type.well_from_coordinates(row=row, column=column) ) def tube(self): """ Checks if container is tube and returns a Well representing the zeroth well. Returns ------- Well Zeroth well of tube Raises ------- AttributeError If container is not tube """ if self.container_type.is_tube: return self.well(0) else: raise AttributeError( f"{self} is a {self.container_type.shortname} " f"and is not a tube" ) def wells(self, *args): """ Return a WellGroup containing references to wells corresponding to the index or indices given. Parameters ---------- args : str, int, list Reference or list of references to a well index either as an integer or a string. 
Returns ------- WellGroup Wells from specified references Raises ------ TypeError Well reference is not of a valid input type """ if isinstance(args[0], list): wells = args[0] else: wells = [args[0]] for a in args[1:]: if isinstance(a, list): wells.extend(a) else: wells.extend([a]) for w in wells: if not isinstance(w, (str, int, list)): raise TypeError( "Well reference given is not of type" " 'int', 'str' or 'list'." ) return WellGroup([self.well(w) for w in wells]) def robotize(self, well_ref): """ Return the integer representation of the well index given, based on the ContainerType of the Container. Uses the robotize function from the ContainerType class. Refer to `ContainerType.robotize()` for more information. """ if not isinstance(well_ref, (str, int, Well, list)): raise TypeError( "Well reference given is not of type 'str' " "'int', 'Well' or 'list'." ) return self.container_type.robotize(well_ref) def humanize(self, well_ref): """ Return the human readable representation of the integer well index given based on the ContainerType of the Container. Uses the humanize function from the ContainerType class. Refer to `ContainerType.humanize()` for more information. """ if not isinstance(well_ref, (int, str, list)): raise TypeError( "Well reference given is not of type 'int'," "'str' or 'list'." ) return self.container_type.humanize(well_ref) def decompose(self, well_ref): """ Return a tuple representing the column and row number of the well index given based on the ContainerType of the Container. Uses the decompose function from the ContainerType class. Refer to `ContainerType.decompose()` for more information. """ if not isinstance(well_ref, (int, str, Well)): raise TypeError( "Well reference given is not of type 'int', " "'str' or Well." ) return self.container_type.decompose(well_ref) def all_wells(self, columnwise=False): """ Return a WellGroup representing all Wells belonging to this Container. 
Parameters ---------- columnwise : bool, optional returns the WellGroup columnwise instead of rowwise (ordered by well index). Returns ------- WellGroup WellGroup of all Wells in Container """ if columnwise: num_cols = self.container_type.col_count num_rows = self.container_type.well_count // num_cols return WellGroup( [ self._wells[row * num_cols + col] for col in range(num_cols) for row in range(num_rows) ] ) else: return WellGroup(self._wells) def inner_wells(self, columnwise=False): """ Return a WellGroup of all wells on a plate excluding wells in the top and bottom rows and in the first and last columns. Parameters ---------- columnwise : bool, optional returns the WellGroup columnwise instead of rowwise (ordered by well index). Returns ------- WellGroup WellGroup of inner wells """ num_cols = self.container_type.col_count num_rows = self.container_type.row_count() inner_wells = [] if columnwise: for c in range(1, num_cols - 1): wells = [] for r in range(1, num_rows - 1): wells.append((r * num_cols) + c) inner_wells.extend(wells) else: well = num_cols for _ in range(1, num_rows - 1): inner_wells.extend(range(well + 1, well + (num_cols - 1))) well += num_cols inner_wells = [self._wells[x] for x in inner_wells] return WellGroup(inner_wells) def wells_from(self, start, num, columnwise=False): """ Return a WellGroup of Wells belonging to this Container starting from the index indicated (in integer or string form) and including the number of proceeding wells specified. Wells are counted from the starting well rowwise unless columnwise is True. Parameters ---------- start : Well or int or str Starting well specified as a Well object, a human-readable well index or an integer well index. num : int Number of wells to include in the Wellgroup. columnwise : bool, optional Specifies whether the wells included should be counted columnwise instead of the default rowwise. 
Returns ------- WellGroup WellGroup of selected wells Raises ------ TypeError Incorrect input types, e.g. `num` has to be of type int """ if not isinstance(start, (str, int, Well)): raise TypeError( "Well reference given is not of type 'str'," "'int', or 'Well'." ) if not isinstance(num, int): raise TypeError("Number of wells given is not of type 'int'.") start = self.robotize(start) if columnwise: row, col = self.decompose(start) num_rows = self.container_type.row_count() start = col * num_rows + row return WellGroup(self.all_wells(columnwise).wells[start : start + num]) def is_sealed(self): """ Check if Container is sealed. """ return self.cover in SEAL_TYPES def is_covered(self): """ Check if Container is covered. """ return self.cover in COVER_TYPES def quadrant(self, quad): """ Return a WellGroup of Wells corresponding to the selected quadrant of this Container. Parameters ---------- quad : int or str Specifies the quadrant number of the well (ex. 2) Returns ------- WellGroup WellGroup of wells for the specified quadrant Raises ------ ValueError Invalid quadrant specified for this Container type """ # TODO(Define what each quadrant number corresponds toL) if isinstance(quad, str): quad = quad.lower() if quad == "a1": quad = 0 elif quad == "a2": quad = 1 elif quad == "b1": quad = 2 elif quad == "b2": quad = 3 else: raise ValueError("Invalid quadrant index.") # n_wells: n_cols allowed_layouts = {96: 12, 384: 24} n_wells = self.container_type.well_count if ( n_wells not in allowed_layouts or self.container_type.col_count != allowed_layouts[n_wells] ): raise ValueError( "Quadrant is only defined for standard 96 and " "384-well plates" ) if n_wells == 96: if quad == 0: return WellGroup(self._wells) else: raise ValueError( "0 or 'A1' is the only valid quadrant for a 96-well " "plate." 
) if quad not in [0, 1, 2, 3]: raise ValueError( f"Invalid quadrant {quad} for plate type " f"{str(self.name)}" ) start_well = [0, 1, 24, 25] wells = [] for row_offset in range(start_well[quad], 384, 48): for col_offset in range(0, 24, 2): wells.append(row_offset + col_offset) return self.wells(wells) def set_storage(self, storage): """ Set the storage condition of a container, will overwrite an existing storage condition, will remove discard True. Parameters ---------- storage : str Storage condition. Returns ------- Container Container with modified storage condition Raises ---------- TypeError If storage condition not of type str. """ if not isinstance(storage, str): raise TypeError( f"Storage condition given ({storage}) is not of " f"type str. {type(storage)}." ) self.storage = storage return self def discard(self): """ Set the storage condition of a container to None and container to be discarded if ref in protocol. Example ---------- .. code-block:: python p = Protocol() container = p.ref("new_container", cont_type="96-pcr", storage="cold_20") p.incubate(c, "warm_37", "30:minute") container.discard() Autoprotocol generated: .. code-block:: json "refs": { "new_container": { "new": "96-pcr", "discard": true } } """ self.storage = None return self # pylint: disable=too-many-locals def wells_from_shape(self, origin, shape): """ Gets a WellGroup that originates from the `origin` and is distributed across the container in `shape`. This group has a Well for each index in `range(shape["rows"] * shape["columns"])`. In cases where the container dimensions are smaller than the shape format's dimensions the returned WellGroup will reference some wells multiple times. This is analogous to an SBS96-formatted liquid handler acting with multiple tips in each well of an SBS24-formatted plate. 
Parameters ---------- origin : int or str The index of the top left corner origin of the shape shape : dict See Also Instruction.builders.shape Returns ------- WellGroup The group of wells distributed in `shape` from the `origin` Raises ------ ValueError if the shape exceeds the extents of the container """ from .instruction import Instruction shape = Instruction.builders.shape(**shape) origin = self.well(origin) # unpacking container and shape format properties container_rows = self.container_type.row_count() container_cols = self.container_type.col_count format_rows = SBS_FORMAT_SHAPES[shape["format"]]["rows"] format_cols = SBS_FORMAT_SHAPES[shape["format"]]["columns"] # getting the row and column values for the origin origin_row, origin_col = self.decompose(origin) # ratios of container shape to format shape row_scaling = container_rows / format_rows col_scaling = container_cols / format_cols # the 0-indexed coordinates of all wells in origin plate to be included well_rows = [] well_cols = [] for idx in range(shape["rows"]): well_row = int(origin_row + idx * row_scaling) well_rows.append(well_row) for idx in range(shape["columns"]): well_col = int(origin_col + idx * col_scaling) well_cols.append(well_col) # coordinates of the tail (bottom right well) should not exceed bounds tail_row = well_rows[-1] tail_col = well_cols[-1] # tail_row and tail_col are 0-indexed based # container_rows and container_cols are 1-indexed based if tail_row + 1 > container_rows or tail_col + 1 > container_cols: raise ValueError( f"origin: {origin} with shape: {shape} exceeds the bounds of " f"container: {self}" ) return WellGroup( [self.well_from_coordinates(x, y) for x in well_rows for y in well_cols] ) def __repr__(self): """ Return a string representation of a Container using the specified name. (ex. Container('my_plate')) """ return ( f"Container({str(self.name)}" f"{", cover=" + self.cover if self.cover else ""})" )
""" Container, Well, WellGroup objects and associated functions :copyright: 2020 by The Autoprotocol Development Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details """ import json import warnings from .constants import SBS_FORMAT_SHAPES from .unit import Unit SEAL_TYPES = ["ultra-clear", "foil", "breathable"] COVER_TYPES = ["standard", "low_evaporation", "universal"] class Well(object): """ A Well object describes a single location within a container. Do not construct a Well directly -- retrieve it from the related Container object. Parameters ---------- container : Container The Container this well belongs to. index : int The index of this well within the container. """ def __init__(self, container, index): self.container = container self.index = index self.volume = None self.name = None self.properties = {} @staticmethod def validate_properties(properties): if not isinstance(properties, dict): raise TypeError( f"Aliquot properties {properties} are of type " f"{type(properties)}, they should be a `dict`." ) for key, value in properties.items(): if not isinstance(key, str): raise TypeError( f"Aliquot property {key} : {value} has a key of type " f"{type(key)}, it should be a 'str'." ) try: json.dumps(value) except TypeError: raise TypeError( f"Aliquot property {key} : {value} has a value of type " f"{type(value)}, that isn't JSON serializable." ) def set_properties(self, properties): """ Set properties for a Well. Existing property dictionary will be completely overwritten with the new dictionary. Parameters ---------- properties : dict Custom properties for a Well in dictionary form. Returns ------- Well Well with modified properties """ self.validate_properties(properties) self.properties = properties.copy() return self def add_properties(self, properties): """ Add properties to the properties attribute of a Well. 
If any property with the same key already exists for the Well then: - if both old and new properties are lists then append the new property - otherwise overwrite the old property with the new one Parameters ---------- properties : dict Dictionary of properties to add to a Well. Returns ------- Well Well with modified properties """ self.validate_properties(properties) for key, value in properties.items(): if key in self.properties: values_are_lists = all( isinstance(_, list) for _ in [value, self.properties[key]] ) if values_are_lists: self.properties[key].extend(value) else: message = f"Overwriting existing property {key} for {self}" warnings.warn(message=message) self.properties[key] = value else: self.properties[key] = value return self def set_volume(self, vol): """ Set the theoretical volume of liquid in a Well. Parameters ---------- vol : str, Unit Theoretical volume to indicate for a Well. Returns ------- Well Well with modified volume Raises ------ TypeError Incorrect input-type given ValueError Volume set exceeds maximum well volume """ if not isinstance(vol, str) and not isinstance(vol, Unit): raise TypeError( f"Volume {vol} is of type {type(vol)}, it should be either " f"'str' or 'Unit'." ) v = Unit(vol) max_vol = self.container.container_type.true_max_vol_ul if v > max_vol: raise ValueError( f"Theoretical volume {v} to be set exceeds maximum well " f"volume {max_vol}." ) self.volume = v return self def set_name(self, name): """ Set a name for this well for it to be included in a protocol's "outs" section Parameters ---------- name : str Well name. Returns ------- Well Well with modified name """ self.name = name return self def humanize(self): """ Return the human readable representation of the integer well index given based on the ContainerType of the Well. Uses the humanize function from the ContainerType class. Refer to `ContainerType.humanize()` for more information. 
Returns ------- str Index of well in Container (in human readable form) """ return self.container.humanize(self.index) def available_volume(self): """ Returns the available volume of a Well. This is calculated as nominal volume - container_type dead volume Returns ------- Unit(volume) Volume in well Raises ------ RuntimeError Well has no volume """ if self.volume is None: raise RuntimeError(f"well {self} has no volume") return self.volume - self.container.container_type.dead_volume_ul def __repr__(self): """ Return a string representation of a Well. """ return f"Well({str(self.container)}, {str(self.index)}, " f"{str(self.volume)})" class WellGroup(object): """ A logical grouping of Wells. Wells in a WellGroup do not necessarily need to be in the same container. Parameters ---------- wells : list List of Well objects contained in this WellGroup. Raises ------ TypeError Wells is not of the right input type """ def __init__(self, wells): if isinstance(wells, Well): wells = [wells] elif isinstance(wells, WellGroup): wells = wells.wells elif isinstance(wells, list): if not all(isinstance(well, Well) for well in wells): raise TypeError("All elements in list must be wells") else: raise TypeError("Wells must be Well, list of wells, WellGroup.") self.wells = wells self.name = None def __eq__(self, other): if isinstance(other, self.__class__): return self.__dict__ == other.__dict__ else: return False def set_properties(self, properties): """ Set the same properties for each Well in a WellGroup. Parameters ---------- properties : dict Dictionary of properties to set on Well(s). Returns ------- WellGroup WellGroup with modified properties """ for w in self.wells: w.set_properties(properties) return self def add_properties(self, properties): """ Add the same properties for each Well in a WellGroup. Parameters ---------- properties : dict Dictionary of properties to set on Well(s). 
Returns ------- WellGroup WellGroup with modified properties """ for w in self.wells: w.add_properties(properties) return self def set_volume(self, vol): """ Set the volume of every well in the group to vol. Parameters ---------- vol : Unit, str Theoretical volume of each well in the WellGroup. Returns ------- WellGroup WellGroup with modified volume """ for w in self.wells: w.set_volume(vol) return self def indices(self): """ Return the indices of the wells in the group in human-readable form, given that all of the wells belong to the same container. Returns ------- list(str) List of humanized indices from this WellGroup """ indices = [] for w in self.wells: assert w.container == self.wells[0].container, ( "All wells in WellGroup must belong to the same container to " "get their indices." ) indices.append(w.humanize()) return indices def append(self, other): """ Append another well to this WellGroup. Parameters ---------- other : Well Well to append to this WellGroup. Returns ------- WellGroup WellGroup with appended well Raises ------ TypeError other is not of type Well """ if not isinstance(other, Well): raise TypeError("Input given is not of type 'Well'.") else: return self.wells.append(other) def extend(self, other): """ Extend this WellGroup with another WellGroup. Parameters ---------- other : WellGroup or list of Wells WellGroup to extend this WellGroup. Returns ------- WellGroup WellGroup extended with specified WellGroup Raises ------ TypeError Input WellGroup is not of the right type """ if not isinstance(other, (WellGroup, list)): raise TypeError("Input given is not of type 'WellGroup' or " "'list'.") else: if not all(isinstance(well, Well) for well in other): raise TypeError("Input given is not of type 'Well'.") return self.wells.extend(WellGroup(other).wells) def set_group_name(self, name): """ Assigns a name to a WellGroup. 
Parameters ---------- name: str WellGroup name Returns ------- str Name of wellgroup """ self.name = name return self def wells_with(self, prop, val=None): """ Returns a wellgroup of wells with the specified property and value Parameters ---------- prop: str the property you are searching for val: str, optional the value assigned to the property Returns ------- WellGroup WellGroup with modified properties Raises ------ TypeError property or value defined does not have right input type """ if not isinstance(prop, str): raise TypeError(f"property is not a string: {prop!r}") if val is not None: return WellGroup( [ w for w in self.wells if prop in w.properties and w.properties[prop] is val ] ) else: return WellGroup([w for w in self.wells if prop in w.properties]) def pop(self, index=-1): """ Removes and returns the last well in the wellgroup, unless an index is specified. If index is specified, the well at that index is removed from the wellgroup and returned. Parameters ---------- index: int, optional the index of the well you want to remove and return Returns ------- Well Well with selected index from WellGroup """ return self.wells.pop(index) def insert(self, i, well): """ Insert a well at a given position. Parameters ---------- i : int index to insert the well at well : Well insert this well at the index Returns ------- WellGroup WellGroup with inserted wells Raises ------ TypeError index or well defined does not have right input type """ if not isinstance(i, int): raise TypeError("Input given is not of type 'Int'") if not isinstance(well, Well): raise TypeError("Input given is not of type 'Well'") if i >= len(self.wells): return self.wells.append(well) else: self.wells = self.wells[:i] + [well] + self.wells[i:] return self.wells def __setitem__(self, key, item): """ Set a specific Well in a WellGroup. Parameters ---------- key : int Position in a WellGroup in robotized form. 
item: Well Well or WellGroup to be added Raises ------ TypeError Item specified is not of type `Well` """ if not isinstance(item, Well): raise TypeError("Input given is not of type 'Well'.") self.wells[key] = item def __getitem__(self, key): """ Return a specific Well from a WellGroup. Parameters ---------- key : int Position in a WellGroup in robotized form. Returns ------- Well Specified well from given key """ return self.wells[key] def __len__(self): """ Return the number of Wells in a WellGroup. """ return len(self.wells) def __repr__(self): """ Return a string representation of a WellGroup. """ return "WellGroup(%s)" % (str(self.wells)) def __add__(self, other): """ Append a Well or Wells from another WellGroup to this WellGroup. Parameters ---------- other : Well, WellGroup. Returns ------- WellGroup WellGroup with appended wells Raises ------ TypeError Input given is not of type Well or WellGroup """ if not isinstance(other, (Well, WellGroup)): raise TypeError("You can only add a Well or WellGroups " "together.") if isinstance(other, Well): return WellGroup(self.wells + [other]) else: return WellGroup(self.wells + other.wells) # pylint: disable=redefined-builtin class Container(object): """ A reference to a specific physical container (e.g. a tube or 96-well microplate). Every Container has an associated ContainerType, which defines the well count and arrangement, amongst other properties. There are several methods on Container which present a convenient interface for defining subsets of wells on which to operate. These methods return a WellGroup. Containers are usually declared using the Protocol.ref method. Parameters ---------- id : str, optional Alphanumerical identifier for a Container. container_type : ContainerType ContainerType associated with a Container. name : str, optional name of the container/ref being created. storage : str, optional name of the storage condition. cover : str, optional name of the cover on the container. 
Raises ------ AttributeError Invalid cover-type given """ def __init__(self, id, container_type, name=None, storage=None, cover=None): self.name = name self.id = id self.container_type = container_type self.storage = storage self.cover = cover self._wells = [Well(self, idx) for idx in range(container_type.well_count)] if self.cover and not (self.is_covered() or self.is_sealed()): raise AttributeError(f"{cover} is not a valid seal or cover type.") def well(self, i): """ Return a Well object representing the well at the index specified of this Container. Parameters ---------- i : int, str Well reference in the form of an integer (ex: 0) or human-readable string (ex: "A1"). Returns ------- Well Well for given reference Raises ------ TypeError index given is not of the right type """ if not isinstance(i, (int, str)): raise TypeError("Well reference given is not of type 'int' or " "'str'.") return self._wells[self.robotize(i)] def well_from_coordinates(self, row, column): """ Gets the well at 0-indexed position (row, column) within the container. The origin is in the top left corner. Parameters ---------- row : int The 0-indexed row index of the well to be fetched column : int The 0-indexed column index of the well to be fetched Returns ------- Well The well at position (row, column) """ return self.well( self.container_type.well_from_coordinates(row=row, column=column) ) def tube(self): """ Checks if container is tube and returns a Well representing the zeroth well. Returns ------- Well Zeroth well of tube Raises ------- AttributeError If container is not tube """ if self.container_type.is_tube: return self.well(0) else: raise AttributeError( f"{self} is a {self.container_type.shortname} " f"and is not a tube" ) def wells(self, *args): """ Return a WellGroup containing references to wells corresponding to the index or indices given. Parameters ---------- args : str, int, list Reference or list of references to a well index either as an integer or a string. 
        Returns
        -------
        WellGroup
            Wells from specified references

        Raises
        ------
        TypeError
            Well reference is not of a valid input type

        """
        # Flatten the mixed argument forms (single ref, list of refs, or a
        # mixture) into one flat list of well references.
        if isinstance(args[0], list):
            wells = args[0]
        else:
            wells = [args[0]]
        for a in args[1:]:
            if isinstance(a, list):
                wells.extend(a)
            else:
                wells.extend([a])
        for w in wells:
            if not isinstance(w, (str, int, list)):
                raise TypeError(
                    "Well reference given is not of type" " 'int', 'str' or 'list'."
                )
        return WellGroup([self.well(w) for w in wells])

    def robotize(self, well_ref):
        """
        Return the integer representation of the well index given, based on
        the ContainerType of the Container.

        Uses the robotize function from the ContainerType class. Refer to
        `ContainerType.robotize()` for more information.
        """
        if not isinstance(well_ref, (str, int, Well, list)):
            raise TypeError(
                "Well reference given is not of type 'str' "
                "'int', 'Well' or 'list'."
            )
        # Delegate index conversion to the container type, which knows the
        # plate geometry.
        return self.container_type.robotize(well_ref)

    def humanize(self, well_ref):
        """
        Return the human readable representation of the integer well index
        given based on the ContainerType of the Container.

        Uses the humanize function from the ContainerType class. Refer to
        `ContainerType.humanize()` for more information.
        """
        if not isinstance(well_ref, (int, str, list)):
            raise TypeError(
                "Well reference given is not of type 'int'," "'str' or 'list'."
            )
        return self.container_type.humanize(well_ref)

    def decompose(self, well_ref):
        """
        Return a tuple representing the column and row number of the well
        index given based on the ContainerType of the Container.

        Uses the decompose function from the ContainerType class. Refer to
        `ContainerType.decompose()` for more information.
        """
        if not isinstance(well_ref, (int, str, Well)):
            raise TypeError(
                "Well reference given is not of type 'int', " "'str' or Well."
            )
        return self.container_type.decompose(well_ref)

    def all_wells(self, columnwise=False):
        """
        Return a WellGroup representing all Wells belonging to this Container.

        Parameters
        ----------
        columnwise : bool, optional
            returns the WellGroup columnwise instead of rowwise (ordered by
            well index).

        Returns
        -------
        WellGroup
            WellGroup of all Wells in Container

        """
        if columnwise:
            num_cols = self.container_type.col_count
            num_rows = self.container_type.well_count // num_cols
            # Re-order the rowwise well list so that wells are grouped by
            # column (A1, B1, C1, ... then A2, B2, ...).
            return WellGroup(
                [
                    self._wells[row * num_cols + col]
                    for col in range(num_cols)
                    for row in range(num_rows)
                ]
            )
        else:
            return WellGroup(self._wells)

    def inner_wells(self, columnwise=False):
        """
        Return a WellGroup of all wells on a plate excluding wells in the top
        and bottom rows and in the first and last columns.

        Parameters
        ----------
        columnwise : bool, optional
            returns the WellGroup columnwise instead of rowwise (ordered by
            well index).

        Returns
        -------
        WellGroup
            WellGroup of inner wells

        """
        num_cols = self.container_type.col_count
        num_rows = self.container_type.row_count()
        inner_wells = []
        if columnwise:
            # Collect interior indices column by column, skipping the first
            # and last row/column of the plate.
            for c in range(1, num_cols - 1):
                wells = []
                for r in range(1, num_rows - 1):
                    wells.append((r * num_cols) + c)
                inner_wells.extend(wells)
        else:
            # `well` tracks the index of the first well of each interior row.
            well = num_cols
            for _ in range(1, num_rows - 1):
                inner_wells.extend(range(well + 1, well + (num_cols - 1)))
                well += num_cols
        inner_wells = [self._wells[x] for x in inner_wells]
        return WellGroup(inner_wells)

    def wells_from(self, start, num, columnwise=False):
        """
        Return a WellGroup of Wells belonging to this Container starting from
        the index indicated (in integer or string form) and including the
        number of proceeding wells specified. Wells are counted from the
        starting well rowwise unless columnwise is True.

        Parameters
        ----------
        start : Well or int or str
            Starting well specified as a Well object, a human-readable well
            index or an integer well index.
        num : int
            Number of wells to include in the Wellgroup.
        columnwise : bool, optional
            Specifies whether the wells included should be counted columnwise
            instead of the default rowwise.

        Returns
        -------
        WellGroup
            WellGroup of selected wells

        Raises
        ------
        TypeError
            Incorrect input types, e.g. `num` has to be of type int

        """
        if not isinstance(start, (str, int, Well)):
            raise TypeError(
                "Well reference given is not of type 'str'," "'int', or 'Well'."
            )
        if not isinstance(num, int):
            raise TypeError("Number of wells given is not of type 'int'.")

        start = self.robotize(start)
        if columnwise:
            # Translate the rowwise index into its position within the
            # columnwise ordering before slicing.
            row, col = self.decompose(start)
            num_rows = self.container_type.row_count()
            start = col * num_rows + row

        return WellGroup(self.all_wells(columnwise).wells[start : start + num])

    def is_sealed(self):
        """
        Check if Container is sealed.
        """
        return self.cover in SEAL_TYPES

    def is_covered(self):
        """
        Check if Container is covered.
        """
        return self.cover in COVER_TYPES

    def quadrant(self, quad):
        """
        Return a WellGroup of Wells corresponding to the selected quadrant of
        this Container.

        Parameters
        ----------
        quad : int or str
            Specifies the quadrant number of the well (ex. 2)

        Returns
        -------
        WellGroup
            WellGroup of wells for the specified quadrant

        Raises
        ------
        ValueError
            Invalid quadrant specified for this Container type

        """
        # TODO: define what each quadrant number corresponds to
        if isinstance(quad, str):
            quad = quad.lower()
            if quad == "a1":
                quad = 0
            elif quad == "a2":
                quad = 1
            elif quad == "b1":
                quad = 2
            elif quad == "b2":
                quad = 3
            else:
                raise ValueError("Invalid quadrant index.")

        # n_wells: n_cols
        allowed_layouts = {96: 12, 384: 24}
        n_wells = self.container_type.well_count
        if (
            n_wells not in allowed_layouts
            or self.container_type.col_count != allowed_layouts[n_wells]
        ):
            raise ValueError(
                "Quadrant is only defined for standard 96 and " "384-well plates"
            )

        if n_wells == 96:
            if quad == 0:
                return WellGroup(self._wells)
            else:
                raise ValueError(
                    "0 or 'A1' is the only valid quadrant for a 96-well " "plate."
                )

        if quad not in [0, 1, 2, 3]:
            raise ValueError(
                f"Invalid quadrant {quad} for plate type " f"{str(self.name)}"
            )

        # 384-well quadrants interleave: each quadrant takes every other well
        # of every other row, offset by the quadrant's starting index.
        start_well = [0, 1, 24, 25]
        wells = []
        for row_offset in range(start_well[quad], 384, 48):
            for col_offset in range(0, 24, 2):
                wells.append(row_offset + col_offset)
        return self.wells(wells)

    def set_storage(self, storage):
        """
        Set the storage condition of a container, will overwrite an existing
        storage condition, will remove discard True.

        Parameters
        ----------
        storage : str
            Storage condition.

        Returns
        -------
        Container
            Container with modified storage condition

        Raises
        ----------
        TypeError
            If storage condition not of type str.

        """
        if not isinstance(storage, str):
            raise TypeError(
                f"Storage condition given ({storage}) is not of "
                f"type str. {type(storage)}."
            )

        self.storage = storage
        return self

    def discard(self):
        """
        Set the storage condition of a container to None and container to be
        discarded if ref in protocol.

        Example
        ----------

        .. code-block:: python

            p = Protocol()
            container = p.ref("new_container", cont_type="96-pcr",
                              storage="cold_20")
            p.incubate(c, "warm_37", "30:minute")
            container.discard()

            Autoprotocol generated:

            .. code-block:: json

                "refs": {
                    "new_container": {
                        "new": "96-pcr",
                        "discard": true
                    }
                }

        """
        self.storage = None
        return self

    # pylint: disable=too-many-locals
    def wells_from_shape(self, origin, shape):
        """
        Gets a WellGroup that originates from the `origin` and is distributed
        across the container in `shape`.

        This group has a Well for each index in
        `range(shape["rows"] * shape["columns"])`.

        In cases where the container dimensions are smaller than the shape
        format's dimensions the returned WellGroup will reference some wells
        multiple times. This is analogous to an SBS96-formatted liquid handler
        acting with multiple tips in each well of an SBS24-formatted plate.

        Parameters
        ----------
        origin : int or str
            The index of the top left corner origin of the shape
        shape : dict
            See Also Instruction.builders.shape

        Returns
        -------
        WellGroup
            The group of wells distributed in `shape` from the `origin`

        Raises
        ------
        ValueError
            if the shape exceeds the extents of the container

        """
        # Imported locally to avoid a circular import at module load time.
        from .instruction import Instruction

        shape = Instruction.builders.shape(**shape)
        origin = self.well(origin)

        # unpacking container and shape format properties
        container_rows = self.container_type.row_count()
        container_cols = self.container_type.col_count
        format_rows = SBS_FORMAT_SHAPES[shape["format"]]["rows"]
        format_cols = SBS_FORMAT_SHAPES[shape["format"]]["columns"]

        # getting the row and column values for the origin
        origin_row, origin_col = self.decompose(origin)

        # ratios of container shape to format shape
        row_scaling = container_rows / format_rows
        col_scaling = container_cols / format_cols

        # the 0-indexed coordinates of all wells in origin plate to be included
        well_rows = []
        well_cols = []
        for idx in range(shape["rows"]):
            well_row = int(origin_row + idx * row_scaling)
            well_rows.append(well_row)
        for idx in range(shape["columns"]):
            well_col = int(origin_col + idx * col_scaling)
            well_cols.append(well_col)

        # coordinates of the tail (bottom right well) should not exceed bounds
        tail_row = well_rows[-1]
        tail_col = well_cols[-1]

        # tail_row and tail_col are 0-indexed based
        # container_rows and container_cols are 1-indexed based
        if tail_row + 1 > container_rows or tail_col + 1 > container_cols:
            raise ValueError(
                f"origin: {origin} with shape: {shape} exceeds the bounds of "
                f"container: {self}"
            )

        return WellGroup(
            [self.well_from_coordinates(x, y) for x in well_rows for y in well_cols]
        )

    def __repr__(self):
        """
        Return a string representation of a Container using the specified
        name. (ex. Container('my_plate'))
        """
        return (
            f"Container({str(self.name)}"
            f"{', cover=' + self.cover if self.cover else ''})"
        )
import asyncio import dataclasses import logging from pathlib import Path from typing import Callable, Dict, List, Optional, Tuple, Set, Any from blspy import PrivateKey, G1Element from chia.consensus.block_rewards import calculate_base_farmer_reward from chia.pools.pool_wallet import PoolWallet from chia.pools.pool_wallet_info import create_pool_state, FARMING_TO_POOL, PoolWalletInfo, PoolState from chia.protocols.protocol_message_types import ProtocolMessageTypes from chia.server.outbound_message import NodeType, make_msg from chia.simulator.simulator_protocol import FarmNewBlockProtocol from chia.types.announcement import Announcement from chia.types.blockchain_format.coin import Coin from chia.types.blockchain_format.sized_bytes import bytes32 from chia.types.spend_bundle import SpendBundle from chia.util.bech32m import decode_puzzle_hash, encode_puzzle_hash from chia.util.byte_types import hexstr_to_bytes from chia.util.ints import uint32, uint64, uint8 from chia.util.keychain import KeyringIsLocked, bytes_to_mnemonic, generate_mnemonic from chia.util.path import path_from_root from chia.util.ws_message import WsRpcMessage, create_payload_dict from chia.wallet.cat_wallet.cat_constants import DEFAULT_CATS from chia.wallet.cat_wallet.cat_wallet import CATWallet from chia.wallet.derive_keys import master_sk_to_singleton_owner_sk, master_sk_to_wallet_sk_unhardened, MAX_POOL_WALLETS from chia.wallet.rl_wallet.rl_wallet import RLWallet from chia.wallet.derive_keys import master_sk_to_farmer_sk, master_sk_to_pool_sk, master_sk_to_wallet_sk from chia.wallet.did_wallet.did_wallet import DIDWallet from chia.wallet.trade_record import TradeRecord from chia.wallet.trading.offer import Offer from chia.wallet.transaction_record import TransactionRecord from chia.wallet.util.transaction_type import TransactionType from chia.wallet.util.wallet_types import AmountWithPuzzlehash, WalletType from chia.wallet.wallet_info import WalletInfo from chia.wallet.wallet_node import 
WalletNode from chia.util.config import load_config from chia.consensus.coinbase import create_puzzlehash_for_pk # Timeout for response from wallet/full node for sending a transaction TIMEOUT = 30 log = logging.getLogger(__name__) class WalletRpcApi: def __init__(self, wallet_node: WalletNode): assert wallet_node is not None self.service = wallet_node self.service_name = "chia_wallet" self.balance_cache: Dict[int, Any] = {} def get_routes(self) -> Dict[str, Callable]: return { # Key management "/log_in": self.log_in, "/get_logged_in_fingerprint": self.get_logged_in_fingerprint, "/get_public_keys": self.get_public_keys, "/get_private_key": self.get_private_key, "/generate_mnemonic": self.generate_mnemonic, "/add_key": self.add_key, "/delete_key": self.delete_key, "/check_delete_key": self.check_delete_key, "/delete_all_keys": self.delete_all_keys, # Wallet node "/get_sync_status": self.get_sync_status, "/get_height_info": self.get_height_info, "/push_tx": self.push_tx, "/farm_block": self.farm_block, # Only when node simulator is running # this function is just here for backwards-compatibility. 
It will probably # be removed in the future "/get_initial_freeze_period": self.get_initial_freeze_period, "/get_network_info": self.get_network_info, # Wallet management "/get_wallets": self.get_wallets, "/create_new_wallet": self.create_new_wallet, # Wallet "/get_wallet_balance": self.get_wallet_balance, "/get_transaction": self.get_transaction, "/get_transactions": self.get_transactions, "/get_transaction_count": self.get_transaction_count, "/get_next_address": self.get_next_address, "/send_transaction": self.send_transaction, "/send_transaction_multi": self.send_transaction_multi, "/get_farmed_amount": self.get_farmed_amount, "/create_signed_transaction": self.create_signed_transaction, "/delete_unconfirmed_transactions": self.delete_unconfirmed_transactions, # CATs and trading "/cat_set_name": self.cat_set_name, "/cat_asset_id_to_name": self.cat_asset_id_to_name, "/cat_get_name": self.cat_get_name, "/cat_spend": self.cat_spend, "/cat_get_asset_id": self.cat_get_asset_id, "/create_offer_for_ids": self.create_offer_for_ids, "/get_offer_summary": self.get_offer_summary, "/check_offer_validity": self.check_offer_validity, "/take_offer": self.take_offer, "/get_offer": self.get_offer, "/get_all_offers": self.get_all_offers, "/get_offers_count": self.get_offers_count, "/cancel_offer": self.cancel_offer, "/get_cat_list": self.get_cat_list, # DID Wallet "/did_update_recovery_ids": self.did_update_recovery_ids, "/did_get_pubkey": self.did_get_pubkey, "/did_get_did": self.did_get_did, "/did_recovery_spend": self.did_recovery_spend, "/did_get_recovery_list": self.did_get_recovery_list, "/did_create_attest": self.did_create_attest, "/did_get_information_needed_for_recovery": self.did_get_information_needed_for_recovery, "/did_create_backup_file": self.did_create_backup_file, # RL wallet "/rl_set_user_info": self.rl_set_user_info, "/send_clawback_transaction:": self.send_clawback_transaction, "/add_rate_limited_funds:": self.add_rate_limited_funds, # Pool Wallet 
"/pw_join_pool": self.pw_join_pool, "/pw_self_pool": self.pw_self_pool, "/pw_absorb_rewards": self.pw_absorb_rewards, "/pw_status": self.pw_status, } async def _state_changed(self, *args) -> List[WsRpcMessage]: """ Called by the WalletNode or WalletStateManager when something has changed in the wallet. This gives us an opportunity to send notifications to all connected clients via WebSocket. """ payloads = [] if args[0] is not None and args[0] == "sync_changed": # Metrics is the only current consumer for this event payloads.append(create_payload_dict(args[0], {}, self.service_name, "metrics")) if len(args) < 2: return payloads data = { "state": args[0], } if args[1] is not None: data["wallet_id"] = args[1] if args[2] is not None: data["additional_data"] = args[2] payloads.append(create_payload_dict("state_changed", data, self.service_name, "wallet_ui")) if args[0] == "coin_added": payloads.append(create_payload_dict(args[0], data, self.service_name, "metrics")) return payloads async def _stop_wallet(self): """ Stops a currently running wallet/key, which allows starting the wallet with a new key. Each key has it's own wallet database. """ if self.service is not None: self.service._close() peers_close_task: Optional[asyncio.Task] = await self.service._await_closed() if peers_close_task is not None: await peers_close_task async def _convert_tx_puzzle_hash(self, tx: TransactionRecord) -> TransactionRecord: assert self.service.wallet_state_manager is not None return dataclasses.replace( tx, to_puzzle_hash=( await self.service.wallet_state_manager.convert_puzzle_hash(tx.wallet_id, tx.to_puzzle_hash) ), ) ########################################################################################## # Key management ########################################################################################## async def log_in(self, request): """ Logs in the wallet with a specific key. 
""" fingerprint = request["fingerprint"] if self.service.logged_in_fingerprint == fingerprint: return {"fingerprint": fingerprint} await self._stop_wallet() self.balance_cache = {} started = await self.service._start(fingerprint) if started is True: return {"fingerprint": fingerprint} return {"success": False, "error": "Unknown Error"} async def get_logged_in_fingerprint(self, request: Dict): return {"fingerprint": self.service.logged_in_fingerprint} async def get_public_keys(self, request: Dict): try: assert self.service.keychain_proxy is not None # An offering to the mypy gods fingerprints = [ sk.get_g1().get_fingerprint() for (sk, seed) in await self.service.keychain_proxy.get_all_private_keys() ] except KeyringIsLocked: return {"keyring_is_locked": True} except Exception: return {"public_key_fingerprints": []} else: return {"public_key_fingerprints": fingerprints} async def _get_private_key(self, fingerprint) -> Tuple[Optional[PrivateKey], Optional[bytes]]: try: assert self.service.keychain_proxy is not None # An offering to the mypy gods all_keys = await self.service.keychain_proxy.get_all_private_keys() for sk, seed in all_keys: if sk.get_g1().get_fingerprint() == fingerprint: return sk, seed except Exception as e: log.error(f"Failed to get private key by fingerprint: {e}") return None, None async def get_private_key(self, request): fingerprint = request["fingerprint"] sk, seed = await self._get_private_key(fingerprint) if sk is not None: s = bytes_to_mnemonic(seed) if seed is not None else None return { "private_key": { "fingerprint": fingerprint, "sk": bytes(sk).hex(), "pk": bytes(sk.get_g1()).hex(), "farmer_pk": bytes(master_sk_to_farmer_sk(sk).get_g1()).hex(), "pool_pk": bytes(master_sk_to_pool_sk(sk).get_g1()).hex(), "seed": s, }, } return {"success": False, "private_key": {"fingerprint": fingerprint}} async def generate_mnemonic(self, request: Dict): return {"mnemonic": generate_mnemonic().split(" ")} async def add_key(self, request): if "mnemonic" not 
in request: raise ValueError("Mnemonic not in request") # Adding a key from 24 word mnemonic mnemonic = request["mnemonic"] passphrase = "" try: sk = await self.service.keychain_proxy.add_private_key(" ".join(mnemonic), passphrase) except KeyError as e: return { "success": False, "error": f"The word '{e.args[0]}' is incorrect.'", "word": e.args[0], } except Exception as e: return {"success": False, "error": str(e)} fingerprint = sk.get_g1().get_fingerprint() await self._stop_wallet() # Makes sure the new key is added to config properly started = False try: await self.service.keychain_proxy.check_keys(self.service.root_path) except Exception as e: log.error(f"Failed to check_keys after adding a new key: {e}") started = await self.service._start(fingerprint=fingerprint) if started is True: return {"fingerprint": fingerprint} raise ValueError("Failed to start") async def delete_key(self, request): await self._stop_wallet() fingerprint = request["fingerprint"] try: await self.service.keychain_proxy.delete_key_by_fingerprint(fingerprint) except Exception as e: log.error(f"Failed to delete key by fingerprint: {e}") return {"success": False, "error": str(e)} path = path_from_root( self.service.root_path, f"{self.service.config["database_path"]}-{fingerprint}", ) if path.exists(): path.unlink() return {} async def _check_key_used_for_rewards( self, new_root: Path, sk: PrivateKey, max_ph_to_search: int ) -> Tuple[bool, bool]: """Checks if the given key is used for either the farmer rewards or pool rewards returns a tuple of two booleans The first is true if the key is used as the Farmer rewards, otherwise false The second is true if the key is used as the Pool rewards, otherwise false Returns both false if the key cannot be found with the given fingerprint """ if sk is None: return False, False config: Dict = load_config(new_root, "config.yaml") farmer_target = config["farmer"].get("xch_target_address") pool_target = config["pool"].get("xch_target_address") found_farmer = 
False found_pool = False selected = config["selected_network"] prefix = config["network_overrides"]["config"][selected]["address_prefix"] for i in range(max_ph_to_search): if found_farmer and found_pool: break phs = [ encode_puzzle_hash(create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1()), prefix), encode_puzzle_hash( create_puzzlehash_for_pk(master_sk_to_wallet_sk_unhardened(sk, uint32(i)).get_g1()), prefix ), ] for ph in phs: if ph == farmer_target: found_farmer = True if ph == pool_target: found_pool = True return found_farmer, found_pool async def check_delete_key(self, request): """Check the key use prior to possible deletion checks whether key is used for either farm or pool rewards checks if any wallets have a non-zero balance """ used_for_farmer: bool = False used_for_pool: bool = False walletBalance: bool = False fingerprint = request["fingerprint"] sk, _ = await self._get_private_key(fingerprint) if sk is not None: used_for_farmer, used_for_pool = await self._check_key_used_for_rewards(self.service.root_path, sk, 100) if self.service.logged_in_fingerprint != fingerprint: await self._stop_wallet() await self.service._start(fingerprint=fingerprint) wallets: List[WalletInfo] = await self.service.wallet_state_manager.get_all_wallet_info_entries() for w in wallets: wallet = self.service.wallet_state_manager.wallets[w.id] unspent = await self.service.wallet_state_manager.coin_store.get_unspent_coins_for_wallet(w.id) balance = await wallet.get_confirmed_balance(unspent) pending_balance = await wallet.get_unconfirmed_balance(unspent) if (balance + pending_balance) > 0: walletBalance = True break return { "fingerprint": fingerprint, "used_for_farmer_rewards": used_for_farmer, "used_for_pool_rewards": used_for_pool, "wallet_balance": walletBalance, } async def delete_all_keys(self, request: Dict): await self._stop_wallet() try: assert self.service.keychain_proxy is not None # An offering to the mypy gods await 
self.service.keychain_proxy.delete_all_keys() except Exception as e: log.error(f"Failed to delete all keys: {e}") return {"success": False, "error": str(e)} path = path_from_root(self.service.root_path, self.service.config["database_path"]) if path.exists(): path.unlink() return {} ########################################################################################## # Wallet Node ########################################################################################## async def get_sync_status(self, request: Dict): assert self.service.wallet_state_manager is not None syncing = self.service.wallet_state_manager.sync_mode synced = await self.service.wallet_state_manager.synced() return {"synced": synced, "syncing": syncing, "genesis_initialized": True} async def get_height_info(self, request: Dict): assert self.service.wallet_state_manager is not None height = await self.service.wallet_state_manager.blockchain.get_finished_sync_up_to() return {"height": height} async def get_network_info(self, request: Dict): assert self.service.wallet_state_manager is not None network_name = self.service.config["selected_network"] address_prefix = self.service.config["network_overrides"]["config"][network_name]["address_prefix"] return {"network_name": network_name, "network_prefix": address_prefix} async def push_tx(self, request: Dict): assert self.service.server is not None nodes = self.service.server.get_full_node_connections() if len(nodes) == 0: raise ValueError("Wallet is not currently connected to any full node peers") await self.service.push_tx(SpendBundle.from_bytes(hexstr_to_bytes(request["spend_bundle"]))) return {} async def farm_block(self, request): raw_puzzle_hash = decode_puzzle_hash(request["address"]) request = FarmNewBlockProtocol(raw_puzzle_hash) msg = make_msg(ProtocolMessageTypes.farm_new_block, request) await self.service.server.send_to_all([msg], NodeType.FULL_NODE) return {} 
########################################################################################## # Wallet Management ########################################################################################## async def get_wallets(self, request: Dict): assert self.service.wallet_state_manager is not None wallets: List[WalletInfo] = await self.service.wallet_state_manager.get_all_wallet_info_entries() return {"wallets": wallets} async def create_new_wallet(self, request: Dict): assert self.service.wallet_state_manager is not None wallet_state_manager = self.service.wallet_state_manager if await self.service.wallet_state_manager.synced() is False: raise ValueError("Wallet needs to be fully synced.") main_wallet = wallet_state_manager.main_wallet fee = uint64(request.get("fee", 0)) if request["wallet_type"] == "cat_wallet": # If not provided, the name will be autogenerated based on the tail hash. name = request.get("name", None) if request["mode"] == "new": async with self.service.wallet_state_manager.lock: cat_wallet: CATWallet = await CATWallet.create_new_cat_wallet( wallet_state_manager, main_wallet, {"identifier": "genesis_by_id"}, uint64(request["amount"]), name, ) asset_id = cat_wallet.get_asset_id() self.service.wallet_state_manager.state_changed("wallet_created") return {"type": cat_wallet.type(), "asset_id": asset_id, "wallet_id": cat_wallet.id()} elif request["mode"] == "existing": async with self.service.wallet_state_manager.lock: cat_wallet = await CATWallet.create_wallet_for_cat( wallet_state_manager, main_wallet, request["asset_id"], name ) self.service.wallet_state_manager.state_changed("wallet_created") return {"type": cat_wallet.type(), "asset_id": request["asset_id"], "wallet_id": cat_wallet.id()} else: # undefined mode pass elif request["wallet_type"] == "rl_wallet": if request["rl_type"] == "admin": log.info("Create rl admin wallet") async with self.service.wallet_state_manager.lock: rl_admin: RLWallet = await 
RLWallet.create_rl_admin(wallet_state_manager) success = await rl_admin.admin_create_coin( uint64(int(request["interval"])), uint64(int(request["limit"])), request["pubkey"], uint64(int(request["amount"])), uint64(int(request["fee"])) if "fee" in request else uint64(0), ) assert rl_admin.rl_info.admin_pubkey is not None return { "success": success, "id": rl_admin.id(), "type": rl_admin.type(), "origin": rl_admin.rl_info.rl_origin, "pubkey": rl_admin.rl_info.admin_pubkey.hex(), } elif request["rl_type"] == "user": log.info("Create rl user wallet") async with self.service.wallet_state_manager.lock: rl_user: RLWallet = await RLWallet.create_rl_user(wallet_state_manager) assert rl_user.rl_info.user_pubkey is not None return { "id": rl_user.id(), "type": rl_user.type(), "pubkey": rl_user.rl_info.user_pubkey.hex(), } else: # undefined rl_type pass elif request["wallet_type"] == "did_wallet": if request["did_type"] == "new": backup_dids = [] num_needed = 0 for d in request["backup_dids"]: backup_dids.append(hexstr_to_bytes(d)) if len(backup_dids) > 0: num_needed = uint64(request["num_of_backup_ids_needed"]) async with self.service.wallet_state_manager.lock: did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet( wallet_state_manager, main_wallet, uint64(request["amount"]), backup_dids, uint64(num_needed), ) my_did = did_wallet.get_my_DID() return { "success": True, "type": did_wallet.type(), "my_did": my_did, "wallet_id": did_wallet.id(), } elif request["did_type"] == "recovery": async with self.service.wallet_state_manager.lock: did_wallet = await DIDWallet.create_new_did_wallet_from_recovery( wallet_state_manager, main_wallet, request["filename"] ) assert did_wallet.did_info.temp_coin is not None assert did_wallet.did_info.temp_puzhash is not None assert did_wallet.did_info.temp_pubkey is not None my_did = did_wallet.get_my_DID() coin_name = did_wallet.did_info.temp_coin.name().hex() coin_list = did_wallet.did_info.temp_coin.as_list() newpuzhash = 
did_wallet.did_info.temp_puzhash pubkey = did_wallet.did_info.temp_pubkey return { "success": True, "type": did_wallet.type(), "my_did": my_did, "wallet_id": did_wallet.id(), "coin_name": coin_name, "coin_list": coin_list, "newpuzhash": newpuzhash.hex(), "pubkey": pubkey.hex(), "backup_dids": did_wallet.did_info.backup_ids, "num_verifications_required": did_wallet.did_info.num_of_backup_ids_needed, } else: # undefined did_type pass elif request["wallet_type"] == "pool_wallet": if request["mode"] == "new": owner_puzzle_hash: bytes32 = await self.service.wallet_state_manager.main_wallet.get_puzzle_hash(True) from chia.pools.pool_wallet_info import initial_pool_state_from_dict async with self.service.wallet_state_manager.lock: # We assign a pseudo unique id to each pool wallet, so that each one gets its own deterministic # owner and auth keys. The public keys will go on the blockchain, and the private keys can be found # using the root SK and trying each index from zero. The indexes are not fully unique though, # because the PoolWallet is not created until the tx gets confirmed on chain. Therefore if we # make multiple pool wallets at the same time, they will have the same ID. 
max_pwi = 1 for _, wallet in self.service.wallet_state_manager.wallets.items(): if wallet.type() == WalletType.POOLING_WALLET: pool_wallet_index = await wallet.get_pool_wallet_index() if pool_wallet_index > max_pwi: max_pwi = pool_wallet_index if max_pwi + 1 >= (MAX_POOL_WALLETS - 1): raise ValueError(f"Too many pool wallets ({max_pwi}), cannot create any more on this key.") owner_sk: PrivateKey = master_sk_to_singleton_owner_sk( self.service.wallet_state_manager.private_key, uint32(max_pwi + 1) ) owner_pk: G1Element = owner_sk.get_g1() initial_target_state = initial_pool_state_from_dict( request["initial_target_state"], owner_pk, owner_puzzle_hash ) assert initial_target_state is not None try: delayed_address = None if "p2_singleton_delayed_ph" in request: delayed_address = bytes32.from_hexstr(request["p2_singleton_delayed_ph"]) tr, p2_singleton_puzzle_hash, launcher_id = await PoolWallet.create_new_pool_wallet_transaction( wallet_state_manager, main_wallet, initial_target_state, fee, request.get("p2_singleton_delay_time", None), delayed_address, ) except Exception as e: raise ValueError(str(e)) return { "total_fee": fee * 2, "transaction": tr, "launcher_id": launcher_id.hex(), "p2_singleton_puzzle_hash": p2_singleton_puzzle_hash.hex(), } elif request["mode"] == "recovery": raise ValueError("Need upgraded singleton for on-chain recovery") else: # undefined wallet_type pass return None ########################################################################################## # Wallet ########################################################################################## async def get_wallet_balance(self, request: Dict) -> Dict: assert self.service.wallet_state_manager is not None wallet_id = uint32(int(request["wallet_id"])) wallet = self.service.wallet_state_manager.wallets[wallet_id] # If syncing return the last available info or 0s syncing = self.service.wallet_state_manager.sync_mode if syncing: if wallet_id in self.balance_cache: wallet_balance = 
self.balance_cache[wallet_id] else: wallet_balance = { "wallet_id": wallet_id, "confirmed_wallet_balance": 0, "unconfirmed_wallet_balance": 0, "spendable_balance": 0, "pending_change": 0, "max_send_amount": 0, "unspent_coin_count": 0, "pending_coin_removal_count": 0, } if self.service.logged_in_fingerprint is not None: wallet_balance["fingerprint"] = self.service.logged_in_fingerprint else: async with self.service.wallet_state_manager.lock: unspent_records = await self.service.wallet_state_manager.coin_store.get_unspent_coins_for_wallet( wallet_id ) balance = await wallet.get_confirmed_balance(unspent_records) pending_balance = await wallet.get_unconfirmed_balance(unspent_records) spendable_balance = await wallet.get_spendable_balance(unspent_records) pending_change = await wallet.get_pending_change_balance() max_send_amount = await wallet.get_max_send_amount(unspent_records) unconfirmed_removals: Dict[ bytes32, Coin ] = await wallet.wallet_state_manager.unconfirmed_removals_for_wallet(wallet_id) wallet_balance = { "wallet_id": wallet_id, "confirmed_wallet_balance": balance, "unconfirmed_wallet_balance": pending_balance, "spendable_balance": spendable_balance, "pending_change": pending_change, "max_send_amount": max_send_amount, "unspent_coin_count": len(unspent_records), "pending_coin_removal_count": len(unconfirmed_removals), } if self.service.logged_in_fingerprint is not None: wallet_balance["fingerprint"] = self.service.logged_in_fingerprint self.balance_cache[wallet_id] = wallet_balance return {"wallet_balance": wallet_balance} async def get_transaction(self, request: Dict) -> Dict: assert self.service.wallet_state_manager is not None transaction_id: bytes32 = bytes32(hexstr_to_bytes(request["transaction_id"])) tr: Optional[TransactionRecord] = await self.service.wallet_state_manager.get_transaction(transaction_id) if tr is None: raise ValueError(f"Transaction 0x{transaction_id.hex()} not found") return { "transaction": (await 
            self._convert_tx_puzzle_hash(tr)).to_json_dict_convenience(self.service.config),
            "transaction_id": tr.name,
        }

    async def get_transactions(self, request: Dict) -> Dict:
        """Return a page of a wallet's transactions, optionally filtered by destination address."""
        assert self.service.wallet_state_manager is not None

        wallet_id = int(request["wallet_id"])

        # Pagination / ordering parameters (defaults: first 50, store order).
        start = request.get("start", 0)
        end = request.get("end", 50)
        sort_key = request.get("sort_key", None)
        reverse = request.get("reverse", False)

        to_address = request.get("to_address", None)
        to_puzzle_hash: Optional[bytes32] = None
        if to_address is not None:
            to_puzzle_hash = decode_puzzle_hash(to_address)

        transactions = await self.service.wallet_state_manager.tx_store.get_transactions_between(
            wallet_id, start, end, sort_key=sort_key, reverse=reverse, to_puzzle_hash=to_puzzle_hash
        )
        return {
            "transactions": [
                (await self._convert_tx_puzzle_hash(tr)).to_json_dict_convenience(self.service.config)
                for tr in transactions
            ],
            "wallet_id": wallet_id,
        }

    async def get_transaction_count(self, request: Dict) -> Dict:
        """Return the total number of transactions recorded for the given wallet."""
        assert self.service.wallet_state_manager is not None

        wallet_id = int(request["wallet_id"])
        count = await self.service.wallet_state_manager.tx_store.get_transaction_count_for_wallet(wallet_id)
        return {
            "count": count,
            "wallet_id": wallet_id,
        }

    # this function is just here for backwards-compatibility. It will probably
    # be removed in the future
    async def get_initial_freeze_period(self, _: Dict):
        # Mon May 03 2021 17:00:00 GMT+0000
        return {"INITIAL_FREEZE_END_TIMESTAMP": 1620061200}

    async def get_next_address(self, request: Dict) -> Dict:
        """
        Returns a new address
        """
        assert self.service.wallet_state_manager is not None

        if request["new_address"] is True:
            create_new = True
        else:
            create_new = False
        wallet_id = uint32(int(request["wallet_id"]))
        wallet = self.service.wallet_state_manager.wallets[wallet_id]
        selected = self.service.config["selected_network"]
        prefix = self.service.config["network_overrides"]["config"][selected]["address_prefix"]
        if wallet.type() == WalletType.STANDARD_WALLET:
            raw_puzzle_hash = await wallet.get_puzzle_hash(create_new)
            address = encode_puzzle_hash(raw_puzzle_hash, prefix)
        elif wallet.type() == WalletType.CAT:
            # CAT wallets hand out addresses derived from the underlying standard wallet.
            raw_puzzle_hash = await wallet.standard_wallet.get_puzzle_hash(create_new)
            address = encode_puzzle_hash(raw_puzzle_hash, prefix)
        else:
            raise ValueError(f"Wallet type {wallet.type()} cannot create puzzle hashes")
        return {
            "wallet_id": wallet_id,
            "address": address,
        }

    async def send_transaction(self, request):
        """Create, sign and push a standard (non-CAT) transaction.

        Raises ValueError when not synced, for CAT wallets, for non-integer
        amount/fee, or when the address prefix does not match the network.
        """
        assert self.service.wallet_state_manager is not None

        if await self.service.wallet_state_manager.synced() is False:
            raise ValueError("Wallet needs to be fully synced before sending transactions")

        wallet_id = int(request["wallet_id"])
        wallet = self.service.wallet_state_manager.wallets[wallet_id]

        if wallet.type() == WalletType.CAT:
            raise ValueError("send_transaction does not work for CAT wallets")

        if not isinstance(request["amount"], int) or not isinstance(request["fee"], int):
            raise ValueError("An integer amount or fee is required (too many decimals)")
        amount: uint64 = uint64(request["amount"])
        address = request["address"]
        selected_network = self.service.config["selected_network"]
        expected_prefix = self.service.config["network_overrides"]["config"][selected_network]["address_prefix"]
        if address[0 : len(expected_prefix)] != expected_prefix:
            raise ValueError("Unexpected Address Prefix")
        puzzle_hash: bytes32 = decode_puzzle_hash(address)

        memos: List[bytes] = []
        if "memos" in request:
            memos = [mem.encode("utf-8") for mem in request["memos"]]

        if "fee" in request:
            fee = uint64(request["fee"])
        else:
            fee = uint64(0)
        async with self.service.wallet_state_manager.lock:
            tx: TransactionRecord = await wallet.generate_signed_transaction(amount, puzzle_hash, fee, memos=memos)
            await wallet.push_transaction(tx)

        # Transaction may not have been included in the mempool yet. Use get_transaction to check.
        return {
            "transaction": tx.to_json_dict_convenience(self.service.config),
            "transaction_id": tx.name,
        }

    async def send_transaction_multi(self, request) -> Dict:
        """Sign (via create_signed_transaction) and push a multi-output transaction."""
        assert self.service.wallet_state_manager is not None

        if await self.service.wallet_state_manager.synced() is False:
            raise ValueError("Wallet needs to be fully synced before sending transactions")

        wallet_id = uint32(request["wallet_id"])
        wallet = self.service.wallet_state_manager.wallets[wallet_id]

        async with self.service.wallet_state_manager.lock:
            # hold_lock=False: we already hold the wallet-state lock here.
            transaction: Dict = (await self.create_signed_transaction(request, hold_lock=False))["signed_tx"]
            tr: TransactionRecord = TransactionRecord.from_json_dict_convenience(transaction)
            await wallet.push_transaction(tr)

        # Transaction may not have been included in the mempool yet. Use get_transaction to check.
        return {"transaction": transaction, "transaction_id": tr.name}

    async def delete_unconfirmed_transactions(self, request):
        """Drop all unconfirmed transactions for a wallet and rebuild the tx cache."""
        wallet_id = uint32(request["wallet_id"])
        if wallet_id not in self.service.wallet_state_manager.wallets:
            raise ValueError(f"Wallet id {wallet_id} does not exist")
        if await self.service.wallet_state_manager.synced() is False:
            raise ValueError("Wallet needs to be fully synced.")

        async with self.service.wallet_state_manager.lock:
            async with self.service.wallet_state_manager.tx_store.db_wrapper.lock:
                await self.service.wallet_state_manager.tx_store.db_wrapper.begin_transaction()
                await self.service.wallet_state_manager.tx_store.delete_unconfirmed_transactions(wallet_id)
                if self.service.wallet_state_manager.wallets[wallet_id].type() == WalletType.POOLING_WALLET.value:
                    # A pooling wallet may be mid-transition; clear its pending target state too.
                    self.service.wallet_state_manager.wallets[wallet_id].target_state = None
                await self.service.wallet_state_manager.tx_store.db_wrapper.commit_transaction()
                # Update the cache
                await self.service.wallet_state_manager.tx_store.rebuild_tx_cache()
            return {}

    ##########################################################################################
    # CATs and Trading
    ##########################################################################################

    async def get_cat_list(self, request):
        """Return the list of default (well-known) CATs."""
        return {"cat_list": list(DEFAULT_CATS.values())}

    async def cat_set_name(self, request):
        assert self.service.wallet_state_manager is not None
        wallet_id = int(request["wallet_id"])
        wallet: CATWallet = self.service.wallet_state_manager.wallets[wallet_id]
        await wallet.set_name(str(request["name"]))
        return {"wallet_id": wallet_id}

    async def cat_get_name(self, request):
        assert self.service.wallet_state_manager is not None
        wallet_id = int(request["wallet_id"])
        wallet: CATWallet = self.service.wallet_state_manager.wallets[wallet_id]
        name: str = await wallet.get_name()
        return {"wallet_id": wallet_id, "name": name}

    async def cat_spend(self, request):
        assert self.service.wallet_state_manager is not None
        if await
 self.service.wallet_state_manager.synced() is False:
            raise ValueError("Wallet needs to be fully synced.")

        wallet_id = int(request["wallet_id"])
        wallet: CATWallet = self.service.wallet_state_manager.wallets[wallet_id]
        puzzle_hash: bytes32 = decode_puzzle_hash(request["inner_address"])

        memos: List[bytes] = []
        if "memos" in request:
            memos = [mem.encode("utf-8") for mem in request["memos"]]

        if not isinstance(request["amount"], int) or not isinstance(request["fee"], int):
            raise ValueError("An integer amount or fee is required (too many decimals)")
        amount: uint64 = uint64(request["amount"])
        if "fee" in request:
            fee = uint64(request["fee"])
        else:
            fee = uint64(0)
        async with self.service.wallet_state_manager.lock:
            # generate_signed_transaction is called with list arguments and yields a
            # list of records (the old `TransactionRecord` annotation was wrong).
            txs: List[TransactionRecord] = await wallet.generate_signed_transaction(
                [amount], [puzzle_hash], fee, memos=[memos]
            )
            for tx in txs:
                await wallet.standard_wallet.push_transaction(tx)

        # Only the last pushed transaction is reported back to the caller.
        return {
            "transaction": tx.to_json_dict_convenience(self.service.config),
            "transaction_id": tx.name,
        }

    async def cat_get_asset_id(self, request):
        """Return the asset id (TAIL hash hex) for a CAT wallet."""
        assert self.service.wallet_state_manager is not None
        wallet_id = int(request["wallet_id"])
        wallet: CATWallet = self.service.wallet_state_manager.wallets[wallet_id]
        asset_id: str = wallet.get_asset_id()
        return {"asset_id": asset_id, "wallet_id": wallet_id}

    async def cat_asset_id_to_name(self, request):
        """Map an asset id to a wallet id / display name, falling back to the default CAT list."""
        assert self.service.wallet_state_manager is not None
        wallet = await self.service.wallet_state_manager.get_wallet_for_asset_id(request["asset_id"])
        if wallet is None:
            if request["asset_id"] in DEFAULT_CATS:
                return {"wallet_id": None, "name": DEFAULT_CATS[request["asset_id"]]["name"]}
            else:
                raise ValueError("The asset ID specified does not belong to a wallet")
        else:
            return {"wallet_id": wallet.id(), "name": (await wallet.get_name())}

    async def create_offer_for_ids(self, request):
        """Create (and optionally only validate) an offer from a {wallet_id: amount} mapping."""
        assert self.service.wallet_state_manager is not None

        offer: Dict[str, int] = request["offer"]
        fee: uint64 = uint64(request.get("fee", 0))
        validate_only: bool =
 request.get("validate_only", False)

        modified_offer = {}
        for key in offer:
            # Offer keys arrive as strings over RPC; the trade manager expects int wallet ids.
            modified_offer[int(key)] = offer[key]

        async with self.service.wallet_state_manager.lock:
            (
                success,
                trade_record,
                error,
            ) = await self.service.wallet_state_manager.trade_manager.create_offer_for_ids(
                modified_offer, fee=fee, validate_only=validate_only
            )
        if success:
            return {
                "offer": Offer.from_bytes(trade_record.offer).to_bech32(),
                "trade_record": trade_record.to_json_dict_convenience(),
            }
        raise ValueError(error)

    async def get_offer_summary(self, request):
        """Decode a bech32 offer and summarize what it offers/requests plus its fees."""
        assert self.service.wallet_state_manager is not None
        offer_hex: str = request["offer"]
        offer = Offer.from_bech32(offer_hex)
        offered, requested = offer.summary()

        return {"summary": {"offered": offered, "requested": requested, "fees": offer.bundle.fees()}}

    async def check_offer_validity(self, request):
        assert self.service.wallet_state_manager is not None
        offer_hex: str = request["offer"]
        offer = Offer.from_bech32(offer_hex)

        return {"valid": (await self.service.wallet_state_manager.trade_manager.check_offer_validity(offer))}

    async def take_offer(self, request):
        """Accept (respond to) an offer; raises ValueError when the trade manager reports failure."""
        assert self.service.wallet_state_manager is not None
        offer_hex: str = request["offer"]
        offer = Offer.from_bech32(offer_hex)
        fee: uint64 = uint64(request.get("fee", 0))

        async with self.service.wallet_state_manager.lock:
            (
                success,
                trade_record,
                error,
            ) = await self.service.wallet_state_manager.trade_manager.respond_to_offer(offer, fee=fee)
        if not success:
            raise ValueError(error)
        return {"trade_record": trade_record.to_json_dict_convenience()}

    async def get_offer(self, request: Dict):
        assert self.service.wallet_state_manager is not None
        trade_mgr = self.service.wallet_state_manager.trade_manager

        trade_id = bytes32.from_hexstr(request["trade_id"])
        file_contents: bool = request.get("file_contents", False)
        trade_record: Optional[TradeRecord] = await trade_mgr.get_trade_by_id(bytes32(trade_id))
        if trade_record is None:
            raise ValueError(f"No trade with trade id: {trade_id.hex()}")

        # Prefer the taken (counter-signed) offer when one exists.
        offer_to_return: bytes = trade_record.offer if trade_record.taken_offer is None else trade_record.taken_offer
        offer_value: Optional[str] = Offer.from_bytes(offer_to_return).to_bech32() if file_contents else None
        return {"trade_record": trade_record.to_json_dict_convenience(), "offer": offer_value}

    async def get_all_offers(self, request: Dict):
        """Return a filtered, paginated list of trade records (and optionally offer payloads)."""
        assert self.service.wallet_state_manager is not None
        trade_mgr = self.service.wallet_state_manager.trade_manager

        start: int = request.get("start", 0)
        end: int = request.get("end", 10)
        exclude_my_offers: bool = request.get("exclude_my_offers", False)
        exclude_taken_offers: bool = request.get("exclude_taken_offers", False)
        include_completed: bool = request.get("include_completed", False)
        sort_key: Optional[str] = request.get("sort_key", None)
        reverse: bool = request.get("reverse", False)
        file_contents: bool = request.get("file_contents", False)

        all_trades = await trade_mgr.trade_store.get_trades_between(
            start,
            end,
            sort_key=sort_key,
            reverse=reverse,
            exclude_my_offers=exclude_my_offers,
            exclude_taken_offers=exclude_taken_offers,
            include_completed=include_completed,
        )
        result = []
        offer_values: Optional[List[str]] = [] if file_contents else None
        for trade in all_trades:
            result.append(trade.to_json_dict_convenience())
            if file_contents and offer_values is not None:
                offer_to_return: bytes = trade.offer if trade.taken_offer is None else trade.taken_offer
                offer_values.append(Offer.from_bytes(offer_to_return).to_bech32())

        return {"trade_records": result, "offers": offer_values}

    async def get_offers_count(self, request: Dict):
        assert self.service.wallet_state_manager is not None
        trade_mgr = self.service.wallet_state_manager.trade_manager

        (total, my_offers_count, taken_offers_count) = await trade_mgr.trade_store.get_trades_count()

        return {"total": total, "my_offers_count": my_offers_count, "taken_offers_count": taken_offers_count}

    async def cancel_offer(self, request: Dict):
        """Cancel a pending offer, either on-chain ("secure") or just locally."""
        assert self.service.wallet_state_manager is not None
        wsm = self.service.wallet_state_manager
        secure = request["secure"]
        trade_id = bytes32.from_hexstr(request["trade_id"])
        fee: uint64 = uint64(request.get("fee", 0))

        async with self.service.wallet_state_manager.lock:
            if secure:
                # Spends a coin so the offer can never be taken, hence the fee.
                await wsm.trade_manager.cancel_pending_offer_safely(bytes32(trade_id), fee=fee)
            else:
                await wsm.trade_manager.cancel_pending_offer(bytes32(trade_id))
        return {}

    ##########################################################################################
    # Distributed Identities
    ##########################################################################################

    async def did_update_recovery_ids(self, request):
        """Replace the DID's recovery list and publish the updated coin on-chain."""
        wallet_id = int(request["wallet_id"])
        wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        recovery_list = []
        for _ in request["new_list"]:
            recovery_list.append(hexstr_to_bytes(_))
        if "num_verifications_required" in request:
            new_amount_verifications_required = uint64(request["num_verifications_required"])
        else:
            # Default: require every listed backup DID to attest.
            new_amount_verifications_required = len(recovery_list)
        async with self.service.wallet_state_manager.lock:
            update_success = await wallet.update_recovery_list(recovery_list, new_amount_verifications_required)
            # Update coin with new ID info
            spend_bundle = await wallet.create_update_spend()
        success = spend_bundle is not None and update_success
        return {"success": success}

    async def did_get_did(self, request):
        """Return this wallet's DID and, when available, one of its coin ids."""
        wallet_id = int(request["wallet_id"])
        wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        my_did: str = wallet.get_my_DID()
        async with self.service.wallet_state_manager.lock:
            coins = await wallet.select_coins(1)
        if coins is None or coins == set():
            return {"success": True, "wallet_id": wallet_id, "my_did": my_did}
        else:
            coin = coins.pop()
            return {"success": True, "wallet_id": wallet_id, "my_did": my_did, "coin_id": coin.name()}

    async def did_get_recovery_list(self, request):
        wallet_id = int(request["wallet_id"])
        wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        recovery_list = wallet.did_info.backup_ids
        recover_hex_list = []
        for _ in recovery_list:
            recover_hex_list.append(_.hex())
        return {
            "success": True,
            "wallet_id": wallet_id,
            "recover_list": recover_hex_list,
            "num_required": wallet.did_info.num_of_backup_ids_needed,
        }

    async def did_recovery_spend(self, request):
        """Perform a DID recovery spend from previously-collected attestation files."""
        wallet_id = int(request["wallet_id"])
        wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        if len(request["attest_filenames"]) < wallet.did_info.num_of_backup_ids_needed:
            return {"success": False, "reason": "insufficient messages"}

        async with self.service.wallet_state_manager.lock:
            (
                info_list,
                message_spend_bundle,
            ) = await wallet.load_attest_files_for_recovery_spend(request["attest_filenames"])

            # Fall back to the temp values recorded during recovery setup.
            if "pubkey" in request:
                pubkey = G1Element.from_bytes(hexstr_to_bytes(request["pubkey"]))
            else:
                assert wallet.did_info.temp_pubkey is not None
                pubkey = wallet.did_info.temp_pubkey

            if "puzhash" in request:
                puzhash = hexstr_to_bytes(request["puzhash"])
            else:
                assert wallet.did_info.temp_puzhash is not None
                puzhash = wallet.did_info.temp_puzhash

            success = await wallet.recovery_spend(
                wallet.did_info.temp_coin,
                puzhash,
                info_list,
                pubkey,
                message_spend_bundle,
            )
        return {"success": success}

    async def did_get_pubkey(self, request):
        wallet_id = int(request["wallet_id"])
        wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        pubkey = bytes((await wallet.wallet_state_manager.get_unused_derivation_record(wallet_id)).pubkey).hex()
        return {"success": True, "pubkey": pubkey}

    async def did_create_attest(self, request):
        """Create an attestation spend bundle helping another DID recover."""
        wallet_id = int(request["wallet_id"])
        wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        async with self.service.wallet_state_manager.lock:
            info = await wallet.get_info_for_recovery()
            coin = hexstr_to_bytes(request["coin_name"])
            pubkey = G1Element.from_bytes(hexstr_to_bytes(request["pubkey"]))
            spend_bundle = await wallet.create_attestment(
                coin, hexstr_to_bytes(request["puzhash"]), pubkey,
request["filename"] ) if spend_bundle is not None: return { "success": True, "message_spend_bundle": bytes(spend_bundle).hex(), "info": [info[0].hex(), info[1].hex(), info[2]], } else: return {"success": False} async def did_get_information_needed_for_recovery(self, request): wallet_id = int(request["wallet_id"]) did_wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id] my_did = did_wallet.get_my_DID() coin_name = did_wallet.did_info.temp_coin.name().hex() return { "success": True, "wallet_id": wallet_id, "my_did": my_did, "coin_name": coin_name, "newpuzhash": did_wallet.did_info.temp_puzhash, "pubkey": did_wallet.did_info.temp_pubkey, "backup_dids": did_wallet.did_info.backup_ids, } async def did_create_backup_file(self, request): try: wallet_id = int(request["wallet_id"]) did_wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id] did_wallet.create_backup(request["filename"]) return {"wallet_id": wallet_id, "success": True} except Exception: return {"wallet_id": wallet_id, "success": False} ########################################################################################## # Rate Limited Wallet ########################################################################################## async def rl_set_user_info(self, request): assert self.service.wallet_state_manager is not None wallet_id = uint32(int(request["wallet_id"])) rl_user = self.service.wallet_state_manager.wallets[wallet_id] origin = request["origin"] async with self.service.wallet_state_manager.lock: await rl_user.set_user_info( uint64(request["interval"]), uint64(request["limit"]), origin["parent_coin_info"], origin["puzzle_hash"], origin["amount"], request["admin_pubkey"], ) return {} async def send_clawback_transaction(self, request): assert self.service.wallet_state_manager is not None wallet_id = int(request["wallet_id"]) wallet: RLWallet = self.service.wallet_state_manager.wallets[wallet_id] fee = int(request["fee"]) async with 
 self.service.wallet_state_manager.lock:
            tx = await wallet.clawback_rl_coin_transaction(fee)
            await wallet.push_transaction(tx)

        # Transaction may not have been included in the mempool yet. Use get_transaction to check.
        # NOTE(review): unlike the other endpoints this returns the TransactionRecord
        # object itself rather than to_json_dict_convenience() — confirm callers expect that.
        return {
            "transaction": tx,
            "transaction_id": tx.name,
        }

    async def add_rate_limited_funds(self, request):
        wallet_id = uint32(request["wallet_id"])
        wallet: RLWallet = self.service.wallet_state_manager.wallets[wallet_id]
        puzzle_hash = wallet.rl_get_aggregation_puzzlehash(wallet.rl_info.rl_puzzle_hash)
        async with self.service.wallet_state_manager.lock:
            await wallet.rl_add_funds(request["amount"], puzzle_hash, request["fee"])
        return {"status": "SUCCESS"}

    async def get_farmed_amount(self, request):
        """Aggregate lifetime farming rewards (pool, farmer, fees) across known wallets."""
        tx_records: List[TransactionRecord] = await self.service.wallet_state_manager.tx_store.get_farming_rewards()
        amount = 0
        pool_reward_amount = 0
        farmer_reward_amount = 0
        fee_amount = 0
        last_height_farmed = 0
        for record in tx_records:
            if record.wallet_id not in self.service.wallet_state_manager.wallets:
                continue
            if record.type == TransactionType.COINBASE_REWARD:
                if self.service.wallet_state_manager.wallets[record.wallet_id].type() == WalletType.POOLING_WALLET:
                    # Don't add pool rewards for pool wallets.
                    continue
                pool_reward_amount += record.amount
            height = record.height_farmed(self.service.constants.GENESIS_CHALLENGE)
            if record.type == TransactionType.FEE_REWARD:
                # A fee-reward tx contains base farmer reward plus tx fees; split them out.
                fee_amount += record.amount - calculate_base_farmer_reward(height)
                farmer_reward_amount += calculate_base_farmer_reward(height)
            if height > last_height_farmed:
                last_height_farmed = height
            amount += record.amount

        assert amount == pool_reward_amount + farmer_reward_amount + fee_amount
        return {
            "farmed_amount": amount,
            "pool_reward_amount": pool_reward_amount,
            "farmer_reward_amount": farmer_reward_amount,
            "fee_amount": fee_amount,
            "last_height_farmed": last_height_farmed,
        }

    async def create_signed_transaction(self, request, hold_lock=True) -> Dict:
        """Build and sign (but do not push) a transaction with one or more outputs.

        hold_lock: take the wallet-state lock; callers that already hold it pass False.
        """
        assert self.service.wallet_state_manager is not None

        if "additions" not in request or len(request["additions"]) < 1:
            raise ValueError("Specify additions list")

        additions: List[Dict] = request["additions"]
        # The first addition is the primary output; the rest become `primaries`.
        amount_0: uint64 = uint64(additions[0]["amount"])
        assert amount_0 <= self.service.constants.MAX_COIN_AMOUNT
        puzzle_hash_0 = bytes32.from_hexstr(additions[0]["puzzle_hash"])
        if len(puzzle_hash_0) != 32:
            raise ValueError(f"Address must be 32 bytes. {puzzle_hash_0.hex()}")

        memos_0 = None if "memos" not in additions[0] else [mem.encode("utf-8") for mem in additions[0]["memos"]]

        additional_outputs: List[AmountWithPuzzlehash] = []
        for addition in additions[1:]:
            receiver_ph = bytes32.from_hexstr(addition["puzzle_hash"])
            if len(receiver_ph) != 32:
                raise ValueError(f"Address must be 32 bytes. {receiver_ph.hex()}")
            amount = uint64(addition["amount"])
            if amount > self.service.constants.MAX_COIN_AMOUNT:
                raise ValueError(f"Coin amount cannot exceed {self.service.constants.MAX_COIN_AMOUNT}")
            memos = [] if "memos" not in addition else [mem.encode("utf-8") for mem in addition["memos"]]
            additional_outputs.append({"puzzlehash": receiver_ph, "amount": amount, "memos": memos})

        fee = uint64(0)
        if "fee" in request:
            fee = uint64(request["fee"])

        coins = None
        if "coins" in request and len(request["coins"]) > 0:
            coins = set([Coin.from_json_dict(coin_json) for coin_json in request["coins"]])

        coin_announcements: Optional[Set[Announcement]] = None
        if (
            "coin_announcements" in request
            and request["coin_announcements"] is not None
            and len(request["coin_announcements"]) > 0
        ):
            coin_announcements = {
                Announcement(
                    bytes32.from_hexstr(announcement["coin_id"]),
                    hexstr_to_bytes(announcement["message"]),
                    hexstr_to_bytes(announcement["morph_bytes"])
                    if "morph_bytes" in announcement and len(announcement["morph_bytes"]) > 0
                    else None,
                )
                for announcement in request["coin_announcements"]
            }

        puzzle_announcements: Optional[Set[Announcement]] = None
        if (
            "puzzle_announcements" in request
            and request["puzzle_announcements"] is not None
            and len(request["puzzle_announcements"]) > 0
        ):
            puzzle_announcements = {
                Announcement(
                    bytes32.from_hexstr(announcement["puzzle_hash"]),
                    hexstr_to_bytes(announcement["message"]),
                    hexstr_to_bytes(announcement["morph_bytes"])
                    if "morph_bytes" in announcement and len(announcement["morph_bytes"]) > 0
                    else None,
                )
                for announcement in request["puzzle_announcements"]
            }

        if hold_lock:
            async with self.service.wallet_state_manager.lock:
                signed_tx = await self.service.wallet_state_manager.main_wallet.generate_signed_transaction(
                    amount_0,
                    bytes32(puzzle_hash_0),
                    fee,
                    coins=coins,
                    ignore_max_send_amount=True,
                    primaries=additional_outputs,
                    memos=memos_0,
                    coin_announcements_to_consume=coin_announcements,
                    puzzle_announcements_to_consume=puzzle_announcements,
                )
        else:
            signed_tx = await self.service.wallet_state_manager.main_wallet.generate_signed_transaction(
                amount_0,
                bytes32(puzzle_hash_0),
                fee,
                coins=coins,
                ignore_max_send_amount=True,
                primaries=additional_outputs,
                memos=memos_0,
                coin_announcements_to_consume=coin_announcements,
                puzzle_announcements_to_consume=puzzle_announcements,
            )
        return {"signed_tx": signed_tx.to_json_dict_convenience(self.service.config)}

    ##########################################################################################
    # Pool Wallet
    ##########################################################################################

    async def pw_join_pool(self, request) -> Dict:
        """Transition a plotNFT wallet to FARMING_TO_POOL for the given pool."""
        if self.service.wallet_state_manager is None:
            return {"success": False, "error": "not_initialized"}
        fee = uint64(request.get("fee", 0))
        wallet_id = uint32(request["wallet_id"])
        wallet: PoolWallet = self.service.wallet_state_manager.wallets[wallet_id]
        if wallet.type() != uint8(WalletType.POOLING_WALLET):
            raise ValueError(f"Wallet with wallet id: {wallet_id} is not a plotNFT wallet.")

        pool_wallet_info: PoolWalletInfo = await wallet.get_current_state()
        owner_pubkey = pool_wallet_info.current.owner_pubkey
        target_puzzlehash = None
        if await self.service.wallet_state_manager.synced() is False:
            raise ValueError("Wallet needs to be fully synced.")

        if "target_puzzlehash" in request:
            target_puzzlehash = bytes32(hexstr_to_bytes(request["target_puzzlehash"]))
        assert target_puzzlehash is not None
        new_target_state: PoolState = create_pool_state(
            FARMING_TO_POOL,
            target_puzzlehash,
            owner_pubkey,
            request["pool_url"],
            uint32(request["relative_lock_height"]),
        )
        async with self.service.wallet_state_manager.lock:
            total_fee, tx, fee_tx = await wallet.join_pool(new_target_state, fee)
            return {"total_fee": total_fee, "transaction": tx, "fee_transaction": fee_tx}

    async def pw_self_pool(self, request) -> Dict:
        if self.service.wallet_state_manager is None:
            return {"success": False, "error": "not_initialized"}

        # Leaving a pool requires two state transitions.
 # First we transition to PoolSingletonState.LEAVING_POOL
        # Then we transition to FARMING_TO_POOL or SELF_POOLING
        fee = uint64(request.get("fee", 0))
        wallet_id = uint32(request["wallet_id"])
        wallet: PoolWallet = self.service.wallet_state_manager.wallets[wallet_id]

        if wallet.type() != uint8(WalletType.POOLING_WALLET):
            raise ValueError(f"Wallet with wallet id: {wallet_id} is not a plotNFT wallet.")

        if await self.service.wallet_state_manager.synced() is False:
            raise ValueError("Wallet needs to be fully synced.")

        async with self.service.wallet_state_manager.lock:
            total_fee, tx, fee_tx = await wallet.self_pool(fee)
            return {"total_fee": total_fee, "transaction": tx, "fee_transaction": fee_tx}

    async def pw_absorb_rewards(self, request) -> Dict:
        """Perform a sweep of the p2_singleton rewards controlled by the pool wallet singleton"""
        if self.service.wallet_state_manager is None:
            return {"success": False, "error": "not_initialized"}
        if await self.service.wallet_state_manager.synced() is False:
            raise ValueError("Wallet needs to be fully synced before collecting rewards")
        fee = uint64(request.get("fee", 0))
        wallet_id = uint32(request["wallet_id"])
        wallet: PoolWallet = self.service.wallet_state_manager.wallets[wallet_id]
        if wallet.type() != uint8(WalletType.POOLING_WALLET):
            raise ValueError(f"Wallet with wallet id: {wallet_id} is not a plotNFT wallet.")

        async with self.service.wallet_state_manager.lock:
            transaction, fee_tx = await wallet.claim_pool_rewards(fee)
            state: PoolWalletInfo = await wallet.get_current_state()
        return {"state": state.to_json_dict(), "transaction": transaction, "fee_transaction": fee_tx}

    async def pw_status(self, request) -> Dict:
        """Return the complete state of the Pool wallet with id `request["wallet_id"]`"""
        if self.service.wallet_state_manager is None:
            return {"success": False, "error": "not_initialized"}
        wallet_id = uint32(request["wallet_id"])
        wallet: PoolWallet = self.service.wallet_state_manager.wallets[wallet_id]

        if wallet.type() != WalletType.POOLING_WALLET.value:
            raise ValueError(f"Wallet with wallet id: {wallet_id} is not a plotNFT wallet.")
        state: PoolWalletInfo = await wallet.get_current_state()
        unconfirmed_transactions: List[TransactionRecord] = await wallet.get_unconfirmed_transactions()
        return {
            "state": state.to_json_dict(),
            "unconfirmed_transactions": unconfirmed_transactions,
        }
import asyncio import dataclasses import logging from pathlib import Path from typing import Callable, Dict, List, Optional, Tuple, Set, Any from blspy import PrivateKey, G1Element from chia.consensus.block_rewards import calculate_base_farmer_reward from chia.pools.pool_wallet import PoolWallet from chia.pools.pool_wallet_info import create_pool_state, FARMING_TO_POOL, PoolWalletInfo, PoolState from chia.protocols.protocol_message_types import ProtocolMessageTypes from chia.server.outbound_message import NodeType, make_msg from chia.simulator.simulator_protocol import FarmNewBlockProtocol from chia.types.announcement import Announcement from chia.types.blockchain_format.coin import Coin from chia.types.blockchain_format.sized_bytes import bytes32 from chia.types.spend_bundle import SpendBundle from chia.util.bech32m import decode_puzzle_hash, encode_puzzle_hash from chia.util.byte_types import hexstr_to_bytes from chia.util.ints import uint32, uint64, uint8 from chia.util.keychain import KeyringIsLocked, bytes_to_mnemonic, generate_mnemonic from chia.util.path import path_from_root from chia.util.ws_message import WsRpcMessage, create_payload_dict from chia.wallet.cat_wallet.cat_constants import DEFAULT_CATS from chia.wallet.cat_wallet.cat_wallet import CATWallet from chia.wallet.derive_keys import master_sk_to_singleton_owner_sk, master_sk_to_wallet_sk_unhardened, MAX_POOL_WALLETS from chia.wallet.rl_wallet.rl_wallet import RLWallet from chia.wallet.derive_keys import master_sk_to_farmer_sk, master_sk_to_pool_sk, master_sk_to_wallet_sk from chia.wallet.did_wallet.did_wallet import DIDWallet from chia.wallet.trade_record import TradeRecord from chia.wallet.trading.offer import Offer from chia.wallet.transaction_record import TransactionRecord from chia.wallet.util.transaction_type import TransactionType from chia.wallet.util.wallet_types import AmountWithPuzzlehash, WalletType from chia.wallet.wallet_info import WalletInfo from chia.wallet.wallet_node import 
WalletNode from chia.util.config import load_config from chia.consensus.coinbase import create_puzzlehash_for_pk # Timeout for response from wallet/full node for sending a transaction TIMEOUT = 30 log = logging.getLogger(__name__) class WalletRpcApi: def __init__(self, wallet_node: WalletNode): assert wallet_node is not None self.service = wallet_node self.service_name = "chia_wallet" self.balance_cache: Dict[int, Any] = {} def get_routes(self) -> Dict[str, Callable]: return { # Key management "/log_in": self.log_in, "/get_logged_in_fingerprint": self.get_logged_in_fingerprint, "/get_public_keys": self.get_public_keys, "/get_private_key": self.get_private_key, "/generate_mnemonic": self.generate_mnemonic, "/add_key": self.add_key, "/delete_key": self.delete_key, "/check_delete_key": self.check_delete_key, "/delete_all_keys": self.delete_all_keys, # Wallet node "/get_sync_status": self.get_sync_status, "/get_height_info": self.get_height_info, "/push_tx": self.push_tx, "/farm_block": self.farm_block, # Only when node simulator is running # this function is just here for backwards-compatibility. 
It will probably # be removed in the future "/get_initial_freeze_period": self.get_initial_freeze_period, "/get_network_info": self.get_network_info, # Wallet management "/get_wallets": self.get_wallets, "/create_new_wallet": self.create_new_wallet, # Wallet "/get_wallet_balance": self.get_wallet_balance, "/get_transaction": self.get_transaction, "/get_transactions": self.get_transactions, "/get_transaction_count": self.get_transaction_count, "/get_next_address": self.get_next_address, "/send_transaction": self.send_transaction, "/send_transaction_multi": self.send_transaction_multi, "/get_farmed_amount": self.get_farmed_amount, "/create_signed_transaction": self.create_signed_transaction, "/delete_unconfirmed_transactions": self.delete_unconfirmed_transactions, # CATs and trading "/cat_set_name": self.cat_set_name, "/cat_asset_id_to_name": self.cat_asset_id_to_name, "/cat_get_name": self.cat_get_name, "/cat_spend": self.cat_spend, "/cat_get_asset_id": self.cat_get_asset_id, "/create_offer_for_ids": self.create_offer_for_ids, "/get_offer_summary": self.get_offer_summary, "/check_offer_validity": self.check_offer_validity, "/take_offer": self.take_offer, "/get_offer": self.get_offer, "/get_all_offers": self.get_all_offers, "/get_offers_count": self.get_offers_count, "/cancel_offer": self.cancel_offer, "/get_cat_list": self.get_cat_list, # DID Wallet "/did_update_recovery_ids": self.did_update_recovery_ids, "/did_get_pubkey": self.did_get_pubkey, "/did_get_did": self.did_get_did, "/did_recovery_spend": self.did_recovery_spend, "/did_get_recovery_list": self.did_get_recovery_list, "/did_create_attest": self.did_create_attest, "/did_get_information_needed_for_recovery": self.did_get_information_needed_for_recovery, "/did_create_backup_file": self.did_create_backup_file, # RL wallet "/rl_set_user_info": self.rl_set_user_info, "/send_clawback_transaction:": self.send_clawback_transaction, "/add_rate_limited_funds:": self.add_rate_limited_funds, # Pool Wallet 
"/pw_join_pool": self.pw_join_pool, "/pw_self_pool": self.pw_self_pool, "/pw_absorb_rewards": self.pw_absorb_rewards, "/pw_status": self.pw_status, } async def _state_changed(self, *args) -> List[WsRpcMessage]: """ Called by the WalletNode or WalletStateManager when something has changed in the wallet. This gives us an opportunity to send notifications to all connected clients via WebSocket. """ payloads = [] if args[0] is not None and args[0] == "sync_changed": # Metrics is the only current consumer for this event payloads.append(create_payload_dict(args[0], {}, self.service_name, "metrics")) if len(args) < 2: return payloads data = { "state": args[0], } if args[1] is not None: data["wallet_id"] = args[1] if args[2] is not None: data["additional_data"] = args[2] payloads.append(create_payload_dict("state_changed", data, self.service_name, "wallet_ui")) if args[0] == "coin_added": payloads.append(create_payload_dict(args[0], data, self.service_name, "metrics")) return payloads async def _stop_wallet(self): """ Stops a currently running wallet/key, which allows starting the wallet with a new key. Each key has it's own wallet database. """ if self.service is not None: self.service._close() peers_close_task: Optional[asyncio.Task] = await self.service._await_closed() if peers_close_task is not None: await peers_close_task async def _convert_tx_puzzle_hash(self, tx: TransactionRecord) -> TransactionRecord: assert self.service.wallet_state_manager is not None return dataclasses.replace( tx, to_puzzle_hash=( await self.service.wallet_state_manager.convert_puzzle_hash(tx.wallet_id, tx.to_puzzle_hash) ), ) ########################################################################################## # Key management ########################################################################################## async def log_in(self, request): """ Logs in the wallet with a specific key. 
""" fingerprint = request["fingerprint"] if self.service.logged_in_fingerprint == fingerprint: return {"fingerprint": fingerprint} await self._stop_wallet() self.balance_cache = {} started = await self.service._start(fingerprint) if started is True: return {"fingerprint": fingerprint} return {"success": False, "error": "Unknown Error"} async def get_logged_in_fingerprint(self, request: Dict): return {"fingerprint": self.service.logged_in_fingerprint} async def get_public_keys(self, request: Dict): try: assert self.service.keychain_proxy is not None # An offering to the mypy gods fingerprints = [ sk.get_g1().get_fingerprint() for (sk, seed) in await self.service.keychain_proxy.get_all_private_keys() ] except KeyringIsLocked: return {"keyring_is_locked": True} except Exception: return {"public_key_fingerprints": []} else: return {"public_key_fingerprints": fingerprints} async def _get_private_key(self, fingerprint) -> Tuple[Optional[PrivateKey], Optional[bytes]]: try: assert self.service.keychain_proxy is not None # An offering to the mypy gods all_keys = await self.service.keychain_proxy.get_all_private_keys() for sk, seed in all_keys: if sk.get_g1().get_fingerprint() == fingerprint: return sk, seed except Exception as e: log.error(f"Failed to get private key by fingerprint: {e}") return None, None async def get_private_key(self, request): fingerprint = request["fingerprint"] sk, seed = await self._get_private_key(fingerprint) if sk is not None: s = bytes_to_mnemonic(seed) if seed is not None else None return { "private_key": { "fingerprint": fingerprint, "sk": bytes(sk).hex(), "pk": bytes(sk.get_g1()).hex(), "farmer_pk": bytes(master_sk_to_farmer_sk(sk).get_g1()).hex(), "pool_pk": bytes(master_sk_to_pool_sk(sk).get_g1()).hex(), "seed": s, }, } return {"success": False, "private_key": {"fingerprint": fingerprint}} async def generate_mnemonic(self, request: Dict): return {"mnemonic": generate_mnemonic().split(" ")} async def add_key(self, request): if "mnemonic" not 
in request: raise ValueError("Mnemonic not in request") # Adding a key from 24 word mnemonic mnemonic = request["mnemonic"] passphrase = "" try: sk = await self.service.keychain_proxy.add_private_key(" ".join(mnemonic), passphrase) except KeyError as e: return { "success": False, "error": f"The word '{e.args[0]}' is incorrect.'", "word": e.args[0], } except Exception as e: return {"success": False, "error": str(e)} fingerprint = sk.get_g1().get_fingerprint() await self._stop_wallet() # Makes sure the new key is added to config properly started = False try: await self.service.keychain_proxy.check_keys(self.service.root_path) except Exception as e: log.error(f"Failed to check_keys after adding a new key: {e}") started = await self.service._start(fingerprint=fingerprint) if started is True: return {"fingerprint": fingerprint} raise ValueError("Failed to start") async def delete_key(self, request): await self._stop_wallet() fingerprint = request["fingerprint"] try: await self.service.keychain_proxy.delete_key_by_fingerprint(fingerprint) except Exception as e: log.error(f"Failed to delete key by fingerprint: {e}") return {"success": False, "error": str(e)} path = path_from_root( self.service.root_path, f"{self.service.config['database_path']}-{fingerprint}", ) if path.exists(): path.unlink() return {} async def _check_key_used_for_rewards( self, new_root: Path, sk: PrivateKey, max_ph_to_search: int ) -> Tuple[bool, bool]: """Checks if the given key is used for either the farmer rewards or pool rewards returns a tuple of two booleans The first is true if the key is used as the Farmer rewards, otherwise false The second is true if the key is used as the Pool rewards, otherwise false Returns both false if the key cannot be found with the given fingerprint """ if sk is None: return False, False config: Dict = load_config(new_root, "config.yaml") farmer_target = config["farmer"].get("xch_target_address") pool_target = config["pool"].get("xch_target_address") found_farmer = 
False found_pool = False selected = config["selected_network"] prefix = config["network_overrides"]["config"][selected]["address_prefix"] for i in range(max_ph_to_search): if found_farmer and found_pool: break phs = [ encode_puzzle_hash(create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1()), prefix), encode_puzzle_hash( create_puzzlehash_for_pk(master_sk_to_wallet_sk_unhardened(sk, uint32(i)).get_g1()), prefix ), ] for ph in phs: if ph == farmer_target: found_farmer = True if ph == pool_target: found_pool = True return found_farmer, found_pool async def check_delete_key(self, request): """Check the key use prior to possible deletion checks whether key is used for either farm or pool rewards checks if any wallets have a non-zero balance """ used_for_farmer: bool = False used_for_pool: bool = False walletBalance: bool = False fingerprint = request["fingerprint"] sk, _ = await self._get_private_key(fingerprint) if sk is not None: used_for_farmer, used_for_pool = await self._check_key_used_for_rewards(self.service.root_path, sk, 100) if self.service.logged_in_fingerprint != fingerprint: await self._stop_wallet() await self.service._start(fingerprint=fingerprint) wallets: List[WalletInfo] = await self.service.wallet_state_manager.get_all_wallet_info_entries() for w in wallets: wallet = self.service.wallet_state_manager.wallets[w.id] unspent = await self.service.wallet_state_manager.coin_store.get_unspent_coins_for_wallet(w.id) balance = await wallet.get_confirmed_balance(unspent) pending_balance = await wallet.get_unconfirmed_balance(unspent) if (balance + pending_balance) > 0: walletBalance = True break return { "fingerprint": fingerprint, "used_for_farmer_rewards": used_for_farmer, "used_for_pool_rewards": used_for_pool, "wallet_balance": walletBalance, } async def delete_all_keys(self, request: Dict): await self._stop_wallet() try: assert self.service.keychain_proxy is not None # An offering to the mypy gods await 
self.service.keychain_proxy.delete_all_keys() except Exception as e: log.error(f"Failed to delete all keys: {e}") return {"success": False, "error": str(e)} path = path_from_root(self.service.root_path, self.service.config["database_path"]) if path.exists(): path.unlink() return {} ########################################################################################## # Wallet Node ########################################################################################## async def get_sync_status(self, request: Dict): assert self.service.wallet_state_manager is not None syncing = self.service.wallet_state_manager.sync_mode synced = await self.service.wallet_state_manager.synced() return {"synced": synced, "syncing": syncing, "genesis_initialized": True} async def get_height_info(self, request: Dict): assert self.service.wallet_state_manager is not None height = await self.service.wallet_state_manager.blockchain.get_finished_sync_up_to() return {"height": height} async def get_network_info(self, request: Dict): assert self.service.wallet_state_manager is not None network_name = self.service.config["selected_network"] address_prefix = self.service.config["network_overrides"]["config"][network_name]["address_prefix"] return {"network_name": network_name, "network_prefix": address_prefix} async def push_tx(self, request: Dict): assert self.service.server is not None nodes = self.service.server.get_full_node_connections() if len(nodes) == 0: raise ValueError("Wallet is not currently connected to any full node peers") await self.service.push_tx(SpendBundle.from_bytes(hexstr_to_bytes(request["spend_bundle"]))) return {} async def farm_block(self, request): raw_puzzle_hash = decode_puzzle_hash(request["address"]) request = FarmNewBlockProtocol(raw_puzzle_hash) msg = make_msg(ProtocolMessageTypes.farm_new_block, request) await self.service.server.send_to_all([msg], NodeType.FULL_NODE) return {} 
########################################################################################## # Wallet Management ########################################################################################## async def get_wallets(self, request: Dict): assert self.service.wallet_state_manager is not None wallets: List[WalletInfo] = await self.service.wallet_state_manager.get_all_wallet_info_entries() return {"wallets": wallets} async def create_new_wallet(self, request: Dict): assert self.service.wallet_state_manager is not None wallet_state_manager = self.service.wallet_state_manager if await self.service.wallet_state_manager.synced() is False: raise ValueError("Wallet needs to be fully synced.") main_wallet = wallet_state_manager.main_wallet fee = uint64(request.get("fee", 0)) if request["wallet_type"] == "cat_wallet": # If not provided, the name will be autogenerated based on the tail hash. name = request.get("name", None) if request["mode"] == "new": async with self.service.wallet_state_manager.lock: cat_wallet: CATWallet = await CATWallet.create_new_cat_wallet( wallet_state_manager, main_wallet, {"identifier": "genesis_by_id"}, uint64(request["amount"]), name, ) asset_id = cat_wallet.get_asset_id() self.service.wallet_state_manager.state_changed("wallet_created") return {"type": cat_wallet.type(), "asset_id": asset_id, "wallet_id": cat_wallet.id()} elif request["mode"] == "existing": async with self.service.wallet_state_manager.lock: cat_wallet = await CATWallet.create_wallet_for_cat( wallet_state_manager, main_wallet, request["asset_id"], name ) self.service.wallet_state_manager.state_changed("wallet_created") return {"type": cat_wallet.type(), "asset_id": request["asset_id"], "wallet_id": cat_wallet.id()} else: # undefined mode pass elif request["wallet_type"] == "rl_wallet": if request["rl_type"] == "admin": log.info("Create rl admin wallet") async with self.service.wallet_state_manager.lock: rl_admin: RLWallet = await 
RLWallet.create_rl_admin(wallet_state_manager) success = await rl_admin.admin_create_coin( uint64(int(request["interval"])), uint64(int(request["limit"])), request["pubkey"], uint64(int(request["amount"])), uint64(int(request["fee"])) if "fee" in request else uint64(0), ) assert rl_admin.rl_info.admin_pubkey is not None return { "success": success, "id": rl_admin.id(), "type": rl_admin.type(), "origin": rl_admin.rl_info.rl_origin, "pubkey": rl_admin.rl_info.admin_pubkey.hex(), } elif request["rl_type"] == "user": log.info("Create rl user wallet") async with self.service.wallet_state_manager.lock: rl_user: RLWallet = await RLWallet.create_rl_user(wallet_state_manager) assert rl_user.rl_info.user_pubkey is not None return { "id": rl_user.id(), "type": rl_user.type(), "pubkey": rl_user.rl_info.user_pubkey.hex(), } else: # undefined rl_type pass elif request["wallet_type"] == "did_wallet": if request["did_type"] == "new": backup_dids = [] num_needed = 0 for d in request["backup_dids"]: backup_dids.append(hexstr_to_bytes(d)) if len(backup_dids) > 0: num_needed = uint64(request["num_of_backup_ids_needed"]) async with self.service.wallet_state_manager.lock: did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet( wallet_state_manager, main_wallet, uint64(request["amount"]), backup_dids, uint64(num_needed), ) my_did = did_wallet.get_my_DID() return { "success": True, "type": did_wallet.type(), "my_did": my_did, "wallet_id": did_wallet.id(), } elif request["did_type"] == "recovery": async with self.service.wallet_state_manager.lock: did_wallet = await DIDWallet.create_new_did_wallet_from_recovery( wallet_state_manager, main_wallet, request["filename"] ) assert did_wallet.did_info.temp_coin is not None assert did_wallet.did_info.temp_puzhash is not None assert did_wallet.did_info.temp_pubkey is not None my_did = did_wallet.get_my_DID() coin_name = did_wallet.did_info.temp_coin.name().hex() coin_list = did_wallet.did_info.temp_coin.as_list() newpuzhash = 
did_wallet.did_info.temp_puzhash pubkey = did_wallet.did_info.temp_pubkey return { "success": True, "type": did_wallet.type(), "my_did": my_did, "wallet_id": did_wallet.id(), "coin_name": coin_name, "coin_list": coin_list, "newpuzhash": newpuzhash.hex(), "pubkey": pubkey.hex(), "backup_dids": did_wallet.did_info.backup_ids, "num_verifications_required": did_wallet.did_info.num_of_backup_ids_needed, } else: # undefined did_type pass elif request["wallet_type"] == "pool_wallet": if request["mode"] == "new": owner_puzzle_hash: bytes32 = await self.service.wallet_state_manager.main_wallet.get_puzzle_hash(True) from chia.pools.pool_wallet_info import initial_pool_state_from_dict async with self.service.wallet_state_manager.lock: # We assign a pseudo unique id to each pool wallet, so that each one gets its own deterministic # owner and auth keys. The public keys will go on the blockchain, and the private keys can be found # using the root SK and trying each index from zero. The indexes are not fully unique though, # because the PoolWallet is not created until the tx gets confirmed on chain. Therefore if we # make multiple pool wallets at the same time, they will have the same ID. 
max_pwi = 1 for _, wallet in self.service.wallet_state_manager.wallets.items(): if wallet.type() == WalletType.POOLING_WALLET: pool_wallet_index = await wallet.get_pool_wallet_index() if pool_wallet_index > max_pwi: max_pwi = pool_wallet_index if max_pwi + 1 >= (MAX_POOL_WALLETS - 1): raise ValueError(f"Too many pool wallets ({max_pwi}), cannot create any more on this key.") owner_sk: PrivateKey = master_sk_to_singleton_owner_sk( self.service.wallet_state_manager.private_key, uint32(max_pwi + 1) ) owner_pk: G1Element = owner_sk.get_g1() initial_target_state = initial_pool_state_from_dict( request["initial_target_state"], owner_pk, owner_puzzle_hash ) assert initial_target_state is not None try: delayed_address = None if "p2_singleton_delayed_ph" in request: delayed_address = bytes32.from_hexstr(request["p2_singleton_delayed_ph"]) tr, p2_singleton_puzzle_hash, launcher_id = await PoolWallet.create_new_pool_wallet_transaction( wallet_state_manager, main_wallet, initial_target_state, fee, request.get("p2_singleton_delay_time", None), delayed_address, ) except Exception as e: raise ValueError(str(e)) return { "total_fee": fee * 2, "transaction": tr, "launcher_id": launcher_id.hex(), "p2_singleton_puzzle_hash": p2_singleton_puzzle_hash.hex(), } elif request["mode"] == "recovery": raise ValueError("Need upgraded singleton for on-chain recovery") else: # undefined wallet_type pass return None ########################################################################################## # Wallet ########################################################################################## async def get_wallet_balance(self, request: Dict) -> Dict: assert self.service.wallet_state_manager is not None wallet_id = uint32(int(request["wallet_id"])) wallet = self.service.wallet_state_manager.wallets[wallet_id] # If syncing return the last available info or 0s syncing = self.service.wallet_state_manager.sync_mode if syncing: if wallet_id in self.balance_cache: wallet_balance = 
self.balance_cache[wallet_id] else: wallet_balance = { "wallet_id": wallet_id, "confirmed_wallet_balance": 0, "unconfirmed_wallet_balance": 0, "spendable_balance": 0, "pending_change": 0, "max_send_amount": 0, "unspent_coin_count": 0, "pending_coin_removal_count": 0, } if self.service.logged_in_fingerprint is not None: wallet_balance["fingerprint"] = self.service.logged_in_fingerprint else: async with self.service.wallet_state_manager.lock: unspent_records = await self.service.wallet_state_manager.coin_store.get_unspent_coins_for_wallet( wallet_id ) balance = await wallet.get_confirmed_balance(unspent_records) pending_balance = await wallet.get_unconfirmed_balance(unspent_records) spendable_balance = await wallet.get_spendable_balance(unspent_records) pending_change = await wallet.get_pending_change_balance() max_send_amount = await wallet.get_max_send_amount(unspent_records) unconfirmed_removals: Dict[ bytes32, Coin ] = await wallet.wallet_state_manager.unconfirmed_removals_for_wallet(wallet_id) wallet_balance = { "wallet_id": wallet_id, "confirmed_wallet_balance": balance, "unconfirmed_wallet_balance": pending_balance, "spendable_balance": spendable_balance, "pending_change": pending_change, "max_send_amount": max_send_amount, "unspent_coin_count": len(unspent_records), "pending_coin_removal_count": len(unconfirmed_removals), } if self.service.logged_in_fingerprint is not None: wallet_balance["fingerprint"] = self.service.logged_in_fingerprint self.balance_cache[wallet_id] = wallet_balance return {"wallet_balance": wallet_balance} async def get_transaction(self, request: Dict) -> Dict: assert self.service.wallet_state_manager is not None transaction_id: bytes32 = bytes32(hexstr_to_bytes(request["transaction_id"])) tr: Optional[TransactionRecord] = await self.service.wallet_state_manager.get_transaction(transaction_id) if tr is None: raise ValueError(f"Transaction 0x{transaction_id.hex()} not found") return { "transaction": (await 
self._convert_tx_puzzle_hash(tr)).to_json_dict_convenience(self.service.config), "transaction_id": tr.name, } async def get_transactions(self, request: Dict) -> Dict: assert self.service.wallet_state_manager is not None wallet_id = int(request["wallet_id"]) start = request.get("start", 0) end = request.get("end", 50) sort_key = request.get("sort_key", None) reverse = request.get("reverse", False) to_address = request.get("to_address", None) to_puzzle_hash: Optional[bytes32] = None if to_address is not None: to_puzzle_hash = decode_puzzle_hash(to_address) transactions = await self.service.wallet_state_manager.tx_store.get_transactions_between( wallet_id, start, end, sort_key=sort_key, reverse=reverse, to_puzzle_hash=to_puzzle_hash ) return { "transactions": [ (await self._convert_tx_puzzle_hash(tr)).to_json_dict_convenience(self.service.config) for tr in transactions ], "wallet_id": wallet_id, } async def get_transaction_count(self, request: Dict) -> Dict: assert self.service.wallet_state_manager is not None wallet_id = int(request["wallet_id"]) count = await self.service.wallet_state_manager.tx_store.get_transaction_count_for_wallet(wallet_id) return { "count": count, "wallet_id": wallet_id, } # this function is just here for backwards-compatibility. 
It will probably # be removed in the future async def get_initial_freeze_period(self, _: Dict): # Mon May 03 2021 17:00:00 GMT+0000 return {"INITIAL_FREEZE_END_TIMESTAMP": 1620061200} async def get_next_address(self, request: Dict) -> Dict: """ Returns a new address """ assert self.service.wallet_state_manager is not None if request["new_address"] is True: create_new = True else: create_new = False wallet_id = uint32(int(request["wallet_id"])) wallet = self.service.wallet_state_manager.wallets[wallet_id] selected = self.service.config["selected_network"] prefix = self.service.config["network_overrides"]["config"][selected]["address_prefix"] if wallet.type() == WalletType.STANDARD_WALLET: raw_puzzle_hash = await wallet.get_puzzle_hash(create_new) address = encode_puzzle_hash(raw_puzzle_hash, prefix) elif wallet.type() == WalletType.CAT: raw_puzzle_hash = await wallet.standard_wallet.get_puzzle_hash(create_new) address = encode_puzzle_hash(raw_puzzle_hash, prefix) else: raise ValueError(f"Wallet type {wallet.type()} cannot create puzzle hashes") return { "wallet_id": wallet_id, "address": address, } async def send_transaction(self, request): assert self.service.wallet_state_manager is not None if await self.service.wallet_state_manager.synced() is False: raise ValueError("Wallet needs to be fully synced before sending transactions") wallet_id = int(request["wallet_id"]) wallet = self.service.wallet_state_manager.wallets[wallet_id] if wallet.type() == WalletType.CAT: raise ValueError("send_transaction does not work for CAT wallets") if not isinstance(request["amount"], int) or not isinstance(request["fee"], int): raise ValueError("An integer amount or fee is required (too many decimals)") amount: uint64 = uint64(request["amount"]) address = request["address"] selected_network = self.service.config["selected_network"] expected_prefix = self.service.config["network_overrides"]["config"][selected_network]["address_prefix"] if address[0 : len(expected_prefix)] != 
expected_prefix: raise ValueError("Unexpected Address Prefix") puzzle_hash: bytes32 = decode_puzzle_hash(address) memos: List[bytes] = [] if "memos" in request: memos = [mem.encode("utf-8") for mem in request["memos"]] if "fee" in request: fee = uint64(request["fee"]) else: fee = uint64(0) async with self.service.wallet_state_manager.lock: tx: TransactionRecord = await wallet.generate_signed_transaction(amount, puzzle_hash, fee, memos=memos) await wallet.push_transaction(tx) # Transaction may not have been included in the mempool yet. Use get_transaction to check. return { "transaction": tx.to_json_dict_convenience(self.service.config), "transaction_id": tx.name, } async def send_transaction_multi(self, request) -> Dict: assert self.service.wallet_state_manager is not None if await self.service.wallet_state_manager.synced() is False: raise ValueError("Wallet needs to be fully synced before sending transactions") wallet_id = uint32(request["wallet_id"]) wallet = self.service.wallet_state_manager.wallets[wallet_id] async with self.service.wallet_state_manager.lock: transaction: Dict = (await self.create_signed_transaction(request, hold_lock=False))["signed_tx"] tr: TransactionRecord = TransactionRecord.from_json_dict_convenience(transaction) await wallet.push_transaction(tr) # Transaction may not have been included in the mempool yet. Use get_transaction to check. 
return {"transaction": transaction, "transaction_id": tr.name} async def delete_unconfirmed_transactions(self, request): wallet_id = uint32(request["wallet_id"]) if wallet_id not in self.service.wallet_state_manager.wallets: raise ValueError(f"Wallet id {wallet_id} does not exist") if await self.service.wallet_state_manager.synced() is False: raise ValueError("Wallet needs to be fully synced.") async with self.service.wallet_state_manager.lock: async with self.service.wallet_state_manager.tx_store.db_wrapper.lock: await self.service.wallet_state_manager.tx_store.db_wrapper.begin_transaction() await self.service.wallet_state_manager.tx_store.delete_unconfirmed_transactions(wallet_id) if self.service.wallet_state_manager.wallets[wallet_id].type() == WalletType.POOLING_WALLET.value: self.service.wallet_state_manager.wallets[wallet_id].target_state = None await self.service.wallet_state_manager.tx_store.db_wrapper.commit_transaction() # Update the cache await self.service.wallet_state_manager.tx_store.rebuild_tx_cache() return {} ########################################################################################## # CATs and Trading ########################################################################################## async def get_cat_list(self, request): return {"cat_list": list(DEFAULT_CATS.values())} async def cat_set_name(self, request): assert self.service.wallet_state_manager is not None wallet_id = int(request["wallet_id"]) wallet: CATWallet = self.service.wallet_state_manager.wallets[wallet_id] await wallet.set_name(str(request["name"])) return {"wallet_id": wallet_id} async def cat_get_name(self, request): assert self.service.wallet_state_manager is not None wallet_id = int(request["wallet_id"]) wallet: CATWallet = self.service.wallet_state_manager.wallets[wallet_id] name: str = await wallet.get_name() return {"wallet_id": wallet_id, "name": name} async def cat_spend(self, request): assert self.service.wallet_state_manager is not None if await 
self.service.wallet_state_manager.synced() is False: raise ValueError("Wallet needs to be fully synced.") wallet_id = int(request["wallet_id"]) wallet: CATWallet = self.service.wallet_state_manager.wallets[wallet_id] puzzle_hash: bytes32 = decode_puzzle_hash(request["inner_address"]) memos: List[bytes] = [] if "memos" in request: memos = [mem.encode("utf-8") for mem in request["memos"]] if not isinstance(request["amount"], int) or not isinstance(request["fee"], int): raise ValueError("An integer amount or fee is required (too many decimals)") amount: uint64 = uint64(request["amount"]) if "fee" in request: fee = uint64(request["fee"]) else: fee = uint64(0) async with self.service.wallet_state_manager.lock: txs: TransactionRecord = await wallet.generate_signed_transaction( [amount], [puzzle_hash], fee, memos=[memos] ) for tx in txs: await wallet.standard_wallet.push_transaction(tx) return { "transaction": tx.to_json_dict_convenience(self.service.config), "transaction_id": tx.name, } async def cat_get_asset_id(self, request): assert self.service.wallet_state_manager is not None wallet_id = int(request["wallet_id"]) wallet: CATWallet = self.service.wallet_state_manager.wallets[wallet_id] asset_id: str = wallet.get_asset_id() return {"asset_id": asset_id, "wallet_id": wallet_id} async def cat_asset_id_to_name(self, request): assert self.service.wallet_state_manager is not None wallet = await self.service.wallet_state_manager.get_wallet_for_asset_id(request["asset_id"]) if wallet is None: if request["asset_id"] in DEFAULT_CATS: return {"wallet_id": None, "name": DEFAULT_CATS[request["asset_id"]]["name"]} else: raise ValueError("The asset ID specified does not belong to a wallet") else: return {"wallet_id": wallet.id(), "name": (await wallet.get_name())} async def create_offer_for_ids(self, request): assert self.service.wallet_state_manager is not None offer: Dict[str, int] = request["offer"] fee: uint64 = uint64(request.get("fee", 0)) validate_only: bool = 
request.get("validate_only", False) modified_offer = {} for key in offer: modified_offer[int(key)] = offer[key] async with self.service.wallet_state_manager.lock: ( success, trade_record, error, ) = await self.service.wallet_state_manager.trade_manager.create_offer_for_ids( modified_offer, fee=fee, validate_only=validate_only ) if success: return { "offer": Offer.from_bytes(trade_record.offer).to_bech32(), "trade_record": trade_record.to_json_dict_convenience(), } raise ValueError(error) async def get_offer_summary(self, request): assert self.service.wallet_state_manager is not None offer_hex: str = request["offer"] offer = Offer.from_bech32(offer_hex) offered, requested = offer.summary() return {"summary": {"offered": offered, "requested": requested, "fees": offer.bundle.fees()}} async def check_offer_validity(self, request): assert self.service.wallet_state_manager is not None offer_hex: str = request["offer"] offer = Offer.from_bech32(offer_hex) return {"valid": (await self.service.wallet_state_manager.trade_manager.check_offer_validity(offer))} async def take_offer(self, request): assert self.service.wallet_state_manager is not None offer_hex: str = request["offer"] offer = Offer.from_bech32(offer_hex) fee: uint64 = uint64(request.get("fee", 0)) async with self.service.wallet_state_manager.lock: ( success, trade_record, error, ) = await self.service.wallet_state_manager.trade_manager.respond_to_offer(offer, fee=fee) if not success: raise ValueError(error) return {"trade_record": trade_record.to_json_dict_convenience()} async def get_offer(self, request: Dict): assert self.service.wallet_state_manager is not None trade_mgr = self.service.wallet_state_manager.trade_manager trade_id = bytes32.from_hexstr(request["trade_id"]) file_contents: bool = request.get("file_contents", False) trade_record: Optional[TradeRecord] = await trade_mgr.get_trade_by_id(bytes32(trade_id)) if trade_record is None: raise ValueError(f"No trade with trade id: {trade_id.hex()}") 
offer_to_return: bytes = trade_record.offer if trade_record.taken_offer is None else trade_record.taken_offer offer_value: Optional[str] = Offer.from_bytes(offer_to_return).to_bech32() if file_contents else None return {"trade_record": trade_record.to_json_dict_convenience(), "offer": offer_value} async def get_all_offers(self, request: Dict): assert self.service.wallet_state_manager is not None trade_mgr = self.service.wallet_state_manager.trade_manager start: int = request.get("start", 0) end: int = request.get("end", 10) exclude_my_offers: bool = request.get("exclude_my_offers", False) exclude_taken_offers: bool = request.get("exclude_taken_offers", False) include_completed: bool = request.get("include_completed", False) sort_key: Optional[str] = request.get("sort_key", None) reverse: bool = request.get("reverse", False) file_contents: bool = request.get("file_contents", False) all_trades = await trade_mgr.trade_store.get_trades_between( start, end, sort_key=sort_key, reverse=reverse, exclude_my_offers=exclude_my_offers, exclude_taken_offers=exclude_taken_offers, include_completed=include_completed, ) result = [] offer_values: Optional[List[str]] = [] if file_contents else None for trade in all_trades: result.append(trade.to_json_dict_convenience()) if file_contents and offer_values is not None: offer_to_return: bytes = trade.offer if trade.taken_offer is None else trade.taken_offer offer_values.append(Offer.from_bytes(offer_to_return).to_bech32()) return {"trade_records": result, "offers": offer_values} async def get_offers_count(self, request: Dict): assert self.service.wallet_state_manager is not None trade_mgr = self.service.wallet_state_manager.trade_manager (total, my_offers_count, taken_offers_count) = await trade_mgr.trade_store.get_trades_count() return {"total": total, "my_offers_count": my_offers_count, "taken_offers_count": taken_offers_count} async def cancel_offer(self, request: Dict): assert self.service.wallet_state_manager is not None wsm = 
self.service.wallet_state_manager secure = request["secure"] trade_id = bytes32.from_hexstr(request["trade_id"]) fee: uint64 = uint64(request.get("fee", 0)) async with self.service.wallet_state_manager.lock: if secure: await wsm.trade_manager.cancel_pending_offer_safely(bytes32(trade_id), fee=fee) else: await wsm.trade_manager.cancel_pending_offer(bytes32(trade_id)) return {} ########################################################################################## # Distributed Identities ########################################################################################## async def did_update_recovery_ids(self, request): wallet_id = int(request["wallet_id"]) wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id] recovery_list = [] for _ in request["new_list"]: recovery_list.append(hexstr_to_bytes(_)) if "num_verifications_required" in request: new_amount_verifications_required = uint64(request["num_verifications_required"]) else: new_amount_verifications_required = len(recovery_list) async with self.service.wallet_state_manager.lock: update_success = await wallet.update_recovery_list(recovery_list, new_amount_verifications_required) # Update coin with new ID info spend_bundle = await wallet.create_update_spend() success = spend_bundle is not None and update_success return {"success": success} async def did_get_did(self, request): wallet_id = int(request["wallet_id"]) wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id] my_did: str = wallet.get_my_DID() async with self.service.wallet_state_manager.lock: coins = await wallet.select_coins(1) if coins is None or coins == set(): return {"success": True, "wallet_id": wallet_id, "my_did": my_did} else: coin = coins.pop() return {"success": True, "wallet_id": wallet_id, "my_did": my_did, "coin_id": coin.name()} async def did_get_recovery_list(self, request): wallet_id = int(request["wallet_id"]) wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id] 
recovery_list = wallet.did_info.backup_ids recover_hex_list = [] for _ in recovery_list: recover_hex_list.append(_.hex()) return { "success": True, "wallet_id": wallet_id, "recover_list": recover_hex_list, "num_required": wallet.did_info.num_of_backup_ids_needed, } async def did_recovery_spend(self, request): wallet_id = int(request["wallet_id"]) wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id] if len(request["attest_filenames"]) < wallet.did_info.num_of_backup_ids_needed: return {"success": False, "reason": "insufficient messages"} async with self.service.wallet_state_manager.lock: ( info_list, message_spend_bundle, ) = await wallet.load_attest_files_for_recovery_spend(request["attest_filenames"]) if "pubkey" in request: pubkey = G1Element.from_bytes(hexstr_to_bytes(request["pubkey"])) else: assert wallet.did_info.temp_pubkey is not None pubkey = wallet.did_info.temp_pubkey if "puzhash" in request: puzhash = hexstr_to_bytes(request["puzhash"]) else: assert wallet.did_info.temp_puzhash is not None puzhash = wallet.did_info.temp_puzhash success = await wallet.recovery_spend( wallet.did_info.temp_coin, puzhash, info_list, pubkey, message_spend_bundle, ) return {"success": success} async def did_get_pubkey(self, request): wallet_id = int(request["wallet_id"]) wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id] pubkey = bytes((await wallet.wallet_state_manager.get_unused_derivation_record(wallet_id)).pubkey).hex() return {"success": True, "pubkey": pubkey} async def did_create_attest(self, request): wallet_id = int(request["wallet_id"]) wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id] async with self.service.wallet_state_manager.lock: info = await wallet.get_info_for_recovery() coin = hexstr_to_bytes(request["coin_name"]) pubkey = G1Element.from_bytes(hexstr_to_bytes(request["pubkey"])) spend_bundle = await wallet.create_attestment( coin, hexstr_to_bytes(request["puzhash"]), pubkey, 
request["filename"] ) if spend_bundle is not None: return { "success": True, "message_spend_bundle": bytes(spend_bundle).hex(), "info": [info[0].hex(), info[1].hex(), info[2]], } else: return {"success": False} async def did_get_information_needed_for_recovery(self, request): wallet_id = int(request["wallet_id"]) did_wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id] my_did = did_wallet.get_my_DID() coin_name = did_wallet.did_info.temp_coin.name().hex() return { "success": True, "wallet_id": wallet_id, "my_did": my_did, "coin_name": coin_name, "newpuzhash": did_wallet.did_info.temp_puzhash, "pubkey": did_wallet.did_info.temp_pubkey, "backup_dids": did_wallet.did_info.backup_ids, } async def did_create_backup_file(self, request): try: wallet_id = int(request["wallet_id"]) did_wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id] did_wallet.create_backup(request["filename"]) return {"wallet_id": wallet_id, "success": True} except Exception: return {"wallet_id": wallet_id, "success": False} ########################################################################################## # Rate Limited Wallet ########################################################################################## async def rl_set_user_info(self, request): assert self.service.wallet_state_manager is not None wallet_id = uint32(int(request["wallet_id"])) rl_user = self.service.wallet_state_manager.wallets[wallet_id] origin = request["origin"] async with self.service.wallet_state_manager.lock: await rl_user.set_user_info( uint64(request["interval"]), uint64(request["limit"]), origin["parent_coin_info"], origin["puzzle_hash"], origin["amount"], request["admin_pubkey"], ) return {} async def send_clawback_transaction(self, request): assert self.service.wallet_state_manager is not None wallet_id = int(request["wallet_id"]) wallet: RLWallet = self.service.wallet_state_manager.wallets[wallet_id] fee = int(request["fee"]) async with 
self.service.wallet_state_manager.lock: tx = await wallet.clawback_rl_coin_transaction(fee) await wallet.push_transaction(tx) # Transaction may not have been included in the mempool yet. Use get_transaction to check. return { "transaction": tx, "transaction_id": tx.name, } async def add_rate_limited_funds(self, request): wallet_id = uint32(request["wallet_id"]) wallet: RLWallet = self.service.wallet_state_manager.wallets[wallet_id] puzzle_hash = wallet.rl_get_aggregation_puzzlehash(wallet.rl_info.rl_puzzle_hash) async with self.service.wallet_state_manager.lock: await wallet.rl_add_funds(request["amount"], puzzle_hash, request["fee"]) return {"status": "SUCCESS"} async def get_farmed_amount(self, request): tx_records: List[TransactionRecord] = await self.service.wallet_state_manager.tx_store.get_farming_rewards() amount = 0 pool_reward_amount = 0 farmer_reward_amount = 0 fee_amount = 0 last_height_farmed = 0 for record in tx_records: if record.wallet_id not in self.service.wallet_state_manager.wallets: continue if record.type == TransactionType.COINBASE_REWARD: if self.service.wallet_state_manager.wallets[record.wallet_id].type() == WalletType.POOLING_WALLET: # Don't add pool rewards for pool wallets. 
continue pool_reward_amount += record.amount height = record.height_farmed(self.service.constants.GENESIS_CHALLENGE) if record.type == TransactionType.FEE_REWARD: fee_amount += record.amount - calculate_base_farmer_reward(height) farmer_reward_amount += calculate_base_farmer_reward(height) if height > last_height_farmed: last_height_farmed = height amount += record.amount assert amount == pool_reward_amount + farmer_reward_amount + fee_amount return { "farmed_amount": amount, "pool_reward_amount": pool_reward_amount, "farmer_reward_amount": farmer_reward_amount, "fee_amount": fee_amount, "last_height_farmed": last_height_farmed, } async def create_signed_transaction(self, request, hold_lock=True) -> Dict: assert self.service.wallet_state_manager is not None if "additions" not in request or len(request["additions"]) < 1: raise ValueError("Specify additions list") additions: List[Dict] = request["additions"] amount_0: uint64 = uint64(additions[0]["amount"]) assert amount_0 <= self.service.constants.MAX_COIN_AMOUNT puzzle_hash_0 = bytes32.from_hexstr(additions[0]["puzzle_hash"]) if len(puzzle_hash_0) != 32: raise ValueError(f"Address must be 32 bytes. {puzzle_hash_0.hex()}") memos_0 = None if "memos" not in additions[0] else [mem.encode("utf-8") for mem in additions[0]["memos"]] additional_outputs: List[AmountWithPuzzlehash] = [] for addition in additions[1:]: receiver_ph = bytes32.from_hexstr(addition["puzzle_hash"]) if len(receiver_ph) != 32: raise ValueError(f"Address must be 32 bytes. 
{receiver_ph.hex()}") amount = uint64(addition["amount"]) if amount > self.service.constants.MAX_COIN_AMOUNT: raise ValueError(f"Coin amount cannot exceed {self.service.constants.MAX_COIN_AMOUNT}") memos = [] if "memos" not in addition else [mem.encode("utf-8") for mem in addition["memos"]] additional_outputs.append({"puzzlehash": receiver_ph, "amount": amount, "memos": memos}) fee = uint64(0) if "fee" in request: fee = uint64(request["fee"]) coins = None if "coins" in request and len(request["coins"]) > 0: coins = set([Coin.from_json_dict(coin_json) for coin_json in request["coins"]]) coin_announcements: Optional[Set[Announcement]] = None if ( "coin_announcements" in request and request["coin_announcements"] is not None and len(request["coin_announcements"]) > 0 ): coin_announcements = { Announcement( bytes32.from_hexstr(announcement["coin_id"]), hexstr_to_bytes(announcement["message"]), hexstr_to_bytes(announcement["morph_bytes"]) if "morph_bytes" in announcement and len(announcement["morph_bytes"]) > 0 else None, ) for announcement in request["coin_announcements"] } puzzle_announcements: Optional[Set[Announcement]] = None if ( "puzzle_announcements" in request and request["puzzle_announcements"] is not None and len(request["puzzle_announcements"]) > 0 ): puzzle_announcements = { Announcement( bytes32.from_hexstr(announcement["puzzle_hash"]), hexstr_to_bytes(announcement["message"]), hexstr_to_bytes(announcement["morph_bytes"]) if "morph_bytes" in announcement and len(announcement["morph_bytes"]) > 0 else None, ) for announcement in request["puzzle_announcements"] } if hold_lock: async with self.service.wallet_state_manager.lock: signed_tx = await self.service.wallet_state_manager.main_wallet.generate_signed_transaction( amount_0, bytes32(puzzle_hash_0), fee, coins=coins, ignore_max_send_amount=True, primaries=additional_outputs, memos=memos_0, coin_announcements_to_consume=coin_announcements, puzzle_announcements_to_consume=puzzle_announcements, ) else: 
signed_tx = await self.service.wallet_state_manager.main_wallet.generate_signed_transaction( amount_0, bytes32(puzzle_hash_0), fee, coins=coins, ignore_max_send_amount=True, primaries=additional_outputs, memos=memos_0, coin_announcements_to_consume=coin_announcements, puzzle_announcements_to_consume=puzzle_announcements, ) return {"signed_tx": signed_tx.to_json_dict_convenience(self.service.config)} ########################################################################################## # Pool Wallet ########################################################################################## async def pw_join_pool(self, request) -> Dict: if self.service.wallet_state_manager is None: return {"success": False, "error": "not_initialized"} fee = uint64(request.get("fee", 0)) wallet_id = uint32(request["wallet_id"]) wallet: PoolWallet = self.service.wallet_state_manager.wallets[wallet_id] if wallet.type() != uint8(WalletType.POOLING_WALLET): raise ValueError(f"Wallet with wallet id: {wallet_id} is not a plotNFT wallet.") pool_wallet_info: PoolWalletInfo = await wallet.get_current_state() owner_pubkey = pool_wallet_info.current.owner_pubkey target_puzzlehash = None if await self.service.wallet_state_manager.synced() is False: raise ValueError("Wallet needs to be fully synced.") if "target_puzzlehash" in request: target_puzzlehash = bytes32(hexstr_to_bytes(request["target_puzzlehash"])) assert target_puzzlehash is not None new_target_state: PoolState = create_pool_state( FARMING_TO_POOL, target_puzzlehash, owner_pubkey, request["pool_url"], uint32(request["relative_lock_height"]), ) async with self.service.wallet_state_manager.lock: total_fee, tx, fee_tx = await wallet.join_pool(new_target_state, fee) return {"total_fee": total_fee, "transaction": tx, "fee_transaction": fee_tx} async def pw_self_pool(self, request) -> Dict: if self.service.wallet_state_manager is None: return {"success": False, "error": "not_initialized"} # Leaving a pool requires two state transitions. 
# First we transition to PoolSingletonState.LEAVING_POOL # Then we transition to FARMING_TO_POOL or SELF_POOLING fee = uint64(request.get("fee", 0)) wallet_id = uint32(request["wallet_id"]) wallet: PoolWallet = self.service.wallet_state_manager.wallets[wallet_id] if wallet.type() != uint8(WalletType.POOLING_WALLET): raise ValueError(f"Wallet with wallet id: {wallet_id} is not a plotNFT wallet.") if await self.service.wallet_state_manager.synced() is False: raise ValueError("Wallet needs to be fully synced.") async with self.service.wallet_state_manager.lock: total_fee, tx, fee_tx = await wallet.self_pool(fee) return {"total_fee": total_fee, "transaction": tx, "fee_transaction": fee_tx} async def pw_absorb_rewards(self, request) -> Dict: """Perform a sweep of the p2_singleton rewards controlled by the pool wallet singleton""" if self.service.wallet_state_manager is None: return {"success": False, "error": "not_initialized"} if await self.service.wallet_state_manager.synced() is False: raise ValueError("Wallet needs to be fully synced before collecting rewards") fee = uint64(request.get("fee", 0)) wallet_id = uint32(request["wallet_id"]) wallet: PoolWallet = self.service.wallet_state_manager.wallets[wallet_id] if wallet.type() != uint8(WalletType.POOLING_WALLET): raise ValueError(f"Wallet with wallet id: {wallet_id} is not a plotNFT wallet.") async with self.service.wallet_state_manager.lock: transaction, fee_tx = await wallet.claim_pool_rewards(fee) state: PoolWalletInfo = await wallet.get_current_state() return {"state": state.to_json_dict(), "transaction": transaction, "fee_transaction": fee_tx} async def pw_status(self, request) -> Dict: """Return the complete state of the Pool wallet with id `request["wallet_id"]`""" if self.service.wallet_state_manager is None: return {"success": False, "error": "not_initialized"} wallet_id = uint32(request["wallet_id"]) wallet: PoolWallet = self.service.wallet_state_manager.wallets[wallet_id] if wallet.type() != 
WalletType.POOLING_WALLET.value: raise ValueError(f"Wallet with wallet id: {wallet_id} is not a plotNFT wallet.") state: PoolWalletInfo = await wallet.get_current_state() unconfirmed_transactions: List[TransactionRecord] = await wallet.get_unconfirmed_transactions() return { "state": state.to_json_dict(), "unconfirmed_transactions": unconfirmed_transactions, }
#!/usr/bin/env python
"""Collect OpenConfig interface and network-instance state from each inventory host via gNMI."""

# Modules
from pygnmi.client import gNMIclient

# Variables
from inventory import hosts

# Body
if __name__ == "__main__":
    # YANG subtrees requested from every target.
    paths = ['openconfig-interfaces:interfaces',
             'openconfig-network-instance:network-instances']

    for host in hosts:
        # NOTE(review): insecure=True disables TLS on the gRPC channel — confirm
        # this is acceptable for the environment these targets live in.
        with gNMIclient(target=(host["ip_address"], host["port"]),
                        username=host["username"],
                        password=host["password"],
                        insecure=True) as gc:
            result = gc.get(path=paths, encoding='json')

        # Bug fix: the original wrote {host["ip_address"]} inside a double-quoted
        # f-string, which is a SyntaxError on Python < 3.12 (quote reuse only
        # became legal with PEP 701). Use single-quoted keys instead.
        print(f"{host['ip_address']}: {result}\n\n")
#!/usr/bin/env python
"""Pull OpenConfig interface and network-instance data from every inventory host over gNMI."""

# Modules
from pygnmi.client import gNMIclient

# Variables
from inventory import hosts

# Body
if __name__ == "__main__":
    # YANG paths fetched from each device.
    paths = [
        'openconfig-interfaces:interfaces',
        'openconfig-network-instance:network-instances',
    ]

    for host in hosts:
        # Build the session arguments up front for readability.
        target = (host["ip_address"], host["port"])
        client = gNMIclient(
            target=target,
            username=host["username"],
            password=host["password"],
            insecure=True,
        )
        with client as gc:
            result = gc.get(path=paths, encoding='json')
            print(f"{host['ip_address']}: {result}\n\n")
"""Poll a Google Calendar and post Discord reminders for upcoming deadlines/events.

Reminder state (which alerts have already fired per calendar entry) is kept in
the MariaDB table csg_automations.eventNotification.

Bug fix: every f-string that subscripted with double quotes inside a
double-quoted f-string (e.g. {event["summary"]}) was a SyntaxError on
Python < 3.12, and the "Processing" print mixed its quote styles so badly it
could never parse. All keys now use single quotes. The three-day event UPDATE
also interpolated the string event id without quotes, which is invalid SQL for
a non-numeric id; it is quoted now.
"""
from discord_webhook import DiscordWebhook
import mysql.connector
import sys
import argparse
import datetime
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from dateutil.parser import parse

# CLI secrets: the Discord webhook channel id and the DB password.
parser = argparse.ArgumentParser(description='Take in secrets for this script.')
parser.add_argument('--channelid', help='a channel id for the accouncements')
parser.add_argument('--dbpassword', help='password for the db')
args = parser.parse_args()

SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']

# Set up discord
uri = "https://discordapp.com/api/webhooks/{}".format(args.channelid)

# connect to the db
try:
    print("Connecting to database.")
    conn = mysql.connector.connect(
        user="app0005",
        password=args.dbpassword,
        host="192.168.1.101",
        port=3306,
        database="csg_automations"
    )
except mysql.connector.Error as e:
    print(f"Error connecting to MariaDB Platform: {e}")
    sys.exit(1)

print("Successful connection to database.")
cursor = conn.cursor()

creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
    with open('token.pickle', 'rb') as token:
        creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
    if creds and creds.expired and creds.refresh_token:
        creds.refresh(Request())
    else:
        flow = InstalledAppFlow.from_client_secrets_file(
            'credentials.json', SCOPES)
        creds = flow.run_local_server(port=0)
    # Save the credentials for the next run
    with open('token.pickle', 'wb') as token:
        pickle.dump(creds, token)

try:
    service = build('calendar', 'v3', credentials=creds)
except Exception:  # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
    e = sys.exc_info()[0]
    print(e)
    exit(1)

# Call the Calendar API
now = datetime.datetime.utcnow().isoformat() + 'Z'  # 'Z' indicates UTC time
print('fetching the closest 10 events')
events_result = service.events().list(calendarId='iu.edu_ghkkngbm6i2qadsla4ktnpgi50@group.calendar.google.com',
                                      timeMin=now, maxResults=10,
                                      singleEvents=True, orderBy='startTime').execute()
events = events_result.get('items', [])

if not events:
    print('No upcoming events found.')

# NOTE(review): the SQL below interpolates the calendar event id straight into
# the statements. The ids come from the Google Calendar API, so this is
# semi-trusted input at best — switch to parameterized queries
# (cursor.execute(sql, (eventid,))) to rule out injection and quoting bugs.
# NOTE(review): all `now.day == x.day` checks compare day-of-month only; the
# same day number in a different month would also fire — presumably the
# 10-event lookahead keeps that from happening in practice; confirm.
for event in events:
    eventid = event['id']
    print(f"Processing {eventid}:'{event['summary']}'")
    now = datetime.datetime.now()  # rebinds the module-level ISO string to a datetime

    # Some events are deadlines which we put in as events that last all day.
    # These events will be logged sparely in the db and have a different schedule
    # Deadlines alert at noon on the following schedule: 1 week before, 3 days before, and 1 day before
    # If event['start'] is of date assume it is a deadline
    if list(event['start'].keys())[0] == 'date':
        deadlineTime = parse(event['start']['date'])
        # if deadline exists in db get details otherwise create it
        cursor.execute(f"SELECT * FROM csg_automations.eventNotification where eventID = '{eventid}'")
        data = cursor.fetchone()
        if data is None:
            cursor.execute(
                f"INSERT INTO `csg_automations`.`eventNotification` (`eventId`,`type`) VALUES ('{eventid}', 'deadline' )")
            conn.commit()
            # Get the data that we just entered
            cursor.execute(f"SELECT * FROM csg_automations.eventNotification where eventID = '{eventid}'")
            data = cursor.fetchone()

        # See if the time is within epsilon
        deadline1w = (deadlineTime - datetime.timedelta(days=7)).replace(hour=12)
        deadline3d = (deadlineTime - datetime.timedelta(days=3)).replace(hour=12)
        deadline1d = (deadlineTime - datetime.timedelta(days=1)).replace(hour=12)

        # 1 week alert; data[2] is the oneWeek "already sent" flag
        if data[2] == 0 and (now.day == deadline1w.day):
            # update db
            cursor.execute(
                f"UPDATE `csg_automations`.`eventNotification` SET `oneWeek` = 1 WHERE `eventId` = '{eventid}'")
            conn.commit()
            # send alert
            webhook = DiscordWebhook(url=uri,
                                     content=f"@everyone Hey reminder bot here,\n The CSG has a deadline for "
                                             f"'{event['summary']}' in 1 week.\n"
                                             f"Please do not forget to sign up if you are interested. \n"
                                             f"------------------------------\n"
                                             f"Details: \n\n {event['description']}")
            response = webhook.execute()
            print(response)
            # Send an email here if you want

        # 3 day alert; data[3] is the threeDay flag
        if data[3] == 0 and (now.day == deadline3d.day):
            # update db
            cursor.execute(
                f"UPDATE `csg_automations`.`eventNotification` SET `threeDay` = 1 WHERE `eventId` = '{eventid}'")
            conn.commit()
            # send alert
            webhook = DiscordWebhook(url=uri,
                                     content=f"@everyone Hey reminder bot here,\n The CSG has a deadline for"
                                             f" '{event['summary']}' in 3 days. \n"
                                             f"Do not forget to sign up if you are interested. \n"
                                             f"------------------------------\n"
                                             f"Details: \n\n {event['description']}")
            response = webhook.execute()
            print(response)

        # 1 day alert; data[4] is the oneDay flag
        if data[4] == 0 and (now.day == deadline1d.day):
            # update db
            cursor.execute(
                f"UPDATE `csg_automations`.`eventNotification` SET `oneDay` = 1 WHERE `eventId` = '{eventid}'")
            conn.commit()
            # send alert
            webhook = DiscordWebhook(url=uri,
                                     content=f"@everyone Hey reminder bot here,\n The CSG has a deadline for"
                                             f" '{event['summary']}' tomorrow.\n"
                                             f"Do not forget to sign up if you are interested. \n"
                                             f"------------------------------\n"
                                             f"Details: \n\n {event['description']}")
            response = webhook.execute()
            print(response)
    else:
        # Events alert on the following schedule: 3 days before, 1 day before, 30 minutes before
        eventTime = parse(event['start']['dateTime'])
        event3d = (eventTime - datetime.timedelta(days=3)).replace(tzinfo=None)
        event1d = (eventTime - datetime.timedelta(days=1)).replace(tzinfo=None)
        event30min = (eventTime - datetime.timedelta(minutes=30)).replace(tzinfo=None)
        envTime = eventTime.strftime("%H:%M:%S")

        # if event exists in db get details otherwise create it
        cursor.execute(f"SELECT * FROM csg_automations.eventNotification where eventID = '{eventid}'")
        data = cursor.fetchone()
        if data is None:
            cursor.execute(
                f"INSERT INTO `csg_automations`.`eventNotification` (`eventId`,`type`) VALUES ('{eventid}', 'event' )")
            conn.commit()
            # Get the data that we just entered
            cursor.execute(f"SELECT * FROM csg_automations.eventNotification where eventID = '{eventid}'")
            data = cursor.fetchone()

        # send an alert for 3 days before
        if data[2] == 0 and (now.day == event3d.day):
            # update db
            # Bug fix: {eventid} was interpolated without quotes here (invalid
            # SQL for a string id). NOTE(review): `threeday` is lowercase here
            # but `threeDay` elsewhere — verify the actual column name.
            cursor.execute(
                f"UPDATE `csg_automations`.`eventNotification` SET `threeday` = 1 WHERE `eventId` = '{eventid}'")
            conn.commit()
            # send alert
            webhook = DiscordWebhook(url=uri,
                                     content=f"@everyone Hey reminder bot here,\n CSG will be hosting "
                                             f"'{event['summary']}' in 3 days"
                                             f" on the {eventTime.day} at {envTime}.\n"
                                             f"We hope to see everyone there!\n"
                                             f"------------------------------\n"
                                             f"Details: \n\n {event['description']}")
            response = webhook.execute()
            print(response)

        # send an alert for 1 day before
        if data[4] == 0 and (now.day == event1d.day):
            # update db
            cursor.execute(
                f"UPDATE `csg_automations`.`eventNotification` SET `oneDay` = 1 WHERE `eventId` = '{eventid}'")
            conn.commit()
            # send alert
            webhook = DiscordWebhook(url=uri,
                                     content=f"@everyone Hey reminder bot here,\n CSG will be hosting '{event['summary']}'"
                                             f" tomorrow, the {eventTime.day} at {envTime}.\n "
                                             f"We hope to see everyone there!\n"
                                             f"------------------------------\n"
                                             f"Details: \n\n {event['description']}")
            response = webhook.execute()
            print(response)

        # send an alert for 30 minutes before (fires when `now` is within the
        # [-1, 30) minute window around the 30-minutes-before mark)
        if data[5] == 0 and (-1 < ((now - event30min).total_seconds()) / 60 < 30):
            # update db
            cursor.execute(
                f"UPDATE `csg_automations`.`eventNotification` SET `thirtyMinutes` = 1 WHERE `eventId` = '{eventid}'")
            conn.commit()
            # send alert
            webhook = DiscordWebhook(url=uri,
                                     content=f"@everyone\n Hey reminder bot here,\n CSG will be hosting "
                                             f"'{event['summary']}' in 30 minutes!\n"
                                             f"Don't forget to join in!\n"
                                             f"------------------------------\n"
                                             f"Details: \n\n{event['description']}")
            response = webhook.execute()
            print(response)
"""Poll a Google Calendar and post Discord reminders for upcoming deadlines/events.

Per-entry "alert already sent" flags live in the MariaDB table
csg_automations.eventNotification; each run fetches the next 10 calendar items
and fires whichever reminders are due.

NOTE(review): every query below builds SQL with f-string interpolation of the
calendar event id — semi-trusted API input; parameterized queries
(cursor.execute(sql, params)) would be safer. Also, all `now.day == x.day`
checks compare day-of-month only, so the same day number in a different month
would also match — presumably the 10-event lookahead prevents that; confirm.
"""
from discord_webhook import DiscordWebhook
import mysql.connector
import sys
import argparse
import datetime
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from dateutil.parser import parse

# CLI secrets: the Discord webhook channel id and the DB password.
parser = argparse.ArgumentParser(description='Take in secrets for this script.')
parser.add_argument('--channelid', help='a channel id for the accouncements')
parser.add_argument('--dbpassword', help='password for the db')
# parser.add_argument('--sum', dest='accumulate', action='store_const',
#                     const=sum, default=max,
#                     help='sum the integers (default: find the max)')
args = parser.parse_args()

SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']

# Set up discord
uri = "https://discordapp.com/api/webhooks/{}".format(args.channelid)

# connect to the db
try:
    print("Connecting to database.")
    conn = mysql.connector.connect(
        user="app0005",
        password=args.dbpassword,
        host="192.168.1.101",
        port=3306,
        database="csg_automations"
    )
except mysql.connector.Error as e:
    print(f"Error connecting to MariaDB Platform: {e}")
    sys.exit(1)

print("Successful connection to database.")
cursor = conn.cursor()

creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
    with open('token.pickle', 'rb') as token:
        creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
    if creds and creds.expired and creds.refresh_token:
        creds.refresh(Request())
    else:
        flow = InstalledAppFlow.from_client_secrets_file(
            'credentials.json', SCOPES)
        creds = flow.run_local_server(port=0)
    # Save the credentials for the next run
    with open('token.pickle', 'wb') as token:
        pickle.dump(creds, token)

try:
    service = build('calendar', 'v3', credentials=creds)
# NOTE(review): bare except also catches SystemExit/KeyboardInterrupt; prefer
# `except Exception`.
except:
    e = sys.exc_info()[0]
    print(e)
    exit(1)

# Call the Calendar API
now = datetime.datetime.utcnow().isoformat() + 'Z'  # 'Z' indicates UTC time
print('fetching the closest 10 events')
events_result = service.events().list(calendarId='iu.edu_ghkkngbm6i2qadsla4ktnpgi50@group.calendar.google.com',
                                      timeMin=now, maxResults=10,
                                      singleEvents=True, orderBy='startTime').execute()
events = events_result.get('items', [])

if not events:
    print('No upcoming events found.')

for event in events:
    eventid = event['id']
    print(f"Processing {eventid}:'{event['summary']}'")
    now = datetime.datetime.now()  # rebinds the module-level ISO string to a datetime

    # Some events are deadlines which we put in as events that last all day.
    # These events will be logged sparely in the db and have a different schedule
    # Deadlines alert at noon on the following schedule: 1 week before, 3 days before, and 1 day before
    # If event['start'] is of date assume it is a deadline
    if list(event['start'].keys())[0] == 'date':
        deadlineTime = parse(event['start']['date'])
        # if deadline exists in db get details otherwise create it
        cursor.execute(f"SELECT * FROM csg_automations.eventNotification where eventID = '{eventid}'")
        data = cursor.fetchone()
        if data is None:
            cursor.execute(
                f"INSERT INTO `csg_automations`.`eventNotification` (`eventId`,`type`) VALUES ('{eventid}', 'deadline' )")
            conn.commit()
            # Get the data that we just entered
            cursor.execute(f"SELECT * FROM csg_automations.eventNotification where eventID = '{eventid}'")
            data = cursor.fetchone()

        # See if the time is within epsilon
        deadline1w = (deadlineTime - datetime.timedelta(days=7)).replace(hour=12)
        deadline3d = (deadlineTime - datetime.timedelta(days=3)).replace(hour=12)
        deadline1d = (deadlineTime - datetime.timedelta(days=1)).replace(hour=12)
        # deadline1d = (parse(event['end']['date'])).replace(hour=12)

        # 1 week alert; data[2] is the oneWeek "already sent" flag
        if data[2] == 0 and (now.day == deadline1w.day):
            # update db
            cursor.execute(
                f"UPDATE `csg_automations`.`eventNotification` SET `oneWeek` = 1 WHERE `eventId` = '{eventid}'")
            conn.commit()
            # send alert
            webhook = DiscordWebhook(url=uri,
                                     content=f"@everyone Hey reminder bot here,\n The CSG has a deadline for "
                                             f"'{event['summary']}' in 1 week.\n"
                                             f"Please do not forget to sign up if you are interested. \n"
                                             f"------------------------------\n"
                                             f"Details: \n\n {event['description']}")
            response = webhook.execute()
            print(response)
            # Send an email here if you want

        # 3 day alert; data[3] is the threeDay flag
        if data[3] == 0 and (now.day == deadline3d.day):
            # update db
            cursor.execute(
                f"UPDATE `csg_automations`.`eventNotification` SET `threeDay` = 1 WHERE `eventId` = '{eventid}'")
            conn.commit()
            # send alert
            webhook = DiscordWebhook(url=uri,
                                     content=f"@everyone Hey reminder bot here,\n The CSG has a deadline for"
                                             f" '{event['summary']}' in 3 days. \n"
                                             f"Do not forget to sign up if you are interested. \n"
                                             f"------------------------------\n"
                                             f"Details: \n\n {event['description']}")
            response = webhook.execute()
            print(response)

        # 1 day alert; data[4] is the oneDay flag
        if data[4] == 0 and (now.day == deadline1d.day):
            # update db
            cursor.execute(
                f"UPDATE `csg_automations`.`eventNotification` SET `oneDay` = 1 WHERE `eventId` = '{eventid}'")
            conn.commit()
            # send alert
            webhook = DiscordWebhook(url=uri,
                                     content=f"@everyone Hey reminder bot here,\n The CSG has a deadline for"
                                             f" '{event['summary']}' tomorrow.\n"
                                             f"Do not forget to sign up if you are interested. \n"
                                             f"------------------------------\n"
                                             f"Details: \n\n {event['description']}")
            response = webhook.execute()
            print(response)
    else:
        # Events alert on the following schedule: 3 days before, 1 day before, 30 minutes before
        eventTime = parse(event['start']['dateTime'])
        event3d = (eventTime - datetime.timedelta(days=3)).replace(tzinfo=None)
        event1d = (eventTime - datetime.timedelta(days=1)).replace(tzinfo=None)
        event30min = (eventTime - datetime.timedelta(minutes=30)).replace(tzinfo=None)
        envTime = eventTime.strftime("%H:%M:%S")

        # if event exists in db get details otherwise create it
        cursor.execute(f"SELECT * FROM csg_automations.eventNotification where eventID = '{eventid}'")
        data = cursor.fetchone()
        if data is None:
            cursor.execute(
                f"INSERT INTO `csg_automations`.`eventNotification` (`eventId`,`type`) VALUES ('{eventid}', 'event' )")
            conn.commit()
            # Get the data that we just entered
            cursor.execute(f"SELECT * FROM csg_automations.eventNotification where eventID = '{eventid}'")
            data = cursor.fetchone()

        # send an alert for 3 days before
        if data[2] == 0 and (now.day == event3d.day):
            # update db
            # NOTE(review): {eventid} is interpolated WITHOUT quotes here
            # (unlike every other statement), and the column is lowercase
            # `threeday` vs `threeDay` above — this statement likely errors at
            # runtime for string ids; verify against the schema.
            cursor.execute(
                f"UPDATE `csg_automations`.`eventNotification` SET `threeday` = 1 WHERE `eventId` = {eventid}")
            conn.commit()
            # send alert
            webhook = DiscordWebhook(url=uri,
                                     content=f"@everyone Hey reminder bot here,\n CSG will be hosting "
                                             f"'{event['summary']}' in 3 days"
                                             f" on the {eventTime.day} at {envTime}.\n"
                                             f"We hope to see everyone there!\n"
                                             f"------------------------------\n"
                                             f"Details: \n\n {event['description']}")
            response = webhook.execute()
            print(response)

        # send an alert for 1 day before
        if data[4] == 0 and (now.day == event1d.day):
            # update db
            cursor.execute(
                f"UPDATE `csg_automations`.`eventNotification` SET `oneDay` = 1 WHERE `eventId` = '{eventid}'")
            conn.commit()
            # send alert
            webhook = DiscordWebhook(url=uri,
                                     content=f"@everyone Hey reminder bot here,\n CSG will be hosting '{event['summary']}'"
                                             f" tomorrow, the {eventTime.day} at {envTime}.\n "
                                             f"We hope to see everyone there!\n"
                                             f"------------------------------\n"
                                             f"Details: \n\n {event['description']}")
            response = webhook.execute()
            print(response)

        # send an alert for 30 minutes before (fires while `now` is inside the
        # [-1, 30) minute window around the 30-minutes-before mark)
        if data[5] == 0 and (-1 < ((now - event30min).total_seconds()) / 60 < 30):
            # update db
            cursor.execute(
                f"UPDATE `csg_automations`.`eventNotification` SET `thirtyMinutes` = 1 WHERE `eventId` = '{eventid}'")
            conn.commit()
            # send alert
            webhook = DiscordWebhook(url=uri,
                                     content=f"@everyone\n Hey reminder bot here,\n CSG will be hosting "
                                             f"'{event['summary']}' in 30 minutes!\n"
                                             f"Don't forget to join in!\n"
                                             f"------------------------------\n"
                                             f"Details: \n\n{event['description']}")
            response = webhook.execute()
            print(response)
"""Faça um programa que leia uma frase pelo teclado e mostre quantas vezes aparece a letra “A”, em que posição ela aparece a primeira vez e em que posição ela aparece a última vez""" frase = input('Digite algo: ').strip().upper() print(f'\nA letras A aparece {frase.count('A')} vezes') print(f'´A´ aparece a primeira vez na posição {frase.find('A')+1}') print(f'´A´ aparece a última vez na posição {frase.rfind('A')+1}')
"""Faça um programa que leia uma frase pelo teclado e mostre quantas vezes aparece a letra “A”, em que posição ela aparece a primeira vez e em que posição ela aparece a última vez""" frase = input('Digite algo: ').strip().upper() print(f'\nA letras A aparece {frase.count("A")} vezes') print(f'´A´ aparece a primeira vez na posição {frase.find("A")+1}') print(f'´A´ aparece a última vez na posição {frase.rfind("A")+1}')
"""Train (or watch) a DQN agent on an Atari task with tianshou, optionally
wrapping the policy in an Intrinsic Curiosity Module when --icm-lr-scale > 0.

Bug fix: the f-string in watch() subscripted with double quotes inside a
double-quoted f-string ({result["n/ep"]}), a SyntaxError on Python < 3.12;
it now uses single-quoted keys.
"""
import argparse
import datetime
import os
import pprint

import numpy as np
import torch
from atari_network import DQN
from atari_wrapper import make_atari_env
from torch.utils.tensorboard import SummaryWriter

from tianshou.data import Collector, VectorReplayBuffer
from tianshou.policy import DQNPolicy
from tianshou.policy.modelbased.icm import ICMPolicy
from tianshou.trainer import offpolicy_trainer
from tianshou.utils import TensorboardLogger, WandbLogger
from tianshou.utils.net.discrete import IntrinsicCuriosityModule


def get_args():
    """Parse the command-line hyperparameters for Atari DQN (+ optional ICM)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--task", type=str, default="PongNoFrameskip-v4")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--scale-obs", type=int, default=0)
    parser.add_argument("--eps-test", type=float, default=0.005)
    parser.add_argument("--eps-train", type=float, default=1.)
    parser.add_argument("--eps-train-final", type=float, default=0.05)
    parser.add_argument("--buffer-size", type=int, default=100000)
    parser.add_argument("--lr", type=float, default=0.0001)
    parser.add_argument("--gamma", type=float, default=0.99)
    parser.add_argument("--n-step", type=int, default=3)
    parser.add_argument("--target-update-freq", type=int, default=500)
    parser.add_argument("--epoch", type=int, default=100)
    parser.add_argument("--step-per-epoch", type=int, default=100000)
    parser.add_argument("--step-per-collect", type=int, default=10)
    parser.add_argument("--update-per-step", type=float, default=0.1)
    parser.add_argument("--batch-size", type=int, default=32)
    parser.add_argument("--training-num", type=int, default=10)
    parser.add_argument("--test-num", type=int, default=10)
    parser.add_argument("--logdir", type=str, default="log")
    parser.add_argument("--render", type=float, default=0.)
    parser.add_argument(
        "--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu"
    )
    parser.add_argument("--frames-stack", type=int, default=4)
    parser.add_argument("--resume-path", type=str, default=None)
    parser.add_argument("--resume-id", type=str, default=None)
    parser.add_argument(
        "--logger",
        type=str,
        default="tensorboard",
        choices=["tensorboard", "wandb"],
    )
    parser.add_argument("--wandb-project", type=str, default="atari.benchmark")
    parser.add_argument(
        "--watch",
        default=False,
        action="store_true",
        help="watch the play of pre-trained policy only"
    )
    parser.add_argument("--save-buffer-name", type=str, default=None)
    parser.add_argument(
        "--icm-lr-scale",
        type=float,
        default=0.,
        help="use intrinsic curiosity module with this lr scale"
    )
    parser.add_argument(
        "--icm-reward-scale",
        type=float,
        default=0.01,
        help="scaling factor for intrinsic curiosity reward"
    )
    parser.add_argument(
        "--icm-forward-loss-weight",
        type=float,
        default=0.2,
        help="weight for the forward model loss in ICM"
    )
    return parser.parse_args()


def test_dqn(args=get_args()):
    """Build envs/policy/collectors from `args`, then train or watch.

    NOTE: the `args=get_args()` default is evaluated at import time (tianshou
    example convention) — kept as-is to preserve the public interface.
    """
    env, train_envs, test_envs = make_atari_env(
        args.task,
        args.seed,
        args.training_num,
        args.test_num,
        scale=args.scale_obs,
        frame_stack=args.frames_stack,
    )
    args.state_shape = env.observation_space.shape or env.observation_space.n
    args.action_shape = env.action_space.shape or env.action_space.n
    # should be N_FRAMES x H x W
    print("Observations shape:", args.state_shape)
    print("Actions shape:", args.action_shape)
    # seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    # define model
    net = DQN(*args.state_shape, args.action_shape, args.device).to(args.device)
    optim = torch.optim.Adam(net.parameters(), lr=args.lr)
    # define policy
    policy = DQNPolicy(
        net, optim, args.gamma, args.n_step,
        target_update_freq=args.target_update_freq
    )
    if args.icm_lr_scale > 0:
        # Wrap DQN in an Intrinsic Curiosity Module sharing the CNN features.
        feature_net = DQN(
            *args.state_shape, args.action_shape, args.device, features_only=True
        )
        action_dim = np.prod(args.action_shape)
        feature_dim = feature_net.output_dim
        icm_net = IntrinsicCuriosityModule(
            feature_net.net,
            feature_dim,
            action_dim,
            hidden_sizes=[512],
            device=args.device
        )
        icm_optim = torch.optim.Adam(icm_net.parameters(), lr=args.lr)
        policy = ICMPolicy(
            policy, icm_net, icm_optim, args.icm_lr_scale, args.icm_reward_scale,
            args.icm_forward_loss_weight
        ).to(args.device)
    # load a previous policy
    if args.resume_path:
        policy.load_state_dict(torch.load(args.resume_path, map_location=args.device))
        print("Loaded agent from: ", args.resume_path)
    # replay buffer: `save_last_obs` and `stack_num` can be removed together
    # when you have enough RAM
    buffer = VectorReplayBuffer(
        args.buffer_size,
        buffer_num=len(train_envs),
        ignore_obs_next=True,
        save_only_last_obs=True,
        stack_num=args.frames_stack
    )
    # collector
    train_collector = Collector(policy, train_envs, buffer, exploration_noise=True)
    test_collector = Collector(policy, test_envs, exploration_noise=True)

    # log
    now = datetime.datetime.now().strftime("%y%m%d-%H%M%S")
    args.algo_name = "dqn_icm" if args.icm_lr_scale > 0 else "dqn"
    log_name = os.path.join(args.task, args.algo_name, str(args.seed), now)
    log_path = os.path.join(args.logdir, log_name)

    # logger: WandbLogger still needs a SummaryWriter underneath, hence load()
    if args.logger == "wandb":
        logger = WandbLogger(
            save_interval=1,
            name=log_name.replace(os.path.sep, "__"),
            run_id=args.resume_id,
            config=args,
            project=args.wandb_project,
        )
    writer = SummaryWriter(log_path)
    writer.add_text("args", str(args))
    if args.logger == "tensorboard":
        logger = TensorboardLogger(writer)
    else:  # wandb
        logger.load(writer)

    def save_best_fn(policy):
        torch.save(policy.state_dict(), os.path.join(log_path, "policy.pth"))

    def stop_fn(mean_rewards):
        if env.spec.reward_threshold:
            return mean_rewards >= env.spec.reward_threshold
        elif "Pong" in args.task:
            return mean_rewards >= 20
        else:
            return False

    def train_fn(epoch, env_step):
        # nature DQN setting, linear decay in the first 1M steps
        if env_step <= 1e6:
            eps = args.eps_train - env_step / 1e6 * \
                (args.eps_train - args.eps_train_final)
        else:
            eps = args.eps_train_final
        policy.set_eps(eps)
        if env_step % 1000 == 0:
            logger.write("train/env_step", env_step, {"train/eps": eps})

    def test_fn(epoch, env_step):
        policy.set_eps(args.eps_test)

    def save_checkpoint_fn(epoch, env_step, gradient_step):
        # see also: https://pytorch.org/tutorials/beginner/saving_loading_models.html
        ckpt_path = os.path.join(log_path, "checkpoint.pth")
        torch.save({"model": policy.state_dict()}, ckpt_path)
        return ckpt_path

    # watch agent's performance
    def watch():
        print("Setup test envs ...")
        policy.eval()
        policy.set_eps(args.eps_test)
        test_envs.seed(args.seed)
        if args.save_buffer_name:
            print(f"Generate buffer with size {args.buffer_size}")
            buffer = VectorReplayBuffer(
                args.buffer_size,
                buffer_num=len(test_envs),
                ignore_obs_next=True,
                save_only_last_obs=True,
                stack_num=args.frames_stack
            )
            collector = Collector(policy, test_envs, buffer, exploration_noise=True)
            result = collector.collect(n_step=args.buffer_size)
            print(f"Save buffer into {args.save_buffer_name}")
            # Unfortunately, pickle will cause oom with 1M buffer size
            buffer.save_hdf5(args.save_buffer_name)
        else:
            print("Testing agent ...")
            test_collector.reset()
            result = test_collector.collect(
                n_episode=args.test_num, render=args.render
            )
        rew = result["rews"].mean()
        # Bug fix: {result["n/ep"]} nested double quotes inside a double-quoted
        # f-string — a SyntaxError on Python < 3.12.
        print(f"Mean reward (over {result['n/ep']} episodes): {rew}")

    if args.watch:
        watch()
        exit(0)

    # test train_collector and start filling replay buffer
    train_collector.collect(n_step=args.batch_size * args.training_num)
    # trainer
    result = offpolicy_trainer(
        policy,
        train_collector,
        test_collector,
        args.epoch,
        args.step_per_epoch,
        args.step_per_collect,
        args.test_num,
        args.batch_size,
        train_fn=train_fn,
        test_fn=test_fn,
        stop_fn=stop_fn,
        save_best_fn=save_best_fn,
        logger=logger,
        update_per_step=args.update_per_step,
        test_in_train=False,
        resume_from_log=args.resume_id is not None,
        save_checkpoint_fn=save_checkpoint_fn,
    )

    pprint.pprint(result)
    watch()


if __name__ == "__main__":
    test_dqn(get_args())
import argparse import datetime import os import pprint import numpy as np import torch from atari_network import DQN from atari_wrapper import make_atari_env from torch.utils.tensorboard import SummaryWriter from tianshou.data import Collector, VectorReplayBuffer from tianshou.policy import DQNPolicy from tianshou.policy.modelbased.icm import ICMPolicy from tianshou.trainer import offpolicy_trainer from tianshou.utils import TensorboardLogger, WandbLogger from tianshou.utils.net.discrete import IntrinsicCuriosityModule def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--task", type=str, default="PongNoFrameskip-v4") parser.add_argument("--seed", type=int, default=0) parser.add_argument("--scale-obs", type=int, default=0) parser.add_argument("--eps-test", type=float, default=0.005) parser.add_argument("--eps-train", type=float, default=1.) parser.add_argument("--eps-train-final", type=float, default=0.05) parser.add_argument("--buffer-size", type=int, default=100000) parser.add_argument("--lr", type=float, default=0.0001) parser.add_argument("--gamma", type=float, default=0.99) parser.add_argument("--n-step", type=int, default=3) parser.add_argument("--target-update-freq", type=int, default=500) parser.add_argument("--epoch", type=int, default=100) parser.add_argument("--step-per-epoch", type=int, default=100000) parser.add_argument("--step-per-collect", type=int, default=10) parser.add_argument("--update-per-step", type=float, default=0.1) parser.add_argument("--batch-size", type=int, default=32) parser.add_argument("--training-num", type=int, default=10) parser.add_argument("--test-num", type=int, default=10) parser.add_argument("--logdir", type=str, default="log") parser.add_argument("--render", type=float, default=0.) 
parser.add_argument( "--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu" ) parser.add_argument("--frames-stack", type=int, default=4) parser.add_argument("--resume-path", type=str, default=None) parser.add_argument("--resume-id", type=str, default=None) parser.add_argument( "--logger", type=str, default="tensorboard", choices=["tensorboard", "wandb"], ) parser.add_argument("--wandb-project", type=str, default="atari.benchmark") parser.add_argument( "--watch", default=False, action="store_true", help="watch the play of pre-trained policy only" ) parser.add_argument("--save-buffer-name", type=str, default=None) parser.add_argument( "--icm-lr-scale", type=float, default=0., help="use intrinsic curiosity module with this lr scale" ) parser.add_argument( "--icm-reward-scale", type=float, default=0.01, help="scaling factor for intrinsic curiosity reward" ) parser.add_argument( "--icm-forward-loss-weight", type=float, default=0.2, help="weight for the forward model loss in ICM" ) return parser.parse_args() def test_dqn(args=get_args()): env, train_envs, test_envs = make_atari_env( args.task, args.seed, args.training_num, args.test_num, scale=args.scale_obs, frame_stack=args.frames_stack, ) args.state_shape = env.observation_space.shape or env.observation_space.n args.action_shape = env.action_space.shape or env.action_space.n # should be N_FRAMES x H x W print("Observations shape:", args.state_shape) print("Actions shape:", args.action_shape) # seed np.random.seed(args.seed) torch.manual_seed(args.seed) # define model net = DQN(*args.state_shape, args.action_shape, args.device).to(args.device) optim = torch.optim.Adam(net.parameters(), lr=args.lr) # define policy policy = DQNPolicy( net, optim, args.gamma, args.n_step, target_update_freq=args.target_update_freq ) if args.icm_lr_scale > 0: feature_net = DQN( *args.state_shape, args.action_shape, args.device, features_only=True ) action_dim = np.prod(args.action_shape) feature_dim = 
feature_net.output_dim icm_net = IntrinsicCuriosityModule( feature_net.net, feature_dim, action_dim, hidden_sizes=[512], device=args.device ) icm_optim = torch.optim.Adam(icm_net.parameters(), lr=args.lr) policy = ICMPolicy( policy, icm_net, icm_optim, args.icm_lr_scale, args.icm_reward_scale, args.icm_forward_loss_weight ).to(args.device) # load a previous policy if args.resume_path: policy.load_state_dict(torch.load(args.resume_path, map_location=args.device)) print("Loaded agent from: ", args.resume_path) # replay buffer: `save_last_obs` and `stack_num` can be removed together # when you have enough RAM buffer = VectorReplayBuffer( args.buffer_size, buffer_num=len(train_envs), ignore_obs_next=True, save_only_last_obs=True, stack_num=args.frames_stack ) # collector train_collector = Collector(policy, train_envs, buffer, exploration_noise=True) test_collector = Collector(policy, test_envs, exploration_noise=True) # log now = datetime.datetime.now().strftime("%y%m%d-%H%M%S") args.algo_name = "dqn_icm" if args.icm_lr_scale > 0 else "dqn" log_name = os.path.join(args.task, args.algo_name, str(args.seed), now) log_path = os.path.join(args.logdir, log_name) # logger if args.logger == "wandb": logger = WandbLogger( save_interval=1, name=log_name.replace(os.path.sep, "__"), run_id=args.resume_id, config=args, project=args.wandb_project, ) writer = SummaryWriter(log_path) writer.add_text("args", str(args)) if args.logger == "tensorboard": logger = TensorboardLogger(writer) else: # wandb logger.load(writer) def save_best_fn(policy): torch.save(policy.state_dict(), os.path.join(log_path, "policy.pth")) def stop_fn(mean_rewards): if env.spec.reward_threshold: return mean_rewards >= env.spec.reward_threshold elif "Pong" in args.task: return mean_rewards >= 20 else: return False def train_fn(epoch, env_step): # nature DQN setting, linear decay in the first 1M steps if env_step <= 1e6: eps = args.eps_train - env_step / 1e6 * \ (args.eps_train - args.eps_train_final) else: eps = 
args.eps_train_final policy.set_eps(eps) if env_step % 1000 == 0: logger.write("train/env_step", env_step, {"train/eps": eps}) def test_fn(epoch, env_step): policy.set_eps(args.eps_test) def save_checkpoint_fn(epoch, env_step, gradient_step): # see also: https://pytorch.org/tutorials/beginner/saving_loading_models.html ckpt_path = os.path.join(log_path, "checkpoint.pth") torch.save({"model": policy.state_dict()}, ckpt_path) return ckpt_path # watch agent's performance def watch(): print("Setup test envs ...") policy.eval() policy.set_eps(args.eps_test) test_envs.seed(args.seed) if args.save_buffer_name: print(f"Generate buffer with size {args.buffer_size}") buffer = VectorReplayBuffer( args.buffer_size, buffer_num=len(test_envs), ignore_obs_next=True, save_only_last_obs=True, stack_num=args.frames_stack ) collector = Collector(policy, test_envs, buffer, exploration_noise=True) result = collector.collect(n_step=args.buffer_size) print(f"Save buffer into {args.save_buffer_name}") # Unfortunately, pickle will cause oom with 1M buffer size buffer.save_hdf5(args.save_buffer_name) else: print("Testing agent ...") test_collector.reset() result = test_collector.collect( n_episode=args.test_num, render=args.render ) rew = result["rews"].mean() print(f"Mean reward (over {result['n/ep']} episodes): {rew}") if args.watch: watch() exit(0) # test train_collector and start filling replay buffer train_collector.collect(n_step=args.batch_size * args.training_num) # trainer result = offpolicy_trainer( policy, train_collector, test_collector, args.epoch, args.step_per_epoch, args.step_per_collect, args.test_num, args.batch_size, train_fn=train_fn, test_fn=test_fn, stop_fn=stop_fn, save_best_fn=save_best_fn, logger=logger, update_per_step=args.update_per_step, test_in_train=False, resume_from_log=args.resume_id is not None, save_checkpoint_fn=save_checkpoint_fn, ) pprint.pprint(result) watch() if __name__ == "__main__": test_dqn(get_args())
import logging import os from typing import Any, Dict from src.helpers.logging import add_logging from src.helpers.types import AnyEntity @add_logging(level=logging.DEBUG) def parse_entity(text: str, entity_info: AnyEntity, logger: logging.Logger) -> str: """Extracts the entity substring from the given text, based on the provided entity information. Parameters ---------- text : str The raw text to extract an entity from. entity_info : AnyEntity Entity metadata as provided by Telegram API. Returns ------- str Extracted entity string (or empty string if the provided metadata is flawed). """ length = entity_info["length"] offset = entity_info["offset"] entity = text[offset : offset + length] logger.debug(f"Parsed {entity} from {text} (length={length}, offset={offset}).") return entity def is_command(command: str, ref_command: str) -> bool: """ Checks whether the given command matches the reference command. Handles the case where the command is suffixed with the bot handle. Parameters ---------- command : str Command to check. ref_command : str Reference command to compare to. Returns ------- bool True if the command matches the reference command. """ return command in ( ref_command, f"{ref_command}@{os.environ["TELEGRAM_BOT_HANDLE"]}", )
import logging import os from typing import Any, Dict from src.helpers.logging import add_logging from src.helpers.types import AnyEntity @add_logging(level=logging.DEBUG) def parse_entity(text: str, entity_info: AnyEntity, logger: logging.Logger) -> str: """Extracts the entity substring from the given text, based on the provided entity information. Parameters ---------- text : str The raw text to extract an entity from. entity_info : AnyEntity Entity metadata as provided by Telegram API. Returns ------- str Extracted entity string (or empty string if the provided metadata is flawed). """ length = entity_info["length"] offset = entity_info["offset"] entity = text[offset : offset + length] logger.debug(f"Parsed {entity} from {text} (length={length}, offset={offset}).") return entity def is_command(command: str, ref_command: str) -> bool: """ Checks whether the given command matches the reference command. Handles the case where the command is suffixed with the bot handle. Parameters ---------- command : str Command to check. ref_command : str Reference command to compare to. Returns ------- bool True if the command matches the reference command. """ return command in ( ref_command, f"{ref_command}@{os.environ['TELEGRAM_BOT_HANDLE']}", )
import copy import logging import re from markupsafe import escape from galaxy import model, util from galaxy.web.framework.helpers import grids, iff, time_ago from galaxy.webapps.base.controller import BaseUIController, web log = logging.getLogger(__name__) VALID_FIELDNAME_RE = re.compile(r"^[a-zA-Z0-9\_]+$") class FormsGrid(grids.Grid): # Custom column types class NameColumn(grids.TextColumn): def get_value(self, trans, grid, form): return escape(form.latest_form.name) class DescriptionColumn(grids.TextColumn): def get_value(self, trans, grid, form): return escape(form.latest_form.desc) class TypeColumn(grids.TextColumn): def get_value(self, trans, grid, form): return form.latest_form.type class StatusColumn(grids.GridColumn): def get_value(self, trans, grid, user): if user.deleted: return "deleted" return "active" # Grid definition title = "Forms" model_class = model.FormDefinitionCurrent default_sort_key = "-update_time" num_rows_per_page = 50 use_paging = True default_filter = dict(deleted="False") columns = [ NameColumn("Name", key="name", model_class=model.FormDefinition, link=(lambda item: iff(item.deleted, None, dict(controller="admin", action="form/edit_form", id=item.id))), attach_popup=True, filterable="advanced"), DescriptionColumn("Description", key="desc", model_class=model.FormDefinition, filterable="advanced"), TypeColumn("Type"), grids.GridColumn("Last Updated", key="update_time", format=time_ago), StatusColumn("Status"), grids.DeletedColumn("Deleted", key="deleted", visible=False, filterable="advanced") ] columns.append(grids.MulticolFilterColumn("Search", cols_to_filter=[columns[0], columns[1]], key="free-text-search", visible=False, filterable="standard")) operations = [ grids.GridOperation("Delete", allow_multiple=True, condition=(lambda item: not item.deleted)), grids.GridOperation("Undelete", condition=(lambda item: item.deleted)), ] global_actions = [ grids.GridAction("Create new form", dict(controller="admin", action="form/create_form")) ] 
def build_initial_query(self, trans, **kwargs): return trans.sa_session.query(self.model_class).join(model.FormDefinition, self.model_class.latest_form_id == model.FormDefinition.id) class Forms(BaseUIController): forms_grid = FormsGrid() @web.legacy_expose_api @web.require_admin def forms_list(self, trans, payload=None, **kwd): message = kwd.get('message', '') status = kwd.get('status', '') if 'operation' in kwd: id = kwd.get('id') if not id: return self.message_exception(trans, f'Invalid form id ({str(id)}) received.') ids = util.listify(id) operation = kwd['operation'].lower() if operation == 'delete': message, status = self._delete_form(trans, ids) elif operation == 'undelete': message, status = self._undelete_form(trans, ids) if message and status: kwd['message'] = util.sanitize_text(message) kwd['status'] = status return self.forms_grid(trans, **kwd) @web.legacy_expose_api @web.require_admin def create_form(self, trans, payload=None, **kwd): if trans.request.method == 'GET': fd_types = sorted(trans.app.model.FormDefinition.types.__members__.items()) return { 'title': 'Create new form', 'submit_title': 'Create', 'inputs': [{ 'name': 'name', 'label': 'Name' }, { 'name': 'desc', 'label': 'Description' }, { 'name': 'type', 'type': 'select', 'options': [('None', 'none')] + [(ft[1], ft[1]) for ft in fd_types], 'label': 'Type' }, { 'name': 'csv_file', 'label': 'Import from CSV', 'type': 'upload', 'help': 'Import fields from CSV-file with the following format: Label, Help, Type, Value, Options, Required=True/False.' 
}] } else: # csv-file format: label, helptext, type, default, selectlist, required ''' csv_file = payload.get('csv_file') index = 0 if csv_file: lines = csv_file.splitlines() for line in lines: row = line.split(',') if len(row) >= 6: prefix = 'fields_%i|' % index payload[f"{prefix}name"] = '%i_imported_field' % (index + 1) payload[f"{prefix}label"] = row[0] payload[f"{prefix}helptext"] = row[1] payload[f"{prefix}type"] = row[2] payload[f"{prefix}default"] = row[3] payload[f"{prefix}selectlist"] = row[4].split(',') payload[f"{prefix}required"] = row[5].lower() == 'true' index = index + 1 new_form, message = self.save_form_definition(trans, None, payload) if new_form is None: return self.message_exception(trans, message) imported = (' with %i imported fields' % index) if index > 0 else '' message = f"The form '{payload.get("name")}' has been created{imported}." return {'message': util.sanitize_text(message)} @web.legacy_expose_api @web.require_admin def edit_form(self, trans, payload=None, **kwd): id = kwd.get('id') if not id: return self.message_exception(trans, 'No form id received for editing.') form = get_form(trans, id) latest_form = form.latest_form if trans.request.method == 'GET': fd_types = sorted(trans.app.model.FormDefinition.types.__members__.items()) ff_types = [(t.__name__.replace('Field', ''), t.__name__) for t in trans.model.FormDefinition.supported_field_types] field_cache = [] field_inputs = [{ 'name': 'name', 'label': 'Name', 'value': 'field_name', 'help': 'The field name must be unique for each field and must contain only alphanumeric characters and underscore.' }, { 'name': 'label', 'label': 'Label', 'value': 'Field label' }, { 'name': 'helptext', 'label': 'Help text' }, { 'name': 'type', 'label': 'Type', 'type': 'select', 'options': ff_types }, { 'name': 'default', 'label': 'Default value' }, { 'name': 'selectlist', 'label': 'Options', 'help': '*Only for fields which allow multiple selections, provide comma-separated values.' 
}, { 'name': 'required', 'label': 'Required', 'type': 'boolean', 'value': 'false' }] form_dict = { 'title': 'Edit form for \'%s\'' % (util.sanitize_text(latest_form.name)), 'inputs': [{ 'name': 'name', 'label': 'Name', 'value': latest_form.name }, { 'name': 'desc', 'label': 'Description', 'value': latest_form.desc }, { 'name': 'type', 'type': 'select', 'options': [('None', 'none')] + [(ft[1], ft[1]) for ft in fd_types], 'label': 'Type', 'value': latest_form.type }, { 'name': 'fields', 'title': 'Field', 'type': 'repeat', 'cache': field_cache, 'inputs': field_inputs }] } for field in latest_form.fields: new_field = copy.deepcopy(field_inputs) for field_input in new_field: field_value = field.get(field_input['name']) if field_value: if isinstance(field_value, list): field_value = ','.join(field_value) field_input['value'] = str(field_value) field_cache.append(new_field) return form_dict else: new_form, message = self.save_form_definition(trans, id, payload) if new_form is None: return self.message_exception(trans, message) message = f"The form '{payload.get("name")}' has been updated." 
return {'message': util.sanitize_text(message)} def get_current_form(self, trans, payload=None, **kwd): ''' This method gets all the unsaved user-entered form details and returns a dictionary containing the name, desc, type, layout & fields of the form ''' name = payload.get('name') desc = payload.get('desc') or '' type = payload.get('type') fields = [] index = 0 while True: prefix = 'fields_%i|' % index if f"{prefix}label" in payload: field_attributes = ['name', 'label', 'helptext', 'required', 'type', 'selectlist', 'default'] field_dict = {attr: payload.get(f'{prefix}{attr}') for attr in field_attributes} field_dict['visible'] = True field_dict['required'] = field_dict['required'] == 'true' if isinstance(field_dict['selectlist'], str): field_dict['selectlist'] = field_dict['selectlist'].split(',') else: field_dict['selectlist'] = [] fields.append(field_dict) index = index + 1 else: break return dict(name=name, desc=desc, type=type, layout=[], fields=fields) def save_form_definition(self, trans, form_id=None, payload=None, **kwd): ''' This method saves a form given an id ''' if not payload.get('name'): return None, 'Please provide a form name.' if payload.get('type') == 'none': return None, 'Please select a form type.' current_form = self.get_current_form(trans, payload) # validate fields field_names_dict = {} for field in current_form['fields']: if not field['label']: return None, 'All the field labels must be completed.' if not VALID_FIELDNAME_RE.match(field['name']): return None, f"{field["name"]} is not a valid field name." if field['name'] in field_names_dict: return None, f"Each field name must be unique in the form definition. {field["name"]} is not unique." 
else: field_names_dict[field['name']] = 1 # create a new form definition form_definition = trans.app.model.FormDefinition(name=current_form['name'], desc=current_form['desc'], fields=current_form['fields'], form_definition_current=None, form_type=current_form['type'], layout=current_form['layout']) # save changes to the existing form if form_id: form_definition_current = trans.sa_session.query(trans.app.model.FormDefinitionCurrent).get(trans.security.decode_id(form_id)) if form_definition_current is None: return None, f'Invalid form id ({form_id}) provided. Cannot save form.' else: form_definition_current = trans.app.model.FormDefinitionCurrent() # create corresponding row in the form_definition_current table form_definition.form_definition_current = form_definition_current form_definition_current.latest_form = form_definition trans.sa_session.add(form_definition_current) trans.sa_session.flush() return form_definition, None @web.expose @web.require_admin def _delete_form(self, trans, ids): for form_id in ids: form = get_form(trans, form_id) form.deleted = True trans.sa_session.add(form) trans.sa_session.flush() return ('Deleted %i form(s).' % len(ids), 'done') @web.expose @web.require_admin def _undelete_form(self, trans, ids): for form_id in ids: form = get_form(trans, form_id) form.deleted = False trans.sa_session.add(form) trans.sa_session.flush() return ('Undeleted %i form(s).' % len(ids), 'done') # ---- Utility methods ------------------------------------------------------- def get_form(trans, form_id): """Get a FormDefinition from the database by id.""" form = trans.sa_session.query(trans.app.model.FormDefinitionCurrent).get(trans.security.decode_id(form_id)) if not form: return trans.show_error_message(f"Form not found for id ({str(form_id)})") return form
import copy import logging import re from markupsafe import escape from galaxy import model, util from galaxy.web.framework.helpers import grids, iff, time_ago from galaxy.webapps.base.controller import BaseUIController, web log = logging.getLogger(__name__) VALID_FIELDNAME_RE = re.compile(r"^[a-zA-Z0-9\_]+$") class FormsGrid(grids.Grid): # Custom column types class NameColumn(grids.TextColumn): def get_value(self, trans, grid, form): return escape(form.latest_form.name) class DescriptionColumn(grids.TextColumn): def get_value(self, trans, grid, form): return escape(form.latest_form.desc) class TypeColumn(grids.TextColumn): def get_value(self, trans, grid, form): return form.latest_form.type class StatusColumn(grids.GridColumn): def get_value(self, trans, grid, user): if user.deleted: return "deleted" return "active" # Grid definition title = "Forms" model_class = model.FormDefinitionCurrent default_sort_key = "-update_time" num_rows_per_page = 50 use_paging = True default_filter = dict(deleted="False") columns = [ NameColumn("Name", key="name", model_class=model.FormDefinition, link=(lambda item: iff(item.deleted, None, dict(controller="admin", action="form/edit_form", id=item.id))), attach_popup=True, filterable="advanced"), DescriptionColumn("Description", key="desc", model_class=model.FormDefinition, filterable="advanced"), TypeColumn("Type"), grids.GridColumn("Last Updated", key="update_time", format=time_ago), StatusColumn("Status"), grids.DeletedColumn("Deleted", key="deleted", visible=False, filterable="advanced") ] columns.append(grids.MulticolFilterColumn("Search", cols_to_filter=[columns[0], columns[1]], key="free-text-search", visible=False, filterable="standard")) operations = [ grids.GridOperation("Delete", allow_multiple=True, condition=(lambda item: not item.deleted)), grids.GridOperation("Undelete", condition=(lambda item: item.deleted)), ] global_actions = [ grids.GridAction("Create new form", dict(controller="admin", action="form/create_form")) ] 
def build_initial_query(self, trans, **kwargs): return trans.sa_session.query(self.model_class).join(model.FormDefinition, self.model_class.latest_form_id == model.FormDefinition.id) class Forms(BaseUIController): forms_grid = FormsGrid() @web.legacy_expose_api @web.require_admin def forms_list(self, trans, payload=None, **kwd): message = kwd.get('message', '') status = kwd.get('status', '') if 'operation' in kwd: id = kwd.get('id') if not id: return self.message_exception(trans, f'Invalid form id ({str(id)}) received.') ids = util.listify(id) operation = kwd['operation'].lower() if operation == 'delete': message, status = self._delete_form(trans, ids) elif operation == 'undelete': message, status = self._undelete_form(trans, ids) if message and status: kwd['message'] = util.sanitize_text(message) kwd['status'] = status return self.forms_grid(trans, **kwd) @web.legacy_expose_api @web.require_admin def create_form(self, trans, payload=None, **kwd): if trans.request.method == 'GET': fd_types = sorted(trans.app.model.FormDefinition.types.__members__.items()) return { 'title': 'Create new form', 'submit_title': 'Create', 'inputs': [{ 'name': 'name', 'label': 'Name' }, { 'name': 'desc', 'label': 'Description' }, { 'name': 'type', 'type': 'select', 'options': [('None', 'none')] + [(ft[1], ft[1]) for ft in fd_types], 'label': 'Type' }, { 'name': 'csv_file', 'label': 'Import from CSV', 'type': 'upload', 'help': 'Import fields from CSV-file with the following format: Label, Help, Type, Value, Options, Required=True/False.' 
}] } else: # csv-file format: label, helptext, type, default, selectlist, required ''' csv_file = payload.get('csv_file') index = 0 if csv_file: lines = csv_file.splitlines() for line in lines: row = line.split(',') if len(row) >= 6: prefix = 'fields_%i|' % index payload[f"{prefix}name"] = '%i_imported_field' % (index + 1) payload[f"{prefix}label"] = row[0] payload[f"{prefix}helptext"] = row[1] payload[f"{prefix}type"] = row[2] payload[f"{prefix}default"] = row[3] payload[f"{prefix}selectlist"] = row[4].split(',') payload[f"{prefix}required"] = row[5].lower() == 'true' index = index + 1 new_form, message = self.save_form_definition(trans, None, payload) if new_form is None: return self.message_exception(trans, message) imported = (' with %i imported fields' % index) if index > 0 else '' message = f"The form '{payload.get('name')}' has been created{imported}." return {'message': util.sanitize_text(message)} @web.legacy_expose_api @web.require_admin def edit_form(self, trans, payload=None, **kwd): id = kwd.get('id') if not id: return self.message_exception(trans, 'No form id received for editing.') form = get_form(trans, id) latest_form = form.latest_form if trans.request.method == 'GET': fd_types = sorted(trans.app.model.FormDefinition.types.__members__.items()) ff_types = [(t.__name__.replace('Field', ''), t.__name__) for t in trans.model.FormDefinition.supported_field_types] field_cache = [] field_inputs = [{ 'name': 'name', 'label': 'Name', 'value': 'field_name', 'help': 'The field name must be unique for each field and must contain only alphanumeric characters and underscore.' }, { 'name': 'label', 'label': 'Label', 'value': 'Field label' }, { 'name': 'helptext', 'label': 'Help text' }, { 'name': 'type', 'label': 'Type', 'type': 'select', 'options': ff_types }, { 'name': 'default', 'label': 'Default value' }, { 'name': 'selectlist', 'label': 'Options', 'help': '*Only for fields which allow multiple selections, provide comma-separated values.' 
}, { 'name': 'required', 'label': 'Required', 'type': 'boolean', 'value': 'false' }] form_dict = { 'title': 'Edit form for \'%s\'' % (util.sanitize_text(latest_form.name)), 'inputs': [{ 'name': 'name', 'label': 'Name', 'value': latest_form.name }, { 'name': 'desc', 'label': 'Description', 'value': latest_form.desc }, { 'name': 'type', 'type': 'select', 'options': [('None', 'none')] + [(ft[1], ft[1]) for ft in fd_types], 'label': 'Type', 'value': latest_form.type }, { 'name': 'fields', 'title': 'Field', 'type': 'repeat', 'cache': field_cache, 'inputs': field_inputs }] } for field in latest_form.fields: new_field = copy.deepcopy(field_inputs) for field_input in new_field: field_value = field.get(field_input['name']) if field_value: if isinstance(field_value, list): field_value = ','.join(field_value) field_input['value'] = str(field_value) field_cache.append(new_field) return form_dict else: new_form, message = self.save_form_definition(trans, id, payload) if new_form is None: return self.message_exception(trans, message) message = f"The form '{payload.get('name')}' has been updated." 
return {'message': util.sanitize_text(message)} def get_current_form(self, trans, payload=None, **kwd): ''' This method gets all the unsaved user-entered form details and returns a dictionary containing the name, desc, type, layout & fields of the form ''' name = payload.get('name') desc = payload.get('desc') or '' type = payload.get('type') fields = [] index = 0 while True: prefix = 'fields_%i|' % index if f"{prefix}label" in payload: field_attributes = ['name', 'label', 'helptext', 'required', 'type', 'selectlist', 'default'] field_dict = {attr: payload.get(f'{prefix}{attr}') for attr in field_attributes} field_dict['visible'] = True field_dict['required'] = field_dict['required'] == 'true' if isinstance(field_dict['selectlist'], str): field_dict['selectlist'] = field_dict['selectlist'].split(',') else: field_dict['selectlist'] = [] fields.append(field_dict) index = index + 1 else: break return dict(name=name, desc=desc, type=type, layout=[], fields=fields) def save_form_definition(self, trans, form_id=None, payload=None, **kwd): ''' This method saves a form given an id ''' if not payload.get('name'): return None, 'Please provide a form name.' if payload.get('type') == 'none': return None, 'Please select a form type.' current_form = self.get_current_form(trans, payload) # validate fields field_names_dict = {} for field in current_form['fields']: if not field['label']: return None, 'All the field labels must be completed.' if not VALID_FIELDNAME_RE.match(field['name']): return None, f"{field['name']} is not a valid field name." if field['name'] in field_names_dict: return None, f"Each field name must be unique in the form definition. {field['name']} is not unique." 
else: field_names_dict[field['name']] = 1 # create a new form definition form_definition = trans.app.model.FormDefinition(name=current_form['name'], desc=current_form['desc'], fields=current_form['fields'], form_definition_current=None, form_type=current_form['type'], layout=current_form['layout']) # save changes to the existing form if form_id: form_definition_current = trans.sa_session.query(trans.app.model.FormDefinitionCurrent).get(trans.security.decode_id(form_id)) if form_definition_current is None: return None, f'Invalid form id ({form_id}) provided. Cannot save form.' else: form_definition_current = trans.app.model.FormDefinitionCurrent() # create corresponding row in the form_definition_current table form_definition.form_definition_current = form_definition_current form_definition_current.latest_form = form_definition trans.sa_session.add(form_definition_current) trans.sa_session.flush() return form_definition, None @web.expose @web.require_admin def _delete_form(self, trans, ids): for form_id in ids: form = get_form(trans, form_id) form.deleted = True trans.sa_session.add(form) trans.sa_session.flush() return ('Deleted %i form(s).' % len(ids), 'done') @web.expose @web.require_admin def _undelete_form(self, trans, ids): for form_id in ids: form = get_form(trans, form_id) form.deleted = False trans.sa_session.add(form) trans.sa_session.flush() return ('Undeleted %i form(s).' % len(ids), 'done') # ---- Utility methods ------------------------------------------------------- def get_form(trans, form_id): """Get a FormDefinition from the database by id.""" form = trans.sa_session.query(trans.app.model.FormDefinitionCurrent).get(trans.security.decode_id(form_id)) if not form: return trans.show_error_message(f"Form not found for id ({str(form_id)})") return form
import copy from operator import itemgetter from typing import Tuple, Callable import demistomock as demisto # noqa: F401 import urllib3 from CommonServerPython import * # noqa: F401 # Disable insecure warnings urllib3.disable_warnings() TIME_FORMAT = "%Y-%m-%dT%H:%M:%S" NONCE_LENGTH = 64 API_KEY_LENGTH = 128 INTEGRATION_CONTEXT_BRAND = 'Core' INTEGRATION_NAME = 'Cortex Core - IR' XSOAR_RESOLVED_STATUS_TO_Core = { 'Other': 'resolved_other', 'Duplicate': 'resolved_duplicate', 'False Positive': 'resolved_false_positive', 'Resolved': 'resolved_true_positive', } ALERT_GENERAL_FIELDS = { 'detection_modules', 'alert_full_description', 'matching_service_rule_id', 'variation_rule_id', 'content_version', 'detector_id', 'mitre_technique_id_and_name', 'silent', 'mitre_technique_ids', 'activity_first_seet_at', '_type', 'dst_association_strength', 'alert_description', } ALERT_EVENT_GENERAL_FIELDS = { "_time", "vendor", "event_timestamp", "event_type", "event_id", "cloud_provider", "project", "cloud_provider_event_id", "cloud_correlation_id", "operation_name_orig", "operation_name", "identity_orig", "identity_name", "identity_uuid", "identity_type", "identity_sub_type", "identity_invoked_by_name", "identity_invoked_by_uuid", "identity_invoked_by_type", "identity_invoked_by_sub_type", "operation_status", "operation_status_orig", "operation_status_orig_code", "operation_status_reason_provided", "resource_type", "resource_type_orig", "resource_sub_type", "resource_sub_type_orig", "region", "zone", "referenced_resource", "referenced_resource_name", "referenced_resources_count", "user_agent", "caller_ip", 'caller_ip_geolocation', "caller_ip_asn", 'caller_project', 'raw_log', "log_name", "caller_ip_asn_org", "event_base_id", "ingestion_time", } ALERT_EVENT_GENERAL_FIELDS = { "_time", "vendor", "event_timestamp", "event_type", "event_id", "cloud_provider", "project", "cloud_provider_event_id", "cloud_correlation_id", "operation_name_orig", "operation_name", "identity_orig", 
"identity_name", "identity_uuid", "identity_type", "identity_sub_type", "identity_invoked_by_name", "identity_invoked_by_uuid", "identity_invoked_by_type", "identity_invoked_by_sub_type", "operation_status", "operation_status_orig", "operation_status_orig_code", "operation_status_reason_provided", "resource_type", "resource_type_orig", "resource_sub_type", "resource_sub_type_orig", "region", "zone", "referenced_resource", "referenced_resource_name", "referenced_resources_count", "user_agent", "caller_ip", 'caller_ip_geolocation', "caller_ip_asn", 'caller_project', 'raw_log', "log_name", "caller_ip_asn_org", "event_base_id", "ingestion_time", } ALERT_EVENT_AWS_FIELDS = { "eventVersion", "userIdentity", "eventTime", "eventSource", "eventName", "awsRegion", "sourceIPAddress", "userAgent", "requestID", "eventID", "readOnly", "eventType", "apiVersion", "managementEvent", "recipientAccountId", "eventCategory", "errorCode", "errorMessage", "resources", } ALERT_EVENT_GCP_FIELDS = { "labels", "operation", "protoPayload", "resource", "severity", "timestamp", } ALERT_EVENT_AZURE_FIELDS = { "time", "resourceId", "category", "operationName", "operationVersion", "schemaVersion", "statusCode", "statusText", "callerIpAddress", "correlationId", "identity", "level", "properties", "uri", "protocol", "resourceType", "tenantId", } class Client(BaseClient): def __init__(self, base_url: str, headers: dict, timeout: int = 120, proxy: bool = False, verify: bool = False): self.timeout = timeout super().__init__(base_url=base_url, headers=headers, proxy=proxy, verify=verify) def test_module(self): """ Performs basic get request to get item samples """ try: self.get_incidents(limit=1) except Exception as err: if 'API request Unauthorized' in str(err): # this error is received from the Core server when the client clock is not in sync to the server raise DemistoException(f'{str(err)} please validate that your both ' f'XSOAR and Core server clocks are in sync') else: raise def 
get_incidents(self, incident_id_list=None, lte_modification_time=None, gte_modification_time=None, lte_creation_time=None, gte_creation_time=None, status=None, sort_by_modification_time=None, sort_by_creation_time=None, page_number=0, limit=100, gte_creation_time_milliseconds=0): """ Filters and returns incidents :param incident_id_list: List of incident ids - must be list :param lte_modification_time: string of time format "2019-12-31T23:59:00" :param gte_modification_time: string of time format "2019-12-31T23:59:00" :param lte_creation_time: string of time format "2019-12-31T23:59:00" :param gte_creation_time: string of time format "2019-12-31T23:59:00" :param status: string of status :param sort_by_modification_time: optional - enum (asc,desc) :param sort_by_creation_time: optional - enum (asc,desc) :param page_number: page number :param limit: maximum number of incidents to return per page :param gte_creation_time_milliseconds: greater than time in milliseconds :return: """ search_from = page_number * limit search_to = search_from + limit request_data = { 'search_from': search_from, 'search_to': search_to, } if sort_by_creation_time and sort_by_modification_time: raise ValueError('Should be provide either sort_by_creation_time or ' 'sort_by_modification_time. 
Can\'t provide both') if sort_by_creation_time: request_data['sort'] = { 'field': 'creation_time', 'keyword': sort_by_creation_time } elif sort_by_modification_time: request_data['sort'] = { 'field': 'modification_time', 'keyword': sort_by_modification_time } filters = [] if incident_id_list is not None and len(incident_id_list) > 0: filters.append({ 'field': 'incident_id_list', 'operator': 'in', 'value': incident_id_list }) if lte_creation_time: filters.append({ 'field': 'creation_time', 'operator': 'lte', 'value': date_to_timestamp(lte_creation_time, TIME_FORMAT) }) if gte_creation_time: filters.append({ 'field': 'creation_time', 'operator': 'gte', 'value': date_to_timestamp(gte_creation_time, TIME_FORMAT) }) if lte_modification_time: filters.append({ 'field': 'modification_time', 'operator': 'lte', 'value': date_to_timestamp(lte_modification_time, TIME_FORMAT) }) if gte_modification_time: filters.append({ 'field': 'modification_time', 'operator': 'gte', 'value': date_to_timestamp(gte_modification_time, TIME_FORMAT) }) if gte_creation_time_milliseconds > 0: filters.append({ 'field': 'creation_time', 'operator': 'gte', 'value': gte_creation_time_milliseconds }) if status: filters.append({ 'field': 'status', 'operator': 'eq', 'value': status }) if len(filters) > 0: request_data['filters'] = filters res = self._http_request( method='POST', url_suffix='/incidents/get_incidents/', json_data={'request_data': request_data}, timeout=self.timeout ) incidents = res.get('reply').get('incidents', []) return incidents def update_incident(self, incident_id, assigned_user_mail, assigned_user_pretty_name, status, severity, resolve_comment, unassign_user): update_data = {} if unassign_user and (assigned_user_mail or assigned_user_pretty_name): raise ValueError("Can't provide both assignee_email/assignee_name and unassign_user") if unassign_user: update_data['assigned_user_mail'] = 'none' if assigned_user_mail: update_data['assigned_user_mail'] = assigned_user_mail if 
assigned_user_pretty_name: update_data['assigned_user_pretty_name'] = assigned_user_pretty_name if status: update_data['status'] = status if severity: update_data['manual_severity'] = severity if resolve_comment: update_data['resolve_comment'] = resolve_comment request_data = { 'incident_id': incident_id, 'update_data': update_data, } self._http_request( method='POST', url_suffix='/incidents/update_incident/', json_data={'request_data': request_data}, timeout=self.timeout ) def get_endpoints(self, endpoint_id_list=None, dist_name=None, ip_list=None, group_name=None, platform=None, alias_name=None, isolate=None, hostname=None, page_number=0, limit=30, first_seen_gte=None, first_seen_lte=None, last_seen_gte=None, last_seen_lte=None, sort_by_first_seen=None, sort_by_last_seen=None, status=None, no_filter=False ): search_from = page_number * limit search_to = search_from + limit request_data = { 'search_from': search_from, 'search_to': search_to, } if no_filter: reply = self._http_request( method='POST', url_suffix='/endpoints/get_endpoints/', json_data={}, timeout=self.timeout ) endpoints = reply.get('reply')[search_from:search_to] for endpoint in endpoints: if not endpoint.get('endpoint_id'): endpoint['endpoint_id'] = endpoint.get('agent_id') else: filters = [] if status: filters.append({ 'field': 'endpoint_status', 'operator': 'IN', 'value': [status] }) if endpoint_id_list: filters.append({ 'field': 'endpoint_id_list', 'operator': 'in', 'value': endpoint_id_list }) if dist_name: filters.append({ 'field': 'dist_name', 'operator': 'in', 'value': dist_name }) if ip_list: filters.append({ 'field': 'ip_list', 'operator': 'in', 'value': ip_list }) if group_name: filters.append({ 'field': 'group_name', 'operator': 'in', 'value': group_name }) if platform: filters.append({ 'field': 'platform', 'operator': 'in', 'value': platform }) if alias_name: filters.append({ 'field': 'alias', 'operator': 'in', 'value': alias_name }) if isolate: filters.append({ 'field': 'isolate', 
'operator': 'in', 'value': [isolate] }) if hostname: filters.append({ 'field': 'hostname', 'operator': 'in', 'value': hostname }) if first_seen_gte: filters.append({ 'field': 'first_seen', 'operator': 'gte', 'value': first_seen_gte }) if first_seen_lte: filters.append({ 'field': 'first_seen', 'operator': 'lte', 'value': first_seen_lte }) if last_seen_gte: filters.append({ 'field': 'last_seen', 'operator': 'gte', 'value': last_seen_gte }) if last_seen_lte: filters.append({ 'field': 'last_seen', 'operator': 'lte', 'value': last_seen_lte }) if search_from: request_data['search_from'] = search_from if search_to: request_data['search_to'] = search_to if sort_by_first_seen: request_data['sort'] = { 'field': 'first_seen', 'keyword': sort_by_first_seen } elif sort_by_last_seen: request_data['sort'] = { 'field': 'last_seen', 'keyword': sort_by_last_seen } request_data['filters'] = filters reply = self._http_request( method='POST', url_suffix='/endpoints/get_endpoint/', json_data={'request_data': request_data}, timeout=self.timeout ) endpoints = reply.get('reply').get('endpoints', []) return endpoints def isolate_endpoint(self, endpoint_id, incident_id=None): request_data = { 'endpoint_id': endpoint_id, } if incident_id: request_data['incident_id'] = incident_id reply = self._http_request( method='POST', url_suffix='/endpoints/isolate', json_data={'request_data': request_data}, timeout=self.timeout ) return reply.get('reply') def unisolate_endpoint(self, endpoint_id, incident_id=None): request_data = { 'endpoint_id': endpoint_id, } if incident_id: request_data['incident_id'] = incident_id reply = self._http_request( method='POST', url_suffix='/endpoints/unisolate', json_data={'request_data': request_data}, timeout=self.timeout ) return reply.get('reply') def get_distribution_url(self, distribution_id, package_type): reply = self._http_request( method='POST', url_suffix='/distributions/get_dist_url/', json_data={ 'request_data': { 'distribution_id': distribution_id, 
'package_type': package_type } }, timeout=self.timeout ) return reply.get('reply').get('distribution_url') def get_distribution_status(self, distribution_id): reply = self._http_request( method='POST', url_suffix='/distributions/get_status/', json_data={ 'request_data': { 'distribution_id': distribution_id } }, timeout=self.timeout ) return reply.get('reply').get('status') def get_distribution_versions(self): reply = self._http_request( method='POST', url_suffix='/distributions/get_versions/', json_data={}, timeout=self.timeout ) return reply.get('reply') def create_distribution(self, name, platform, package_type, agent_version, description): request_data = {} if package_type == 'standalone': request_data = { 'name': name, 'platform': platform, 'package_type': package_type, 'agent_version': agent_version, 'description': description, } elif package_type == 'upgrade': request_data = { 'name': name, 'package_type': package_type, 'description': description, } if platform == 'windows': request_data['windows_version'] = agent_version elif platform == 'linux': request_data['linux_version'] = agent_version elif platform == 'macos': request_data['macos_version'] = agent_version reply = self._http_request( method='POST', url_suffix='/distributions/create/', json_data={ 'request_data': request_data }, timeout=self.timeout ) return reply.get('reply').get('distribution_id') def audit_management_logs(self, email, result, _type, sub_type, search_from, search_to, timestamp_gte, timestamp_lte, sort_by, sort_order): request_data: Dict[str, Any] = {} filters = [] if email: filters.append({ 'field': 'email', 'operator': 'in', 'value': email }) if result: filters.append({ 'field': 'result', 'operator': 'in', 'value': result }) if _type: filters.append({ 'field': 'type', 'operator': 'in', 'value': _type }) if sub_type: filters.append({ 'field': 'sub_type', 'operator': 'in', 'value': sub_type }) if timestamp_gte: filters.append({ 'field': 'timestamp', 'operator': 'gte', 'value': 
timestamp_gte }) if timestamp_lte: filters.append({ 'field': 'timestamp', 'operator': 'lte', 'value': timestamp_lte }) if filters: request_data['filters'] = filters if search_from > 0: request_data['search_from'] = search_from if search_to: request_data['search_to'] = search_to if sort_by: request_data['sort'] = { 'field': sort_by, 'keyword': sort_order } reply = self._http_request( method='POST', url_suffix='/audits/management_logs/', json_data={'request_data': request_data}, timeout=self.timeout ) return reply.get('reply').get('data', []) def get_audit_agent_reports(self, endpoint_ids, endpoint_names, result, _type, sub_type, search_from, search_to, timestamp_gte, timestamp_lte, sort_by, sort_order): request_data: Dict[str, Any] = {} filters = [] if endpoint_ids: filters.append({ 'field': 'endpoint_id', 'operator': 'in', 'value': endpoint_ids }) if endpoint_names: filters.append({ 'field': 'endpoint_name', 'operator': 'in', 'value': endpoint_names }) if result: filters.append({ 'field': 'result', 'operator': 'in', 'value': result }) if _type: filters.append({ 'field': 'type', 'operator': 'in', 'value': _type }) if sub_type: filters.append({ 'field': 'sub_type', 'operator': 'in', 'value': sub_type }) if timestamp_gte: filters.append({ 'field': 'timestamp', 'operator': 'gte', 'value': timestamp_gte }) if timestamp_lte: filters.append({ 'field': 'timestamp', 'operator': 'lte', 'value': timestamp_lte }) if filters: request_data['filters'] = filters if search_from > 0: request_data['search_from'] = search_from if search_to: request_data['search_to'] = search_to if sort_by: request_data['sort'] = { 'field': sort_by, 'keyword': sort_order } reply = self._http_request( method='POST', url_suffix='/audits/agents_reports/', json_data={'request_data': request_data}, timeout=self.timeout ) return reply.get('reply').get('data', []) def blocklist_files(self, hash_list, comment=None, incident_id=None, detailed_response=False): request_data: Dict[str, Any] = {"hash_list": 
hash_list} if comment: request_data["comment"] = comment if incident_id: request_data['incident_id'] = incident_id if detailed_response: request_data['detailed_response'] = detailed_response self._headers['content-type'] = 'application/json' reply = self._http_request( method='POST', url_suffix='/hash_exceptions/blocklist/', json_data={'request_data': request_data}, ok_codes=(200, 201, 500,), timeout=self.timeout ) return reply.get('reply') def remove_blocklist_files(self, hash_list, comment=None, incident_id=None): request_data: Dict[str, Any] = {"hash_list": hash_list} if comment: request_data["comment"] = comment if incident_id: request_data['incident_id'] = incident_id self._headers['content-type'] = 'application/json' reply = self._http_request( method='POST', url_suffix='/hash_exceptions/blocklist/remove/', json_data={'request_data': request_data}, timeout=self.timeout ) return reply.get('reply') def allowlist_files(self, hash_list, comment=None, incident_id=None, detailed_response=False): request_data: Dict[str, Any] = {"hash_list": hash_list} if comment: request_data["comment"] = comment if incident_id: request_data['incident_id'] = incident_id if detailed_response: request_data['detailed_response'] = detailed_response self._headers['content-type'] = 'application/json' reply = self._http_request( method='POST', url_suffix='/hash_exceptions/allowlist/', json_data={'request_data': request_data}, ok_codes=(201, 200), timeout=self.timeout ) return reply.get('reply') def remove_allowlist_files(self, hash_list, comment=None, incident_id=None): request_data: Dict[str, Any] = {"hash_list": hash_list} if comment: request_data["comment"] = comment if incident_id: request_data['incident_id'] = incident_id self._headers['content-type'] = 'application/json' reply = self._http_request( method='POST', url_suffix='/hash_exceptions/allowlist/remove/', json_data={'request_data': request_data}, timeout=self.timeout ) return reply.get('reply') def quarantine_files(self, 
endpoint_id_list, file_path, file_hash, incident_id): request_data: Dict[str, Any] = {} filters = [] if endpoint_id_list: filters.append({ 'field': 'endpoint_id_list', 'operator': 'in', 'value': endpoint_id_list }) if filters: request_data['filters'] = filters request_data['file_path'] = file_path request_data['file_hash'] = file_hash if incident_id: request_data['incident_id'] = incident_id self._headers['content-type'] = 'application/json' reply = self._http_request( method='POST', url_suffix='/endpoints/quarantine/', json_data={'request_data': request_data}, ok_codes=(200, 201), timeout=self.timeout ) return reply.get('reply') def restore_file(self, file_hash, endpoint_id=None, incident_id=None): request_data: Dict[str, Any] = {'file_hash': file_hash} if incident_id: request_data['incident_id'] = incident_id if endpoint_id: request_data['endpoint_id'] = endpoint_id self._headers['content-type'] = 'application/json' reply = self._http_request( method='POST', url_suffix='/endpoints/restore/', json_data={'request_data': request_data}, ok_codes=(200, 201), timeout=self.timeout ) return reply.get('reply') def endpoint_scan(self, url_suffix, endpoint_id_list=None, dist_name=None, gte_first_seen=None, gte_last_seen=None, lte_first_seen=None, lte_last_seen=None, ip_list=None, group_name=None, platform=None, alias=None, isolate=None, hostname: list = None, incident_id=None): request_data: Dict[str, Any] = {} filters = [] if endpoint_id_list: filters.append({ 'field': 'endpoint_id_list', 'operator': 'in', 'value': endpoint_id_list }) if dist_name: filters.append({ 'field': 'dist_name', 'operator': 'in', 'value': dist_name }) if ip_list: filters.append({ 'field': 'ip_list', 'operator': 'in', 'value': ip_list }) if group_name: filters.append({ 'field': 'group_name', 'operator': 'in', 'value': group_name }) if platform: filters.append({ 'field': 'platform', 'operator': 'in', 'value': platform }) if alias: filters.append({ 'field': 'alias', 'operator': 'in', 'value': alias }) 
if isolate: filters.append({ 'field': 'isolate', 'operator': 'in', 'value': [isolate] }) if hostname: filters.append({ 'field': 'hostname', 'operator': 'in', 'value': hostname }) if gte_first_seen: filters.append({ 'field': 'first_seen', 'operator': 'gte', 'value': gte_first_seen }) if lte_first_seen: filters.append({ 'field': 'first_seen', 'operator': 'lte', 'value': lte_first_seen }) if gte_last_seen: filters.append({ 'field': 'last_seen', 'operator': 'gte', 'value': gte_last_seen }) if lte_last_seen: filters.append({ 'field': 'last_seen', 'operator': 'lte', 'value': lte_last_seen }) if filters: request_data['filters'] = filters else: request_data['filters'] = 'all' if incident_id: request_data['incident_id'] = incident_id self._headers['content-type'] = 'application/json' reply = self._http_request( method='POST', url_suffix=url_suffix, json_data={'request_data': request_data}, ok_codes=(200, 201), timeout=self.timeout ) return reply.get('reply') def get_quarantine_status(self, file_path, file_hash, endpoint_id): request_data: Dict[str, Any] = {'files': [{ 'endpoint_id': endpoint_id, 'file_path': file_path, 'file_hash': file_hash }]} self._headers['content-type'] = 'application/json' reply = self._http_request( method='POST', url_suffix='/quarantine/status/', json_data={'request_data': request_data}, timeout=self.timeout ) reply_content = reply.get('reply') if isinstance(reply_content, list): return reply_content[0] else: raise TypeError(f'got unexpected response from api: {reply_content}\n') def delete_endpoints(self, endpoint_ids: list): request_data: Dict[str, Any] = { 'filters': [ { 'field': 'endpoint_id_list', 'operator': 'in', 'value': endpoint_ids } ] } self._http_request( method='POST', url_suffix='/endpoints/delete/', json_data={'request_data': request_data}, timeout=self.timeout ) def get_policy(self, endpoint_id) -> Dict[str, Any]: request_data: Dict[str, Any] = { 'endpoint_id': endpoint_id } reply = self._http_request( method='POST', 
url_suffix='/endpoints/get_policy/', json_data={'request_data': request_data}, timeout=self.timeout ) return reply.get('reply') def report_incorrect_wildfire(self, file_hash: str, new_verdict: int, reason: str, email: str) -> Dict[str, Any]: request_data: Dict[str, Any] = { "hash": file_hash, "new_verdict": new_verdict, "reason": reason, "email": email, } reply = demisto._apiCall(name="wfReportIncorrectVerdict", params=None, data=json.dumps(request_data)) return reply def get_original_alerts(self, alert_id_list): res = self._http_request( method='POST', url_suffix='/alerts/get_original_alerts/', json_data={ 'request_data': { 'alert_id_list': alert_id_list, } }, ) return res.get('reply', {}) def get_endpoint_device_control_violations(self, endpoint_ids: list, type_of_violation, timestamp_gte: int, timestamp_lte: int, ip_list: list, vendor: list, vendor_id: list, product: list, product_id: list, serial: list, hostname: list, violation_ids: list, username: list) \ -> Dict[str, Any]: arg_list = {'type': type_of_violation, 'endpoint_id_list': endpoint_ids, 'ip_list': ip_list, 'vendor': vendor, 'vendor_id': vendor_id, 'product': product, 'product_id': product_id, 'serial': serial, 'hostname': hostname, 'violation_id_list': violation_ids, 'username': username } filters: list = [{ 'field': arg_key, 'operator': 'in', 'value': arg_val } for arg_key, arg_val in arg_list.items() if arg_val and arg_val[0]] if timestamp_lte: filters.append({ 'field': 'timestamp', 'operator': 'lte', 'value': timestamp_lte }) if timestamp_gte: filters.append({ 'field': 'timestamp', 'operator': 'gte', 'value': timestamp_gte}) request_data: Dict[str, Any] = { 'filters': filters } reply = self._http_request( method='POST', url_suffix='/device_control/get_violations/', json_data={'request_data': request_data}, timeout=self.timeout ) return reply.get('reply') def generate_files_dict_with_specific_os(self, windows: list, linux: list, macos: list) -> Dict[str, list]: if not windows and not linux and not 
macos: raise ValueError('You should enter at least one path.') files = {} if windows: files['windows'] = windows if linux: files['linux'] = linux if macos: files['macos'] = macos return files def retrieve_file(self, endpoint_id_list: list, windows: list, linux: list, macos: list, file_path_list: list, incident_id: Optional[int]) -> Dict[str, Any]: # there are 2 options, either the paths are given with separation to a specific os or without # it using generic_file_path if file_path_list: files = self.generate_files_dict( endpoint_id_list=endpoint_id_list, file_path_list=file_path_list ) else: files = self.generate_files_dict_with_specific_os(windows=windows, linux=linux, macos=macos) request_data: Dict[str, Any] = { 'filters': [ { 'field': 'endpoint_id_list', 'operator': 'in', 'value': endpoint_id_list } ], 'files': files, } if incident_id: request_data['incident_id'] = incident_id reply = self._http_request( method='POST', url_suffix='/endpoints/file_retrieval/', json_data={'request_data': request_data}, timeout=self.timeout ) return reply.get('reply') def generate_files_dict(self, endpoint_id_list: list, file_path_list: list) -> Dict[str, Any]: files: dict = {"windows": [], "linux": [], "macos": []} if len(endpoint_id_list) != len(file_path_list): raise ValueError("The endpoint_ids list must be in the same length as the generic_file_path") for endpoint_id, file_path in zip(endpoint_id_list, file_path_list): endpoints = self.get_endpoints(endpoint_id_list=[endpoint_id]) if len(endpoints) == 0 or not isinstance(endpoints, list): raise ValueError(f'Error: Endpoint {endpoint_id} was not found') endpoint = endpoints[0] endpoint_os_type = endpoint.get('os_type') if 'windows' in endpoint_os_type.lower(): files['windows'].append(file_path) elif 'linux' in endpoint_os_type.lower(): files['linux'].append(file_path) elif 'macos' in endpoint_os_type.lower(): files['macos'].append(file_path) # remove keys with no value files = {k: v for k, v in files.items() if v} return files 
def retrieve_file_details(self, action_id: int) -> Dict[str, Any]: request_data: Dict[str, Any] = { 'group_action_id': action_id } reply = self._http_request( method='POST', url_suffix='/actions/file_retrieval_details/', json_data={'request_data': request_data}, timeout=self.timeout ) return reply.get('reply').get('data') def get_scripts(self, name: list, description: list, created_by: list, windows_supported, linux_supported, macos_supported, is_high_risk) -> Dict[str, Any]: arg_list = {'name': name, 'description': description, 'created_by': created_by, 'windows_supported': windows_supported, 'linux_supported': linux_supported, 'macos_supported': macos_supported, 'is_high_risk': is_high_risk } filters: list = [{ 'field': arg_key, 'operator': 'in', 'value': arg_val } for arg_key, arg_val in arg_list.items() if arg_val and arg_val[0]] request_data: Dict[str, Any] = { 'filters': filters } reply = self._http_request( method='POST', url_suffix='/scripts/get_scripts/', json_data={'request_data': request_data}, timeout=self.timeout ) return reply.get('reply') def get_script_metadata(self, script_uid) -> Dict[str, Any]: request_data: Dict[str, Any] = { 'script_uid': script_uid } reply = self._http_request( method='POST', url_suffix='/scripts/get_script_metadata/', json_data={'request_data': request_data}, timeout=self.timeout ) return reply.get('reply') def get_script_code(self, script_uid) -> Dict[str, Any]: request_data: Dict[str, Any] = { 'script_uid': script_uid } reply = self._http_request( method='POST', url_suffix='/scripts/get_script_code/', json_data={'request_data': request_data}, timeout=self.timeout ) return reply.get('reply') @logger def run_script(self, script_uid: str, endpoint_ids: list, parameters: Dict[str, Any], timeout: int, incident_id: Optional[int], ) -> Dict[str, Any]: filters: list = [{ 'field': 'endpoint_id_list', 'operator': 'in', 'value': endpoint_ids }] request_data: Dict[str, Any] = {'script_uid': script_uid, 'timeout': timeout, 'filters': 
filters, 'parameters_values': parameters} if incident_id: request_data['incident_id'] = incident_id return self._http_request( method='POST', url_suffix='/scripts/run_script/', json_data={'request_data': request_data}, timeout=self.timeout ) @logger def run_snippet_code_script(self, snippet_code: str, endpoint_ids: list, incident_id: Optional[int] = None) -> Dict[str, Any]: request_data: Dict[str, Any] = { 'filters': [{ 'field': 'endpoint_id_list', 'operator': 'in', 'value': endpoint_ids }], 'snippet_code': snippet_code, } if incident_id: request_data['incident_id'] = incident_id return self._http_request( method='POST', url_suffix='/scripts/run_snippet_code_script', json_data={ 'request_data': request_data }, timeout=self.timeout, ) @logger def get_script_execution_status(self, action_id: str) -> Dict[str, Any]: request_data: Dict[str, Any] = { 'action_id': action_id } return self._http_request( method='POST', url_suffix='/scripts/get_script_execution_status/', json_data={'request_data': request_data}, timeout=self.timeout ) @logger def get_script_execution_results(self, action_id: str) -> Dict[str, Any]: return self._http_request( method='POST', url_suffix='/scripts/get_script_execution_results', json_data={ 'request_data': { 'action_id': action_id } }, timeout=self.timeout, ) @logger def get_script_execution_result_files(self, action_id: str, endpoint_id: str) -> Dict[str, Any]: response = self._http_request( method='POST', url_suffix='/scripts/get_script_execution_results_files', json_data={ 'request_data': { 'action_id': action_id, 'endpoint_id': endpoint_id, } }, timeout=self.timeout, ) link = response.get('reply', {}).get('DATA') return self._http_request( method='GET', full_url=link, resp_type='response', ) def action_status_get(self, action_id) -> Dict[str, Any]: request_data: Dict[str, Any] = { 'group_action_id': action_id, } reply = self._http_request( method='POST', url_suffix='/actions/get_action_status/', json_data={'request_data': request_data}, 
timeout=self.timeout ) return reply.get('reply').get('data') def get_file(self, file_link): reply = self._http_request( method='GET', url_suffix=file_link, timeout=self.timeout, resp_type='content' ) return reply def add_exclusion(self, indicator, name, status="ENABLED", comment=None): request_data: Dict[str, Any] = { 'indicator': indicator, 'status': status, 'name': name } res = self._http_request( method='POST', url_suffix='/alerts_exclusion/add/', json_data={'request_data': request_data}, timeout=self.timeout ) return res.get("reply") def delete_exclusion(self, alert_exclusion_id: int): request_data: Dict[str, Any] = { 'alert_exclusion_id': alert_exclusion_id, } res = self._http_request( method='POST', url_suffix='/alerts_exclusion/delete/', json_data={'request_data': request_data}, timeout=self.timeout ) return res.get("reply") def get_exclusion(self, limit, tenant_id=None, filter=None): request_data: Dict[str, Any] = {} if tenant_id: request_data['tenant_id'] = tenant_id if filter: request_data['filter_data'] = filter res = self._http_request( method='POST', url_suffix='/alerts_exclusion/', json_data={'request_data': request_data}, timeout=self.timeout ) reply = res.get("reply") return reply[:limit] def create_endpoint_context(audit_logs): endpoints = [] for log in audit_logs: endpoint_details = { 'ID': log.get('ENDPOINTID'), 'Hostname': log.get('ENDPOINTNAME'), 'Domain': log.get('DOMAIN'), } remove_nulls_from_dictionary(endpoint_details) if endpoint_details: endpoints.append(endpoint_details) return endpoints def create_account_context(endpoints): account_context = [] for endpoint in endpoints: domain = endpoint.get('domain') if domain: users = endpoint.get('users', []) # in case the value of 'users' is None if users and isinstance(users, list): for user in users: account_context.append({ 'Username': user, 'Domain': domain, }) return account_context def get_process_context(alert, process_type): process_context = { 'Name': 
alert.get(f'{process_type}_process_image_name'), 'MD5': alert.get(f'{process_type}_process_image_md5'), 'SHA256': alert.get(f'{process_type}_process_image_sha256'), 'PID': alert.get(f'{process_type}_process_os_pid'), 'CommandLine': alert.get(f'{process_type}_process_command_line'), 'Path': alert.get(f'{process_type}_process_image_path'), 'Start Time': alert.get(f'{process_type}_process_execution_time'), 'Hostname': alert.get('host_name'), } remove_nulls_from_dictionary(process_context) # If the process contains only 'HostName' , don't create an indicator if len(process_context.keys()) == 1 and 'Hostname' in process_context.keys(): return {} return process_context def add_to_ip_context(alert, ip_context): action_local_ip = alert.get('action_local_ip') action_remote_ip = alert.get('action_remote_ip') if action_local_ip: ip_context.append({ 'Address': action_local_ip, }) if action_remote_ip: ip_context.append({ 'Address': action_remote_ip, }) def create_context_from_network_artifacts(network_artifacts, ip_context): domain_context = [] if network_artifacts: for artifact in network_artifacts: domain = artifact.get('network_domain') if domain: domain_context.append({ 'Name': domain, }) network_ip_details = { 'Address': artifact.get('network_remote_ip'), 'GEO': { 'Country': artifact.get('network_country')}, } remove_nulls_from_dictionary(network_ip_details) if network_ip_details: ip_context.append(network_ip_details) return domain_context def update_incident_command(client, args): incident_id = args.get('incident_id') assigned_user_mail = args.get('assigned_user_mail') assigned_user_pretty_name = args.get('assigned_user_pretty_name') status = args.get('status') severity = args.get('manual_severity') unassign_user = args.get('unassign_user') == 'true' resolve_comment = args.get('resolve_comment') client.update_incident( incident_id=incident_id, assigned_user_mail=assigned_user_mail, assigned_user_pretty_name=assigned_user_pretty_name, unassign_user=unassign_user, 
def arg_to_int(arg, arg_name: str, required: bool = False):
    """Convert a command argument to an int.

    :param arg: raw argument value (str of digits, int, or None).
    :param arg_name: name used in error messages.
    :param required: when True, a missing (None) argument raises ValueError.
    :return: the int value, or None when arg is None and not required.
    :raises ValueError: when the argument is missing and required, or is not a valid number.
    """
    if arg is None:
        if required is True:
            raise ValueError(f'Missing "{arg_name}"')
        return None
    if isinstance(arg, str):
        if arg.isdigit():
            return int(arg)
        raise ValueError(f'Invalid number: "{arg_name}"="{arg}"')
    if isinstance(arg, int):
        return arg
    # Bug fix: the original *returned* the ValueError instance instead of raising
    # it, so callers silently received an exception object as if it were a number.
    raise ValueError(f'Invalid number: "{arg_name}"')
def convert_os_to_standard(endpoint_os):
    """Map a raw endpoint OS string to the standard XSOAR OS name.

    Matching is case-insensitive and substring based; the first match in
    (windows, linux, macos, android) wins. Anything else maps to ''.
    """
    lowered = endpoint_os.lower()
    for marker, standard_name in (
        ('windows', 'Windows'),
        ('linux', 'Linux'),
        ('macos', 'Macos'),
        ('android', 'Android'),
    ):
        if marker in lowered:
            return standard_name
    return ''
standard if ip_as_string and isinstance(ip, list): ip = ip[0] os_type = convert_os_to_standard(single_endpoint.get('os_type', '')) endpoint = Common.Endpoint( id=single_endpoint.get('endpoint_id'), hostname=hostname, ip_address=ip, os=os_type, status=status, is_isolated=is_isolated, mac_address=single_endpoint.get('mac_address'), domain=single_endpoint.get('domain'), vendor=INTEGRATION_NAME) standard_endpoints.append(endpoint) return standard_endpoints def endpoint_command(client, args): endpoint_id_list = argToList(args.get('id')) endpoint_ip_list = argToList(args.get('ip')) endpoint_hostname_list = argToList(args.get('hostname')) endpoints = client.get_endpoints( endpoint_id_list=endpoint_id_list, ip_list=endpoint_ip_list, hostname=endpoint_hostname_list, ) standard_endpoints = generate_endpoint_by_contex_standard(endpoints, True) command_results = [] if standard_endpoints: for endpoint in standard_endpoints: endpoint_context = endpoint.to_context().get(Common.Endpoint.CONTEXT_PATH) hr = tableToMarkdown('Cortex Core Endpoint', endpoint_context) command_results.append(CommandResults( readable_output=hr, raw_response=endpoints, indicator=endpoint )) else: command_results.append(CommandResults( readable_output="No endpoints were found", raw_response=endpoints, )) return command_results def isolate_endpoint_command(client, args): endpoint_id = args.get('endpoint_id') disconnected_should_return_error = not argToBoolean(args.get('suppress_disconnected_endpoint_error', False)) incident_id = arg_to_number(args.get('incident_id')) endpoint = client.get_endpoints(endpoint_id_list=[endpoint_id]) if len(endpoint) == 0: raise ValueError(f'Error: Endpoint {endpoint_id} was not found') endpoint = endpoint[0] endpoint_status = endpoint.get('endpoint_status') is_isolated = endpoint.get('is_isolated') if is_isolated == 'AGENT_ISOLATED': return CommandResults( readable_output=f'Endpoint {endpoint_id} already isolated.' 
) if is_isolated == 'AGENT_PENDING_ISOLATION': return CommandResults( readable_output=f'Endpoint {endpoint_id} pending isolation.' ) if endpoint_status == 'UNINSTALLED': raise ValueError(f'Error: Endpoint {endpoint_id}\'s Agent is uninstalled and therefore can not be isolated.') if endpoint_status == 'DISCONNECTED': if disconnected_should_return_error: raise ValueError(f'Error: Endpoint {endpoint_id} is disconnected and therefore can not be isolated.') else: return CommandResults( readable_output=f'Warning: isolation action is pending for the following disconnected endpoint: {endpoint_id}.', outputs={f'{INTEGRATION_CONTEXT_BRAND}.Isolation.endpoint_id(val.endpoint_id == obj.endpoint_id)': endpoint_id} ) if is_isolated == 'AGENT_PENDING_ISOLATION_CANCELLATION': raise ValueError( f'Error: Endpoint {endpoint_id} is pending isolation cancellation and therefore can not be isolated.' ) result = client.isolate_endpoint(endpoint_id=endpoint_id, incident_id=incident_id) return CommandResults( readable_output=f'The isolation request has been submitted successfully on Endpoint {endpoint_id}.\n', outputs={f'{INTEGRATION_CONTEXT_BRAND}.Isolation.endpoint_id(val.endpoint_id == obj.endpoint_id)': endpoint_id}, raw_response=result ) def unisolate_endpoint_command(client, args): endpoint_id = args.get('endpoint_id') incident_id = arg_to_number(args.get('incident_id')) disconnected_should_return_error = not argToBoolean(args.get('suppress_disconnected_endpoint_error', False)) endpoint = client.get_endpoints(endpoint_id_list=[endpoint_id]) if len(endpoint) == 0: raise ValueError(f'Error: Endpoint {endpoint_id} was not found') endpoint = endpoint[0] endpoint_status = endpoint.get('endpoint_status') is_isolated = endpoint.get('is_isolated') if is_isolated == 'AGENT_UNISOLATED': return CommandResults( readable_output=f'Endpoint {endpoint_id} already unisolated.' 
def arg_to_timestamp(arg, arg_name: str, required: bool = False):
    """Convert a command argument to an epoch timestamp in milliseconds.

    Accepted forms:
      * a numeric string — assumed to already be a timestamp, returned as int;
      * a date string ('2019-10-23T00:00:00', '3 days', ...) — parsed by
        dateparser in UTC and converted to milliseconds since the epoch;
      * an int/float — passed through unchanged.

    :raises ValueError: when the argument is required but missing, cannot be
        parsed as a date, or has an unsupported type.
    """
    if arg is None:
        if required is True:
            raise ValueError(f'Missing "{arg_name}"')
        return None
    if isinstance(arg, str) and arg.isdigit():
        # a digits-only string is assumed to already be a timestamp
        return int(arg)
    if isinstance(arg, str):
        date = dateparser.parse(arg, settings={'TIMEZONE': 'UTC'})
        if date is None:
            # dateparser failed to parse it
            raise ValueError(f'Invalid date: {arg_name}')
        return int(date.timestamp() * 1000)
    if isinstance(arg, (int, float)):
        return arg
    # Previously unsupported types fell off the end and implicitly returned
    # None, making bad input look like "no filter". Fail loudly instead.
    raise ValueError(f'Invalid date: "{arg_name}"')
argToList(args.get('result')) _type = argToList(args.get('type')) sub_type = argToList(args.get('sub_type')) timestamp_gte = arg_to_timestamp( arg=args.get('timestamp_gte'), arg_name='timestamp_gte' ) timestamp_lte = arg_to_timestamp( arg=args.get('timestamp_lte'), arg_name='timestamp_lte' ) page_number = arg_to_int( arg=args.get('page', 0), arg_name='Failed to parse "page". Must be a number.', required=True ) limit = arg_to_int( arg=args.get('limit', 20), arg_name='Failed to parse "limit". Must be a number.', required=True ) search_from = page_number * limit search_to = search_from + limit sort_by = args.get('sort_by') sort_order = args.get('sort_order', 'asc') audit_logs = client.audit_management_logs( email=email, result=result, _type=_type, sub_type=sub_type, timestamp_gte=timestamp_gte, timestamp_lte=timestamp_lte, search_from=search_from, search_to=search_to, sort_by=sort_by, sort_order=sort_order ) return ( tableToMarkdown('Audit Management Logs', audit_logs, [ 'AUDIT_ID', 'AUDIT_RESULT', 'AUDIT_DESCRIPTION', 'AUDIT_OWNER_NAME', 'AUDIT_OWNER_EMAIL', 'AUDIT_ASSET_JSON', 'AUDIT_ASSET_NAMES', 'AUDIT_HOSTNAME', 'AUDIT_REASON', 'AUDIT_ENTITY', 'AUDIT_ENTITY_SUBTYPE', 'AUDIT_SESSION_ID', 'AUDIT_CASE_ID', 'AUDIT_INSERT_TIME' ]), { f'{INTEGRATION_CONTEXT_BRAND}.AuditManagementLogs(val.AUDIT_ID == obj.AUDIT_ID)': audit_logs }, audit_logs ) def get_audit_agent_reports_command(client, args): endpoint_ids = argToList(args.get('endpoint_ids')) endpoint_names = argToList(args.get('endpoint_names')) result = argToList(args.get('result')) _type = argToList(args.get('type')) sub_type = argToList(args.get('sub_type')) timestamp_gte = arg_to_timestamp( arg=args.get('timestamp_gte'), arg_name='timestamp_gte' ) timestamp_lte = arg_to_timestamp( arg=args.get('timestamp_lte'), arg_name='timestamp_lte' ) page_number = arg_to_int( arg=args.get('page', 0), arg_name='Failed to parse "page". 
Must be a number.', required=True ) limit = arg_to_int( arg=args.get('limit', 20), arg_name='Failed to parse "limit". Must be a number.', required=True ) search_from = page_number * limit search_to = search_from + limit sort_by = args.get('sort_by') sort_order = args.get('sort_order', 'asc') audit_logs = client.get_audit_agent_reports( endpoint_ids=endpoint_ids, endpoint_names=endpoint_names, result=result, _type=_type, sub_type=sub_type, timestamp_gte=timestamp_gte, timestamp_lte=timestamp_lte, search_from=search_from, search_to=search_to, sort_by=sort_by, sort_order=sort_order ) integration_context = {f'{INTEGRATION_CONTEXT_BRAND}.AuditAgentReports': audit_logs} endpoint_context = create_endpoint_context(audit_logs) if endpoint_context: integration_context[Common.Endpoint.CONTEXT_PATH] = endpoint_context return ( tableToMarkdown('Audit Agent Reports', audit_logs), integration_context, audit_logs ) def get_distribution_url_command(client, args): distribution_id = args.get('distribution_id') package_type = args.get('package_type') url = client.get_distribution_url(distribution_id, package_type) return ( f'[Distribution URL]({url})', { 'Core.Distribution(val.id == obj.id)': { 'id': distribution_id, 'url': url } }, url ) def get_distribution_status_command(client, args): distribution_ids = argToList(args.get('distribution_ids')) distribution_list = [] for distribution_id in distribution_ids: status = client.get_distribution_status(distribution_id) distribution_list.append({ 'id': distribution_id, 'status': status }) return ( tableToMarkdown('Distribution Status', distribution_list, ['id', 'status']), { f'{INTEGRATION_CONTEXT_BRAND}.Distribution(val.id == obj.id)': distribution_list }, distribution_list ) def get_distribution_versions_command(client): versions = client.get_distribution_versions() readable_output = [] for operation_system in versions.keys(): os_versions = versions[operation_system] readable_output.append( tableToMarkdown(operation_system, os_versions 
or [], ['versions']) ) return ( '\n\n'.join(readable_output), { f'{INTEGRATION_CONTEXT_BRAND}.DistributionVersions': versions }, versions ) def create_distribution_command(client, args): name = args.get('name') platform = args.get('platform') package_type = args.get('package_type') description = args.get('description') agent_version = args.get('agent_version') if not platform == 'android' and not agent_version: # agent_version must be provided for all the platforms except android raise ValueError(f'Missing argument "agent_version" for platform "{platform}"') distribution_id = client.create_distribution( name=name, platform=platform, package_type=package_type, agent_version=agent_version, description=description ) distribution = { 'id': distribution_id, 'name': name, 'platform': platform, 'package_type': package_type, 'agent_version': agent_version, 'description': description } return ( f'Distribution {distribution_id} created successfully', { f'{INTEGRATION_CONTEXT_BRAND}.Distribution(val.id == obj.id)': distribution }, distribution ) def blocklist_files_command(client, args): hash_list = argToList(args.get('hash_list')) comment = args.get('comment') incident_id = arg_to_number(args.get('incident_id')) detailed_response = argToBoolean(args.get('detailed_response', False)) res = client.blocklist_files(hash_list=hash_list, comment=comment, incident_id=incident_id, detailed_response=detailed_response) if detailed_response: return CommandResults( readable_output=tableToMarkdown('Blocklist Files', res), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.blocklist', outputs=res, raw_response=res ) markdown_data = [{'added_hashes': file_hash} for file_hash in hash_list] return CommandResults( readable_output=tableToMarkdown('Blocklist Files', markdown_data, headers=['added_hashes'], headerTransform=pascalToSpace), outputs={f'{INTEGRATION_CONTEXT_BRAND}.blocklist.added_hashes.fileHash(val.fileHash == obj.fileHash)': hash_list}, raw_response=res ) def 
remove_blocklist_files_command(client: Client, args: Dict) -> CommandResults: hash_list = argToList(args.get('hash_list')) comment = args.get('comment') incident_id = arg_to_number(args.get('incident_id')) res = client.remove_blocklist_files(hash_list=hash_list, comment=comment, incident_id=incident_id) markdown_data = [{'removed_hashes': file_hash} for file_hash in hash_list] return CommandResults( readable_output=tableToMarkdown('Blocklist Files Removed', markdown_data, headers=['removed_hashes'], headerTransform=pascalToSpace), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.blocklist', outputs=markdown_data, raw_response=res ) def allowlist_files_command(client, args): hash_list = argToList(args.get('hash_list')) comment = args.get('comment') incident_id = arg_to_number(args.get('incident_id')) detailed_response = argToBoolean(args.get('detailed_response', False)) res = client.allowlist_files(hash_list=hash_list, comment=comment, incident_id=incident_id, detailed_response=detailed_response) if detailed_response: return CommandResults( readable_output=tableToMarkdown('Allowlist Files', res), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.blocklist', outputs=res, raw_response=res ) markdown_data = [{'added_hashes': file_hash} for file_hash in hash_list] return CommandResults( readable_output=tableToMarkdown('Allowlist Files', markdown_data, headers=['added_hashes'], headerTransform=pascalToSpace), outputs={f'{INTEGRATION_CONTEXT_BRAND}.allowlist.added_hashes.fileHash(val.fileHash == obj.fileHash)': hash_list}, raw_response=res ) def remove_allowlist_files_command(client, args): hash_list = argToList(args.get('hash_list')) comment = args.get('comment') incident_id = arg_to_number(args.get('incident_id')) res = client.remove_allowlist_files(hash_list=hash_list, comment=comment, incident_id=incident_id) markdown_data = [{'removed_hashes': file_hash} for file_hash in hash_list] return CommandResults( readable_output=tableToMarkdown('Allowlist Files Removed', 
markdown_data, headers=['removed_hashes'], headerTransform=pascalToSpace), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.allowlist', outputs=markdown_data, raw_response=res ) def quarantine_files_command(client, args): endpoint_id_list = argToList(args.get("endpoint_id_list")) file_path = args.get("file_path") file_hash = args.get("file_hash") incident_id = arg_to_number(args.get('incident_id')) reply = client.quarantine_files( endpoint_id_list=endpoint_id_list, file_path=file_path, file_hash=file_hash, incident_id=incident_id ) output = { 'endpointIdList': endpoint_id_list, 'filePath': file_path, 'fileHash': file_hash, 'actionId': reply.get("action_id") } return CommandResults( readable_output=tableToMarkdown('Quarantine files', output, headers=[*output], headerTransform=pascalToSpace), outputs={f'{INTEGRATION_CONTEXT_BRAND}.quarantineFiles.actionIds(val.actionId === obj.actionId)': output}, raw_response=reply ) def restore_file_command(client, args): file_hash = args.get('file_hash') endpoint_id = args.get('endpoint_id') incident_id = arg_to_number(args.get('incident_id')) reply = client.restore_file( file_hash=file_hash, endpoint_id=endpoint_id, incident_id=incident_id ) action_id = reply.get("action_id") return CommandResults( readable_output=tableToMarkdown('Restore files', {'Action Id': action_id}, ['Action Id']), outputs={f'{INTEGRATION_CONTEXT_BRAND}.restoredFiles.actionId(val.actionId == obj.actionId)': action_id}, raw_response=reply ) def get_quarantine_status_command(client, args): file_path = args.get('file_path') file_hash = args.get('file_hash') endpoint_id = args.get('endpoint_id') reply = client.get_quarantine_status( file_path=file_path, file_hash=file_hash, endpoint_id=endpoint_id ) output = { 'status': reply['status'], 'endpointId': reply['endpoint_id'], 'filePath': reply['file_path'], 'fileHash': reply['file_hash'] } return CommandResults( readable_output=tableToMarkdown('Quarantine files status', output, headers=[*output], 
headerTransform=pascalToSpace), outputs={f'{INTEGRATION_CONTEXT_BRAND}.quarantineFiles.status(val.fileHash === obj.fileHash &&' f'val.endpointId === obj.endpointId && val.filePath === obj.filePath)': output}, raw_response=reply ) def endpoint_scan_command(client, args): endpoint_id_list = argToList(args.get('endpoint_id_list')) dist_name = argToList(args.get('dist_name')) gte_first_seen = args.get('gte_first_seen') gte_last_seen = args.get('gte_last_seen') lte_first_seen = args.get('lte_first_seen') lte_last_seen = args.get('lte_last_seen') ip_list = argToList(args.get('ip_list')) group_name = argToList(args.get('group_name')) platform = argToList(args.get('platform')) alias = argToList(args.get('alias')) isolate = args.get('isolate') hostname = argToList(args.get('hostname')) incident_id = arg_to_number(args.get('incident_id')) validate_args_scan_commands(args) reply = client.endpoint_scan( url_suffix='/endpoints/scan/', endpoint_id_list=argToList(endpoint_id_list), dist_name=dist_name, gte_first_seen=gte_first_seen, gte_last_seen=gte_last_seen, lte_first_seen=lte_first_seen, lte_last_seen=lte_last_seen, ip_list=ip_list, group_name=group_name, platform=platform, alias=alias, isolate=isolate, hostname=hostname, incident_id=incident_id ) action_id = reply.get("action_id") context = { "actionId": action_id, "aborted": False } return CommandResults( readable_output=tableToMarkdown('Endpoint scan', {'Action Id': action_id}, ['Action Id']), outputs={f'{INTEGRATION_CONTEXT_BRAND}.endpointScan(val.actionId == obj.actionId)': context}, raw_response=reply ) def endpoint_scan_abort_command(client, args): endpoint_id_list = argToList(args.get('endpoint_id_list')) dist_name = argToList(args.get('dist_name')) gte_first_seen = args.get('gte_first_seen') gte_last_seen = args.get('gte_last_seen') lte_first_seen = args.get('lte_first_seen') lte_last_seen = args.get('lte_last_seen') ip_list = argToList(args.get('ip_list')) group_name = argToList(args.get('group_name')) platform = 
argToList(args.get('platform')) alias = argToList(args.get('alias')) isolate = args.get('isolate') hostname = argToList(args.get('hostname')) incident_id = arg_to_number(args.get('incident_id')) validate_args_scan_commands(args) reply = client.endpoint_scan( url_suffix='endpoints/abort_scan/', endpoint_id_list=argToList(endpoint_id_list), dist_name=dist_name, gte_first_seen=gte_first_seen, gte_last_seen=gte_last_seen, lte_first_seen=lte_first_seen, lte_last_seen=lte_last_seen, ip_list=ip_list, group_name=group_name, platform=platform, alias=alias, isolate=isolate, hostname=hostname, incident_id=incident_id ) action_id = reply.get("action_id") context = { "actionId": action_id, "aborted": True } return CommandResults( readable_output=tableToMarkdown('Endpoint abort scan', {'Action Id': action_id}, ['Action Id']), outputs={f'{INTEGRATION_CONTEXT_BRAND}.endpointScan(val.actionId == obj.actionId)': context}, raw_response=reply ) def validate_args_scan_commands(args): endpoint_id_list = argToList(args.get('endpoint_id_list')) dist_name = argToList(args.get('dist_name')) gte_first_seen = args.get('gte_first_seen') gte_last_seen = args.get('gte_last_seen') lte_first_seen = args.get('lte_first_seen') lte_last_seen = args.get('lte_last_seen') ip_list = argToList(args.get('ip_list')) group_name = argToList(args.get('group_name')) platform = argToList(args.get('platform')) alias = argToList(args.get('alias')) hostname = argToList(args.get('hostname')) all_ = argToBoolean(args.get('all', 'false')) # to prevent the case where an empty filtered command will trigger by default a scan on all the endpoints. err_msg = 'To scan/abort scan all the endpoints run this command with the \'all\' argument as True ' \ 'and without any other filters. This may cause performance issues.\n' \ 'To scan/abort scan some of the endpoints, please use the filter arguments.' 
def sort_by_key(list_to_sort, main_key, fallback_key):
    """Sort a list of dicts by main_key.

    Elements with a truthy main_key come first, sorted by it; elements with
    only a truthy fallback_key follow, sorted by it; elements with neither
    key keep their original relative order at the tail.
    """
    with_main = []
    fallback_only = []
    with_neither = []
    # single pass: bucket each element by which sort key it carries
    for item in list_to_sort:
        if item.get(main_key):
            with_main.append(item)
        elif item.get(fallback_key):
            fallback_only.append(item)
        else:
            with_neither.append(item)
    ordered = sorted(with_main, key=itemgetter(main_key))
    if not fallback_only and not with_neither:
        return ordered
    ordered.extend(sorted(fallback_only, key=itemgetter(fallback_key)))
    if not with_neither:
        return ordered
    ordered.extend(with_neither)
    return ordered
def handle_outgoing_issue_closure(update_args, inc_status):
    """Translate an XSOAR incident closure into Core update fields.

    When the XSOAR incident status is 2 (closed), map the XSOAR close reason
    to the matching Core resolved status and carry the close notes over as the
    resolve comment. Mutates update_args in place.
    """
    if inc_status == 2:
        update_args['status'] = XSOAR_RESOLVED_STATUS_TO_Core.get(update_args.get('closeReason', 'Other'))
        # Bug fix: the original nested double quotes inside a double-quoted
        # f-string (f"...{update_args["status"]}"), a SyntaxError on Python < 3.12.
        demisto.debug(f"Closing Remote Core incident with status {update_args['status']}")
        update_args['resolve_comment'] = update_args.get('closeNotes', '')
def delete_endpoints_command(client: Client, args: Dict[str, str]) -> Tuple[str, Any, Any]:
    """Delete the given endpoints and return a human-readable confirmation.

    Bug fix: the original nested single quotes inside a single-quoted f-string
    ({args.get('endpoint_ids')}), a SyntaxError on Python < 3.12.
    """
    endpoint_id_list: list = argToList(args.get('endpoint_ids'))
    client.delete_endpoints(endpoint_id_list)
    return f"Successfully deleted the following endpoints: {args.get('endpoint_ids')}", None, None


def get_policy_command(client: Client, args: Dict[str, str]) -> Tuple[str, dict, Any]:
    """Fetch the policy name applied to an endpoint.

    Returns readable output, context keyed by endpoint_id, and the raw reply.
    Bug fix: same nested-quote f-string SyntaxError as above, resolved by
    extracting the looked-up value to a local first.
    """
    endpoint_id = args.get('endpoint_id')
    reply = client.get_policy(endpoint_id)
    policy_name = reply.get('policy_name')
    context = {'endpoint_id': endpoint_id, 'policy_name': policy_name}
    return (
        f'The policy name of endpoint: {endpoint_id} is: {policy_name}.',
        {
            f'{INTEGRATION_CONTEXT_BRAND}.Policy(val.endpoint_id == obj.endpoint_id)': context
        },
        reply
    )
def retrieve_file_details_command(client: Client, args):
    """Check retrieve-file action status and download any files already collected.

    For each action id: query per-endpoint download links, count endpoints and
    retrieved files, fetch each available file as a war-room zip entry, and
    return a markdown summary entry plus the list of file results.
    """
    action_id_list = argToList(args.get('action_id', ''))
    action_id_list = [arg_to_int(arg=item, arg_name=str(item)) for item in action_id_list]

    result = []
    raw_result = []
    file_results = []
    endpoints_count = 0
    retrieved_files_count = 0

    for action_id in action_id_list:
        data = client.retrieve_file_details(action_id)
        raw_result.append(data)
        for endpoint, link in data.items():
            endpoints_count += 1
            obj = {
                'action_id': action_id,
                'endpoint_id': endpoint
            }
            if link:
                retrieved_files_count += 1
                obj['file_link'] = link
                # keep only the download path portion of the returned link
                file_link = "download" + link.split("download")[1]
                file = client.get_file(file_link=file_link)
                file_results.append(fileResult(filename=f'{endpoint}_{retrieved_files_count}.zip', data=file))
            result.append(obj)

    # Bug fix: the original nested single quotes inside a single-quoted
    # f-string ({args.get('action_id', '')}), a SyntaxError on Python < 3.12.
    hr = f"### Action id : {args.get('action_id', '')} \n Retrieved {retrieved_files_count} files from " \
         f"{endpoints_count} endpoints. \n To get the exact action status run the core-action-status-get command"
    return_entry = {'Type': entryTypes['note'],
                    'ContentsFormat': formats['json'],
                    'Contents': raw_result,
                    'HumanReadable': hr,
                    'ReadableContentsFormat': formats['markdown'],
                    'EntryContext': {}
                    }
    return return_entry, file_results
'macos_supported', 'is_high_risk'] return ( tableToMarkdown(name='Scripts', t=scripts, headers=headers, removeNull=True, headerTransform=string_to_table_header), { f'{INTEGRATION_CONTEXT_BRAND}.Scripts(val.script_uid == obj.script_uid)': scripts }, result ) def get_script_metadata_command(client: Client, args: Dict[str, str]) -> Tuple[str, dict, Any]: script_uid = args.get('script_uid') reply = client.get_script_metadata(script_uid) script_metadata = copy.deepcopy(reply) timestamp = script_metadata.get('modification_date') script_metadata['modification_date_timestamp'] = timestamp script_metadata['modification_date'] = timestamp_to_datestring(timestamp, TIME_FORMAT) return ( tableToMarkdown(name='Script Metadata', t=script_metadata, removeNull=True, headerTransform=string_to_table_header), { f'{INTEGRATION_CONTEXT_BRAND}.ScriptMetadata(val.script_uid == obj.script_uid)': reply }, reply ) def get_script_code_command(client: Client, args: Dict[str, str]) -> Tuple[str, dict, Any]: script_uid = args.get('script_uid') reply = client.get_script_code(script_uid) context = { 'script_uid': script_uid, 'code': reply } return ( f'### Script code: \n ``` {str(reply)} ```', { f'{INTEGRATION_CONTEXT_BRAND}.ScriptCode(val.script_uid == obj.script_uid)': context }, reply ) def action_status_get_command(client: Client, args) -> CommandResults: action_id_list = argToList(args.get('action_id', '')) action_id_list = [arg_to_int(arg=item, arg_name=str(item)) for item in action_id_list] result = [] for action_id in action_id_list: data = client.action_status_get(action_id) for endpoint_id, status in data.items(): result.append({ 'action_id': action_id, 'endpoint_id': endpoint_id, 'status': status }) return CommandResults( readable_output=tableToMarkdown(name='Get Action Status', t=result, removeNull=True), outputs={f'{INTEGRATION_CONTEXT_BRAND}.GetActionStatus(val.action_id == obj.action_id)': result}, raw_response=result ) def run_script_command(client: Client, args: Dict) -> 
CommandResults: script_uid = args.get('script_uid') endpoint_ids = argToList(args.get('endpoint_ids')) timeout = arg_to_number(args.get('timeout', 600)) or 600 incident_id = arg_to_number(args.get('incident_id')) if parameters := args.get('parameters'): try: parameters = json.loads(parameters) except json.decoder.JSONDecodeError as e: raise ValueError(f'The parameters argument is not in a valid JSON structure:\n{e}') else: parameters = {} response = client.run_script(script_uid, endpoint_ids, parameters, timeout, incident_id=incident_id) reply = response.get('reply') return CommandResults( readable_output=tableToMarkdown('Run Script', reply), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.ScriptRun', outputs_key_field='action_id', outputs=reply, raw_response=response, ) def run_snippet_code_script_command(client: Client, args: Dict) -> CommandResults: snippet_code = args.get('snippet_code') endpoint_ids = argToList(args.get('endpoint_ids')) incident_id = arg_to_number(args.get('incident_id')) response = client.run_snippet_code_script(snippet_code=snippet_code, endpoint_ids=endpoint_ids, incident_id=incident_id) reply = response.get('reply') return CommandResults( readable_output=tableToMarkdown('Run Snippet Code Script', reply), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.ScriptRun', outputs_key_field='action_id', outputs=reply, raw_response=reply, ) def get_script_execution_status_command(client: Client, args: Dict) -> List[CommandResults]: action_ids = argToList(args.get('action_id', '')) command_results = [] for action_id in action_ids: response = client.get_script_execution_status(action_id) reply = response.get('reply') reply['action_id'] = int(action_id) command_results.append(CommandResults( readable_output=tableToMarkdown(f'Script Execution Status - {action_id}', reply), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.ScriptStatus', outputs_key_field='action_id', outputs=reply, raw_response=response, )) return command_results def 
get_script_execution_results_command(client: Client, args: Dict) -> List[CommandResults]: action_ids = argToList(args.get('action_id', '')) command_results = [] for action_id in action_ids: response = client.get_script_execution_results(action_id) results = response.get('reply', {}).get('results') context = { 'action_id': int(action_id), 'results': results, } command_results.append(CommandResults( readable_output=tableToMarkdown(f'Script Execution Results - {action_id}', results), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.ScriptResult', outputs_key_field='action_id', outputs=context, raw_response=response, )) return command_results def get_script_execution_result_files_command(client: Client, args: Dict) -> Dict: action_id = args.get('action_id', '') endpoint_id = args.get('endpoint_id') file_response = client.get_script_execution_result_files(action_id, endpoint_id) try: filename = file_response.headers.get('Content-Disposition').split('attachment; filename=')[1] except Exception as e: demisto.debug(f'Failed extracting filename from response headers - [{str(e)}]') filename = action_id + '.zip' return fileResult(filename, file_response.content) def run_script_execute_commands_command(client: Client, args: Dict) -> CommandResults: endpoint_ids = argToList(args.get('endpoint_ids')) incident_id = arg_to_number(args.get('incident_id')) timeout = arg_to_number(args.get('timeout', 600)) or 600 parameters = {'commands_list': argToList(args.get('commands'))} response = client.run_script('a6f7683c8e217d85bd3c398f0d3fb6bf', endpoint_ids, parameters, timeout, incident_id) reply = response.get('reply') return CommandResults( readable_output=tableToMarkdown('Run Script Execute Commands', reply), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.ScriptRun', outputs_key_field='action_id', outputs=reply, raw_response=reply, ) def run_script_delete_file_command(client: Client, args: Dict) -> List[CommandResults]: endpoint_ids = argToList(args.get('endpoint_ids')) incident_id = 
arg_to_number(args.get('incident_id')) timeout = arg_to_number(args.get('timeout', 600)) or 600 file_paths = argToList(args.get('file_path')) all_files_response = [] for file_path in file_paths: parameters = {'file_path': file_path} response = client.run_script('548023b6e4a01ec51a495ba6e5d2a15d', endpoint_ids, parameters, timeout, incident_id) reply = response.get('reply') all_files_response.append(CommandResults( readable_output=tableToMarkdown(f'Run Script Delete File on {file_path}', reply), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.ScriptRun', outputs_key_field='action_id', outputs=reply, raw_response=reply, )) return all_files_response def run_script_file_exists_command(client: Client, args: Dict) -> List[CommandResults]: endpoint_ids = argToList(args.get('endpoint_ids')) incident_id = arg_to_number(args.get('incident_id')) timeout = arg_to_number(args.get('timeout', 600)) or 600 file_paths = argToList(args.get('file_path')) all_files_response = [] for file_path in file_paths: parameters = {'path': file_path} response = client.run_script('414763381b5bfb7b05796c9fe690df46', endpoint_ids, parameters, timeout, incident_id) reply = response.get('reply') all_files_response.append(CommandResults( readable_output=tableToMarkdown(f'Run Script File Exists on {file_path}', reply), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.ScriptRun', outputs_key_field='action_id', outputs=reply, raw_response=reply, )) return all_files_response def run_script_kill_process_command(client: Client, args: Dict) -> List[CommandResults]: endpoint_ids = argToList(args.get('endpoint_ids')) incident_id = arg_to_number(args.get('incident_id')) timeout = arg_to_number(args.get('timeout', 600)) or 600 processes_names = argToList(args.get('process_name')) all_processes_response = [] for process_name in processes_names: parameters = {'process_name': process_name} response = client.run_script('fd0a544a99a9421222b4f57a11839481', endpoint_ids, parameters, timeout, incident_id) reply = 
response.get('reply') all_processes_response.append(CommandResults( readable_output=tableToMarkdown(f'Run Script Kill Process on {process_name}', reply), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.ScriptRun', outputs_key_field='action_id', outputs=reply, raw_response=reply, )) return all_processes_response def add_exclusion_command(client: Client, args: Dict) -> CommandResults: name = args.get('name') indicator = args.get('filterObject') if not indicator: raise DemistoException("Didn't get filterObject arg. This arg is required.") status = args.get('status', "ENABLED") comment = args.get('comment') res = client.add_exclusion(name=name, status=status, indicator=json.loads(indicator), comment=comment) return CommandResults( readable_output=tableToMarkdown('Add Exclusion', res), outputs={f'{INTEGRATION_CONTEXT_BRAND}.exclusion.rule_id(val.rule_id == obj.rule_id)': res.get("rule_id")}, raw_response=res ) def delete_exclusion_command(client: Client, args: Dict) -> CommandResults: alert_exclusion_id = arg_to_number(args.get('alert_exclusion_id')) if not alert_exclusion_id: raise DemistoException("Didn't get alert_exclusion_id arg. 
This arg is required.") res = client.delete_exclusion(alert_exclusion_id=alert_exclusion_id) return CommandResults( readable_output=f"Successfully deleted the following exclusion: {alert_exclusion_id}", outputs={f'{INTEGRATION_CONTEXT_BRAND}.deletedExclusion.rule_id(val.rule_id == obj.rule_id)': res.get("rule_id")}, raw_response=res ) def get_exclusion_command(client: Client, args: Dict) -> CommandResults: res = client.get_exclusion(tenant_id=args.get('tenant_ID'), filter=args.get('filterObject'), limit=arg_to_number(args.get('limit', 20))) return CommandResults( outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.exclusion', outputs=res, readable_output=tableToMarkdown('Exclusion', res), raw_response=res ) def report_incorrect_wildfire_command(client: Client, args) -> CommandResults: file_hash = args.get('file_hash') reason = args.get('reason') email = args.get('email') new_verdict = arg_to_int( arg=args.get('new_verdict'), arg_name='Failed to parse "new_verdict". Must be a number.', required=True ) response = client.report_incorrect_wildfire(file_hash, new_verdict, reason, email) return CommandResults( readable_output=f'Reported incorrect WildFire on {file_hash}', outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.WildFire', outputs={"file_hash": file_hash, "new_verdict": new_verdict}, raw_response=response, ) def decode_dict_values(dict_to_decode: dict): """Decode JSON str values of a given dict. Args: dict_to_decode (dict): The dict to decode. """ for key, value in dict_to_decode.items(): # if value is a dictionary, we want to recursively decode it's values if isinstance(value, dict): decode_dict_values(value) # if value is a string, we want to try to decode it, if it cannot be decoded, we will move on. elif isinstance(value, str): try: dict_to_decode[key] = json.loads(value) except ValueError: continue def filter_general_fields(alert: dict) -> dict: """filter only relevant general fields from a given alert. 
Args: alert (dict): The alert to filter Returns: dict: The filtered alert """ updated_alert = {} updated_event = {} for field in ALERT_GENERAL_FIELDS: if field in alert: updated_alert[field] = alert.get(field) event = alert.get('raw_abioc', {}).get('event', {}) if not event: return_warning('No XDR cloud analytics event.') else: for field in ALERT_EVENT_GENERAL_FIELDS: if field in event: updated_event[field] = event.get(field) updated_alert['event'] = updated_event return updated_alert def filter_vendor_fields(alert: dict): """Remove non relevant fields from the alert event (filter by vendor: Amazon/google/Microsoft) Args: alert (dict): The alert to filter Returns: dict: The filtered alert """ vendor_mapper = { 'Amazon': ALERT_EVENT_AWS_FIELDS, 'Google': ALERT_EVENT_GCP_FIELDS, 'MSFT': ALERT_EVENT_AZURE_FIELDS, } event = alert.get('event', {}) vendor = event.get('vendor') if vendor and vendor in vendor_mapper: raw_log = event.get('raw_log', {}) if raw_log and isinstance(raw_log, dict): for key in list(raw_log): if key not in vendor_mapper[vendor]: raw_log.pop(key) def get_original_alerts_command(client: Client, args: Dict) -> CommandResults: alert_id_list = argToList(args.get('alert_ids', [])) raw_response = client.get_original_alerts(alert_id_list) reply = copy.deepcopy(raw_response) alerts = reply.get('alerts', []) filtered_alerts = [] for i, alert in enumerate(alerts): # decode raw_response try: alert['original_alert_json'] = safe_load_json(alert.get('original_alert_json', '')) # some of the returned JSON fields are double encoded, so it needs to be double-decoded. # example: {"x": "someValue", "y": "{\"z\":\"anotherValue\"}"} decode_dict_values(alert) except Exception: continue # remove original_alert_json field and add its content to alert. 
alert.update( alert.pop('original_alert_json', None)) updated_alert = filter_general_fields(alert) if 'event' in updated_alert: filter_vendor_fields(updated_alert) filtered_alerts.append(updated_alert) return CommandResults( outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.OriginalAlert', outputs_key_field='internal_id', outputs=filtered_alerts, raw_response=raw_response, ) def get_dynamic_analysis_command(client: Client, args: Dict) -> CommandResults: alert_id_list = argToList(args.get('alert_ids', [])) raw_response = client.get_original_alerts(alert_id_list) reply = copy.deepcopy(raw_response) alerts = reply.get('alerts', []) filtered_alerts = [] for i, alert in enumerate(alerts): # decode raw_response try: alert['original_alert_json'] = safe_load_json(alert.get('original_alert_json', '')) # some of the returned JSON fields are double encoded, so it needs to be double-decoded. # example: {"x": "someValue", "y": "{\"z\":\"anotherValue\"}"} decode_dict_values(alert) except Exception: continue # remove original_alert_json field and add its content to alert. 
alert.update(alert.pop('original_alert_json', None)) if demisto.get(alert, 'messageData.dynamicAnalysis'): filtered_alerts.append(demisto.get(alert, 'messageData.dynamicAnalysis')) if not filtered_alerts: return CommandResults( readable_output="There is no dynamicAnalysis for these alert ids.", raw_response=raw_response ) return CommandResults( outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.DynamicAnalysis', outputs=filtered_alerts, raw_response=raw_response, ) def run_polling_command(client: Client, args: dict, cmd: str, command_function: Callable, command_decision_field: str, results_function: Callable, polling_field: str, polling_value: List, stop_polling: bool = False) -> CommandResults: """ args: demito args cmd: the command to schedule by after the current command command_function: the function which is runs the actual command command_decision_field: the field in the response based on it what the command status and if the command occurred results_function: the function which we are polling on and retrieves the status of the command_function polling_field: the field which from the result of the results_function which we are interested in its value polling_value: list of values of the polling_field we want to check stop_polling: yes - polling_value is stopping, not - polling_value not stopping """ ScheduledCommand.raise_error_if_not_supported() interval_in_secs = int(args.get('interval_in_seconds', 60)) timeout_in_seconds = int(args.get('timeout_in_seconds', 600)) if command_decision_field not in args: # create new command run command_results = command_function(client, args) if isinstance(command_results, CommandResults): outputs = [command_results.raw_response] if command_results.raw_response else [] else: outputs = [c.raw_response for c in command_results] command_decision_values = [o.get(command_decision_field) for o in outputs] if outputs else [] # type: ignore if outputs and command_decision_values: polling_args = { command_decision_field: 
command_decision_values, 'interval_in_seconds': interval_in_secs, **args } scheduled_command = ScheduledCommand( command=cmd, next_run_in_seconds=interval_in_secs, args=polling_args, timeout_in_seconds=timeout_in_seconds) if isinstance(command_results, list): command_results = command_results[0] command_results.scheduled_command = scheduled_command return command_results else: if command_results.readable_output: demisto.error(f"{command_results.readable_output}") else: demisto.error(f"Command {command_function} didn't succeeded, returned {outputs}") return command_results # get polling result command_results = results_function(client, args) outputs_result_func = command_results.raw_response result = outputs_result_func.get(polling_field) if isinstance(outputs_result_func, dict) else\ outputs_result_func[0].get(polling_field) cond = result not in polling_value if stop_polling else result in polling_value if cond: # schedule next poll polling_args = { 'interval_in_seconds': interval_in_secs, **args } scheduled_command = ScheduledCommand( command=cmd, next_run_in_seconds=interval_in_secs, args=polling_args, timeout_in_seconds=timeout_in_seconds) # result with scheduled_command only - no update to the war room command_results = CommandResults(scheduled_command=scheduled_command) return command_results def main(): """ Executes an integration command """ command = demisto.command() LOG(f'Command being called is {command}') args = demisto.args() api_key = demisto.params().get('apikey') api_key_id = demisto.params().get('apikey_id') url = demisto.params().get('url') if not api_key or not api_key_id or not url: headers = { "HOST": demisto.getLicenseCustomField("Core.ApiHostName"), demisto.getLicenseCustomField("Core.ApiHeader"): demisto.getLicenseCustomField("Core.ApiKey"), "Content-Type": "application/json" } url = "http://" + demisto.getLicenseCustomField("Core.ApiHost") + "/api/webapp/" add_sensitive_log_strs(demisto.getLicenseCustomField("Core.ApiKey")) else: headers = 
{ "Content-Type": "application/json", "x-xdr-auth-id": str(api_key_id), "Authorization": api_key } add_sensitive_log_strs(api_key) base_url = urljoin(url, '/public_api/v1') proxy = demisto.params().get('proxy') verify_cert = not demisto.params().get('insecure', False) try: timeout = int(demisto.params().get('timeout', 120)) except ValueError as e: demisto.debug(f'Failed casting timeout parameter to int, falling back to 120 - {e}') timeout = 120 client = Client( base_url=base_url, proxy=proxy, verify=verify_cert, headers=headers, timeout=timeout ) try: if command == 'test-module': client.test_module() demisto.results('ok') elif command == 'core-get-endpoints': return_results(get_endpoints_command(client, args)) elif command == 'core-isolate-endpoint': polling_args = { **args, "endpoint_id_list": args.get('endpoint_id') } return_results(run_polling_command(client=client, args=polling_args, cmd="core-isolate-endpoint", command_function=isolate_endpoint_command, command_decision_field="action_id", results_function=get_endpoints_command, polling_field="is_isolated", polling_value=["AGENT_ISOLATED"], stop_polling=True)) elif command == 'core-unisolate-endpoint': polling_args = { **args, "endpoint_id_list": args.get('endpoint_id') } return_results(run_polling_command(client=client, args=polling_args, cmd="core-unisolate-endpoint", command_function=unisolate_endpoint_command, command_decision_field="action_id", results_function=get_endpoints_command, polling_field="is_isolated", polling_value=["AGENT_UNISOLATED", "CANCELLED", "ֿPENDING_ABORT", "ABORTED", "EXPIRED", "COMPLETED_PARTIAL", "COMPLETED_SUCCESSFULLY", "FAILED", "TIMEOUT"], stop_polling=True)) elif command == 'core-get-distribution-url': return_outputs(*get_distribution_url_command(client, args)) elif command == 'core-get-create-distribution-status': return_outputs(*get_distribution_status_command(client, args)) elif command == 'core-get-distribution-versions': 
return_outputs(*get_distribution_versions_command(client)) elif command == 'core-create-distribution': return_outputs(*create_distribution_command(client, args)) elif command == 'core-get-audit-management-logs': return_outputs(*get_audit_management_logs_command(client, args)) elif command == 'core-get-audit-agent-reports': return_outputs(*get_audit_agent_reports_command(client, args)) elif command == 'core-blocklist-files': return_results(blocklist_files_command(client, args)) elif command == 'core-allowlist-files': return_results(allowlist_files_command(client, args)) elif command == 'core-quarantine-files': polling_args = { **args, "endpoint_id": argToList(args.get("endpoint_id_list"))[0] } return_results(run_polling_command(client=client, args=polling_args, cmd="core-quarantine-files", command_function=quarantine_files_command, command_decision_field="action_id", results_function=action_status_get_command, polling_field="status", polling_value=["PENDING", "IN_PROGRESS", "PENDING_ABORT"])) elif command == 'core-get-quarantine-status': return_results(get_quarantine_status_command(client, args)) elif command == 'core-restore-file': return_results(run_polling_command(client=client, args=args, cmd="core-retrieve-files", command_function=restore_file_command, command_decision_field="action_id", results_function=action_status_get_command, polling_field="status", polling_value=["PENDING", "IN_PROGRESS", "PENDING_ABORT"])) elif command == 'core-endpoint-scan': return_results(run_polling_command(client=client, args=args, cmd="core-retrieve-files", command_function=endpoint_scan_command, command_decision_field="action_id", results_function=action_status_get_command, polling_field="status", polling_value=["PENDING", "IN_PROGRESS", "PENDING_ABORT"])) elif command == 'core-endpoint-scan-abort': return_results(endpoint_scan_abort_command(client, args)) elif command == 'update-remote-system': return_results(update_remote_system_command(client, args)) elif command == 
'core-delete-endpoints': return_outputs(*delete_endpoints_command(client, args)) elif command == 'core-get-policy': return_outputs(*get_policy_command(client, args)) elif command == 'core-get-endpoint-device-control-violations': return_outputs(*get_endpoint_device_control_violations_command(client, args)) elif command == 'core-retrieve-files': return_results(run_polling_command(client=client, args=args, cmd="core-retrieve-files", command_function=retrieve_files_command, command_decision_field="action_id", results_function=action_status_get_command, polling_field="status", polling_value=["PENDING", "IN_PROGRESS", "PENDING_ABORT"])) elif command == 'core-retrieve-file-details': return_entry, file_results = retrieve_file_details_command(client, args) demisto.results(return_entry) if file_results: demisto.results(file_results) elif command == 'core-get-scripts': return_outputs(*get_scripts_command(client, args)) elif command == 'core-get-script-metadata': return_outputs(*get_script_metadata_command(client, args)) elif command == 'core-get-script-code': return_outputs(*get_script_code_command(client, args)) elif command == 'core-action-status-get': return_results(action_status_get_command(client, args)) elif command == 'core-run-script': return_results(run_script_command(client, args)) elif command == 'core-run-snippet-code-script': return_results(run_polling_command(client=client, args=args, cmd="core-run-snippet-code-script", command_function=run_snippet_code_script_command, command_decision_field="action_id", results_function=action_status_get_command, polling_field="status", polling_value=["PENDING", "IN_PROGRESS", "PENDING_ABORT"])) elif command == 'core-get-script-execution-status': return_results(get_script_execution_status_command(client, args)) elif command == 'core-get-script-execution-results': return_results(get_script_execution_results_command(client, args)) elif command == 'core-get-script-execution-result-files': 
return_results(get_script_execution_result_files_command(client, args)) elif command == 'core-run-script-execute-commands': return_results(run_polling_command(client=client, args=args, cmd="core-run-script-execute-commands", command_function=run_script_execute_commands_command, command_decision_field="action_id", results_function=action_status_get_command, polling_field="status", polling_value=["PENDING", "IN_PROGRESS", "PENDING_ABORT"])) elif command == 'core-run-script-delete-file': return_results(run_polling_command(client=client, args=args, cmd="core-run-script-delete-file", command_function=run_script_delete_file_command, command_decision_field="action_id", results_function=action_status_get_command, polling_field="status", polling_value=["PENDING", "IN_PROGRESS", "PENDING_ABORT"])) elif command == 'core-run-script-file-exists': return_results(run_polling_command(client=client, args=args, cmd="core-run-script-file-exists", command_function=run_script_file_exists_command, command_decision_field="action_id", results_function=action_status_get_command, polling_field="status", polling_value=["PENDING", "IN_PROGRESS", "PENDING_ABORT"])) elif command == 'core-run-script-kill-process': return_results(run_polling_command(client=client, args=args, cmd="core-run-script-kill-process", command_function=run_script_kill_process_command, command_decision_field="action_id", results_function=action_status_get_command, polling_field="status", polling_value=["PENDING", "IN_PROGRESS", "PENDING_ABORT"])) elif command == 'endpoint': return_results(endpoint_command(client, args)) elif command == 'core-report-incorrect-wildfire': return_results(report_incorrect_wildfire_command(client, args)) elif command == 'core-remove-blocklist-files': return_results(remove_blocklist_files_command(client, args)) elif command == 'core-remove-allowlist-files': return_results(remove_allowlist_files_command(client, args)) elif command == 'core-add-exclusion': 
return_results(add_exclusion_command(client, args)) elif command == 'core-delete-exclusion': return_results(delete_exclusion_command(client, args)) elif command == 'core-get-exclusion': return_results(get_exclusion_command(client, args)) elif command == 'core-get-cloud-original-alerts': return_results(get_original_alerts_command(client, args)) elif command == 'core-get-dynamic-analysis': return_results(get_dynamic_analysis_command(client, args)) except Exception as err: demisto.error(traceback.format_exc()) return_error(str(err)) if __name__ in ('__main__', '__builtin__', 'builtins'): main()
import copy from operator import itemgetter from typing import Tuple, Callable import demistomock as demisto # noqa: F401 import urllib3 from CommonServerPython import * # noqa: F401 # Disable insecure warnings urllib3.disable_warnings() TIME_FORMAT = "%Y-%m-%dT%H:%M:%S" NONCE_LENGTH = 64 API_KEY_LENGTH = 128 INTEGRATION_CONTEXT_BRAND = 'Core' INTEGRATION_NAME = 'Cortex Core - IR' XSOAR_RESOLVED_STATUS_TO_Core = { 'Other': 'resolved_other', 'Duplicate': 'resolved_duplicate', 'False Positive': 'resolved_false_positive', 'Resolved': 'resolved_true_positive', } ALERT_GENERAL_FIELDS = { 'detection_modules', 'alert_full_description', 'matching_service_rule_id', 'variation_rule_id', 'content_version', 'detector_id', 'mitre_technique_id_and_name', 'silent', 'mitre_technique_ids', 'activity_first_seet_at', '_type', 'dst_association_strength', 'alert_description', } ALERT_EVENT_GENERAL_FIELDS = { "_time", "vendor", "event_timestamp", "event_type", "event_id", "cloud_provider", "project", "cloud_provider_event_id", "cloud_correlation_id", "operation_name_orig", "operation_name", "identity_orig", "identity_name", "identity_uuid", "identity_type", "identity_sub_type", "identity_invoked_by_name", "identity_invoked_by_uuid", "identity_invoked_by_type", "identity_invoked_by_sub_type", "operation_status", "operation_status_orig", "operation_status_orig_code", "operation_status_reason_provided", "resource_type", "resource_type_orig", "resource_sub_type", "resource_sub_type_orig", "region", "zone", "referenced_resource", "referenced_resource_name", "referenced_resources_count", "user_agent", "caller_ip", 'caller_ip_geolocation', "caller_ip_asn", 'caller_project', 'raw_log', "log_name", "caller_ip_asn_org", "event_base_id", "ingestion_time", } ALERT_EVENT_GENERAL_FIELDS = { "_time", "vendor", "event_timestamp", "event_type", "event_id", "cloud_provider", "project", "cloud_provider_event_id", "cloud_correlation_id", "operation_name_orig", "operation_name", "identity_orig", 
"identity_name", "identity_uuid", "identity_type", "identity_sub_type", "identity_invoked_by_name", "identity_invoked_by_uuid", "identity_invoked_by_type", "identity_invoked_by_sub_type", "operation_status", "operation_status_orig", "operation_status_orig_code", "operation_status_reason_provided", "resource_type", "resource_type_orig", "resource_sub_type", "resource_sub_type_orig", "region", "zone", "referenced_resource", "referenced_resource_name", "referenced_resources_count", "user_agent", "caller_ip", 'caller_ip_geolocation', "caller_ip_asn", 'caller_project', 'raw_log', "log_name", "caller_ip_asn_org", "event_base_id", "ingestion_time", } ALERT_EVENT_AWS_FIELDS = { "eventVersion", "userIdentity", "eventTime", "eventSource", "eventName", "awsRegion", "sourceIPAddress", "userAgent", "requestID", "eventID", "readOnly", "eventType", "apiVersion", "managementEvent", "recipientAccountId", "eventCategory", "errorCode", "errorMessage", "resources", } ALERT_EVENT_GCP_FIELDS = { "labels", "operation", "protoPayload", "resource", "severity", "timestamp", } ALERT_EVENT_AZURE_FIELDS = { "time", "resourceId", "category", "operationName", "operationVersion", "schemaVersion", "statusCode", "statusText", "callerIpAddress", "correlationId", "identity", "level", "properties", "uri", "protocol", "resourceType", "tenantId", } class Client(BaseClient): def __init__(self, base_url: str, headers: dict, timeout: int = 120, proxy: bool = False, verify: bool = False): self.timeout = timeout super().__init__(base_url=base_url, headers=headers, proxy=proxy, verify=verify) def test_module(self): """ Performs basic get request to get item samples """ try: self.get_incidents(limit=1) except Exception as err: if 'API request Unauthorized' in str(err): # this error is received from the Core server when the client clock is not in sync to the server raise DemistoException(f'{str(err)} please validate that your both ' f'XSOAR and Core server clocks are in sync') else: raise def 
get_incidents(self, incident_id_list=None, lte_modification_time=None, gte_modification_time=None,
                  lte_creation_time=None, gte_creation_time=None, status=None, sort_by_modification_time=None,
                  sort_by_creation_time=None, page_number=0, limit=100, gte_creation_time_milliseconds=0):
        """
        Filters and returns incidents
        :param incident_id_list: List of incident ids - must be list
        :param lte_modification_time: string of time format "2019-12-31T23:59:00"
        :param gte_modification_time: string of time format "2019-12-31T23:59:00"
        :param lte_creation_time: string of time format "2019-12-31T23:59:00"
        :param gte_creation_time: string of time format "2019-12-31T23:59:00"
        :param status: string of status
        :param sort_by_modification_time: optional - enum (asc,desc)
        :param sort_by_creation_time: optional - enum (asc,desc)
        :param page_number: page number
        :param limit: maximum number of incidents to return per page
        :param gte_creation_time_milliseconds: greater than time in milliseconds
        :return: list of incidents from the /incidents/get_incidents/ endpoint
        """
        # Pagination is expressed to the API as an absolute [search_from, search_to) window.
        search_from = page_number * limit
        search_to = search_from + limit
        request_data = {
            'search_from': search_from,
            'search_to': search_to,
        }
        # The API accepts a single sort field, so the two sort options are mutually exclusive.
        if sort_by_creation_time and sort_by_modification_time:
            raise ValueError('Should be provide either sort_by_creation_time or '
                             'sort_by_modification_time. Can\'t provide both')
        if sort_by_creation_time:
            request_data['sort'] = {
                'field': 'creation_time',
                'keyword': sort_by_creation_time
            }
        elif sort_by_modification_time:
            request_data['sort'] = {
                'field': 'modification_time',
                'keyword': sort_by_modification_time
            }
        filters = []
        if incident_id_list is not None and len(incident_id_list) > 0:
            filters.append({'field': 'incident_id_list', 'operator': 'in', 'value': incident_id_list})
        # Creation/modification time bounds are converted from TIME_FORMAT strings to epoch timestamps.
        if lte_creation_time:
            filters.append({'field': 'creation_time', 'operator': 'lte',
                            'value': date_to_timestamp(lte_creation_time, TIME_FORMAT)})
        if gte_creation_time:
            filters.append({'field': 'creation_time', 'operator': 'gte',
                            'value': date_to_timestamp(gte_creation_time, TIME_FORMAT)})
        if lte_modification_time:
            filters.append({'field': 'modification_time', 'operator': 'lte',
                            'value': date_to_timestamp(lte_modification_time, TIME_FORMAT)})
        if gte_modification_time:
            filters.append({'field': 'modification_time', 'operator': 'gte',
                            'value': date_to_timestamp(gte_modification_time, TIME_FORMAT)})
        # Millisecond bound is already numeric; only applied when strictly positive.
        if gte_creation_time_milliseconds > 0:
            filters.append({'field': 'creation_time', 'operator': 'gte',
                            'value': gte_creation_time_milliseconds})
        if status:
            filters.append({'field': 'status', 'operator': 'eq', 'value': status})
        if len(filters) > 0:
            request_data['filters'] = filters
        res = self._http_request(
            method='POST',
            url_suffix='/incidents/get_incidents/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )
        incidents = res.get('reply').get('incidents', [])
        return incidents

    def update_incident(self, incident_id, assigned_user_mail, assigned_user_pretty_name, status, severity,
                        resolve_comment, unassign_user):
        """Update one incident's assignment, status, severity and/or resolve comment
        via /incidents/update_incident/. Returns nothing; raises ValueError on
        conflicting assignment arguments."""
        update_data = {}
        if unassign_user and (assigned_user_mail or assigned_user_pretty_name):
            raise ValueError("Can't provide both assignee_email/assignee_name and unassign_user")
        if unassign_user:
            # 'none' is the API sentinel that clears the current assignment.
            update_data['assigned_user_mail'] = 'none'
        if assigned_user_mail:
            update_data['assigned_user_mail'] = assigned_user_mail
        if assigned_user_pretty_name:
            update_data['assigned_user_pretty_name'] = assigned_user_pretty_name
        if status:
            update_data['status'] = status
        if severity:
            # API field is 'manual_severity' (analyst override), not 'severity'.
            update_data['manual_severity'] = severity
        if resolve_comment:
            update_data['resolve_comment'] = resolve_comment
        request_data = {
            'incident_id': incident_id,
            'update_data': update_data,
        }
        self._http_request(
            method='POST',
            url_suffix='/incidents/update_incident/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )

    def get_endpoints(self,
                      endpoint_id_list=None,
                      dist_name=None,
                      ip_list=None,
                      group_name=None,
                      platform=None,
                      alias_name=None,
                      isolate=None,
                      hostname=None,
                      page_number=0,
                      limit=30,
                      first_seen_gte=None,
                      first_seen_lte=None,
                      last_seen_gte=None,
                      last_seen_lte=None,
                      sort_by_first_seen=None,
                      sort_by_last_seen=None,
                      status=None,
                      no_filter=False
                      ):
        """Query endpoints: unfiltered via /endpoints/get_endpoints/ when no_filter
        is True, otherwise filtered via /endpoints/get_endpoint/ (see continuation
        below). Returns a list of endpoint dicts."""
        search_from = page_number * limit
        search_to = search_from + limit
        request_data = {
            'search_from': search_from,
            'search_to': search_to,
        }
        if no_filter:
            reply = self._http_request(
                method='POST',
                url_suffix='/endpoints/get_endpoints/',
                json_data={},
                timeout=self.timeout
            )
            # The unfiltered endpoint returns everything; pagination is applied client-side.
            endpoints = reply.get('reply')[search_from:search_to]
            for endpoint in endpoints:
                # Normalize: fall back to agent_id when endpoint_id is absent.
                if not endpoint.get('endpoint_id'):
                    endpoint['endpoint_id'] = endpoint.get('agent_id')
        else:
            filters = []
            if status:
                # NOTE(review): operator is upper-case 'IN' here, lower-case 'in' elsewhere — presumably both accepted; confirm.
                filters.append({'field': 'endpoint_status', 'operator': 'IN', 'value': [status]})
            if endpoint_id_list:
                filters.append({'field': 'endpoint_id_list', 'operator': 'in', 'value': endpoint_id_list})
            if dist_name:
                filters.append({'field': 'dist_name', 'operator': 'in', 'value': dist_name})
            if ip_list:
                filters.append({'field': 'ip_list', 'operator': 'in', 'value': ip_list})
            if group_name:
                filters.append({'field': 'group_name', 'operator': 'in', 'value': group_name})
            if platform:
                filters.append({'field': 'platform', 'operator': 'in', 'value': platform})
            if alias_name:
                filters.append({'field': 'alias', 'operator': 'in', 'value': alias_name})
            if isolate:
                filters.append({
                    'field': 'isolate',
                    'operator': 'in',
                    'value': [isolate]
                })
            if hostname:
                filters.append({'field': 'hostname', 'operator': 'in', 'value': hostname})
            if first_seen_gte:
                filters.append({'field': 'first_seen', 'operator': 'gte', 'value': first_seen_gte})
            if first_seen_lte:
                filters.append({'field': 'first_seen', 'operator': 'lte', 'value': first_seen_lte})
            if last_seen_gte:
                filters.append({'field': 'last_seen', 'operator': 'gte', 'value': last_seen_gte})
            if last_seen_lte:
                filters.append({'field': 'last_seen', 'operator': 'lte', 'value': last_seen_lte})
            # search_from is 0 for the first page, so these re-assignments are no-ops for page 0.
            if search_from:
                request_data['search_from'] = search_from
            if search_to:
                request_data['search_to'] = search_to
            # At most one sort key is sent; first_seen wins when both are given.
            if sort_by_first_seen:
                request_data['sort'] = {
                    'field': 'first_seen',
                    'keyword': sort_by_first_seen
                }
            elif sort_by_last_seen:
                request_data['sort'] = {
                    'field': 'last_seen',
                    'keyword': sort_by_last_seen
                }
            request_data['filters'] = filters
            reply = self._http_request(
                method='POST',
                url_suffix='/endpoints/get_endpoint/',
                json_data={'request_data': request_data},
                timeout=self.timeout
            )
            endpoints = reply.get('reply').get('endpoints', [])
        return endpoints

    def isolate_endpoint(self, endpoint_id, incident_id=None):
        """Request isolation of a single endpoint via /endpoints/isolate.
        Returns the API 'reply' payload."""
        request_data = {
            'endpoint_id': endpoint_id,
        }
        if incident_id:
            request_data['incident_id'] = incident_id
        reply = self._http_request(
            method='POST',
            url_suffix='/endpoints/isolate',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )
        return reply.get('reply')

    def unisolate_endpoint(self, endpoint_id, incident_id=None):
        """Request un-isolation of a single endpoint via /endpoints/unisolate.
        Returns the API 'reply' payload."""
        request_data = {
            'endpoint_id': endpoint_id,
        }
        if incident_id:
            request_data['incident_id'] = incident_id
        reply = self._http_request(
            method='POST',
            url_suffix='/endpoints/unisolate',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )
        return reply.get('reply')

    def get_distribution_url(self, distribution_id, package_type):
        """Return the download URL for a created agent-installer distribution."""
        reply = self._http_request(
            method='POST',
            url_suffix='/distributions/get_dist_url/',
            json_data={
                'request_data': {
                    'distribution_id': distribution_id,
                    'package_type': package_type
                }
            },
            timeout=self.timeout
        )
        return reply.get('reply').get('distribution_url')

    def get_distribution_status(self, distribution_id):
        """Return the creation status string of a distribution."""
        reply = self._http_request(
            method='POST',
            url_suffix='/distributions/get_status/',
            json_data={
                'request_data': {
                    'distribution_id': distribution_id
                }
            },
            timeout=self.timeout
        )
        return reply.get('reply').get('status')

    def get_distribution_versions(self):
        """Return the agent versions available for distribution, per platform."""
        reply = self._http_request(
            method='POST',
            url_suffix='/distributions/get_versions/',
            json_data={},
            timeout=self.timeout
        )
        return reply.get('reply')

    def create_distribution(self, name, platform, package_type, agent_version, description):
        """Create an agent installer ('standalone') or upgrade package and return
        its distribution_id. For 'upgrade', the version is keyed per platform."""
        request_data = {}
        if package_type == 'standalone':
            request_data = {
                'name': name,
                'platform': platform,
                'package_type': package_type,
                'agent_version': agent_version,
                'description': description,
            }
        elif package_type == 'upgrade':
            request_data = {
                'name': name,
                'package_type': package_type,
                'description': description,
            }
            # Upgrade payloads carry the version under a platform-specific key instead of 'platform'.
            if platform == 'windows':
                request_data['windows_version'] = agent_version
            elif platform == 'linux':
                request_data['linux_version'] = agent_version
            elif platform == 'macos':
                request_data['macos_version'] = agent_version
        reply = self._http_request(
            method='POST',
            url_suffix='/distributions/create/',
            json_data={
                'request_data': request_data
            },
            timeout=self.timeout
        )
        return reply.get('reply').get('distribution_id')

    def audit_management_logs(self, email, result, _type, sub_type, search_from, search_to, timestamp_gte,
                              timestamp_lte, sort_by, sort_order):
        """Fetch management audit logs (/audits/management_logs/) filtered by the
        given fields; returns the 'data' list from the reply."""
        request_data: Dict[str, Any] = {}
        filters = []
        if email:
            filters.append({'field': 'email', 'operator': 'in', 'value': email})
        if result:
            filters.append({'field': 'result', 'operator': 'in', 'value': result})
        if _type:
            filters.append({'field': 'type', 'operator': 'in', 'value': _type})
        if sub_type:
            filters.append({'field': 'sub_type', 'operator': 'in', 'value': sub_type})
        if timestamp_gte:
            filters.append({'field': 'timestamp', 'operator': 'gte', 'value': timestamp_gte})
        if timestamp_lte:
            filters.append({'field': 'timestamp', 'operator': 'lte', 'value': timestamp_lte})
        if filters:
            request_data['filters'] = filters
        if search_from > 0:
            request_data['search_from'] = search_from
        if search_to:
            request_data['search_to'] = search_to
        if sort_by:
            request_data['sort'] = {
                'field': sort_by,
                'keyword': sort_order
            }
        reply = self._http_request(
            method='POST',
            url_suffix='/audits/management_logs/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )
        return reply.get('reply').get('data', [])

    def get_audit_agent_reports(self, endpoint_ids, endpoint_names, result, _type, sub_type, search_from,
                                search_to, timestamp_gte, timestamp_lte, sort_by, sort_order):
        """Fetch agent audit reports (/audits/agents_reports/) filtered by the
        given fields; returns the 'data' list from the reply."""
        request_data: Dict[str, Any] = {}
        filters = []
        if endpoint_ids:
            filters.append({'field': 'endpoint_id', 'operator': 'in', 'value': endpoint_ids})
        if endpoint_names:
            filters.append({'field': 'endpoint_name', 'operator': 'in', 'value': endpoint_names})
        if result:
            filters.append({'field': 'result', 'operator': 'in', 'value': result})
        if _type:
            filters.append({'field': 'type', 'operator': 'in', 'value': _type})
        if sub_type:
            filters.append({'field': 'sub_type', 'operator': 'in', 'value': sub_type})
        if timestamp_gte:
            filters.append({'field': 'timestamp', 'operator': 'gte', 'value': timestamp_gte})
        if timestamp_lte:
            filters.append({'field': 'timestamp', 'operator': 'lte', 'value': timestamp_lte})
        if filters:
            request_data['filters'] = filters
        if search_from > 0:
            request_data['search_from'] = search_from
        if search_to:
            request_data['search_to'] = search_to
        if sort_by:
            request_data['sort'] = {
                'field': sort_by,
                'keyword': sort_order
            }
        reply = self._http_request(
            method='POST',
            url_suffix='/audits/agents_reports/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )
        return reply.get('reply').get('data', [])

    def blocklist_files(self, hash_list, comment=None, incident_id=None, detailed_response=False):
        """Add file hashes to the blocklist (/hash_exceptions/blocklist/).
        Returns the API 'reply' payload (continued below)."""
        request_data: Dict[str, Any] = {"hash_list":
                                        hash_list}
        if comment:
            request_data["comment"] = comment
        if incident_id:
            request_data['incident_id'] = incident_id
        if detailed_response:
            request_data['detailed_response'] = detailed_response
        self._headers['content-type'] = 'application/json'
        # 500 is deliberately accepted here (partial-failure responses) — do not "fix" to 2xx-only.
        reply = self._http_request(
            method='POST',
            url_suffix='/hash_exceptions/blocklist/',
            json_data={'request_data': request_data},
            ok_codes=(200, 201, 500,),
            timeout=self.timeout
        )
        return reply.get('reply')

    def remove_blocklist_files(self, hash_list, comment=None, incident_id=None):
        """Remove file hashes from the blocklist (/hash_exceptions/blocklist/remove/)."""
        request_data: Dict[str, Any] = {"hash_list": hash_list}
        if comment:
            request_data["comment"] = comment
        if incident_id:
            request_data['incident_id'] = incident_id
        self._headers['content-type'] = 'application/json'
        reply = self._http_request(
            method='POST',
            url_suffix='/hash_exceptions/blocklist/remove/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )
        return reply.get('reply')

    def allowlist_files(self, hash_list, comment=None, incident_id=None, detailed_response=False):
        """Add file hashes to the allowlist (/hash_exceptions/allowlist/)."""
        request_data: Dict[str, Any] = {"hash_list": hash_list}
        if comment:
            request_data["comment"] = comment
        if incident_id:
            request_data['incident_id'] = incident_id
        if detailed_response:
            request_data['detailed_response'] = detailed_response
        self._headers['content-type'] = 'application/json'
        reply = self._http_request(
            method='POST',
            url_suffix='/hash_exceptions/allowlist/',
            json_data={'request_data': request_data},
            ok_codes=(201, 200),
            timeout=self.timeout
        )
        return reply.get('reply')

    def remove_allowlist_files(self, hash_list, comment=None, incident_id=None):
        """Remove file hashes from the allowlist (/hash_exceptions/allowlist/remove/)."""
        request_data: Dict[str, Any] = {"hash_list": hash_list}
        if comment:
            request_data["comment"] = comment
        if incident_id:
            request_data['incident_id'] = incident_id
        self._headers['content-type'] = 'application/json'
        reply = self._http_request(
            method='POST',
            url_suffix='/hash_exceptions/allowlist/remove/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )
        return reply.get('reply')

    def quarantine_files(self, endpoint_id_list, file_path, file_hash, incident_id):
        """Quarantine a file (identified by path + hash) on the given endpoints
        via /endpoints/quarantine/."""
        request_data: Dict[str, Any] = {}
        filters = []
        if endpoint_id_list:
            filters.append({'field': 'endpoint_id_list', 'operator': 'in', 'value': endpoint_id_list})
        if filters:
            request_data['filters'] = filters
        request_data['file_path'] = file_path
        request_data['file_hash'] = file_hash
        if incident_id:
            request_data['incident_id'] = incident_id
        self._headers['content-type'] = 'application/json'
        reply = self._http_request(
            method='POST',
            url_suffix='/endpoints/quarantine/',
            json_data={'request_data': request_data},
            ok_codes=(200, 201),
            timeout=self.timeout
        )
        return reply.get('reply')

    def restore_file(self, file_hash, endpoint_id=None, incident_id=None):
        """Restore a previously quarantined file via /endpoints/restore/."""
        request_data: Dict[str, Any] = {'file_hash': file_hash}
        if incident_id:
            request_data['incident_id'] = incident_id
        if endpoint_id:
            request_data['endpoint_id'] = endpoint_id
        self._headers['content-type'] = 'application/json'
        reply = self._http_request(
            method='POST',
            url_suffix='/endpoints/restore/',
            json_data={'request_data': request_data},
            ok_codes=(200, 201),
            timeout=self.timeout
        )
        return reply.get('reply')

    def endpoint_scan(self, url_suffix, endpoint_id_list=None, dist_name=None, gte_first_seen=None,
                      gte_last_seen=None, lte_first_seen=None, lte_last_seen=None, ip_list=None, group_name=None,
                      platform=None, alias=None, isolate=None, hostname: list = None, incident_id=None):
        """Start (or abort) an endpoint scan; the caller supplies the url_suffix
        (scan vs. abort endpoint). When no filter is given, the literal string
        'all' is sent to target every endpoint."""
        request_data: Dict[str, Any] = {}
        filters = []
        if endpoint_id_list:
            filters.append({'field': 'endpoint_id_list', 'operator': 'in', 'value': endpoint_id_list})
        if dist_name:
            filters.append({'field': 'dist_name', 'operator': 'in', 'value': dist_name})
        if ip_list:
            filters.append({'field': 'ip_list', 'operator': 'in', 'value': ip_list})
        if group_name:
            filters.append({'field': 'group_name', 'operator': 'in', 'value': group_name})
        if platform:
            filters.append({'field': 'platform', 'operator': 'in', 'value': platform})
        if alias:
            filters.append({'field': 'alias', 'operator': 'in', 'value': alias})
        if isolate:
            filters.append({'field': 'isolate', 'operator': 'in', 'value': [isolate]})
        if hostname:
            filters.append({'field': 'hostname', 'operator': 'in', 'value': hostname})
        if gte_first_seen:
            filters.append({'field': 'first_seen', 'operator': 'gte', 'value': gte_first_seen})
        if lte_first_seen:
            filters.append({'field': 'first_seen', 'operator': 'lte', 'value': lte_first_seen})
        if gte_last_seen:
            filters.append({'field': 'last_seen', 'operator': 'gte', 'value': gte_last_seen})
        if lte_last_seen:
            filters.append({'field': 'last_seen', 'operator': 'lte', 'value': lte_last_seen})
        if filters:
            request_data['filters'] = filters
        else:
            # No filters means scan everything — API expects the literal string 'all'.
            request_data['filters'] = 'all'
        if incident_id:
            request_data['incident_id'] = incident_id
        self._headers['content-type'] = 'application/json'
        reply = self._http_request(
            method='POST',
            url_suffix=url_suffix,
            json_data={'request_data': request_data},
            ok_codes=(200, 201),
            timeout=self.timeout
        )
        return reply.get('reply')

    def get_quarantine_status(self, file_path, file_hash, endpoint_id):
        """Return the quarantine status entry for one (endpoint, path, hash) triple.
        Raises TypeError when the API reply is not the expected list."""
        request_data: Dict[str, Any] = {'files': [{
            'endpoint_id': endpoint_id,
            'file_path': file_path,
            'file_hash': file_hash
        }]}
        self._headers['content-type'] = 'application/json'
        reply = self._http_request(
            method='POST',
            url_suffix='/quarantine/status/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )
        reply_content = reply.get('reply')
        if isinstance(reply_content, list):
            # One file was queried, so the single status entry is returned.
            return reply_content[0]
        else:
            raise TypeError(f'got unexpected response from api: {reply_content}\n')

    def delete_endpoints(self, endpoint_ids: list):
        """Delete the given endpoints via /endpoints/delete/. No return value."""
        request_data: Dict[str, Any] = {
            'filters': [
                {
                    'field': 'endpoint_id_list',
                    'operator': 'in',
                    'value': endpoint_ids
                }
            ]
        }
        self._http_request(
            method='POST',
            url_suffix='/endpoints/delete/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )

    def get_policy(self, endpoint_id) -> Dict[str, Any]:
        """Return the policy applied to a single endpoint (/endpoints/get_policy/)."""
        request_data: Dict[str, Any] = {
            'endpoint_id': endpoint_id
        }
        reply = self._http_request(
            method='POST',
            url_suffix='/endpoints/get_policy/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )
        return reply.get('reply')

    def report_incorrect_wildfire(self, file_hash: str, new_verdict: int, reason: str, email: str) -> Dict[str, Any]:
        """Report an incorrect WildFire verdict for a file hash. Goes through the
        internal demisto._apiCall bridge ('wfReportIncorrectVerdict'), not _http_request."""
        request_data: Dict[str, Any] = {
            "hash": file_hash,
            "new_verdict": new_verdict,
            "reason": reason,
            "email": email,
        }
        reply = demisto._apiCall(name="wfReportIncorrectVerdict", params=None, data=json.dumps(request_data))
        return reply

    def get_original_alerts(self, alert_id_list):
        """Return the raw/original alert payloads for the given alert IDs.
        NOTE(review): unlike sibling methods, no timeout= is passed here — confirm whether that is intentional."""
        res = self._http_request(
            method='POST',
            url_suffix='/alerts/get_original_alerts/',
            json_data={
                'request_data': {
                    'alert_id_list': alert_id_list,
                }
            },
        )
        return res.get('reply', {})

    def get_endpoint_device_control_violations(self, endpoint_ids: list, type_of_violation, timestamp_gte: int,
                                               timestamp_lte: int, ip_list: list, vendor: list, vendor_id: list,
                                               product: list, product_id: list, serial: list, hostname: list,
                                               violation_ids: list, username: list) \
            -> Dict[str, Any]:
        """Fetch device-control violations (/device_control/get_violations/)
        filtered by any of the provided list arguments plus a timestamp window."""
        arg_list = {'type': type_of_violation,
                    'endpoint_id_list': endpoint_ids,
                    'ip_list': ip_list,
                    'vendor': vendor,
                    'vendor_id': vendor_id,
                    'product': product,
                    'product_id': product_id,
                    'serial': serial,
                    'hostname': hostname,
                    'violation_id_list': violation_ids,
                    'username': username
                    }
        # Only non-empty lists (with a truthy first element) become 'in' filters.
        filters: list = [{
            'field': arg_key,
            'operator': 'in',
            'value': arg_val
        } for arg_key, arg_val in arg_list.items() if arg_val and arg_val[0]]
        if timestamp_lte:
            filters.append({'field': 'timestamp', 'operator': 'lte', 'value': timestamp_lte})
        if timestamp_gte:
            filters.append({'field': 'timestamp', 'operator': 'gte', 'value': timestamp_gte})
        request_data: Dict[str, Any] = {
            'filters': filters
        }
        reply = self._http_request(
            method='POST',
            url_suffix='/device_control/get_violations/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )
        return reply.get('reply')

    def generate_files_dict_with_specific_os(self, windows: list, linux: list, macos: list) -> Dict[str, list]:
        """Build the per-OS file-path mapping for file retrieval from explicitly
        OS-grouped path lists. Raises ValueError when no path is given."""
        if not windows and not linux and not macos:
            raise ValueError('You should enter at least one path.')
        files = {}
        if windows:
            files['windows'] = windows
        if linux:
            files['linux'] = linux
        if macos:
            files['macos'] = macos
        return files

    def retrieve_file(self, endpoint_id_list: list, windows: list, linux: list, macos: list, file_path_list: list,
                      incident_id: Optional[int]) -> Dict[str, Any]:
        """Request file retrieval from endpoints (/endpoints/file_retrieval/)."""
        # there are 2 options, either the paths are given with separation to a specific os or without
        # it using generic_file_path
        if file_path_list:
            files = self.generate_files_dict(
                endpoint_id_list=endpoint_id_list,
                file_path_list=file_path_list
            )
        else:
            files = self.generate_files_dict_with_specific_os(windows=windows, linux=linux, macos=macos)
        request_data: Dict[str, Any] = {
            'filters': [
                {
                    'field': 'endpoint_id_list',
                    'operator': 'in',
                    'value': endpoint_id_list
                }
            ],
            'files': files,
        }
        if incident_id:
            request_data['incident_id'] = incident_id
        reply = self._http_request(
            method='POST',
            url_suffix='/endpoints/file_retrieval/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )
        return reply.get('reply')

    def generate_files_dict(self, endpoint_id_list: list, file_path_list: list) -> Dict[str, Any]:
        """Build the per-OS file mapping by looking up each endpoint's os_type.
        endpoint_id_list and file_path_list are matched pairwise, so they must
        be the same length. Raises ValueError on length mismatch or unknown endpoint."""
        files: dict = {"windows": [], "linux": [], "macos": []}
        if len(endpoint_id_list) != len(file_path_list):
            raise ValueError("The endpoint_ids list must be in the same length as the generic_file_path")
        for endpoint_id, file_path in zip(endpoint_id_list, file_path_list):
            # One lookup per endpoint to discover its OS bucket.
            endpoints = self.get_endpoints(endpoint_id_list=[endpoint_id])
            if len(endpoints) == 0 or not isinstance(endpoints, list):
                raise ValueError(f'Error: Endpoint {endpoint_id} was not found')
            endpoint = endpoints[0]
            endpoint_os_type = endpoint.get('os_type')
            if 'windows' in endpoint_os_type.lower():
                files['windows'].append(file_path)
            elif 'linux' in endpoint_os_type.lower():
                files['linux'].append(file_path)
            elif 'macos' in endpoint_os_type.lower():
                files['macos'].append(file_path)
        # remove keys with no value
        files = {k: v for k, v in files.items() if v}
        return files
    def retrieve_file_details(self, action_id: int) -> Dict[str, Any]:
        """Return per-endpoint details of a file-retrieval group action."""
        request_data: Dict[str, Any] = {
            'group_action_id': action_id
        }
        reply = self._http_request(
            method='POST',
            url_suffix='/actions/file_retrieval_details/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )
        return reply.get('reply').get('data')

    def get_scripts(self, name: list, description: list, created_by: list, windows_supported,
                    linux_supported, macos_supported, is_high_risk) -> Dict[str, Any]:
        """List available agent scripts (/scripts/get_scripts/) filtered by the
        given attributes."""
        arg_list = {'name': name,
                    'description': description,
                    'created_by': created_by,
                    'windows_supported': windows_supported,
                    'linux_supported': linux_supported,
                    'macos_supported': macos_supported,
                    'is_high_risk': is_high_risk
                    }
        # Only non-empty values (with a truthy first element) become 'in' filters.
        filters: list = [{
            'field': arg_key,
            'operator': 'in',
            'value': arg_val
        } for arg_key, arg_val in arg_list.items() if arg_val and arg_val[0]]
        request_data: Dict[str, Any] = {
            'filters': filters
        }
        reply = self._http_request(
            method='POST',
            url_suffix='/scripts/get_scripts/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )
        return reply.get('reply')

    def get_script_metadata(self, script_uid) -> Dict[str, Any]:
        """Return metadata (inputs, outputs, platforms) for one script UID."""
        request_data: Dict[str, Any] = {
            'script_uid': script_uid
        }
        reply = self._http_request(
            method='POST',
            url_suffix='/scripts/get_script_metadata/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )
        return reply.get('reply')

    def get_script_code(self, script_uid) -> Dict[str, Any]:
        """Return the source code of one script UID."""
        request_data: Dict[str, Any] = {
            'script_uid': script_uid
        }
        reply = self._http_request(
            method='POST',
            url_suffix='/scripts/get_script_code/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )
        return reply.get('reply')

    @logger
    def run_script(self,
                   script_uid: str, endpoint_ids: list, parameters: Dict[str, Any], timeout: int,
                   incident_id: Optional[int],
                   ) -> Dict[str, Any]:
        """Run an existing script on the given endpoints (/scripts/run_script/).
        `timeout` is the script's own execution timeout; the HTTP request still
        uses self.timeout."""
        filters: list = [{
            'field': 'endpoint_id_list',
            'operator': 'in',
            'value': endpoint_ids
        }]
        request_data: Dict[str, Any] = {'script_uid': script_uid, 'timeout': timeout, 'filters': filters,
                                        'parameters_values': parameters}
        if incident_id:
            request_data['incident_id'] = incident_id
        return self._http_request(
            method='POST',
            url_suffix='/scripts/run_script/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )

    @logger
    def run_snippet_code_script(self, snippet_code: str, endpoint_ids: list,
                                incident_id: Optional[int] = None) -> Dict[str, Any]:
        """Run ad-hoc snippet code on the given endpoints (/scripts/run_snippet_code_script)."""
        request_data: Dict[str, Any] = {
            'filters': [{
                'field': 'endpoint_id_list',
                'operator': 'in',
                'value': endpoint_ids
            }],
            'snippet_code': snippet_code,
        }
        if incident_id:
            request_data['incident_id'] = incident_id
        return self._http_request(
            method='POST',
            url_suffix='/scripts/run_snippet_code_script',
            json_data={
                'request_data': request_data
            },
            timeout=self.timeout,
        )

    @logger
    def get_script_execution_status(self, action_id: str) -> Dict[str, Any]:
        """Return the execution status of a script action."""
        request_data: Dict[str, Any] = {
            'action_id': action_id
        }
        return self._http_request(
            method='POST',
            url_suffix='/scripts/get_script_execution_status/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )

    @logger
    def get_script_execution_results(self, action_id: str) -> Dict[str, Any]:
        """Return the per-endpoint results of a script action."""
        return self._http_request(
            method='POST',
            url_suffix='/scripts/get_script_execution_results',
            json_data={
                'request_data': {
                    'action_id': action_id
                }
            },
            timeout=self.timeout,
        )

    @logger
    def get_script_execution_result_files(self, action_id: str, endpoint_id: str) -> Dict[str, Any]:
        """Fetch the files produced by a script execution: first resolve the
        download link ('DATA'), then GET it as a raw response."""
        response = self._http_request(
            method='POST',
            url_suffix='/scripts/get_script_execution_results_files',
            json_data={
                'request_data': {
                    'action_id': action_id,
                    'endpoint_id': endpoint_id,
                }
            },
            timeout=self.timeout,
        )
        link = response.get('reply', {}).get('DATA')
        return self._http_request(
            method='GET',
            full_url=link,
            resp_type='response',
        )

    def action_status_get(self, action_id) -> Dict[str, Any]:
        """Return the per-endpoint status map of a group action."""
        request_data: Dict[str, Any] = {
            'group_action_id': action_id,
        }
        reply = self._http_request(
            method='POST',
            url_suffix='/actions/get_action_status/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )
        return reply.get('reply').get('data')

    def get_file(self, file_link):
        """Download raw file content from a previously obtained link."""
        reply = self._http_request(
            method='GET',
            url_suffix=file_link,
            timeout=self.timeout,
            resp_type='content'
        )
        return reply

    def add_exclusion(self, indicator, name, status="ENABLED", comment=None):
        """Create an alert exclusion rule (/alerts_exclusion/add/).
        NOTE(review): the `comment` parameter is accepted but never sent — confirm whether that is intentional."""
        request_data: Dict[str, Any] = {
            'indicator': indicator,
            'status': status,
            'name': name
        }
        res = self._http_request(
            method='POST',
            url_suffix='/alerts_exclusion/add/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )
        return res.get("reply")

    def delete_exclusion(self, alert_exclusion_id: int):
        """Delete an alert exclusion rule by its ID."""
        request_data: Dict[str, Any] = {
            'alert_exclusion_id': alert_exclusion_id,
        }
        res = self._http_request(
            method='POST',
            url_suffix='/alerts_exclusion/delete/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )
        return res.get("reply")

    def get_exclusion(self, limit, tenant_id=None, filter=None):
        """List alert exclusion rules, truncated client-side to `limit`.
        NOTE(review): parameter `filter` shadows the builtin — cannot be renamed
        here without touching callers."""
        request_data: Dict[str, Any] = {}
        if tenant_id:
            request_data['tenant_id'] = tenant_id
        if filter:
            request_data['filter_data'] = filter
        res = self._http_request(
            method='POST',
            url_suffix='/alerts_exclusion/',
            json_data={'request_data': request_data},
            timeout=self.timeout
        )
        reply = res.get("reply")
        return reply[:limit]


def create_endpoint_context(audit_logs):
    """Build Endpoint context entries (ID/Hostname/Domain) from audit log rows,
    skipping rows where all three fields are null."""
    endpoints = []
    for log in audit_logs:
        endpoint_details = {
            'ID': log.get('ENDPOINTID'),
            'Hostname': log.get('ENDPOINTNAME'),
            'Domain': log.get('DOMAIN'),
        }
        remove_nulls_from_dictionary(endpoint_details)
        if endpoint_details:
            endpoints.append(endpoint_details)
    return endpoints


def create_account_context(endpoints):
    """Build Account context entries (Username/Domain) from endpoints that have
    both a domain and a users list."""
    account_context = []
    for endpoint in endpoints:
        domain = endpoint.get('domain')
        if domain:
            users = endpoint.get('users', [])
            # in case the value of 'users' is None
            if users and isinstance(users, list):
                for user in users:
                    account_context.append({
                        'Username': user,
                        'Domain': domain,
                    })
    return account_context


def get_process_context(alert, process_type):
    """Build a process-indicator dict from an alert's `{process_type}_process_*`
    fields (process_type is e.g. actor/os_actor/causality_actor; continued below)."""
    process_context = {
        'Name':
            alert.get(f'{process_type}_process_image_name'),
        'MD5': alert.get(f'{process_type}_process_image_md5'),
        'SHA256': alert.get(f'{process_type}_process_image_sha256'),
        'PID': alert.get(f'{process_type}_process_os_pid'),
        'CommandLine': alert.get(f'{process_type}_process_command_line'),
        'Path': alert.get(f'{process_type}_process_image_path'),
        'Start Time': alert.get(f'{process_type}_process_execution_time'),
        'Hostname': alert.get('host_name'),
    }
    remove_nulls_from_dictionary(process_context)
    # If the process contains only 'HostName' , don't create an indicator
    if len(process_context.keys()) == 1 and 'Hostname' in process_context.keys():
        return {}
    return process_context


def add_to_ip_context(alert, ip_context):
    """Append the alert's local/remote action IPs (when present) to ip_context in place."""
    action_local_ip = alert.get('action_local_ip')
    action_remote_ip = alert.get('action_remote_ip')
    if action_local_ip:
        ip_context.append({
            'Address': action_local_ip,
        })
    if action_remote_ip:
        ip_context.append({
            'Address': action_remote_ip,
        })


def create_context_from_network_artifacts(network_artifacts, ip_context):
    """Build Domain context entries from network artifacts and append remote-IP
    entries (with GEO country) to ip_context in place. Returns the domain list."""
    domain_context = []
    if network_artifacts:
        for artifact in network_artifacts:
            domain = artifact.get('network_domain')
            if domain:
                domain_context.append({
                    'Name': domain,
                })
            network_ip_details = {
                'Address': artifact.get('network_remote_ip'),
                'GEO': {
                    'Country': artifact.get('network_country')},
            }
            remove_nulls_from_dictionary(network_ip_details)
            if network_ip_details:
                ip_context.append(network_ip_details)
    return domain_context


def update_incident_command(client, args):
    """Command handler: update a single incident from demisto args.
    Returns the legacy (readable, context, raw) triple."""
    incident_id = args.get('incident_id')
    assigned_user_mail = args.get('assigned_user_mail')
    assigned_user_pretty_name = args.get('assigned_user_pretty_name')
    status = args.get('status')
    severity = args.get('manual_severity')
    unassign_user = args.get('unassign_user') == 'true'
    resolve_comment = args.get('resolve_comment')
    client.update_incident(
        incident_id=incident_id,
        assigned_user_mail=assigned_user_mail,
        assigned_user_pretty_name=assigned_user_pretty_name,
        unassign_user=unassign_user,
        status=status,
        severity=severity,
        resolve_comment=resolve_comment
    )
    return f'Incident {incident_id} has been updated', None, None


def arg_to_int(arg, arg_name: str, required: bool = False):
    """Coerce a command argument to int, raising ValueError with arg_name on bad input.
    Returns None for a missing optional argument."""
    if arg is None:
        if required is True:
            raise ValueError(f'Missing "{arg_name}"')
        return None
    if isinstance(arg, str):
        if arg.isdigit():
            return int(arg)
        raise ValueError(f'Invalid number: "{arg_name}"="{arg}"')
    if isinstance(arg, int):
        return arg
    # NOTE(review): this *returns* a ValueError instance instead of raising it — almost certainly a bug.
    return ValueError(f'Invalid number: "{arg_name}"')


def get_endpoints_command(client, args):
    """Command handler: list endpoints, either unfiltered (when only
    limit/page/sort_order are supplied) or by the full filter set, and build
    Endpoint + Account context."""
    page_number = arg_to_int(
        arg=args.get('page', '0'),
        arg_name='Failed to parse "page". Must be a number.',
        required=True
    )
    limit = arg_to_int(
        arg=args.get('limit', '30'),
        arg_name='Failed to parse "limit". Must be a number.',
        required=True
    )
    # NOTE(review): this exact-key-list comparison is order-sensitive — confirm args ordering is stable upstream.
    if list(args.keys()) == ['limit', 'page', 'sort_order']:
        endpoints = client.get_endpoints(page_number=page_number, limit=limit, no_filter=True)
    else:
        endpoint_id_list = argToList(args.get('endpoint_id_list'))
        dist_name = argToList(args.get('dist_name'))
        ip_list = argToList(args.get('ip_list'))
        group_name = argToList(args.get('group_name'))
        platform = argToList(args.get('platform'))
        alias_name = argToList(args.get('alias_name'))
        isolate = args.get('isolate')
        hostname = argToList(args.get('hostname'))
        status = args.get('status')
        first_seen_gte = arg_to_timestamp(
            arg=args.get('first_seen_gte'),
            arg_name='first_seen_gte'
        )
        first_seen_lte = arg_to_timestamp(
            arg=args.get('first_seen_lte'),
            arg_name='first_seen_lte'
        )
        last_seen_gte = arg_to_timestamp(
            arg=args.get('last_seen_gte'),
            arg_name='last_seen_gte'
        )
        last_seen_lte = arg_to_timestamp(
            arg=args.get('last_seen_lte'),
            arg_name='last_seen_lte'
        )
        sort_by_first_seen = args.get('sort_by_first_seen')
        sort_by_last_seen = args.get('sort_by_last_seen')
        endpoints = client.get_endpoints(
            endpoint_id_list=endpoint_id_list,
            dist_name=dist_name,
            ip_list=ip_list,
            group_name=group_name,
            platform=platform,
            alias_name=alias_name,
            isolate=isolate,
            hostname=hostname,
            page_number=page_number,
            limit=limit,
            first_seen_gte=first_seen_gte,
            first_seen_lte=first_seen_lte,
            last_seen_gte=last_seen_gte,
            last_seen_lte=last_seen_lte,
            sort_by_first_seen=sort_by_first_seen,
            sort_by_last_seen=sort_by_last_seen,
            status=status
        )
    standard_endpoints = generate_endpoint_by_contex_standard(endpoints, False)
    endpoint_context_list = []
    for endpoint in standard_endpoints:
        endpoint_context = endpoint.to_context().get(Common.Endpoint.CONTEXT_PATH)
        endpoint_context_list.append(endpoint_context)
    context = {
        f'{INTEGRATION_CONTEXT_BRAND}.Endpoint(val.endpoint_id == obj.endpoint_id)': endpoints,
        Common.Endpoint.CONTEXT_PATH: endpoint_context_list
    }
    account_context = create_account_context(endpoints)
    if account_context:
        context[Common.Account.CONTEXT_PATH] = account_context
    return CommandResults(
        readable_output=tableToMarkdown('Endpoints', endpoints),
        outputs=context,
        raw_response=endpoints
    )


def convert_os_to_standard(endpoint_os):
    """Map a raw os_type string to the standard Endpoint OS name
    (Windows/Linux/Macos/Android), or '' when unrecognized."""
    os_type = ''
    endpoint_os = endpoint_os.lower()
    if 'windows' in endpoint_os:
        os_type = "Windows"
    elif 'linux' in endpoint_os:
        os_type = "Linux"
    elif 'macos' in endpoint_os:
        os_type = "Macos"
    elif 'android' in endpoint_os:
        os_type = "Android"
    return os_type


def get_endpoint_properties(single_endpoint):
    """Derive (status, is_isolated, hostname, ip) display values from one raw endpoint dict."""
    status = 'Online' if single_endpoint.get('endpoint_status', '').lower() == 'connected' else 'Offline'
    # 'unisolated' substring covers both unisolated and pending-unisolation states.
    is_isolated = 'No' if 'unisolated' in single_endpoint.get('is_isolated', '').lower() else 'Yes'
    hostname = single_endpoint['host_name'] if single_endpoint.get('host_name') else single_endpoint.get(
        'endpoint_name')
    ip = single_endpoint.get('ip')
    return status, is_isolated, hostname, ip


def generate_endpoint_by_contex_standard(endpoints, ip_as_string):
    """Convert raw endpoint dicts into Common.Endpoint standard-context objects."""
    standard_endpoints = []
    for single_endpoint in endpoints:
        status, is_isolated, hostname, ip = get_endpoint_properties(single_endpoint)
        # in the `core-get-endpoints` command the ip is returned as list, in order not to break bc we will keep it
        # in the `endpoint` command we use the standard
        if ip_as_string and isinstance(ip, list):
            ip = ip[0]
        os_type = convert_os_to_standard(single_endpoint.get('os_type', ''))
        endpoint = Common.Endpoint(
            id=single_endpoint.get('endpoint_id'),
            hostname=hostname,
            ip_address=ip,
            os=os_type,
            status=status,
            is_isolated=is_isolated,
            mac_address=single_endpoint.get('mac_address'),
            domain=single_endpoint.get('domain'),
            vendor=INTEGRATION_NAME)
        standard_endpoints.append(endpoint)
    return standard_endpoints


def endpoint_command(client, args):
    """Generic `endpoint` command handler: look up endpoints by id/ip/hostname
    and return one CommandResults (with indicator) per endpoint found."""
    endpoint_id_list = argToList(args.get('id'))
    endpoint_ip_list = argToList(args.get('ip'))
    endpoint_hostname_list = argToList(args.get('hostname'))
    endpoints = client.get_endpoints(
        endpoint_id_list=endpoint_id_list,
        ip_list=endpoint_ip_list,
        hostname=endpoint_hostname_list,
    )
    standard_endpoints = generate_endpoint_by_contex_standard(endpoints, True)
    command_results = []
    if standard_endpoints:
        for endpoint in standard_endpoints:
            endpoint_context = endpoint.to_context().get(Common.Endpoint.CONTEXT_PATH)
            hr = tableToMarkdown('Cortex Core Endpoint', endpoint_context)
            command_results.append(CommandResults(
                readable_output=hr,
                raw_response=endpoints,
                indicator=endpoint
            ))
    else:
        command_results.append(CommandResults(
            readable_output="No endpoints were found",
            raw_response=endpoints,
        ))
    return command_results


def isolate_endpoint_command(client, args):
    """Command handler: isolate one endpoint, short-circuiting on states where
    isolation is impossible or already in progress."""
    endpoint_id = args.get('endpoint_id')
    disconnected_should_return_error = not argToBoolean(args.get('suppress_disconnected_endpoint_error', False))
    incident_id = arg_to_number(args.get('incident_id'))
    endpoint = client.get_endpoints(endpoint_id_list=[endpoint_id])
    if len(endpoint) == 0:
        raise ValueError(f'Error: Endpoint {endpoint_id} was not found')
    endpoint = endpoint[0]
    endpoint_status = endpoint.get('endpoint_status')
    is_isolated = endpoint.get('is_isolated')
    if is_isolated == 'AGENT_ISOLATED':
        return CommandResults(
            readable_output=f'Endpoint {endpoint_id} already isolated.'
        )
    if is_isolated == 'AGENT_PENDING_ISOLATION':
        return CommandResults(
            readable_output=f'Endpoint {endpoint_id} pending isolation.'
        )
    if endpoint_status == 'UNINSTALLED':
        raise ValueError(f'Error: Endpoint {endpoint_id}\'s Agent is uninstalled and therefore can not be isolated.')
    if endpoint_status == 'DISCONNECTED':
        if disconnected_should_return_error:
            raise ValueError(f'Error: Endpoint {endpoint_id} is disconnected and therefore can not be isolated.')
        else:
            # Best-effort: queue the isolation even though the agent is offline.
            return CommandResults(
                readable_output=f'Warning: isolation action is pending for the following disconnected endpoint: {endpoint_id}.',
                outputs={f'{INTEGRATION_CONTEXT_BRAND}.Isolation.endpoint_id(val.endpoint_id == obj.endpoint_id)': endpoint_id}
            )
    if is_isolated == 'AGENT_PENDING_ISOLATION_CANCELLATION':
        raise ValueError(
            f'Error: Endpoint {endpoint_id} is pending isolation cancellation and therefore can not be isolated.'
        )
    result = client.isolate_endpoint(endpoint_id=endpoint_id, incident_id=incident_id)
    return CommandResults(
        readable_output=f'The isolation request has been submitted successfully on Endpoint {endpoint_id}.\n',
        outputs={f'{INTEGRATION_CONTEXT_BRAND}.Isolation.endpoint_id(val.endpoint_id == obj.endpoint_id)': endpoint_id},
        raw_response=result
    )


def unisolate_endpoint_command(client, args):
    """Command handler: un-isolate one endpoint, mirroring the state checks of
    isolate_endpoint_command."""
    endpoint_id = args.get('endpoint_id')
    incident_id = arg_to_number(args.get('incident_id'))
    disconnected_should_return_error = not argToBoolean(args.get('suppress_disconnected_endpoint_error', False))
    endpoint = client.get_endpoints(endpoint_id_list=[endpoint_id])
    if len(endpoint) == 0:
        raise ValueError(f'Error: Endpoint {endpoint_id} was not found')
    endpoint = endpoint[0]
    endpoint_status = endpoint.get('endpoint_status')
    is_isolated = endpoint.get('is_isolated')
    if is_isolated == 'AGENT_UNISOLATED':
        return CommandResults(
            readable_output=f'Endpoint {endpoint_id} already unisolated.'
        )
    if is_isolated == 'AGENT_PENDING_ISOLATION_CANCELLATION':
        return CommandResults(
            readable_output=f'Endpoint {endpoint_id} pending isolation cancellation.'
        )
    if endpoint_status == 'UNINSTALLED':
        raise ValueError(f'Error: Endpoint {endpoint_id}\'s Agent is uninstalled and therefore can not be un-isolated.')
    if endpoint_status == 'DISCONNECTED':
        if disconnected_should_return_error:
            raise ValueError(f'Error: Endpoint {endpoint_id} is disconnected and therefore can not be un-isolated.')
        else:
            # Best-effort: queue the un-isolation even though the agent is offline.
            return CommandResults(
                readable_output=f'Warning: un-isolation action is pending for the following disconnected '
                                f'endpoint: {endpoint_id}.',
                outputs={
                    f'{INTEGRATION_CONTEXT_BRAND}.UnIsolation.endpoint_id(val.endpoint_id == obj.endpoint_id)'
                    f'': endpoint_id}
            )
    if is_isolated == 'AGENT_PENDING_ISOLATION':
        raise ValueError(
            f'Error: Endpoint {endpoint_id} is pending isolation and therefore can not be un-isolated.'
        )
    result = client.unisolate_endpoint(endpoint_id=endpoint_id, incident_id=incident_id)
    return CommandResults(
        readable_output=f'The un-isolation request has been submitted successfully on Endpoint {endpoint_id}.\n',
        outputs={f'{INTEGRATION_CONTEXT_BRAND}.UnIsolation.endpoint_id(val.endpoint_id == obj.endpoint_id)': endpoint_id},
        raw_response=result
    )


def arg_to_timestamp(arg, arg_name: str, required: bool = False):
    """Coerce a command argument (digit string, date string, relative time, or
    number) into an epoch-milliseconds int. Raises ValueError on unparseable
    dates; returns the value as-is for int/float inputs.
    NOTE(review): falls through to an implicit None for any other type."""
    if arg is None:
        if required is True:
            raise ValueError(f'Missing "{arg_name}"')
        return None
    if isinstance(arg, str) and arg.isdigit():
        # timestamp that str - we just convert it to int
        return int(arg)
    if isinstance(arg, str):
        # if the arg is string of date format 2019-10-23T00:00:00 or "3 days", etc
        date = dateparser.parse(arg, settings={'TIMEZONE': 'UTC'})
        if date is None:
            # if d is None it means dateparser failed to parse it
            raise ValueError(f'Invalid date: {arg_name}')
        return int(date.timestamp() * 1000)
    if isinstance(arg, (int, float)):
        return arg


def get_audit_management_logs_command(client, args):
    """Command handler: fetch management audit logs and return the legacy
    (readable, context, raw) triple."""
    email = argToList(args.get('email'))
    result = argToList(args.get('result'))
    _type = argToList(args.get('type'))
    sub_type = argToList(args.get('sub_type'))
    timestamp_gte = arg_to_timestamp(
        arg=args.get('timestamp_gte'),
        arg_name='timestamp_gte'
    )
    timestamp_lte = arg_to_timestamp(
        arg=args.get('timestamp_lte'),
        arg_name='timestamp_lte'
    )
    page_number = arg_to_int(
        arg=args.get('page', 0),
        arg_name='Failed to parse "page". Must be a number.',
        required=True
    )
    limit = arg_to_int(
        arg=args.get('limit', 20),
        arg_name='Failed to parse "limit". Must be a number.',
        required=True
    )
    search_from = page_number * limit
    search_to = search_from + limit
    sort_by = args.get('sort_by')
    sort_order = args.get('sort_order', 'asc')
    audit_logs = client.audit_management_logs(
        email=email,
        result=result,
        _type=_type,
        sub_type=sub_type,
        timestamp_gte=timestamp_gte,
        timestamp_lte=timestamp_lte,
        search_from=search_from,
        search_to=search_to,
        sort_by=sort_by,
        sort_order=sort_order
    )
    return (
        tableToMarkdown('Audit Management Logs', audit_logs, [
            'AUDIT_ID',
            'AUDIT_RESULT',
            'AUDIT_DESCRIPTION',
            'AUDIT_OWNER_NAME',
            'AUDIT_OWNER_EMAIL',
            'AUDIT_ASSET_JSON',
            'AUDIT_ASSET_NAMES',
            'AUDIT_HOSTNAME',
            'AUDIT_REASON',
            'AUDIT_ENTITY',
            'AUDIT_ENTITY_SUBTYPE',
            'AUDIT_SESSION_ID',
            'AUDIT_CASE_ID',
            'AUDIT_INSERT_TIME'
        ]),
        {
            f'{INTEGRATION_CONTEXT_BRAND}.AuditManagementLogs(val.AUDIT_ID == obj.AUDIT_ID)': audit_logs
        },
        audit_logs
    )


def get_audit_agent_reports_command(client, args):
    """Command handler: fetch agent audit reports (continues past this chunk)."""
    endpoint_ids = argToList(args.get('endpoint_ids'))
    endpoint_names = argToList(args.get('endpoint_names'))
    result = argToList(args.get('result'))
    _type = argToList(args.get('type'))
    sub_type = argToList(args.get('sub_type'))
    timestamp_gte = arg_to_timestamp(
        arg=args.get('timestamp_gte'),
        arg_name='timestamp_gte'
    )
    timestamp_lte = arg_to_timestamp(
        arg=args.get('timestamp_lte'),
        arg_name='timestamp_lte'
    )
    page_number = arg_to_int(
        arg=args.get('page', 0),
        arg_name='Failed to parse "page". 
Must be a number.', required=True ) limit = arg_to_int( arg=args.get('limit', 20), arg_name='Failed to parse "limit". Must be a number.', required=True ) search_from = page_number * limit search_to = search_from + limit sort_by = args.get('sort_by') sort_order = args.get('sort_order', 'asc') audit_logs = client.get_audit_agent_reports( endpoint_ids=endpoint_ids, endpoint_names=endpoint_names, result=result, _type=_type, sub_type=sub_type, timestamp_gte=timestamp_gte, timestamp_lte=timestamp_lte, search_from=search_from, search_to=search_to, sort_by=sort_by, sort_order=sort_order ) integration_context = {f'{INTEGRATION_CONTEXT_BRAND}.AuditAgentReports': audit_logs} endpoint_context = create_endpoint_context(audit_logs) if endpoint_context: integration_context[Common.Endpoint.CONTEXT_PATH] = endpoint_context return ( tableToMarkdown('Audit Agent Reports', audit_logs), integration_context, audit_logs ) def get_distribution_url_command(client, args): distribution_id = args.get('distribution_id') package_type = args.get('package_type') url = client.get_distribution_url(distribution_id, package_type) return ( f'[Distribution URL]({url})', { 'Core.Distribution(val.id == obj.id)': { 'id': distribution_id, 'url': url } }, url ) def get_distribution_status_command(client, args): distribution_ids = argToList(args.get('distribution_ids')) distribution_list = [] for distribution_id in distribution_ids: status = client.get_distribution_status(distribution_id) distribution_list.append({ 'id': distribution_id, 'status': status }) return ( tableToMarkdown('Distribution Status', distribution_list, ['id', 'status']), { f'{INTEGRATION_CONTEXT_BRAND}.Distribution(val.id == obj.id)': distribution_list }, distribution_list ) def get_distribution_versions_command(client): versions = client.get_distribution_versions() readable_output = [] for operation_system in versions.keys(): os_versions = versions[operation_system] readable_output.append( tableToMarkdown(operation_system, os_versions 
or [], ['versions']) ) return ( '\n\n'.join(readable_output), { f'{INTEGRATION_CONTEXT_BRAND}.DistributionVersions': versions }, versions ) def create_distribution_command(client, args): name = args.get('name') platform = args.get('platform') package_type = args.get('package_type') description = args.get('description') agent_version = args.get('agent_version') if not platform == 'android' and not agent_version: # agent_version must be provided for all the platforms except android raise ValueError(f'Missing argument "agent_version" for platform "{platform}"') distribution_id = client.create_distribution( name=name, platform=platform, package_type=package_type, agent_version=agent_version, description=description ) distribution = { 'id': distribution_id, 'name': name, 'platform': platform, 'package_type': package_type, 'agent_version': agent_version, 'description': description } return ( f'Distribution {distribution_id} created successfully', { f'{INTEGRATION_CONTEXT_BRAND}.Distribution(val.id == obj.id)': distribution }, distribution ) def blocklist_files_command(client, args): hash_list = argToList(args.get('hash_list')) comment = args.get('comment') incident_id = arg_to_number(args.get('incident_id')) detailed_response = argToBoolean(args.get('detailed_response', False)) res = client.blocklist_files(hash_list=hash_list, comment=comment, incident_id=incident_id, detailed_response=detailed_response) if detailed_response: return CommandResults( readable_output=tableToMarkdown('Blocklist Files', res), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.blocklist', outputs=res, raw_response=res ) markdown_data = [{'added_hashes': file_hash} for file_hash in hash_list] return CommandResults( readable_output=tableToMarkdown('Blocklist Files', markdown_data, headers=['added_hashes'], headerTransform=pascalToSpace), outputs={f'{INTEGRATION_CONTEXT_BRAND}.blocklist.added_hashes.fileHash(val.fileHash == obj.fileHash)': hash_list}, raw_response=res ) def 
remove_blocklist_files_command(client: Client, args: Dict) -> CommandResults: hash_list = argToList(args.get('hash_list')) comment = args.get('comment') incident_id = arg_to_number(args.get('incident_id')) res = client.remove_blocklist_files(hash_list=hash_list, comment=comment, incident_id=incident_id) markdown_data = [{'removed_hashes': file_hash} for file_hash in hash_list] return CommandResults( readable_output=tableToMarkdown('Blocklist Files Removed', markdown_data, headers=['removed_hashes'], headerTransform=pascalToSpace), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.blocklist', outputs=markdown_data, raw_response=res ) def allowlist_files_command(client, args): hash_list = argToList(args.get('hash_list')) comment = args.get('comment') incident_id = arg_to_number(args.get('incident_id')) detailed_response = argToBoolean(args.get('detailed_response', False)) res = client.allowlist_files(hash_list=hash_list, comment=comment, incident_id=incident_id, detailed_response=detailed_response) if detailed_response: return CommandResults( readable_output=tableToMarkdown('Allowlist Files', res), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.blocklist', outputs=res, raw_response=res ) markdown_data = [{'added_hashes': file_hash} for file_hash in hash_list] return CommandResults( readable_output=tableToMarkdown('Allowlist Files', markdown_data, headers=['added_hashes'], headerTransform=pascalToSpace), outputs={f'{INTEGRATION_CONTEXT_BRAND}.allowlist.added_hashes.fileHash(val.fileHash == obj.fileHash)': hash_list}, raw_response=res ) def remove_allowlist_files_command(client, args): hash_list = argToList(args.get('hash_list')) comment = args.get('comment') incident_id = arg_to_number(args.get('incident_id')) res = client.remove_allowlist_files(hash_list=hash_list, comment=comment, incident_id=incident_id) markdown_data = [{'removed_hashes': file_hash} for file_hash in hash_list] return CommandResults( readable_output=tableToMarkdown('Allowlist Files Removed', 
markdown_data, headers=['removed_hashes'], headerTransform=pascalToSpace), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.allowlist', outputs=markdown_data, raw_response=res ) def quarantine_files_command(client, args): endpoint_id_list = argToList(args.get("endpoint_id_list")) file_path = args.get("file_path") file_hash = args.get("file_hash") incident_id = arg_to_number(args.get('incident_id')) reply = client.quarantine_files( endpoint_id_list=endpoint_id_list, file_path=file_path, file_hash=file_hash, incident_id=incident_id ) output = { 'endpointIdList': endpoint_id_list, 'filePath': file_path, 'fileHash': file_hash, 'actionId': reply.get("action_id") } return CommandResults( readable_output=tableToMarkdown('Quarantine files', output, headers=[*output], headerTransform=pascalToSpace), outputs={f'{INTEGRATION_CONTEXT_BRAND}.quarantineFiles.actionIds(val.actionId === obj.actionId)': output}, raw_response=reply ) def restore_file_command(client, args): file_hash = args.get('file_hash') endpoint_id = args.get('endpoint_id') incident_id = arg_to_number(args.get('incident_id')) reply = client.restore_file( file_hash=file_hash, endpoint_id=endpoint_id, incident_id=incident_id ) action_id = reply.get("action_id") return CommandResults( readable_output=tableToMarkdown('Restore files', {'Action Id': action_id}, ['Action Id']), outputs={f'{INTEGRATION_CONTEXT_BRAND}.restoredFiles.actionId(val.actionId == obj.actionId)': action_id}, raw_response=reply ) def get_quarantine_status_command(client, args): file_path = args.get('file_path') file_hash = args.get('file_hash') endpoint_id = args.get('endpoint_id') reply = client.get_quarantine_status( file_path=file_path, file_hash=file_hash, endpoint_id=endpoint_id ) output = { 'status': reply['status'], 'endpointId': reply['endpoint_id'], 'filePath': reply['file_path'], 'fileHash': reply['file_hash'] } return CommandResults( readable_output=tableToMarkdown('Quarantine files status', output, headers=[*output], 
headerTransform=pascalToSpace), outputs={f'{INTEGRATION_CONTEXT_BRAND}.quarantineFiles.status(val.fileHash === obj.fileHash &&' f'val.endpointId === obj.endpointId && val.filePath === obj.filePath)': output}, raw_response=reply ) def endpoint_scan_command(client, args): endpoint_id_list = argToList(args.get('endpoint_id_list')) dist_name = argToList(args.get('dist_name')) gte_first_seen = args.get('gte_first_seen') gte_last_seen = args.get('gte_last_seen') lte_first_seen = args.get('lte_first_seen') lte_last_seen = args.get('lte_last_seen') ip_list = argToList(args.get('ip_list')) group_name = argToList(args.get('group_name')) platform = argToList(args.get('platform')) alias = argToList(args.get('alias')) isolate = args.get('isolate') hostname = argToList(args.get('hostname')) incident_id = arg_to_number(args.get('incident_id')) validate_args_scan_commands(args) reply = client.endpoint_scan( url_suffix='/endpoints/scan/', endpoint_id_list=argToList(endpoint_id_list), dist_name=dist_name, gte_first_seen=gte_first_seen, gte_last_seen=gte_last_seen, lte_first_seen=lte_first_seen, lte_last_seen=lte_last_seen, ip_list=ip_list, group_name=group_name, platform=platform, alias=alias, isolate=isolate, hostname=hostname, incident_id=incident_id ) action_id = reply.get("action_id") context = { "actionId": action_id, "aborted": False } return CommandResults( readable_output=tableToMarkdown('Endpoint scan', {'Action Id': action_id}, ['Action Id']), outputs={f'{INTEGRATION_CONTEXT_BRAND}.endpointScan(val.actionId == obj.actionId)': context}, raw_response=reply ) def endpoint_scan_abort_command(client, args): endpoint_id_list = argToList(args.get('endpoint_id_list')) dist_name = argToList(args.get('dist_name')) gte_first_seen = args.get('gte_first_seen') gte_last_seen = args.get('gte_last_seen') lte_first_seen = args.get('lte_first_seen') lte_last_seen = args.get('lte_last_seen') ip_list = argToList(args.get('ip_list')) group_name = argToList(args.get('group_name')) platform = 
argToList(args.get('platform')) alias = argToList(args.get('alias')) isolate = args.get('isolate') hostname = argToList(args.get('hostname')) incident_id = arg_to_number(args.get('incident_id')) validate_args_scan_commands(args) reply = client.endpoint_scan( url_suffix='endpoints/abort_scan/', endpoint_id_list=argToList(endpoint_id_list), dist_name=dist_name, gte_first_seen=gte_first_seen, gte_last_seen=gte_last_seen, lte_first_seen=lte_first_seen, lte_last_seen=lte_last_seen, ip_list=ip_list, group_name=group_name, platform=platform, alias=alias, isolate=isolate, hostname=hostname, incident_id=incident_id ) action_id = reply.get("action_id") context = { "actionId": action_id, "aborted": True } return CommandResults( readable_output=tableToMarkdown('Endpoint abort scan', {'Action Id': action_id}, ['Action Id']), outputs={f'{INTEGRATION_CONTEXT_BRAND}.endpointScan(val.actionId == obj.actionId)': context}, raw_response=reply ) def validate_args_scan_commands(args): endpoint_id_list = argToList(args.get('endpoint_id_list')) dist_name = argToList(args.get('dist_name')) gte_first_seen = args.get('gte_first_seen') gte_last_seen = args.get('gte_last_seen') lte_first_seen = args.get('lte_first_seen') lte_last_seen = args.get('lte_last_seen') ip_list = argToList(args.get('ip_list')) group_name = argToList(args.get('group_name')) platform = argToList(args.get('platform')) alias = argToList(args.get('alias')) hostname = argToList(args.get('hostname')) all_ = argToBoolean(args.get('all', 'false')) # to prevent the case where an empty filtered command will trigger by default a scan on all the endpoints. err_msg = 'To scan/abort scan all the endpoints run this command with the \'all\' argument as True ' \ 'and without any other filters. This may cause performance issues.\n' \ 'To scan/abort scan some of the endpoints, please use the filter arguments.' 
if all_: if endpoint_id_list or dist_name or gte_first_seen or gte_last_seen or lte_first_seen or lte_last_seen \ or ip_list or group_name or platform or alias or hostname: raise Exception(err_msg) else: if not endpoint_id_list and not dist_name and not gte_first_seen and not gte_last_seen \ and not lte_first_seen and not lte_last_seen and not ip_list and not group_name and not platform \ and not alias and not hostname: raise Exception(err_msg) def sort_by_key(list_to_sort, main_key, fallback_key): """Sorts a given list elements by main_key for all elements with the key, uses sorting by fallback_key on all elements that dont have the main_key""" list_elements_with_main_key = [element for element in list_to_sort if element.get(main_key)] sorted_list = sorted(list_elements_with_main_key, key=itemgetter(main_key)) if len(list_to_sort) == len(sorted_list): return sorted_list list_elements_with_fallback_without_main = [element for element in list_to_sort if element.get(fallback_key) and not element.get(main_key)] sorted_list.extend(sorted(list_elements_with_fallback_without_main, key=itemgetter(fallback_key))) if len(sorted_list) == len(list_to_sort): return sorted_list list_elements_without_fallback_and_main = [element for element in list_to_sort if not element.get(fallback_key) and not element.get(main_key)] sorted_list.extend(list_elements_without_fallback_and_main) return sorted_list def drop_field_underscore(section): section_copy = section.copy() for field in section_copy.keys(): if '_' in field: section[field.replace('_', '')] = section.get(field) def reformat_sublist_fields(sublist): for section in sublist: drop_field_underscore(section) def handle_outgoing_incident_owner_sync(update_args): if 'owner' in update_args and demisto.params().get('sync_owners'): if update_args.get('owner'): user_info = demisto.findUser(username=update_args.get('owner')) if user_info: update_args['assigned_user_mail'] = user_info.get('email') else: # handle synced unassignment 
update_args['assigned_user_mail'] = None def handle_user_unassignment(update_args): if ('assigned_user_mail' in update_args and update_args.get('assigned_user_mail') in ['None', 'null', '', None]) \ or ('assigned_user_pretty_name' in update_args and update_args.get('assigned_user_pretty_name') in ['None', 'null', '', None]): update_args['unassign_user'] = 'true' update_args['assigned_user_mail'] = None update_args['assigned_user_pretty_name'] = None def handle_outgoing_issue_closure(update_args, inc_status): if inc_status == 2: update_args['status'] = XSOAR_RESOLVED_STATUS_TO_Core.get(update_args.get('closeReason', 'Other')) demisto.debug(f"Closing Remote Core incident with status {update_args['status']}") update_args['resolve_comment'] = update_args.get('closeNotes', '') def get_update_args(delta, inc_status): """Change the updated field names to fit the update command""" update_args = delta handle_outgoing_incident_owner_sync(update_args) handle_user_unassignment(update_args) if update_args.get('closingUserId'): handle_outgoing_issue_closure(update_args, inc_status) return update_args def update_remote_system_command(client, args): remote_args = UpdateRemoteSystemArgs(args) if remote_args.delta: demisto.debug(f'Got the following delta keys {str(list(remote_args.delta.keys()))} to update Core ' f'incident {remote_args.remote_incident_id}') try: if remote_args.incident_changed: update_args = get_update_args(remote_args.delta, remote_args.inc_status) update_args['incident_id'] = remote_args.remote_incident_id demisto.debug(f'Sending incident with remote ID [{remote_args.remote_incident_id}] to Core\n') update_incident_command(client, update_args) else: demisto.debug(f'Skipping updating remote incident fields [{remote_args.remote_incident_id}] ' f'as it is not new nor changed') return remote_args.remote_incident_id except Exception as e: demisto.debug(f"Error in Core outgoing mirror for incident {remote_args.remote_incident_id} \n" f"Error message: {str(e)}") return 
remote_args.remote_incident_id def delete_endpoints_command(client: Client, args: Dict[str, str]) -> Tuple[str, Any, Any]: endpoint_id_list: list = argToList(args.get('endpoint_ids')) client.delete_endpoints(endpoint_id_list) return f'Successfully deleted the following endpoints: {args.get("endpoint_ids")}', None, None def get_policy_command(client: Client, args: Dict[str, str]) -> Tuple[str, dict, Any]: endpoint_id = args.get('endpoint_id') reply = client.get_policy(endpoint_id) context = {'endpoint_id': endpoint_id, 'policy_name': reply.get('policy_name')} return ( f'The policy name of endpoint: {endpoint_id} is: {reply.get("policy_name")}.', { f'{INTEGRATION_CONTEXT_BRAND}.Policy(val.endpoint_id == obj.endpoint_id)': context }, reply ) def get_endpoint_device_control_violations_command(client: Client, args: Dict[str, str]) -> Tuple[str, dict, Any]: endpoint_ids: list = argToList(args.get('endpoint_ids')) type_of_violation = args.get('type') timestamp_gte: int = arg_to_timestamp( arg=args.get('timestamp_gte'), arg_name='timestamp_gte' ) timestamp_lte: int = arg_to_timestamp( arg=args.get('timestamp_lte'), arg_name='timestamp_lte' ) ip_list: list = argToList(args.get('ip_list')) vendor: list = argToList(args.get('vendor')) vendor_id: list = argToList(args.get('vendor_id')) product: list = argToList(args.get('product')) product_id: list = argToList(args.get('product_id')) serial: list = argToList(args.get('serial')) hostname: list = argToList(args.get('hostname')) violation_id_list: list = argToList(args.get('violation_id_list', '')) username: list = argToList(args.get('username')) violation_ids = [arg_to_int(arg=item, arg_name=str(item)) for item in violation_id_list] reply = client.get_endpoint_device_control_violations( endpoint_ids=endpoint_ids, type_of_violation=[type_of_violation], timestamp_gte=timestamp_gte, timestamp_lte=timestamp_lte, ip_list=ip_list, vendor=vendor, vendor_id=vendor_id, product=product, product_id=product_id, serial=serial, 
hostname=hostname, violation_ids=violation_ids, username=username ) headers = ['date', 'hostname', 'platform', 'username', 'ip', 'type', 'violation_id', 'vendor', 'product', 'serial'] violations: list = copy.deepcopy(reply.get('violations')) # type: ignore for violation in violations: timestamp: str = violation.get('timestamp') violation['date'] = timestamp_to_datestring(timestamp, TIME_FORMAT) return ( tableToMarkdown(name='Endpoint Device Control Violation', t=violations, headers=headers, headerTransform=string_to_table_header, removeNull=True), { f'{INTEGRATION_CONTEXT_BRAND}.EndpointViolations(val.violation_id==obj.violation_id)': violations }, reply ) def retrieve_files_command(client: Client, args: Dict[str, str]) -> CommandResults: endpoint_id_list: list = argToList(args.get('endpoint_ids')) windows: list = argToList(args.get('windows_file_paths')) linux: list = argToList(args.get('linux_file_paths')) macos: list = argToList(args.get('mac_file_paths')) file_path_list: list = argToList(args.get('generic_file_path')) incident_id: Optional[int] = arg_to_number(args.get('incident_id')) reply = client.retrieve_file( endpoint_id_list=endpoint_id_list, windows=windows, linux=linux, macos=macos, file_path_list=file_path_list, incident_id=incident_id ) result = {'action_id': reply.get('action_id')} return CommandResults( readable_output=tableToMarkdown(name='Retrieve files', t=result, headerTransform=string_to_table_header), outputs={f'{INTEGRATION_CONTEXT_BRAND}.RetrievedFiles(val.action_id == obj.action_id)': result}, raw_response=reply ) def retrieve_file_details_command(client: Client, args): action_id_list = argToList(args.get('action_id', '')) action_id_list = [arg_to_int(arg=item, arg_name=str(item)) for item in action_id_list] result = [] raw_result = [] file_results = [] endpoints_count = 0 retrived_files_count = 0 for action_id in action_id_list: data = client.retrieve_file_details(action_id) raw_result.append(data) for endpoint, link in data.items(): 
endpoints_count += 1 obj = { 'action_id': action_id, 'endpoint_id': endpoint } if link: retrived_files_count += 1 obj['file_link'] = link file_link = "download" + link.split("download")[1] file = client.get_file(file_link=file_link) file_results.append(fileResult(filename=f'{endpoint}_{retrived_files_count}.zip', data=file)) result.append(obj) hr = f'### Action id : {args.get("action_id", "")} \n Retrieved {retrived_files_count} files from ' \ f'{endpoints_count} endpoints. \n To get the exact action status run the core-action-status-get command' return_entry = {'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': raw_result, 'HumanReadable': hr, 'ReadableContentsFormat': formats['markdown'], 'EntryContext': {} } return return_entry, file_results def get_scripts_command(client: Client, args: Dict[str, str]) -> Tuple[str, dict, Any]: script_name: list = argToList(args.get('script_name')) description: list = argToList(args.get('description')) created_by: list = argToList(args.get('created_by')) windows_supported = args.get('windows_supported') linux_supported = args.get('linux_supported') macos_supported = args.get('macos_supported') is_high_risk = args.get('is_high_risk') offset = arg_to_int(arg=args.get('offset', 0), arg_name='offset') limit = arg_to_int(arg=args.get('limit', 50), arg_name='limit') result = client.get_scripts( name=script_name, description=description, created_by=created_by, windows_supported=[windows_supported], linux_supported=[linux_supported], macos_supported=[macos_supported], is_high_risk=[is_high_risk] ) scripts = copy.deepcopy(result.get('scripts')[offset:(offset + limit)]) # type: ignore for script in scripts: timestamp = script.get('modification_date') script['modification_date_timestamp'] = timestamp script['modification_date'] = timestamp_to_datestring(timestamp, TIME_FORMAT) headers: list = ['name', 'description', 'script_uid', 'modification_date', 'created_by', 'windows_supported', 'linux_supported', 
'macos_supported', 'is_high_risk'] return ( tableToMarkdown(name='Scripts', t=scripts, headers=headers, removeNull=True, headerTransform=string_to_table_header), { f'{INTEGRATION_CONTEXT_BRAND}.Scripts(val.script_uid == obj.script_uid)': scripts }, result ) def get_script_metadata_command(client: Client, args: Dict[str, str]) -> Tuple[str, dict, Any]: script_uid = args.get('script_uid') reply = client.get_script_metadata(script_uid) script_metadata = copy.deepcopy(reply) timestamp = script_metadata.get('modification_date') script_metadata['modification_date_timestamp'] = timestamp script_metadata['modification_date'] = timestamp_to_datestring(timestamp, TIME_FORMAT) return ( tableToMarkdown(name='Script Metadata', t=script_metadata, removeNull=True, headerTransform=string_to_table_header), { f'{INTEGRATION_CONTEXT_BRAND}.ScriptMetadata(val.script_uid == obj.script_uid)': reply }, reply ) def get_script_code_command(client: Client, args: Dict[str, str]) -> Tuple[str, dict, Any]: script_uid = args.get('script_uid') reply = client.get_script_code(script_uid) context = { 'script_uid': script_uid, 'code': reply } return ( f'### Script code: \n ``` {str(reply)} ```', { f'{INTEGRATION_CONTEXT_BRAND}.ScriptCode(val.script_uid == obj.script_uid)': context }, reply ) def action_status_get_command(client: Client, args) -> CommandResults: action_id_list = argToList(args.get('action_id', '')) action_id_list = [arg_to_int(arg=item, arg_name=str(item)) for item in action_id_list] result = [] for action_id in action_id_list: data = client.action_status_get(action_id) for endpoint_id, status in data.items(): result.append({ 'action_id': action_id, 'endpoint_id': endpoint_id, 'status': status }) return CommandResults( readable_output=tableToMarkdown(name='Get Action Status', t=result, removeNull=True), outputs={f'{INTEGRATION_CONTEXT_BRAND}.GetActionStatus(val.action_id == obj.action_id)': result}, raw_response=result ) def run_script_command(client: Client, args: Dict) -> 
CommandResults: script_uid = args.get('script_uid') endpoint_ids = argToList(args.get('endpoint_ids')) timeout = arg_to_number(args.get('timeout', 600)) or 600 incident_id = arg_to_number(args.get('incident_id')) if parameters := args.get('parameters'): try: parameters = json.loads(parameters) except json.decoder.JSONDecodeError as e: raise ValueError(f'The parameters argument is not in a valid JSON structure:\n{e}') else: parameters = {} response = client.run_script(script_uid, endpoint_ids, parameters, timeout, incident_id=incident_id) reply = response.get('reply') return CommandResults( readable_output=tableToMarkdown('Run Script', reply), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.ScriptRun', outputs_key_field='action_id', outputs=reply, raw_response=response, ) def run_snippet_code_script_command(client: Client, args: Dict) -> CommandResults: snippet_code = args.get('snippet_code') endpoint_ids = argToList(args.get('endpoint_ids')) incident_id = arg_to_number(args.get('incident_id')) response = client.run_snippet_code_script(snippet_code=snippet_code, endpoint_ids=endpoint_ids, incident_id=incident_id) reply = response.get('reply') return CommandResults( readable_output=tableToMarkdown('Run Snippet Code Script', reply), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.ScriptRun', outputs_key_field='action_id', outputs=reply, raw_response=reply, ) def get_script_execution_status_command(client: Client, args: Dict) -> List[CommandResults]: action_ids = argToList(args.get('action_id', '')) command_results = [] for action_id in action_ids: response = client.get_script_execution_status(action_id) reply = response.get('reply') reply['action_id'] = int(action_id) command_results.append(CommandResults( readable_output=tableToMarkdown(f'Script Execution Status - {action_id}', reply), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.ScriptStatus', outputs_key_field='action_id', outputs=reply, raw_response=response, )) return command_results def 
get_script_execution_results_command(client: Client, args: Dict) -> List[CommandResults]: action_ids = argToList(args.get('action_id', '')) command_results = [] for action_id in action_ids: response = client.get_script_execution_results(action_id) results = response.get('reply', {}).get('results') context = { 'action_id': int(action_id), 'results': results, } command_results.append(CommandResults( readable_output=tableToMarkdown(f'Script Execution Results - {action_id}', results), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.ScriptResult', outputs_key_field='action_id', outputs=context, raw_response=response, )) return command_results def get_script_execution_result_files_command(client: Client, args: Dict) -> Dict: action_id = args.get('action_id', '') endpoint_id = args.get('endpoint_id') file_response = client.get_script_execution_result_files(action_id, endpoint_id) try: filename = file_response.headers.get('Content-Disposition').split('attachment; filename=')[1] except Exception as e: demisto.debug(f'Failed extracting filename from response headers - [{str(e)}]') filename = action_id + '.zip' return fileResult(filename, file_response.content) def run_script_execute_commands_command(client: Client, args: Dict) -> CommandResults: endpoint_ids = argToList(args.get('endpoint_ids')) incident_id = arg_to_number(args.get('incident_id')) timeout = arg_to_number(args.get('timeout', 600)) or 600 parameters = {'commands_list': argToList(args.get('commands'))} response = client.run_script('a6f7683c8e217d85bd3c398f0d3fb6bf', endpoint_ids, parameters, timeout, incident_id) reply = response.get('reply') return CommandResults( readable_output=tableToMarkdown('Run Script Execute Commands', reply), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.ScriptRun', outputs_key_field='action_id', outputs=reply, raw_response=reply, ) def run_script_delete_file_command(client: Client, args: Dict) -> List[CommandResults]: endpoint_ids = argToList(args.get('endpoint_ids')) incident_id = 
arg_to_number(args.get('incident_id')) timeout = arg_to_number(args.get('timeout', 600)) or 600 file_paths = argToList(args.get('file_path')) all_files_response = [] for file_path in file_paths: parameters = {'file_path': file_path} response = client.run_script('548023b6e4a01ec51a495ba6e5d2a15d', endpoint_ids, parameters, timeout, incident_id) reply = response.get('reply') all_files_response.append(CommandResults( readable_output=tableToMarkdown(f'Run Script Delete File on {file_path}', reply), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.ScriptRun', outputs_key_field='action_id', outputs=reply, raw_response=reply, )) return all_files_response def run_script_file_exists_command(client: Client, args: Dict) -> List[CommandResults]: endpoint_ids = argToList(args.get('endpoint_ids')) incident_id = arg_to_number(args.get('incident_id')) timeout = arg_to_number(args.get('timeout', 600)) or 600 file_paths = argToList(args.get('file_path')) all_files_response = [] for file_path in file_paths: parameters = {'path': file_path} response = client.run_script('414763381b5bfb7b05796c9fe690df46', endpoint_ids, parameters, timeout, incident_id) reply = response.get('reply') all_files_response.append(CommandResults( readable_output=tableToMarkdown(f'Run Script File Exists on {file_path}', reply), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.ScriptRun', outputs_key_field='action_id', outputs=reply, raw_response=reply, )) return all_files_response def run_script_kill_process_command(client: Client, args: Dict) -> List[CommandResults]: endpoint_ids = argToList(args.get('endpoint_ids')) incident_id = arg_to_number(args.get('incident_id')) timeout = arg_to_number(args.get('timeout', 600)) or 600 processes_names = argToList(args.get('process_name')) all_processes_response = [] for process_name in processes_names: parameters = {'process_name': process_name} response = client.run_script('fd0a544a99a9421222b4f57a11839481', endpoint_ids, parameters, timeout, incident_id) reply = 
response.get('reply') all_processes_response.append(CommandResults( readable_output=tableToMarkdown(f'Run Script Kill Process on {process_name}', reply), outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.ScriptRun', outputs_key_field='action_id', outputs=reply, raw_response=reply, )) return all_processes_response def add_exclusion_command(client: Client, args: Dict) -> CommandResults: name = args.get('name') indicator = args.get('filterObject') if not indicator: raise DemistoException("Didn't get filterObject arg. This arg is required.") status = args.get('status', "ENABLED") comment = args.get('comment') res = client.add_exclusion(name=name, status=status, indicator=json.loads(indicator), comment=comment) return CommandResults( readable_output=tableToMarkdown('Add Exclusion', res), outputs={f'{INTEGRATION_CONTEXT_BRAND}.exclusion.rule_id(val.rule_id == obj.rule_id)': res.get("rule_id")}, raw_response=res ) def delete_exclusion_command(client: Client, args: Dict) -> CommandResults: alert_exclusion_id = arg_to_number(args.get('alert_exclusion_id')) if not alert_exclusion_id: raise DemistoException("Didn't get alert_exclusion_id arg. 
This arg is required.") res = client.delete_exclusion(alert_exclusion_id=alert_exclusion_id) return CommandResults( readable_output=f"Successfully deleted the following exclusion: {alert_exclusion_id}", outputs={f'{INTEGRATION_CONTEXT_BRAND}.deletedExclusion.rule_id(val.rule_id == obj.rule_id)': res.get("rule_id")}, raw_response=res ) def get_exclusion_command(client: Client, args: Dict) -> CommandResults: res = client.get_exclusion(tenant_id=args.get('tenant_ID'), filter=args.get('filterObject'), limit=arg_to_number(args.get('limit', 20))) return CommandResults( outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.exclusion', outputs=res, readable_output=tableToMarkdown('Exclusion', res), raw_response=res ) def report_incorrect_wildfire_command(client: Client, args) -> CommandResults: file_hash = args.get('file_hash') reason = args.get('reason') email = args.get('email') new_verdict = arg_to_int( arg=args.get('new_verdict'), arg_name='Failed to parse "new_verdict". Must be a number.', required=True ) response = client.report_incorrect_wildfire(file_hash, new_verdict, reason, email) return CommandResults( readable_output=f'Reported incorrect WildFire on {file_hash}', outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.WildFire', outputs={"file_hash": file_hash, "new_verdict": new_verdict}, raw_response=response, ) def decode_dict_values(dict_to_decode: dict): """Decode JSON str values of a given dict. Args: dict_to_decode (dict): The dict to decode. """ for key, value in dict_to_decode.items(): # if value is a dictionary, we want to recursively decode it's values if isinstance(value, dict): decode_dict_values(value) # if value is a string, we want to try to decode it, if it cannot be decoded, we will move on. elif isinstance(value, str): try: dict_to_decode[key] = json.loads(value) except ValueError: continue def filter_general_fields(alert: dict) -> dict: """filter only relevant general fields from a given alert. 
Args: alert (dict): The alert to filter Returns: dict: The filtered alert """ updated_alert = {} updated_event = {} for field in ALERT_GENERAL_FIELDS: if field in alert: updated_alert[field] = alert.get(field) event = alert.get('raw_abioc', {}).get('event', {}) if not event: return_warning('No XDR cloud analytics event.') else: for field in ALERT_EVENT_GENERAL_FIELDS: if field in event: updated_event[field] = event.get(field) updated_alert['event'] = updated_event return updated_alert def filter_vendor_fields(alert: dict): """Remove non relevant fields from the alert event (filter by vendor: Amazon/google/Microsoft) Args: alert (dict): The alert to filter Returns: dict: The filtered alert """ vendor_mapper = { 'Amazon': ALERT_EVENT_AWS_FIELDS, 'Google': ALERT_EVENT_GCP_FIELDS, 'MSFT': ALERT_EVENT_AZURE_FIELDS, } event = alert.get('event', {}) vendor = event.get('vendor') if vendor and vendor in vendor_mapper: raw_log = event.get('raw_log', {}) if raw_log and isinstance(raw_log, dict): for key in list(raw_log): if key not in vendor_mapper[vendor]: raw_log.pop(key) def get_original_alerts_command(client: Client, args: Dict) -> CommandResults: alert_id_list = argToList(args.get('alert_ids', [])) raw_response = client.get_original_alerts(alert_id_list) reply = copy.deepcopy(raw_response) alerts = reply.get('alerts', []) filtered_alerts = [] for i, alert in enumerate(alerts): # decode raw_response try: alert['original_alert_json'] = safe_load_json(alert.get('original_alert_json', '')) # some of the returned JSON fields are double encoded, so it needs to be double-decoded. # example: {"x": "someValue", "y": "{\"z\":\"anotherValue\"}"} decode_dict_values(alert) except Exception: continue # remove original_alert_json field and add its content to alert. 
alert.update( alert.pop('original_alert_json', None)) updated_alert = filter_general_fields(alert) if 'event' in updated_alert: filter_vendor_fields(updated_alert) filtered_alerts.append(updated_alert) return CommandResults( outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.OriginalAlert', outputs_key_field='internal_id', outputs=filtered_alerts, raw_response=raw_response, ) def get_dynamic_analysis_command(client: Client, args: Dict) -> CommandResults: alert_id_list = argToList(args.get('alert_ids', [])) raw_response = client.get_original_alerts(alert_id_list) reply = copy.deepcopy(raw_response) alerts = reply.get('alerts', []) filtered_alerts = [] for i, alert in enumerate(alerts): # decode raw_response try: alert['original_alert_json'] = safe_load_json(alert.get('original_alert_json', '')) # some of the returned JSON fields are double encoded, so it needs to be double-decoded. # example: {"x": "someValue", "y": "{\"z\":\"anotherValue\"}"} decode_dict_values(alert) except Exception: continue # remove original_alert_json field and add its content to alert. 
alert.update(alert.pop('original_alert_json', None)) if demisto.get(alert, 'messageData.dynamicAnalysis'): filtered_alerts.append(demisto.get(alert, 'messageData.dynamicAnalysis')) if not filtered_alerts: return CommandResults( readable_output="There is no dynamicAnalysis for these alert ids.", raw_response=raw_response ) return CommandResults( outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.DynamicAnalysis', outputs=filtered_alerts, raw_response=raw_response, ) def run_polling_command(client: Client, args: dict, cmd: str, command_function: Callable, command_decision_field: str, results_function: Callable, polling_field: str, polling_value: List, stop_polling: bool = False) -> CommandResults: """ args: demito args cmd: the command to schedule by after the current command command_function: the function which is runs the actual command command_decision_field: the field in the response based on it what the command status and if the command occurred results_function: the function which we are polling on and retrieves the status of the command_function polling_field: the field which from the result of the results_function which we are interested in its value polling_value: list of values of the polling_field we want to check stop_polling: yes - polling_value is stopping, not - polling_value not stopping """ ScheduledCommand.raise_error_if_not_supported() interval_in_secs = int(args.get('interval_in_seconds', 60)) timeout_in_seconds = int(args.get('timeout_in_seconds', 600)) if command_decision_field not in args: # create new command run command_results = command_function(client, args) if isinstance(command_results, CommandResults): outputs = [command_results.raw_response] if command_results.raw_response else [] else: outputs = [c.raw_response for c in command_results] command_decision_values = [o.get(command_decision_field) for o in outputs] if outputs else [] # type: ignore if outputs and command_decision_values: polling_args = { command_decision_field: 
command_decision_values, 'interval_in_seconds': interval_in_secs, **args } scheduled_command = ScheduledCommand( command=cmd, next_run_in_seconds=interval_in_secs, args=polling_args, timeout_in_seconds=timeout_in_seconds) if isinstance(command_results, list): command_results = command_results[0] command_results.scheduled_command = scheduled_command return command_results else: if command_results.readable_output: demisto.error(f"{command_results.readable_output}") else: demisto.error(f"Command {command_function} didn't succeeded, returned {outputs}") return command_results # get polling result command_results = results_function(client, args) outputs_result_func = command_results.raw_response result = outputs_result_func.get(polling_field) if isinstance(outputs_result_func, dict) else\ outputs_result_func[0].get(polling_field) cond = result not in polling_value if stop_polling else result in polling_value if cond: # schedule next poll polling_args = { 'interval_in_seconds': interval_in_secs, **args } scheduled_command = ScheduledCommand( command=cmd, next_run_in_seconds=interval_in_secs, args=polling_args, timeout_in_seconds=timeout_in_seconds) # result with scheduled_command only - no update to the war room command_results = CommandResults(scheduled_command=scheduled_command) return command_results def main(): """ Executes an integration command """ command = demisto.command() LOG(f'Command being called is {command}') args = demisto.args() api_key = demisto.params().get('apikey') api_key_id = demisto.params().get('apikey_id') url = demisto.params().get('url') if not api_key or not api_key_id or not url: headers = { "HOST": demisto.getLicenseCustomField("Core.ApiHostName"), demisto.getLicenseCustomField("Core.ApiHeader"): demisto.getLicenseCustomField("Core.ApiKey"), "Content-Type": "application/json" } url = "http://" + demisto.getLicenseCustomField("Core.ApiHost") + "/api/webapp/" add_sensitive_log_strs(demisto.getLicenseCustomField("Core.ApiKey")) else: headers = 
{ "Content-Type": "application/json", "x-xdr-auth-id": str(api_key_id), "Authorization": api_key } add_sensitive_log_strs(api_key) base_url = urljoin(url, '/public_api/v1') proxy = demisto.params().get('proxy') verify_cert = not demisto.params().get('insecure', False) try: timeout = int(demisto.params().get('timeout', 120)) except ValueError as e: demisto.debug(f'Failed casting timeout parameter to int, falling back to 120 - {e}') timeout = 120 client = Client( base_url=base_url, proxy=proxy, verify=verify_cert, headers=headers, timeout=timeout ) try: if command == 'test-module': client.test_module() demisto.results('ok') elif command == 'core-get-endpoints': return_results(get_endpoints_command(client, args)) elif command == 'core-isolate-endpoint': polling_args = { **args, "endpoint_id_list": args.get('endpoint_id') } return_results(run_polling_command(client=client, args=polling_args, cmd="core-isolate-endpoint", command_function=isolate_endpoint_command, command_decision_field="action_id", results_function=get_endpoints_command, polling_field="is_isolated", polling_value=["AGENT_ISOLATED"], stop_polling=True)) elif command == 'core-unisolate-endpoint': polling_args = { **args, "endpoint_id_list": args.get('endpoint_id') } return_results(run_polling_command(client=client, args=polling_args, cmd="core-unisolate-endpoint", command_function=unisolate_endpoint_command, command_decision_field="action_id", results_function=get_endpoints_command, polling_field="is_isolated", polling_value=["AGENT_UNISOLATED", "CANCELLED", "ֿPENDING_ABORT", "ABORTED", "EXPIRED", "COMPLETED_PARTIAL", "COMPLETED_SUCCESSFULLY", "FAILED", "TIMEOUT"], stop_polling=True)) elif command == 'core-get-distribution-url': return_outputs(*get_distribution_url_command(client, args)) elif command == 'core-get-create-distribution-status': return_outputs(*get_distribution_status_command(client, args)) elif command == 'core-get-distribution-versions': 
return_outputs(*get_distribution_versions_command(client)) elif command == 'core-create-distribution': return_outputs(*create_distribution_command(client, args)) elif command == 'core-get-audit-management-logs': return_outputs(*get_audit_management_logs_command(client, args)) elif command == 'core-get-audit-agent-reports': return_outputs(*get_audit_agent_reports_command(client, args)) elif command == 'core-blocklist-files': return_results(blocklist_files_command(client, args)) elif command == 'core-allowlist-files': return_results(allowlist_files_command(client, args)) elif command == 'core-quarantine-files': polling_args = { **args, "endpoint_id": argToList(args.get("endpoint_id_list"))[0] } return_results(run_polling_command(client=client, args=polling_args, cmd="core-quarantine-files", command_function=quarantine_files_command, command_decision_field="action_id", results_function=action_status_get_command, polling_field="status", polling_value=["PENDING", "IN_PROGRESS", "PENDING_ABORT"])) elif command == 'core-get-quarantine-status': return_results(get_quarantine_status_command(client, args)) elif command == 'core-restore-file': return_results(run_polling_command(client=client, args=args, cmd="core-retrieve-files", command_function=restore_file_command, command_decision_field="action_id", results_function=action_status_get_command, polling_field="status", polling_value=["PENDING", "IN_PROGRESS", "PENDING_ABORT"])) elif command == 'core-endpoint-scan': return_results(run_polling_command(client=client, args=args, cmd="core-retrieve-files", command_function=endpoint_scan_command, command_decision_field="action_id", results_function=action_status_get_command, polling_field="status", polling_value=["PENDING", "IN_PROGRESS", "PENDING_ABORT"])) elif command == 'core-endpoint-scan-abort': return_results(endpoint_scan_abort_command(client, args)) elif command == 'update-remote-system': return_results(update_remote_system_command(client, args)) elif command == 
'core-delete-endpoints': return_outputs(*delete_endpoints_command(client, args)) elif command == 'core-get-policy': return_outputs(*get_policy_command(client, args)) elif command == 'core-get-endpoint-device-control-violations': return_outputs(*get_endpoint_device_control_violations_command(client, args)) elif command == 'core-retrieve-files': return_results(run_polling_command(client=client, args=args, cmd="core-retrieve-files", command_function=retrieve_files_command, command_decision_field="action_id", results_function=action_status_get_command, polling_field="status", polling_value=["PENDING", "IN_PROGRESS", "PENDING_ABORT"])) elif command == 'core-retrieve-file-details': return_entry, file_results = retrieve_file_details_command(client, args) demisto.results(return_entry) if file_results: demisto.results(file_results) elif command == 'core-get-scripts': return_outputs(*get_scripts_command(client, args)) elif command == 'core-get-script-metadata': return_outputs(*get_script_metadata_command(client, args)) elif command == 'core-get-script-code': return_outputs(*get_script_code_command(client, args)) elif command == 'core-action-status-get': return_results(action_status_get_command(client, args)) elif command == 'core-run-script': return_results(run_script_command(client, args)) elif command == 'core-run-snippet-code-script': return_results(run_polling_command(client=client, args=args, cmd="core-run-snippet-code-script", command_function=run_snippet_code_script_command, command_decision_field="action_id", results_function=action_status_get_command, polling_field="status", polling_value=["PENDING", "IN_PROGRESS", "PENDING_ABORT"])) elif command == 'core-get-script-execution-status': return_results(get_script_execution_status_command(client, args)) elif command == 'core-get-script-execution-results': return_results(get_script_execution_results_command(client, args)) elif command == 'core-get-script-execution-result-files': 
return_results(get_script_execution_result_files_command(client, args)) elif command == 'core-run-script-execute-commands': return_results(run_polling_command(client=client, args=args, cmd="core-run-script-execute-commands", command_function=run_script_execute_commands_command, command_decision_field="action_id", results_function=action_status_get_command, polling_field="status", polling_value=["PENDING", "IN_PROGRESS", "PENDING_ABORT"])) elif command == 'core-run-script-delete-file': return_results(run_polling_command(client=client, args=args, cmd="core-run-script-delete-file", command_function=run_script_delete_file_command, command_decision_field="action_id", results_function=action_status_get_command, polling_field="status", polling_value=["PENDING", "IN_PROGRESS", "PENDING_ABORT"])) elif command == 'core-run-script-file-exists': return_results(run_polling_command(client=client, args=args, cmd="core-run-script-file-exists", command_function=run_script_file_exists_command, command_decision_field="action_id", results_function=action_status_get_command, polling_field="status", polling_value=["PENDING", "IN_PROGRESS", "PENDING_ABORT"])) elif command == 'core-run-script-kill-process': return_results(run_polling_command(client=client, args=args, cmd="core-run-script-kill-process", command_function=run_script_kill_process_command, command_decision_field="action_id", results_function=action_status_get_command, polling_field="status", polling_value=["PENDING", "IN_PROGRESS", "PENDING_ABORT"])) elif command == 'endpoint': return_results(endpoint_command(client, args)) elif command == 'core-report-incorrect-wildfire': return_results(report_incorrect_wildfire_command(client, args)) elif command == 'core-remove-blocklist-files': return_results(remove_blocklist_files_command(client, args)) elif command == 'core-remove-allowlist-files': return_results(remove_allowlist_files_command(client, args)) elif command == 'core-add-exclusion': 
return_results(add_exclusion_command(client, args)) elif command == 'core-delete-exclusion': return_results(delete_exclusion_command(client, args)) elif command == 'core-get-exclusion': return_results(get_exclusion_command(client, args)) elif command == 'core-get-cloud-original-alerts': return_results(get_original_alerts_command(client, args)) elif command == 'core-get-dynamic-analysis': return_results(get_dynamic_analysis_command(client, args)) except Exception as err: demisto.error(traceback.format_exc()) return_error(str(err)) if __name__ in ('__main__', '__builtin__', 'builtins'): main()
import csv
import json
import logging
import time

import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression

import shared

# time pipenv run python estimate_personal_coefficients.py

race_type = shared.race_type()
year = shared.forecast_year()
startTime = time.time()

ideal_paces = pd.read_csv(f'Jukola-terrain/ideal-paces-{race_type}.tsv', delimiter='\t')
logging.info(f"{ideal_paces.head().round(3)}")

runs = pd.read_csv(f'data/runs_{shared.race_id_str()}.tsv', delimiter='\t')
runs["log_pace"] = np.log(runs["pace"])
runs = runs.query("num_runs > 1")

# TODO use median or other means to reduce outliers
runner_means = runs[["name", "log_pace"]].groupby(["name"]).agg("mean")
runs["pace_mean"] = runner_means["log_pace"][runs["name"]].values
runs["personal_coefficient"] = runs["log_pace"] / runs["pace_mean"]
runs = pd.merge(runs, ideal_paces[["year", "leg", "terrain_coefficient"]], how="left", on=["year", "leg"])
logging.info(f"{runs.sample(10).round(3)}")

# Fit a single global model: personal_coefficient ~ terrain_coefficient.
# Used as the fallback for runners with too few runs for a personal model.
X = np.array(runs["terrain_coefficient"]).reshape(-1, 1)
y = np.array(runs["personal_coefficient"]).reshape(-1, 1)
lr = LinearRegression().fit(X, y)
defaults = {
    "default_intercept": lr.intercept_[0],
    "default_coef": lr.coef_[0][0],
    "score": lr.score(X, y)
}
logging.info(f"defaults: {defaults}")
with open(f"data/default_personal_coefficients_{shared.race_id_str()}.json", 'w') as fp:
    json.dump(defaults, fp)

# Collect each runner's (terrain_coefficient, personal_coefficient) history.
subset = runs
by_name = pd.DataFrame(data=subset.groupby("name")["terrain_coefficient"].apply(list).items(),
                       columns=["name", "terrain_coefficients"])
personal = pd.DataFrame(data=subset.groupby("name")["personal_coefficient"].apply(list).items(),
                        columns=["name", "personal_coefficients"])
by_name["personal_coefficients"] = personal["personal_coefficients"]
by_name["num_runs"] = by_name["terrain_coefficients"].apply(len)
# A personal regression needs at least 3 observations.
by_name = by_name[by_name["num_runs"] > 2]


def fit_model_for_runner(row):
    """Fit a per-runner linear model personal_coefficient ~ terrain_coefficient.

    Returns [coef, intercept, r2_score] for use with result_type="expand".
    """
    terrain_coefficients = row["terrain_coefficients"]
    X = np.array(terrain_coefficients).reshape(len(terrain_coefficients), 1)
    y = np.array(row["personal_coefficients"]).reshape(len(terrain_coefficients), 1)
    lr = LinearRegression().fit(X, y)
    score = lr.score(X, y)
    return [lr.coef_[0][0], lr.intercept_[0], score]


by_name[["coef", "intercept", "score"]] = by_name.apply(fit_model_for_runner, axis=1, result_type="expand")
logging.info(f"{by_name.sample(10).round(3)}")

# Drop runners whose fit is useless (non-positive slope or non-positive R^2).
# NOTE: the original f-string nested single quotes inside single quotes
# (a SyntaxError before Python 3.12); fixed by using double quotes inside.
by_name["bad_prediction"] = (by_name["coef"] <= 0) | (by_name["score"] <= 0)
logging.info(f'bad_prediction mean: {by_name["bad_prediction"].mean()}')
bad_prediction_summary = by_name[["bad_prediction", "num_runs", "score"]].groupby(['num_runs']).agg(["mean", "count"])
logging.info(bad_prediction_summary)
by_name = by_name[by_name["bad_prediction"] == False]  # noqa: E712 (pandas boolean mask)

by_name = by_name[["name", "coef", "intercept", "score"]]
by_name.round(5).to_csv(f"data/personal_coefficients_{shared.race_id_str()}.tsv", "\t",
                        quoting=csv.QUOTE_ALL, index=False)
import csv
import json
import logging
import time

import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression

import shared

# time pipenv run python estimate_personal_coefficients.py

race_type = shared.race_type()
year = shared.forecast_year()
startTime = time.time()

# Terrain difficulty per (year, leg), precomputed elsewhere.
ideal_paces = pd.read_csv(f'Jukola-terrain/ideal-paces-{race_type}.tsv', delimiter='\t')
logging.info(f"{ideal_paces.head().round(3)}")

runs = pd.read_csv(f'data/runs_{shared.race_id_str()}.tsv', delimiter='\t')
runs["log_pace"] = np.log(runs["pace"])
runs = runs.query("num_runs > 1")

# TODO use median or other means to reduce outliers
runner_means = runs[["name", "log_pace"]].groupby(["name"]).agg("mean")
runs["pace_mean"] = runner_means["log_pace"][runs["name"]].values
runs["personal_coefficient"] = runs["log_pace"] / runs["pace_mean"]
runs = pd.merge(runs, ideal_paces[["year", "leg", "terrain_coefficient"]], how="left", on=["year", "leg"])
logging.info(f"{runs.sample(10).round(3)}")

# Global fallback model: personal coefficient as a linear function of terrain coefficient.
features = np.array(runs["terrain_coefficient"]).reshape(-1, 1)
target = np.array(runs["personal_coefficient"]).reshape(-1, 1)
global_model = LinearRegression().fit(features, target)
defaults = {
    "default_intercept": global_model.intercept_[0],
    "default_coef": global_model.coef_[0][0],
    "score": global_model.score(features, target),
}
logging.info(f"defaults: {defaults}")
with open(f"data/default_personal_coefficients_{shared.race_id_str()}.json", 'w') as fp:
    json.dump(defaults, fp)

# Per-runner history: list of terrain and personal coefficients for every run.
subset = runs
terrain_lists = subset.groupby("name")["terrain_coefficient"].apply(list)
personal_lists = subset.groupby("name")["personal_coefficient"].apply(list)
by_name = pd.DataFrame(data=terrain_lists.items(), columns=["name", "terrain_coefficients"])
by_name["personal_coefficients"] = pd.DataFrame(data=personal_lists.items(),
                                                columns=["name", "personal_coefficients"])["personal_coefficients"]
by_name["num_runs"] = by_name["terrain_coefficients"].apply(len)
by_name = by_name[by_name["num_runs"] > 2]


def fit_model_for_runner(row):
    """Fit one runner's linear model and return [coef, intercept, score]."""
    xs = np.array(row["terrain_coefficients"]).reshape(-1, 1)
    ys = np.array(row["personal_coefficients"]).reshape(-1, 1)
    runner_model = LinearRegression().fit(xs, ys)
    return [runner_model.coef_[0][0], runner_model.intercept_[0], runner_model.score(xs, ys)]


by_name[["coef", "intercept", "score"]] = by_name.apply(fit_model_for_runner, axis=1, result_type="expand")
logging.info(f"{by_name.sample(10).round(3)}")

# A non-positive slope or score means the personal model is unusable.
by_name["bad_prediction"] = (by_name["coef"] <= 0) | (by_name["score"] <= 0)
logging.info(f'bad_prediction mean: {by_name["bad_prediction"].mean()}')
bad_prediction_summary = by_name[["bad_prediction", "num_runs", "score"]].groupby(['num_runs']).agg(["mean", "count"])
logging.info(bad_prediction_summary)
by_name = by_name[by_name["bad_prediction"] == False]  # noqa: E712 (pandas boolean mask)

by_name = by_name[["name", "coef", "intercept", "score"]]
by_name.round(5).to_csv(f"data/personal_coefficients_{shared.race_id_str()}.tsv", "\t",
                        quoting=csv.QUOTE_ALL, index=False)
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Union

import numpy as np
import torch

from monai.config import IgniteInfo, KeysCollection
from monai.utils import deprecated, ensure_tuple, get_torch_version_tuple, look_up_option, min_version, optional_import

idist, _ = optional_import("ignite", IgniteInfo.OPT_IMPORT_VERSION, min_version, "distributed")
if TYPE_CHECKING:
    from ignite.engine import Engine
else:
    Engine, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine")

__all__ = [
    "stopping_fn_from_metric",
    "stopping_fn_from_loss",
    "evenly_divisible_all_gather",
    "string_list_all_gather",
    "write_metrics_reports",
    "from_engine",
]


def stopping_fn_from_metric(metric_name: str):
    """
    Returns a stopping function for ignite.handlers.EarlyStopping using the given metric name.
    """

    def stopping_fn(engine: Engine):
        return engine.state.metrics[metric_name]

    return stopping_fn


def stopping_fn_from_loss():
    """
    Returns a stopping function for ignite.handlers.EarlyStopping using the loss value.
    """

    def stopping_fn(engine: Engine):
        # negate: EarlyStopping expects "higher is better", loss is "lower is better"
        return -engine.state.output

    return stopping_fn


@deprecated(since="0.6.0", removed="0.7.0", msg_suffix="The API had been moved to monai.utils module.")
def evenly_divisible_all_gather(data: torch.Tensor) -> torch.Tensor:
    """
    Utility function for distributed data parallel to pad at first dim to make it evenly divisible and all_gather.

    Args:
        data: source tensor to pad and execute all_gather in distributed data parallel.

    Note:
        The input data on different ranks must have exactly same `dtype`.
    """
    if not isinstance(data, torch.Tensor):
        raise ValueError("input data must be PyTorch Tensor.")

    if idist.get_world_size() <= 1:
        return data

    # make sure the data is evenly-divisible on multi-GPUs
    length = data.shape[0]
    all_lens = idist.all_gather(length)
    max_len = max(all_lens)
    if length < max_len:
        size = [max_len - length] + list(data.shape[1:])
        data = torch.cat([data, data.new_full(size, 0)], dim=0)
    # all gather across all processes
    data = idist.all_gather(data)
    # remove the zero-padding appended above, using each rank's original length
    return torch.cat([data[i * max_len: i * max_len + orig_len, ...] for i, orig_len in enumerate(all_lens)], dim=0)


@deprecated(since="0.6.0", removed="0.7.0", msg_suffix="The API had been moved to monai.utils module.")
def string_list_all_gather(strings: List[str]) -> List[str]:
    """
    Utility function for distributed data parallel to all gather a list of strings.
    Note that if the item in `strings` is longer than 1024 chars, it will be truncated to 1024:
    https://pytorch.org/ignite/v0.4.5/distributed.html#ignite.distributed.utils.all_gather.

    Args:
        strings: a list of strings to all gather.
    """
    world_size = idist.get_world_size()
    if world_size <= 1:
        return strings

    result: List[List[str]] = [[] for _ in range(world_size)]
    # get length of strings
    length = len(strings)
    all_lens = idist.all_gather(length)
    max_len = max(all_lens)
    # pad the item to make sure the same length
    if length < max_len:
        strings += ["" for _ in range(max_len - length)]

    if get_torch_version_tuple() <= (1, 6):
        raise RuntimeError("string all_gather can not be supported in PyTorch < 1.7.0.")

    for s in strings:
        gathered = idist.all_gather(s)
        for i, g in enumerate(gathered):
            if len(g) > 0:
                result[i].append(g)
    return [i for k in result for i in k]


def write_metrics_reports(
    save_dir: str,
    images: Optional[Sequence[str]],
    metrics: Optional[Dict[str, Union[torch.Tensor, np.ndarray]]],
    metric_details: Optional[Dict[str, Union[torch.Tensor, np.ndarray]]],
    summary_ops: Optional[Union[str, Sequence[str]]],
    deli: str = "\t",
    output_type: str = "csv",
):
    """
    Utility function to write the metrics into files, contains 3 parts:
    1. if `metrics` dict is not None, write overall metrics into file, every line is a metric name and value pair.
    2. if `metric_details` dict is not None, write raw metric data of every image into file, every line for 1 image.
    3. if `summary_ops` is not None, compute summary based on operations on `metric_details` and write to file.

    Args:
        save_dir: directory to save all the metrics reports.
        images: name or path of every input image corresponding to the metric_details data.
            if None, will use index number as the filename of every input image.
        metrics: a dictionary of (metric name, metric value) pairs.
        metric_details: a dictionary of (metric name, metric raw values) pairs, usually, it comes from metrics
            computation, for example, the raw value can be the mean_dice of every channel of every input image.
        summary_ops: expected computation operations to generate the summary report.
            it can be: None, "*" or list of strings, default to None.
            None - don't generate summary report for every expected metric_details.
            "*" - generate summary report for every metric_details with all the supported operations.
            list of strings - generate summary report for every metric_details with specified operations, they
            should be within list: ["mean", "median", "max", "min", "<int>percentile", "std", "notnans"].
            the number in "<int>percentile" should be [0, 100], like: "15percentile". default: "90percentile".
            for more details, please check: https://numpy.org/doc/stable/reference/generated/numpy.nanpercentile.html.
            note that: for the overall summary, it computes `nanmean` of all classes for each image first,
            then compute summary. example of the generated summary report::

                class    mean    median    max    5percentile 95percentile  notnans
                class0  6.0000   6.0000   7.0000   5.1000      6.9000       2.0000
                class1  6.0000   6.0000   6.0000   6.0000      6.0000       1.0000
                mean    6.2500   6.2500   7.0000   5.5750      6.9250       2.0000

        deli: the delimiter character in the file, default to "\t".
        output_type: expected output file type, supported types: ["csv"], default to "csv".
    """
    if output_type.lower() != "csv":
        raise ValueError(f"unsupported output type: {output_type}.")

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    if metrics is not None and len(metrics) > 0:
        with open(os.path.join(save_dir, "metrics.csv"), "w") as f:
            for k, v in metrics.items():
                f.write(f"{k}{deli}{str(v)}\n")
    if metric_details is not None and len(metric_details) > 0:
        for k, v in metric_details.items():
            if isinstance(v, torch.Tensor):
                v = v.cpu().numpy()
            if v.ndim == 0:
                # reshape to [1, 1] if no batch and class dims
                v = v.reshape((1, 1))
            elif v.ndim == 1:
                # reshape to [N, 1] if no class dim
                v = v.reshape((-1, 1))

            # add the average value of all classes to v
            class_labels = ["class" + str(i) for i in range(v.shape[1])] + ["mean"]
            v = np.concatenate([v, np.nanmean(v, axis=1, keepdims=True)], axis=1)

            with open(os.path.join(save_dir, f"{k}_raw.csv"), "w") as f:
                f.write(f"filename{deli}{deli.join(class_labels)}\n")
                for i, b in enumerate(v):
                    f.write(f"{images[i] if images is not None else str(i)}{deli}{deli.join([str(c) for c in b])}\n")

            if summary_ops is not None:
                supported_ops = OrderedDict(
                    {
                        "mean": lambda x: np.nanmean(x),
                        "median": lambda x: np.nanmedian(x),
                        "max": lambda x: np.nanmax(x),
                        "min": lambda x: np.nanmin(x),
                        "90percentile": lambda x: np.nanpercentile(x[0], x[1]),
                        "std": lambda x: np.nanstd(x),
                        "notnans": lambda x: (~np.isnan(x)).sum(),
                    }
                )
                ops = ensure_tuple(summary_ops)
                if "*" in ops:
                    ops = tuple(supported_ops.keys())

                def _compute_op(op: str, d: np.ndarray):
                    # "<int>percentile" ops reuse the 90percentile handler with a custom threshold
                    if not op.endswith("percentile"):
                        c_op = look_up_option(op, supported_ops)
                        return c_op(d)
                    threshold = int(op.split("percentile")[0])
                    return supported_ops["90percentile"]((d, threshold))

                with open(os.path.join(save_dir, f"{k}_summary.csv"), "w") as f:
                    f.write(f"class{deli}{deli.join(ops)}\n")
                    for i, c in enumerate(np.transpose(v)):
                        # NOTE: the inner f-string previously reused double quotes inside the outer
                        # double-quoted f-string (SyntaxError before Python 3.12); also renamed the
                        # comprehension variable so it no longer shadows the metric name `k`.
                        f.write(f"{class_labels[i]}{deli}{deli.join([f'{_compute_op(op, c):.4f}' for op in ops])}\n")


def from_engine(keys: KeysCollection, first: bool = False):
    """
    Utility function to simplify the `batch_transform` or `output_transform` args of ignite components
    when handling dictionary or list of dictionaries(for example: `engine.state.batch` or `engine.state.output`).
    Users only need to set the expected keys, then it will return a callable function to extract data from
    dictionary and construct a tuple respectively.

    If data is a list of dictionaries after decollating, extract expected keys and construct lists respectively,
    for example, if data is `[{"A": 1, "B": 2}, {"A": 3, "B": 4}]`, from_engine(["A", "B"]): `([1, 3], [2, 4])`.

    It can help avoid a complicated `lambda` function and make the arg of metrics more straight-forward.
    For example, set the first key as the prediction and the second key as label to get the expected data
    from `engine.state.output` for a metric::

        from monai.handlers import MeanDice, from_engine

        metric = MeanDice(
            include_background=False,
            output_transform=from_engine(["pred", "label"])
        )

    Args:
        keys: specified keys to extract data from dictionary or decollated list of dictionaries.
        first: whether only extract specified keys from the first item if input data is a list of dictionaries,
            it's used to extract the scalar data which doesn't have batch dim and was replicated into every
            dictionary when decollating, like `loss`, etc.
    """
    keys = ensure_tuple(keys)

    def _wrapper(data):
        if isinstance(data, dict):
            return tuple(data[k] for k in keys)
        elif isinstance(data, list) and isinstance(data[0], dict):
            # if data is a list of dictionaries, extract expected keys and construct lists,
            # if `first=True`, only extract keys from the first item of the list
            ret = [data[0][k] if first else [i[k] for i in data] for k in keys]
            return tuple(ret) if len(ret) > 1 else ret[0]

    return _wrapper
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Union

import numpy as np
import torch

from monai.config import IgniteInfo, KeysCollection
from monai.utils import deprecated, ensure_tuple, get_torch_version_tuple, look_up_option, min_version, optional_import

idist, _ = optional_import("ignite", IgniteInfo.OPT_IMPORT_VERSION, min_version, "distributed")
if TYPE_CHECKING:
    from ignite.engine import Engine
else:
    Engine, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine")

__all__ = [
    "stopping_fn_from_metric",
    "stopping_fn_from_loss",
    "evenly_divisible_all_gather",
    "string_list_all_gather",
    "write_metrics_reports",
    "from_engine",
]


def stopping_fn_from_metric(metric_name: str):
    """
    Returns a stopping function for ignite.handlers.EarlyStopping using the given metric name.

    Args:
        metric_name: key to look up in ``engine.state.metrics``.
    """

    def stopping_fn(engine: Engine):
        return engine.state.metrics[metric_name]

    return stopping_fn


def stopping_fn_from_loss():
    """
    Returns a stopping function for ignite.handlers.EarlyStopping using the loss value.

    Note: assumes ``engine.state.output`` is a scalar loss value.
    """

    def stopping_fn(engine: Engine):
        # EarlyStopping treats larger scores as better, so negate the loss.
        return -engine.state.output

    return stopping_fn


@deprecated(since="0.6.0", removed="0.7.0", msg_suffix="The API had been moved to monai.utils module.")
def evenly_divisible_all_gather(data: torch.Tensor) -> torch.Tensor:
    """
    Utility function for distributed data parallel to pad at first dim to make it evenly divisible and all_gather.

    Args:
        data: source tensor to pad and execute all_gather in distributed data parallel.

    Note:
        The input data on different ranks must have exactly same `dtype`.
    """
    if not isinstance(data, torch.Tensor):
        raise ValueError("input data must be PyTorch Tensor.")

    if idist.get_world_size() <= 1:
        return data

    # make sure the data is evenly-divisible on multi-GPUs: pad the first dim
    # with zeros up to the maximum length across all ranks
    length = data.shape[0]
    all_lens = idist.all_gather(length)
    max_len = max(all_lens)
    if length < max_len:
        size = [max_len - length] + list(data.shape[1:])
        data = torch.cat([data, data.new_full(size, 0)], dim=0)
    # all gather across all processes
    data = idist.all_gather(data)
    # remove the zero-padding items that were added to even out the first dim
    return torch.cat([data[i * max_len : i * max_len + l, ...] for i, l in enumerate(all_lens)], dim=0)


@deprecated(since="0.6.0", removed="0.7.0", msg_suffix="The API had been moved to monai.utils module.")
def string_list_all_gather(strings: List[str]) -> List[str]:
    """
    Utility function for distributed data parallel to all gather a list of strings.

    Note that if the item in `strings` is longer than 1024 chars, it will be truncated to 1024:
    https://pytorch.org/ignite/v0.4.5/distributed.html#ignite.distributed.utils.all_gather.

    Args:
        strings: a list of strings to all gather.
    """
    world_size = idist.get_world_size()
    if world_size <= 1:
        return strings

    result: List[List[str]] = [[] for _ in range(world_size)]
    # get length of strings
    length = len(strings)
    all_lens = idist.all_gather(length)
    max_len = max(all_lens)
    # pad to the max length across ranks; build a new list instead of `strings += [...]`
    # so the caller's input list is not mutated as a side effect
    if length < max_len:
        strings = strings + ["" for _ in range(max_len - length)]

    if get_torch_version_tuple() <= (1, 6):
        raise RuntimeError("string all_gather can not be supported in PyTorch < 1.7.0.")

    for s in strings:
        gathered = idist.all_gather(s)
        for i, g in enumerate(gathered):
            # skip the "" padding items added above
            if len(g) > 0:
                result[i].append(g)
    return [i for k in result for i in k]


def write_metrics_reports(
    save_dir: str,
    images: Optional[Sequence[str]],
    metrics: Optional[Dict[str, Union[torch.Tensor, np.ndarray]]],
    metric_details: Optional[Dict[str, Union[torch.Tensor, np.ndarray]]],
    summary_ops: Optional[Union[str, Sequence[str]]],
    deli: str = "\t",
    output_type: str = "csv",
):
    """
    Utility function to write the metrics into files, contains 3 parts:
    1. if `metrics` dict is not None, write overall metrics into file, every line is a metric name and value pair.
    2. if `metric_details` dict is not None, write raw metric data of every image into file, every line for 1 image.
    3. if `summary_ops` is not None, compute summary based on operations on `metric_details` and write to file.

    Args:
        save_dir: directory to save all the metrics reports.
        images: name or path of every input image corresponding to the metric_details data.
            if None, will use index number as the filename of every input image.
        metrics: a dictionary of (metric name, metric value) pairs.
        metric_details: a dictionary of (metric name, metric raw values) pairs, usually, it comes from metrics
            computation, for example, the raw value can be the mean_dice of every channel of every input image.
        summary_ops: expected computation operations to generate the summary report.
            it can be: None, "*" or list of strings, default to None.
            None - don't generate summary report for every expected metric_details.
            "*" - generate summary report for every metric_details with all the supported operations.
            list of strings - generate summary report for every metric_details with specified operations, they
            should be within list: ["mean", "median", "max", "min", "<int>percentile", "std", "notnans"].
            the number in "<int>percentile" should be [0, 100], like: "15percentile". default: "90percentile".
            for more details, please check: https://numpy.org/doc/stable/reference/generated/numpy.nanpercentile.html.
            note that: for the overall summary, it computes `nanmean` of all classes for each image first,
            then compute summary. example of the generated summary report::

                class    mean    median    max    5percentile 95percentile  notnans
                class0  6.0000   6.0000   7.0000   5.1000      6.9000       2.0000
                class1  6.0000   6.0000   6.0000   6.0000      6.0000       1.0000
                mean    6.2500   6.2500   7.0000   5.5750      6.9250       2.0000

        deli: the delimiter character in the file, default to "\t".
        output_type: expected output file type, supported types: ["csv"], default to "csv".
    """
    if output_type.lower() != "csv":
        raise ValueError(f"unsupported output type: {output_type}.")

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    if metrics is not None and len(metrics) > 0:
        with open(os.path.join(save_dir, "metrics.csv"), "w") as f:
            for k, v in metrics.items():
                f.write(f"{k}{deli}{str(v)}\n")
    if metric_details is not None and len(metric_details) > 0:
        for k, v in metric_details.items():
            if isinstance(v, torch.Tensor):
                v = v.cpu().numpy()
            if v.ndim == 0:
                # reshape to [1, 1] if no batch and class dims
                v = v.reshape((1, 1))
            elif v.ndim == 1:
                # reshape to [N, 1] if no class dim
                v = v.reshape((-1, 1))

            # add the average value of all classes to v
            class_labels = ["class" + str(i) for i in range(v.shape[1])] + ["mean"]
            v = np.concatenate([v, np.nanmean(v, axis=1, keepdims=True)], axis=1)

            with open(os.path.join(save_dir, f"{k}_raw.csv"), "w") as f:
                f.write(f"filename{deli}{deli.join(class_labels)}\n")
                for i, b in enumerate(v):
                    f.write(f"{images[i] if images is not None else str(i)}{deli}{deli.join([str(c) for c in b])}\n")

            if summary_ops is not None:
                supported_ops = OrderedDict(
                    {
                        "mean": lambda x: np.nanmean(x),
                        "median": lambda x: np.nanmedian(x),
                        "max": lambda x: np.nanmax(x),
                        "min": lambda x: np.nanmin(x),
                        # generic percentile entry: called with a (data, threshold) tuple
                        # by `_compute_op` for any "<int>percentile" op name
                        "90percentile": lambda x: np.nanpercentile(x[0], x[1]),
                        "std": lambda x: np.nanstd(x),
                        "notnans": lambda x: (~np.isnan(x)).sum(),
                    }
                )
                ops = ensure_tuple(summary_ops)
                if "*" in ops:
                    ops = tuple(supported_ops.keys())

                def _compute_op(op: str, d: np.ndarray):
                    # dispatch "<int>percentile" ops to the shared percentile lambda,
                    # everything else to its own entry in `supported_ops`
                    if not op.endswith("percentile"):
                        c_op = look_up_option(op, supported_ops)
                        return c_op(d)

                    threshold = int(op.split("percentile")[0])
                    return supported_ops["90percentile"]((d, threshold))

                with open(os.path.join(save_dir, f"{k}_summary.csv"), "w") as f:
                    f.write(f"class{deli}{deli.join(ops)}\n")
                    for i, c in enumerate(np.transpose(v)):
                        f.write(f"{class_labels[i]}{deli}{deli.join([f'{_compute_op(k, c):.4f}' for k in ops])}\n")


def from_engine(keys: KeysCollection, first: bool = False):
    """
    Utility function to simplify the `batch_transform` or `output_transform` args of ignite components
    when handling dictionary or list of dictionaries(for example: `engine.state.batch` or `engine.state.output`).
    Users only need to set the expected keys, then it will return a callable function to extract data from
    dictionary and construct a tuple respectively.

    If data is a list of dictionaries after decollating, extract expected keys and construct lists respectively,
    for example, if data is `[{"A": 1, "B": 2}, {"A": 3, "B": 4}]`, from_engine(["A", "B"]): `([1, 3], [2, 4])`.

    It can help avoid a complicated `lambda` function and make the arg of metrics more straight-forward.
    For example, set the first key as the prediction and the second key as label to get the expected data
    from `engine.state.output` for a metric::

        from monai.handlers import MeanDice, from_engine

        metric = MeanDice(
            include_background=False,
            output_transform=from_engine(["pred", "label"])
        )

    Args:
        keys: specified keys to extract data from dictionary or decollated list of dictionaries.
        first: whether only extract specified keys from the first item if input data is a list of dictionaries,
            it's used to extract the scalar data which doesn't have batch dim and was replicated into every
            dictionary when decollating, like `loss`, etc.
    """
    keys = ensure_tuple(keys)

    def _wrapper(data):
        if isinstance(data, dict):
            return tuple(data[k] for k in keys)
        elif isinstance(data, list) and isinstance(data[0], dict):
            # if data is a list of dictionaries, extract expected keys and construct lists,
            # if `first=True`, only extract keys from the first item of the list
            ret = [data[0][k] if first else [i[k] for i in data] for k in keys]
            return tuple(ret) if len(ret) > 1 else ret[0]

    return _wrapper
# # Copyright (c) 2021 Airbyte, Inc., all rights reserved. # import time from abc import ABC, abstractmethod from copy import deepcopy from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Union from urllib import parse import requests from airbyte_cdk.models import SyncMode from airbyte_cdk.sources.streams.http import HttpStream, HttpSubStream from requests.exceptions import HTTPError DEFAULT_PAGE_SIZE = 100 class GithubStream(HttpStream, ABC): url_base = "https://api.github.com/" primary_key = "id" use_cache = True # Detect streams with high API load large_stream = False stream_base_params = {} def __init__(self, repositories: List[str], page_size_for_large_streams: int, **kwargs): super().__init__(**kwargs) self.repositories = repositories # GitHub pagination could be from 1 to 100. self.page_size = page_size_for_large_streams if self.large_stream else DEFAULT_PAGE_SIZE MAX_RETRIES = 3 adapter = requests.adapters.HTTPAdapter(max_retries=MAX_RETRIES) self._session.mount("https://", adapter) self._session.mount("http://", adapter) def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice["repository"]}/{self.name}" def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, any]]]: for repository in self.repositories: yield {"repository": repository} def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]: links = response.links if "next" in links: next_link = links["next"]["url"] parsed_link = parse.urlparse(next_link) page = dict(parse.parse_qsl(parsed_link.query)).get("page") return {"page": page} def should_retry(self, response: requests.Response) -> bool: # We don't call `super()` here because we have custom error handling and GitHub API sometimes returns strange # errors. So in `read_records()` we have custom error handling which don't require to call `super()` here. 
retry_flag = response.headers.get("X-RateLimit-Remaining") == "0" or response.status_code in ( requests.codes.SERVER_ERROR, requests.codes.BAD_GATEWAY, ) if retry_flag: self.logger.info( f"Rate limit handling for stream `{self.name}` for the response with {response.status_code} status code with message: {response.text}" ) # Handling secondary rate limits for Github # Additional information here: https://docs.github.com/en/rest/guides/best-practices-for-integrators#dealing-with-secondary-rate-limits elif response.headers.get("Retry-After"): time_delay = int(response.headers["Retry-After"]) self.logger.info(f"Handling Secondary Rate limits, setting sync delay for {time_delay} second(s)") time.sleep(time_delay) return retry_flag def backoff_time(self, response: requests.Response) -> Union[int, float]: # This method is called if we run into the rate limit. GitHub limits requests to 5000 per hour and provides # `X-RateLimit-Reset` header which contains time when this hour will be finished and limits will be reset so # we again could have 5000 per another hour. if response.status_code == requests.codes.SERVER_ERROR: return None reset_time = response.headers.get("X-RateLimit-Reset") backoff_time = float(reset_time) - time.time() if reset_time else 60 return max(backoff_time, 60) # This is a guarantee that no negative value will be returned. def read_records(self, stream_slice: Mapping[str, any] = None, **kwargs) -> Iterable[Mapping[str, Any]]: # get out the stream_slice parts for later use. 
organisation = stream_slice.get("organization", "") repository = stream_slice.get("repository", "") # Reading records while handling the errors try: yield from super().read_records(stream_slice=stream_slice, **kwargs) except HTTPError as e: error_msg = str(e.response.json().get("message")) # This whole try/except situation in `read_records()` isn't good but right now in `self._send_request()` # function we have `response.raise_for_status()` so we don't have much choice on how to handle errors. # Bocked on https://github.com/airbytehq/airbyte/issues/3514. if e.response.status_code == requests.codes.NOT_FOUND: # A lot of streams are not available for repositories owned by a user instead of an organization. if isinstance(self, Organizations): error_msg = ( f"Syncing `{self.__class__.__name__}` stream isn"t available for organization `{stream_slice["organization"]}`." ) else: error_msg = f"Syncing `{self.__class__.__name__}` stream isn"t available for repository `{stream_slice["repository"]}`." elif e.response.status_code == requests.codes.FORBIDDEN: # When using the `check_connection` method, we should raise an error if we do not have access to the repository. if isinstance(self, Repositories): raise e # When `403` for the stream, that has no access to the organization's teams, based on OAuth Apps Restrictions: # https://docs.github.com/en/organizations/restricting-access-to-your-organizations-data/enabling-oauth-app-access-restrictions-for-your-organization # For all `Organisation` based streams elif isinstance(self, Organizations) or isinstance(self, Teams) or isinstance(self, Users): error_msg = ( f"Syncing `{self.name}` stream isn't available for organization `{organisation}`. Full error message: {error_msg}" ) # For all other `Repository` base streams else: error_msg = ( f"Syncing `{self.name}` stream isn't available for repository `{repository}`. 
Full error message: {error_msg}" ) elif e.response.status_code == requests.codes.GONE and isinstance(self, Projects): # Some repos don't have projects enabled and we we get "410 Client Error: Gone for # url: https://api.github.com/repos/xyz/projects?per_page=100" error. error_msg = f"Syncing `Projects` stream isn't available for repository `{stream_slice["repository"]}`." elif e.response.status_code == requests.codes.CONFLICT: error_msg = ( f"Syncing `{self.name}` stream isn't available for repository " f"`{stream_slice["repository"]}`, it seems like this repository is empty." ) else: self.logger.error(f"Undefined error while reading records: {error_msg}") raise e self.logger.warn(error_msg) def request_params( self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None ) -> MutableMapping[str, Any]: params = {"per_page": self.page_size} if next_page_token: params.update(next_page_token) params.update(self.stream_base_params) return params def request_headers(self, **kwargs) -> Mapping[str, Any]: # Without sending `User-Agent` header we will be getting `403 Client Error: Forbidden for url` error. return { "User-Agent": "PostmanRuntime/7.28.0", } def parse_response( self, response: requests.Response, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None, ) -> Iterable[Mapping]: for record in response.json(): # GitHub puts records in an array. yield self.transform(record=record, stream_slice=stream_slice) def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record["repository"] = stream_slice["repository"] return record class SemiIncrementalGithubStream(GithubStream): """ Semi incremental streams are also incremental but with one difference, they: - read all records; - output only new records. 
This means that semi incremental streams read all records (like full_refresh streams) but do filtering directly in the code and output only latest records (like incremental streams). """ cursor_field = "updated_at" # This flag is used to indicate that current stream supports `sort` and `direction` request parameters and that # we should break processing records if possible. If `sort` is set to `updated` and `direction` is set to `desc` # this means that latest records will be at the beginning of the response and after we processed those latest # records we can just stop and not process other record. This will increase speed of each incremental stream # which supports those 2 request parameters. Currently only `IssueMilestones` and `PullRequests` streams are # supporting this. is_sorted_descending = False def __init__(self, start_date: str, **kwargs): super().__init__(**kwargs) self._start_date = start_date @property def state_checkpoint_interval(self) -> Optional[int]: if not self.is_sorted_descending: return self.page_size return None def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]): """ Return the latest state by comparing the cursor value in the latest record with the stream's most recent state object and returning an updated state object. 
""" repository = latest_record["repository"] updated_state = latest_record[self.cursor_field] stream_state_value = current_stream_state.get(repository, {}).get(self.cursor_field) if stream_state_value: updated_state = max(updated_state, stream_state_value) current_stream_state.setdefault(repository, {})[self.cursor_field] = updated_state return current_stream_state def get_starting_point(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any]) -> str: if stream_state: repository = stream_slice["repository"] stream_state_value = stream_state.get(repository, {}).get(self.cursor_field) if stream_state_value: return max(self._start_date, stream_state_value) return self._start_date def read_records( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_slice: Mapping[str, Any] = None, stream_state: Mapping[str, Any] = None, ) -> Iterable[Mapping[str, Any]]: start_point = self.get_starting_point(stream_state=stream_state, stream_slice=stream_slice) for record in super().read_records( sync_mode=sync_mode, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state ): if record[self.cursor_field] > start_point: yield record elif self.is_sorted_descending and record[self.cursor_field] < start_point: break class IncrementalGithubStream(SemiIncrementalGithubStream): def request_params(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]: params = super().request_params(stream_state=stream_state, **kwargs) since_params = self.get_starting_point(stream_state=stream_state, stream_slice=stream_slice) if since_params: params["since"] = since_params return params # Below are full refresh streams class RepositoryStats(GithubStream): """ This stream is technical and not intended for the user, we use it for checking connection with the repository. 
API docs: https://docs.github.com/en/rest/reference/repos#get-a-repository """ def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice["repository"]}" def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]: yield response.json() class Assignees(GithubStream): """ API docs: https://docs.github.com/en/rest/reference/issues#list-assignees """ class Branches(GithubStream): """ API docs: https://docs.github.com/en/rest/reference/repos#list-branches """ primary_key = ["repository", "name"] def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice["repository"]}/branches" class Collaborators(GithubStream): """ API docs: https://docs.github.com/en/rest/reference/repos#list-repository-collaborators """ class IssueLabels(GithubStream): """ API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues#list-labels-for-a-repository """ def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice["repository"]}/labels" class Organizations(GithubStream): """ API docs: https://docs.github.com/en/rest/reference/orgs#get-an-organization """ # GitHub pagination could be from 1 to 100. 
page_size = 100 def __init__(self, organizations: List[str], **kwargs): super(GithubStream, self).__init__(**kwargs) self.organizations = organizations def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, any]]]: for organization in self.organizations: yield {"organization": organization} def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"orgs/{stream_slice["organization"]}" def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]: yield response.json() def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record["organization"] = stream_slice["organization"] return record class Repositories(Organizations): """ API docs: https://docs.github.com/en/rest/reference/repos#list-organization-repositories """ def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"orgs/{stream_slice["organization"]}/repos" def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]: for record in response.json(): # GitHub puts records in an array. 
yield self.transform(record=record, stream_slice=stream_slice) class Tags(GithubStream): """ API docs: https://docs.github.com/en/rest/reference/repos#list-repository-tags """ primary_key = ["repository", "name"] def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice["repository"]}/tags" class Teams(Organizations): """ API docs: https://docs.github.com/en/rest/reference/teams#list-teams """ def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"orgs/{stream_slice["organization"]}/teams" def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]: for record in response.json(): yield self.transform(record=record, stream_slice=stream_slice) class Users(Organizations): """ API docs: https://docs.github.com/en/rest/reference/orgs#list-organization-members """ def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"orgs/{stream_slice["organization"]}/members" def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]: for record in response.json(): yield self.transform(record=record, stream_slice=stream_slice) # Below are semi incremental streams class Releases(SemiIncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/repos#list-releases """ cursor_field = "created_at" def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record = super().transform(record=record, stream_slice=stream_slice) assets = record.get("assets", []) for asset in assets: uploader = asset.pop("uploader", None) asset["uploader_id"] = uploader.get("id") if uploader else None return record class Events(SemiIncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/activity#list-repository-events """ cursor_field = "created_at" class 
PullRequests(SemiIncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/pulls#list-pull-requests """ large_stream = True first_read_override_key = "first_read_override" def __init__(self, **kwargs): super().__init__(**kwargs) self._first_read = True def read_records(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping[str, Any]]: """ Decide if this a first read or not by the presence of the state object """ self._first_read = not bool(stream_state) or stream_state.get(self.first_read_override_key, False) yield from super().read_records(stream_state=stream_state, **kwargs) def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice["repository"]}/pulls" def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record = super().transform(record=record, stream_slice=stream_slice) for nested in ("head", "base"): entry = record.get(nested, {}) entry["repo_id"] = (record.get("head", {}).pop("repo", {}) or {}).get("id") return record def request_params(self, **kwargs) -> MutableMapping[str, Any]: base_params = super().request_params(**kwargs) # The very first time we read this stream we want to read ascending so we can save state in case of # a halfway failure. But if there is state, we read descending to allow incremental behavior. params = {"state": "all", "sort": "updated", "direction": "desc" if self.is_sorted_descending else "asc"} return {**base_params, **params} @property def is_sorted_descending(self) -> bool: """ Depending if there any state we read stream in ascending or descending order. 
""" return not self._first_read class CommitComments(SemiIncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/repos#list-commit-comments-for-a-repository """ def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice["repository"]}/comments" class IssueMilestones(SemiIncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/issues#list-milestones """ is_sorted_descending = True stream_base_params = { "state": "all", "sort": "updated", "direction": "desc", } def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice["repository"]}/milestones" class Stargazers(SemiIncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/activity#list-stargazers """ primary_key = "user_id" cursor_field = "starred_at" def request_headers(self, **kwargs) -> Mapping[str, Any]: base_headers = super().request_headers(**kwargs) # We need to send below header if we want to get `starred_at` field. See docs (Alternative response with # star creation timestamps) - https://docs.github.com/en/rest/reference/activity#list-stargazers. headers = {"Accept": "application/vnd.github.v3.star+json"} return {**base_headers, **headers} def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: """ We need to provide the "user_id" for the primary_key attribute and don't remove the whole "user" block from the record. 
""" record = super().transform(record=record, stream_slice=stream_slice) record["user_id"] = record.get("user").get("id") return record class Projects(SemiIncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/projects#list-repository-projects """ stream_base_params = { "state": "all", } def request_headers(self, **kwargs) -> Mapping[str, Any]: base_headers = super().request_headers(**kwargs) # Projects stream requires sending following `Accept` header. If we won't sent it # we'll get `415 Client Error: Unsupported Media Type` error. headers = {"Accept": "application/vnd.github.inertia-preview+json"} return {**base_headers, **headers} class IssueEvents(SemiIncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/issues#list-issue-events-for-a-repository """ cursor_field = "created_at" def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice["repository"]}/issues/events" # Below are incremental streams class Comments(IncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/issues#list-issue-comments-for-a-repository """ large_stream = True def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice["repository"]}/issues/comments" class Commits(IncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/repos#list-commits Pull commits from each branch of each repository, tracking state for each branch """ primary_key = "sha" cursor_field = "created_at" def __init__(self, branches_to_pull: Mapping[str, List[str]], default_branches: Mapping[str, str], **kwargs): super().__init__(**kwargs) self.branches_to_pull = branches_to_pull self.default_branches = default_branches def request_params(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]: params = super(IncrementalGithubStream, 
self).request_params(stream_state=stream_state, stream_slice=stream_slice, **kwargs) params["since"] = self.get_starting_point(stream_state=stream_state, stream_slice=stream_slice) params["sha"] = stream_slice["branch"] return params def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]: for stream_slice in super().stream_slices(**kwargs): repository = stream_slice["repository"] for branch in self.branches_to_pull.get(repository, []): yield {"branch": branch, "repository": repository} def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record = super().transform(record=record, stream_slice=stream_slice) # Record of the `commits` stream doesn't have an updated_at/created_at field at the top level (so we could # just write `record["updated_at"]` or `record["created_at"]`). Instead each record has such value in # `commit.author.date`. So the easiest way is to just enrich the record returned from API with top level # field `created_at` and use it as cursor_field. 
# Include the branch in the record record["created_at"] = record["commit"]["author"]["date"] record["branch"] = stream_slice["branch"] return record def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]): state_value = latest_cursor_value = latest_record.get(self.cursor_field) current_repository = latest_record["repository"] current_branch = latest_record["branch"] if current_stream_state.get(current_repository): repository_commits_state = current_stream_state[current_repository] if repository_commits_state.get(self.cursor_field): # transfer state from old source version to per-branch version if current_branch == self.default_branches[current_repository]: state_value = max(latest_cursor_value, repository_commits_state[self.cursor_field]) del repository_commits_state[self.cursor_field] elif repository_commits_state.get(current_branch, {}).get(self.cursor_field): state_value = max(latest_cursor_value, repository_commits_state[current_branch][self.cursor_field]) if current_repository not in current_stream_state: current_stream_state[current_repository] = {} current_stream_state[current_repository][current_branch] = {self.cursor_field: state_value} return current_stream_state def get_starting_point(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any]) -> str: repository = stream_slice["repository"] branch = stream_slice["branch"] if stream_state: stream_state_value = stream_state.get(repository, {}).get(branch, {}).get(self.cursor_field) if stream_state_value: return max(self._start_date, stream_state_value) if branch == self.default_branches[repository]: return super().get_starting_point(stream_state=stream_state, stream_slice=stream_slice) return self._start_date class Issues(IncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/issues#list-repository-issues """ large_stream = True stream_base_params = { "state": "all", "sort": "updated", "direction": "asc", } class 
ReviewComments(IncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/pulls#list-review-comments-in-a-repository """ large_stream = True def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice["repository"]}/pulls/comments" # Pull request substreams class PullRequestSubstream(HttpSubStream, SemiIncrementalGithubStream, ABC): use_cache = False def __init__(self, parent: PullRequests, **kwargs): super().__init__(parent=parent, **kwargs) def stream_slices( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None ) -> Iterable[Optional[Mapping[str, Any]]]: """ Override the parent PullRequests stream configuration to always fetch records in ascending order """ parent_state = deepcopy(stream_state) or {} parent_state[PullRequests.first_read_override_key] = True parent_stream_slices = super().stream_slices(sync_mode=sync_mode, cursor_field=cursor_field, stream_state=parent_state) for parent_stream_slice in parent_stream_slices: yield { "pull_request_updated_at": parent_stream_slice["parent"]["updated_at"], "pull_request_number": parent_stream_slice["parent"]["number"], "repository": parent_stream_slice["parent"]["repository"], } def read_records( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_slice: Mapping[str, Any] = None, stream_state: Mapping[str, Any] = None, ) -> Iterable[Mapping[str, Any]]: """ We've already determined the list of pull requests to run the stream against. Skip the start_point_map and cursor_field logic in SemiIncrementalGithubStream.read_records. 
""" yield from super(SemiIncrementalGithubStream, self).read_records( sync_mode=sync_mode, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state ) class PullRequestStats(PullRequestSubstream): """ API docs: https://docs.github.com/en/rest/reference/pulls#get-a-pull-request """ @property def record_keys(self) -> List[str]: return list(self.get_json_schema()["properties"].keys()) def path( self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None ) -> str: return f"repos/{stream_slice["repository"]}/pulls/{stream_slice["pull_request_number"]}" def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any], **kwargs) -> Iterable[Mapping]: yield self.transform(record=response.json(), stream_slice=stream_slice) def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record = super().transform(record=record, stream_slice=stream_slice) return {key: value for key, value in record.items() if key in self.record_keys} class Reviews(PullRequestSubstream): """ API docs: https://docs.github.com/en/rest/reference/pulls#list-reviews-for-a-pull-request """ cursor_field = "pull_request_updated_at" def path( self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None ) -> str: return f"repos/{stream_slice["repository"]}/pulls/{stream_slice["pull_request_number"]}/reviews" # Set the parent stream state's cursor field before fetching its records def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]: parent_state = deepcopy(stream_state) or {} for repository in self.repositories: if repository in parent_state and self.cursor_field in parent_state[repository]: parent_state[repository][self.parent.cursor_field] = parent_state[repository][self.cursor_field] yield from 
super().stream_slices(stream_state=parent_state, **kwargs) def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record = super().transform(record=record, stream_slice=stream_slice) record[self.cursor_field] = stream_slice[self.cursor_field] return record class PullRequestCommits(GithubStream): """ API docs: https://docs.github.com/en/rest/reference/pulls#list-commits-on-a-pull-request """ primary_key = "sha" def __init__(self, parent: HttpStream, **kwargs): super().__init__(**kwargs) self.parent = parent def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice["repository"]}/pulls/{stream_slice["pull_number"]}/commits" def stream_slices( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None ) -> Iterable[Optional[Mapping[str, Any]]]: parent_stream_slices = self.parent.stream_slices( sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_state=stream_state ) for stream_slice in parent_stream_slices: parent_records = self.parent.read_records( sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state ) for record in parent_records: yield {"repository": record["repository"], "pull_number": record["number"]} def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record = super().transform(record=record, stream_slice=stream_slice) record["pull_number"] = stream_slice["pull_number"] return record # Reactions streams class ReactionStream(GithubStream, ABC): parent_key = "id" use_cache = False def __init__(self, **kwargs): self._stream_kwargs = deepcopy(kwargs) self._parent_stream = self.parent_entity(**kwargs) kwargs.pop("start_date", None) super().__init__(**kwargs) @property @abstractmethod def parent_entity(self): """ Specify the class of the parent stream for which receive reactions """ def 
path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: parent_path = self._parent_stream.path(stream_slice=stream_slice, **kwargs) return f"{parent_path}/{stream_slice[self.parent_key]}/reactions" def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]: for stream_slice in super().stream_slices(**kwargs): for parent_record in self._parent_stream.read_records(sync_mode=SyncMode.full_refresh, stream_slice=stream_slice): yield {self.parent_key: parent_record[self.parent_key], "repository": stream_slice["repository"]} class CommitCommentReactions(ReactionStream): """ API docs: https://docs.github.com/en/rest/reference/reactions#list-reactions-for-a-commit-comment """ parent_entity = CommitComments class IssueCommentReactions(ReactionStream): """ API docs: https://docs.github.com/en/rest/reference/reactions#list-reactions-for-an-issue-comment """ parent_entity = Comments class IssueReactions(ReactionStream): """ API docs: https://docs.github.com/en/rest/reference/reactions#list-reactions-for-an-issue """ parent_entity = Issues parent_key = "number" class PullRequestCommentReactions(ReactionStream): """ API docs: https://docs.github.com/en/rest/reference/reactions#list-reactions-for-a-pull-request-review-comment """ parent_entity = ReviewComments class Deployments(SemiIncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/deployments#list-deployments """ def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice["repository"]}/deployments" class ProjectColumns(GithubStream): """ API docs: https://docs.github.com/en/rest/reference/projects#list-project-columns """ cursor_field = "updated_at" def __init__(self, parent: HttpStream, start_date: str, **kwargs): super().__init__(**kwargs) self.parent = parent self._start_date = start_date def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"projects/{stream_slice["project_id"]}/columns" def 
stream_slices( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None ) -> Iterable[Optional[Mapping[str, Any]]]: parent_stream_slices = self.parent.stream_slices( sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_state=stream_state ) for stream_slice in parent_stream_slices: parent_records = self.parent.read_records( sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state ) for record in parent_records: yield {"repository": record["repository"], "project_id": record["id"]} def read_records( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_slice: Mapping[str, Any] = None, stream_state: Mapping[str, Any] = None, ) -> Iterable[Mapping[str, Any]]: starting_point = self.get_starting_point(stream_state=stream_state, stream_slice=stream_slice) for record in super().read_records( sync_mode=sync_mode, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state ): if record[self.cursor_field] > starting_point: yield record def get_starting_point(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any]) -> str: if stream_state: repository = stream_slice["repository"] project_id = str(stream_slice["project_id"]) stream_state_value = stream_state.get(repository, {}).get(project_id, {}).get(self.cursor_field) if stream_state_value: return max(self._start_date, stream_state_value) return self._start_date def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]): repository = latest_record["repository"] project_id = str(latest_record["project_id"]) updated_state = latest_record[self.cursor_field] stream_state_value = current_stream_state.get(repository, {}).get(project_id, {}).get(self.cursor_field) if stream_state_value: updated_state = max(updated_state, stream_state_value) current_stream_state.setdefault(repository, {}).setdefault(project_id, 
{})[self.cursor_field] = updated_state return current_stream_state def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record = super().transform(record=record, stream_slice=stream_slice) record["project_id"] = stream_slice["project_id"] return record class ProjectCards(GithubStream): """ API docs: https://docs.github.com/en/rest/reference/projects#list-project-cards """ cursor_field = "updated_at" def __init__(self, parent: HttpStream, start_date: str, **kwargs): super().__init__(**kwargs) self.parent = parent self._start_date = start_date def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"projects/columns/{stream_slice["column_id"]}/cards" def stream_slices( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None ) -> Iterable[Optional[Mapping[str, Any]]]: parent_stream_slices = self.parent.stream_slices( sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_state=stream_state ) for stream_slice in parent_stream_slices: parent_records = self.parent.read_records( sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state ) for record in parent_records: yield {"repository": record["repository"], "project_id": record["project_id"], "column_id": record["id"]} def read_records( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_slice: Mapping[str, Any] = None, stream_state: Mapping[str, Any] = None, ) -> Iterable[Mapping[str, Any]]: starting_point = self.get_starting_point(stream_state=stream_state, stream_slice=stream_slice) for record in super().read_records( sync_mode=sync_mode, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state ): if record[self.cursor_field] > starting_point: yield record def get_starting_point(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any]) -> str: if stream_state: repository = 
stream_slice["repository"] project_id = str(stream_slice["project_id"]) column_id = str(stream_slice["column_id"]) stream_state_value = stream_state.get(repository, {}).get(project_id, {}).get(column_id, {}).get(self.cursor_field) if stream_state_value: return max(self._start_date, stream_state_value) return self._start_date def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]): repository = latest_record["repository"] project_id = str(latest_record["project_id"]) column_id = str(latest_record["column_id"]) updated_state = latest_record[self.cursor_field] stream_state_value = current_stream_state.get(repository, {}).get(project_id, {}).get(column_id, {}).get(self.cursor_field) if stream_state_value: updated_state = max(updated_state, stream_state_value) current_stream_state.setdefault(repository, {}).setdefault(project_id, {}).setdefault(column_id, {})[ self.cursor_field ] = updated_state return current_stream_state def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record = super().transform(record=record, stream_slice=stream_slice) record["project_id"] = stream_slice["project_id"] record["column_id"] = stream_slice["column_id"] return record class Workflows(GithubStream): """ Get all workflows of a GitHub repository API documentation: https://docs.github.com/en/rest/reference/actions#workflows """ def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice["repository"]}/actions/workflows" def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]: response = response.json().get("workflows") for record in response: yield record class WorkflowRuns(GithubStream): """ Get all workflows of a GitHub repository API documentation: https://docs.github.com/en/rest/reference/actions#list-workflow-runs-for-a-repository """ def path(self, 
stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice["repository"]}/actions/runs" def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]: response = response.json().get("workflow_runs") for record in response: yield record
# # Copyright (c) 2021 Airbyte, Inc., all rights reserved. # import time from abc import ABC, abstractmethod from copy import deepcopy from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Union from urllib import parse import requests from airbyte_cdk.models import SyncMode from airbyte_cdk.sources.streams.http import HttpStream, HttpSubStream from requests.exceptions import HTTPError DEFAULT_PAGE_SIZE = 100 class GithubStream(HttpStream, ABC): url_base = "https://api.github.com/" primary_key = "id" use_cache = True # Detect streams with high API load large_stream = False stream_base_params = {} def __init__(self, repositories: List[str], page_size_for_large_streams: int, **kwargs): super().__init__(**kwargs) self.repositories = repositories # GitHub pagination could be from 1 to 100. self.page_size = page_size_for_large_streams if self.large_stream else DEFAULT_PAGE_SIZE MAX_RETRIES = 3 adapter = requests.adapters.HTTPAdapter(max_retries=MAX_RETRIES) self._session.mount("https://", adapter) self._session.mount("http://", adapter) def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice['repository']}/{self.name}" def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, any]]]: for repository in self.repositories: yield {"repository": repository} def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]: links = response.links if "next" in links: next_link = links["next"]["url"] parsed_link = parse.urlparse(next_link) page = dict(parse.parse_qsl(parsed_link.query)).get("page") return {"page": page} def should_retry(self, response: requests.Response) -> bool: # We don't call `super()` here because we have custom error handling and GitHub API sometimes returns strange # errors. So in `read_records()` we have custom error handling which don't require to call `super()` here. 
retry_flag = response.headers.get("X-RateLimit-Remaining") == "0" or response.status_code in ( requests.codes.SERVER_ERROR, requests.codes.BAD_GATEWAY, ) if retry_flag: self.logger.info( f"Rate limit handling for stream `{self.name}` for the response with {response.status_code} status code with message: {response.text}" ) # Handling secondary rate limits for Github # Additional information here: https://docs.github.com/en/rest/guides/best-practices-for-integrators#dealing-with-secondary-rate-limits elif response.headers.get("Retry-After"): time_delay = int(response.headers["Retry-After"]) self.logger.info(f"Handling Secondary Rate limits, setting sync delay for {time_delay} second(s)") time.sleep(time_delay) return retry_flag def backoff_time(self, response: requests.Response) -> Union[int, float]: # This method is called if we run into the rate limit. GitHub limits requests to 5000 per hour and provides # `X-RateLimit-Reset` header which contains time when this hour will be finished and limits will be reset so # we again could have 5000 per another hour. if response.status_code == requests.codes.SERVER_ERROR: return None reset_time = response.headers.get("X-RateLimit-Reset") backoff_time = float(reset_time) - time.time() if reset_time else 60 return max(backoff_time, 60) # This is a guarantee that no negative value will be returned. def read_records(self, stream_slice: Mapping[str, any] = None, **kwargs) -> Iterable[Mapping[str, Any]]: # get out the stream_slice parts for later use. 
organisation = stream_slice.get("organization", "") repository = stream_slice.get("repository", "") # Reading records while handling the errors try: yield from super().read_records(stream_slice=stream_slice, **kwargs) except HTTPError as e: error_msg = str(e.response.json().get("message")) # This whole try/except situation in `read_records()` isn't good but right now in `self._send_request()` # function we have `response.raise_for_status()` so we don't have much choice on how to handle errors. # Bocked on https://github.com/airbytehq/airbyte/issues/3514. if e.response.status_code == requests.codes.NOT_FOUND: # A lot of streams are not available for repositories owned by a user instead of an organization. if isinstance(self, Organizations): error_msg = ( f"Syncing `{self.__class__.__name__}` stream isn't available for organization `{stream_slice['organization']}`." ) else: error_msg = f"Syncing `{self.__class__.__name__}` stream isn't available for repository `{stream_slice['repository']}`." elif e.response.status_code == requests.codes.FORBIDDEN: # When using the `check_connection` method, we should raise an error if we do not have access to the repository. if isinstance(self, Repositories): raise e # When `403` for the stream, that has no access to the organization's teams, based on OAuth Apps Restrictions: # https://docs.github.com/en/organizations/restricting-access-to-your-organizations-data/enabling-oauth-app-access-restrictions-for-your-organization # For all `Organisation` based streams elif isinstance(self, Organizations) or isinstance(self, Teams) or isinstance(self, Users): error_msg = ( f"Syncing `{self.name}` stream isn't available for organization `{organisation}`. Full error message: {error_msg}" ) # For all other `Repository` base streams else: error_msg = ( f"Syncing `{self.name}` stream isn't available for repository `{repository}`. 
Full error message: {error_msg}" ) elif e.response.status_code == requests.codes.GONE and isinstance(self, Projects): # Some repos don't have projects enabled and we we get "410 Client Error: Gone for # url: https://api.github.com/repos/xyz/projects?per_page=100" error. error_msg = f"Syncing `Projects` stream isn't available for repository `{stream_slice['repository']}`." elif e.response.status_code == requests.codes.CONFLICT: error_msg = ( f"Syncing `{self.name}` stream isn't available for repository " f"`{stream_slice['repository']}`, it seems like this repository is empty." ) else: self.logger.error(f"Undefined error while reading records: {error_msg}") raise e self.logger.warn(error_msg) def request_params( self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None ) -> MutableMapping[str, Any]: params = {"per_page": self.page_size} if next_page_token: params.update(next_page_token) params.update(self.stream_base_params) return params def request_headers(self, **kwargs) -> Mapping[str, Any]: # Without sending `User-Agent` header we will be getting `403 Client Error: Forbidden for url` error. return { "User-Agent": "PostmanRuntime/7.28.0", } def parse_response( self, response: requests.Response, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None, ) -> Iterable[Mapping]: for record in response.json(): # GitHub puts records in an array. yield self.transform(record=record, stream_slice=stream_slice) def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record["repository"] = stream_slice["repository"] return record class SemiIncrementalGithubStream(GithubStream): """ Semi incremental streams are also incremental but with one difference, they: - read all records; - output only new records. 
This means that semi incremental streams read all records (like full_refresh streams) but do filtering directly in the code and output only latest records (like incremental streams). """ cursor_field = "updated_at" # This flag is used to indicate that current stream supports `sort` and `direction` request parameters and that # we should break processing records if possible. If `sort` is set to `updated` and `direction` is set to `desc` # this means that latest records will be at the beginning of the response and after we processed those latest # records we can just stop and not process other record. This will increase speed of each incremental stream # which supports those 2 request parameters. Currently only `IssueMilestones` and `PullRequests` streams are # supporting this. is_sorted_descending = False def __init__(self, start_date: str, **kwargs): super().__init__(**kwargs) self._start_date = start_date @property def state_checkpoint_interval(self) -> Optional[int]: if not self.is_sorted_descending: return self.page_size return None def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]): """ Return the latest state by comparing the cursor value in the latest record with the stream's most recent state object and returning an updated state object. 
""" repository = latest_record["repository"] updated_state = latest_record[self.cursor_field] stream_state_value = current_stream_state.get(repository, {}).get(self.cursor_field) if stream_state_value: updated_state = max(updated_state, stream_state_value) current_stream_state.setdefault(repository, {})[self.cursor_field] = updated_state return current_stream_state def get_starting_point(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any]) -> str: if stream_state: repository = stream_slice["repository"] stream_state_value = stream_state.get(repository, {}).get(self.cursor_field) if stream_state_value: return max(self._start_date, stream_state_value) return self._start_date def read_records( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_slice: Mapping[str, Any] = None, stream_state: Mapping[str, Any] = None, ) -> Iterable[Mapping[str, Any]]: start_point = self.get_starting_point(stream_state=stream_state, stream_slice=stream_slice) for record in super().read_records( sync_mode=sync_mode, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state ): if record[self.cursor_field] > start_point: yield record elif self.is_sorted_descending and record[self.cursor_field] < start_point: break class IncrementalGithubStream(SemiIncrementalGithubStream): def request_params(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]: params = super().request_params(stream_state=stream_state, **kwargs) since_params = self.get_starting_point(stream_state=stream_state, stream_slice=stream_slice) if since_params: params["since"] = since_params return params # Below are full refresh streams class RepositoryStats(GithubStream): """ This stream is technical and not intended for the user, we use it for checking connection with the repository. 
API docs: https://docs.github.com/en/rest/reference/repos#get-a-repository """ def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice['repository']}" def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]: yield response.json() class Assignees(GithubStream): """ API docs: https://docs.github.com/en/rest/reference/issues#list-assignees """ class Branches(GithubStream): """ API docs: https://docs.github.com/en/rest/reference/repos#list-branches """ primary_key = ["repository", "name"] def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice['repository']}/branches" class Collaborators(GithubStream): """ API docs: https://docs.github.com/en/rest/reference/repos#list-repository-collaborators """ class IssueLabels(GithubStream): """ API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues#list-labels-for-a-repository """ def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice['repository']}/labels" class Organizations(GithubStream): """ API docs: https://docs.github.com/en/rest/reference/orgs#get-an-organization """ # GitHub pagination could be from 1 to 100. 
page_size = 100 def __init__(self, organizations: List[str], **kwargs): super(GithubStream, self).__init__(**kwargs) self.organizations = organizations def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, any]]]: for organization in self.organizations: yield {"organization": organization} def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"orgs/{stream_slice['organization']}" def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]: yield response.json() def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record["organization"] = stream_slice["organization"] return record class Repositories(Organizations): """ API docs: https://docs.github.com/en/rest/reference/repos#list-organization-repositories """ def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"orgs/{stream_slice['organization']}/repos" def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]: for record in response.json(): # GitHub puts records in an array. 
yield self.transform(record=record, stream_slice=stream_slice) class Tags(GithubStream): """ API docs: https://docs.github.com/en/rest/reference/repos#list-repository-tags """ primary_key = ["repository", "name"] def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice['repository']}/tags" class Teams(Organizations): """ API docs: https://docs.github.com/en/rest/reference/teams#list-teams """ def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"orgs/{stream_slice['organization']}/teams" def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]: for record in response.json(): yield self.transform(record=record, stream_slice=stream_slice) class Users(Organizations): """ API docs: https://docs.github.com/en/rest/reference/orgs#list-organization-members """ def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"orgs/{stream_slice['organization']}/members" def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]: for record in response.json(): yield self.transform(record=record, stream_slice=stream_slice) # Below are semi incremental streams class Releases(SemiIncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/repos#list-releases """ cursor_field = "created_at" def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record = super().transform(record=record, stream_slice=stream_slice) assets = record.get("assets", []) for asset in assets: uploader = asset.pop("uploader", None) asset["uploader_id"] = uploader.get("id") if uploader else None return record class Events(SemiIncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/activity#list-repository-events """ cursor_field = "created_at" class 
PullRequests(SemiIncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/pulls#list-pull-requests """ large_stream = True first_read_override_key = "first_read_override" def __init__(self, **kwargs): super().__init__(**kwargs) self._first_read = True def read_records(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping[str, Any]]: """ Decide if this a first read or not by the presence of the state object """ self._first_read = not bool(stream_state) or stream_state.get(self.first_read_override_key, False) yield from super().read_records(stream_state=stream_state, **kwargs) def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice['repository']}/pulls" def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record = super().transform(record=record, stream_slice=stream_slice) for nested in ("head", "base"): entry = record.get(nested, {}) entry["repo_id"] = (record.get("head", {}).pop("repo", {}) or {}).get("id") return record def request_params(self, **kwargs) -> MutableMapping[str, Any]: base_params = super().request_params(**kwargs) # The very first time we read this stream we want to read ascending so we can save state in case of # a halfway failure. But if there is state, we read descending to allow incremental behavior. params = {"state": "all", "sort": "updated", "direction": "desc" if self.is_sorted_descending else "asc"} return {**base_params, **params} @property def is_sorted_descending(self) -> bool: """ Depending if there any state we read stream in ascending or descending order. 
""" return not self._first_read class CommitComments(SemiIncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/repos#list-commit-comments-for-a-repository """ def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice['repository']}/comments" class IssueMilestones(SemiIncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/issues#list-milestones """ is_sorted_descending = True stream_base_params = { "state": "all", "sort": "updated", "direction": "desc", } def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice['repository']}/milestones" class Stargazers(SemiIncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/activity#list-stargazers """ primary_key = "user_id" cursor_field = "starred_at" def request_headers(self, **kwargs) -> Mapping[str, Any]: base_headers = super().request_headers(**kwargs) # We need to send below header if we want to get `starred_at` field. See docs (Alternative response with # star creation timestamps) - https://docs.github.com/en/rest/reference/activity#list-stargazers. headers = {"Accept": "application/vnd.github.v3.star+json"} return {**base_headers, **headers} def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: """ We need to provide the "user_id" for the primary_key attribute and don't remove the whole "user" block from the record. 
""" record = super().transform(record=record, stream_slice=stream_slice) record["user_id"] = record.get("user").get("id") return record class Projects(SemiIncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/projects#list-repository-projects """ stream_base_params = { "state": "all", } def request_headers(self, **kwargs) -> Mapping[str, Any]: base_headers = super().request_headers(**kwargs) # Projects stream requires sending following `Accept` header. If we won't sent it # we'll get `415 Client Error: Unsupported Media Type` error. headers = {"Accept": "application/vnd.github.inertia-preview+json"} return {**base_headers, **headers} class IssueEvents(SemiIncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/issues#list-issue-events-for-a-repository """ cursor_field = "created_at" def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice['repository']}/issues/events" # Below are incremental streams class Comments(IncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/issues#list-issue-comments-for-a-repository """ large_stream = True def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice['repository']}/issues/comments" class Commits(IncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/repos#list-commits Pull commits from each branch of each repository, tracking state for each branch """ primary_key = "sha" cursor_field = "created_at" def __init__(self, branches_to_pull: Mapping[str, List[str]], default_branches: Mapping[str, str], **kwargs): super().__init__(**kwargs) self.branches_to_pull = branches_to_pull self.default_branches = default_branches def request_params(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]: params = super(IncrementalGithubStream, 
self).request_params(stream_state=stream_state, stream_slice=stream_slice, **kwargs) params["since"] = self.get_starting_point(stream_state=stream_state, stream_slice=stream_slice) params["sha"] = stream_slice["branch"] return params def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]: for stream_slice in super().stream_slices(**kwargs): repository = stream_slice["repository"] for branch in self.branches_to_pull.get(repository, []): yield {"branch": branch, "repository": repository} def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record = super().transform(record=record, stream_slice=stream_slice) # Record of the `commits` stream doesn't have an updated_at/created_at field at the top level (so we could # just write `record["updated_at"]` or `record["created_at"]`). Instead each record has such value in # `commit.author.date`. So the easiest way is to just enrich the record returned from API with top level # field `created_at` and use it as cursor_field. 
# Include the branch in the record record["created_at"] = record["commit"]["author"]["date"] record["branch"] = stream_slice["branch"] return record def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]): state_value = latest_cursor_value = latest_record.get(self.cursor_field) current_repository = latest_record["repository"] current_branch = latest_record["branch"] if current_stream_state.get(current_repository): repository_commits_state = current_stream_state[current_repository] if repository_commits_state.get(self.cursor_field): # transfer state from old source version to per-branch version if current_branch == self.default_branches[current_repository]: state_value = max(latest_cursor_value, repository_commits_state[self.cursor_field]) del repository_commits_state[self.cursor_field] elif repository_commits_state.get(current_branch, {}).get(self.cursor_field): state_value = max(latest_cursor_value, repository_commits_state[current_branch][self.cursor_field]) if current_repository not in current_stream_state: current_stream_state[current_repository] = {} current_stream_state[current_repository][current_branch] = {self.cursor_field: state_value} return current_stream_state def get_starting_point(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any]) -> str: repository = stream_slice["repository"] branch = stream_slice["branch"] if stream_state: stream_state_value = stream_state.get(repository, {}).get(branch, {}).get(self.cursor_field) if stream_state_value: return max(self._start_date, stream_state_value) if branch == self.default_branches[repository]: return super().get_starting_point(stream_state=stream_state, stream_slice=stream_slice) return self._start_date class Issues(IncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/issues#list-repository-issues """ large_stream = True stream_base_params = { "state": "all", "sort": "updated", "direction": "asc", } class 
ReviewComments(IncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/pulls#list-review-comments-in-a-repository """ large_stream = True def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice['repository']}/pulls/comments" # Pull request substreams class PullRequestSubstream(HttpSubStream, SemiIncrementalGithubStream, ABC): use_cache = False def __init__(self, parent: PullRequests, **kwargs): super().__init__(parent=parent, **kwargs) def stream_slices( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None ) -> Iterable[Optional[Mapping[str, Any]]]: """ Override the parent PullRequests stream configuration to always fetch records in ascending order """ parent_state = deepcopy(stream_state) or {} parent_state[PullRequests.first_read_override_key] = True parent_stream_slices = super().stream_slices(sync_mode=sync_mode, cursor_field=cursor_field, stream_state=parent_state) for parent_stream_slice in parent_stream_slices: yield { "pull_request_updated_at": parent_stream_slice["parent"]["updated_at"], "pull_request_number": parent_stream_slice["parent"]["number"], "repository": parent_stream_slice["parent"]["repository"], } def read_records( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_slice: Mapping[str, Any] = None, stream_state: Mapping[str, Any] = None, ) -> Iterable[Mapping[str, Any]]: """ We've already determined the list of pull requests to run the stream against. Skip the start_point_map and cursor_field logic in SemiIncrementalGithubStream.read_records. 
""" yield from super(SemiIncrementalGithubStream, self).read_records( sync_mode=sync_mode, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state ) class PullRequestStats(PullRequestSubstream): """ API docs: https://docs.github.com/en/rest/reference/pulls#get-a-pull-request """ @property def record_keys(self) -> List[str]: return list(self.get_json_schema()["properties"].keys()) def path( self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None ) -> str: return f"repos/{stream_slice['repository']}/pulls/{stream_slice['pull_request_number']}" def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any], **kwargs) -> Iterable[Mapping]: yield self.transform(record=response.json(), stream_slice=stream_slice) def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record = super().transform(record=record, stream_slice=stream_slice) return {key: value for key, value in record.items() if key in self.record_keys} class Reviews(PullRequestSubstream): """ API docs: https://docs.github.com/en/rest/reference/pulls#list-reviews-for-a-pull-request """ cursor_field = "pull_request_updated_at" def path( self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None ) -> str: return f"repos/{stream_slice['repository']}/pulls/{stream_slice['pull_request_number']}/reviews" # Set the parent stream state's cursor field before fetching its records def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]: parent_state = deepcopy(stream_state) or {} for repository in self.repositories: if repository in parent_state and self.cursor_field in parent_state[repository]: parent_state[repository][self.parent.cursor_field] = parent_state[repository][self.cursor_field] yield from 
super().stream_slices(stream_state=parent_state, **kwargs) def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record = super().transform(record=record, stream_slice=stream_slice) record[self.cursor_field] = stream_slice[self.cursor_field] return record class PullRequestCommits(GithubStream): """ API docs: https://docs.github.com/en/rest/reference/pulls#list-commits-on-a-pull-request """ primary_key = "sha" def __init__(self, parent: HttpStream, **kwargs): super().__init__(**kwargs) self.parent = parent def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice['repository']}/pulls/{stream_slice['pull_number']}/commits" def stream_slices( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None ) -> Iterable[Optional[Mapping[str, Any]]]: parent_stream_slices = self.parent.stream_slices( sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_state=stream_state ) for stream_slice in parent_stream_slices: parent_records = self.parent.read_records( sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state ) for record in parent_records: yield {"repository": record["repository"], "pull_number": record["number"]} def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record = super().transform(record=record, stream_slice=stream_slice) record["pull_number"] = stream_slice["pull_number"] return record # Reactions streams class ReactionStream(GithubStream, ABC): parent_key = "id" use_cache = False def __init__(self, **kwargs): self._stream_kwargs = deepcopy(kwargs) self._parent_stream = self.parent_entity(**kwargs) kwargs.pop("start_date", None) super().__init__(**kwargs) @property @abstractmethod def parent_entity(self): """ Specify the class of the parent stream for which receive reactions """ def 
path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: parent_path = self._parent_stream.path(stream_slice=stream_slice, **kwargs) return f"{parent_path}/{stream_slice[self.parent_key]}/reactions" def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]: for stream_slice in super().stream_slices(**kwargs): for parent_record in self._parent_stream.read_records(sync_mode=SyncMode.full_refresh, stream_slice=stream_slice): yield {self.parent_key: parent_record[self.parent_key], "repository": stream_slice["repository"]} class CommitCommentReactions(ReactionStream): """ API docs: https://docs.github.com/en/rest/reference/reactions#list-reactions-for-a-commit-comment """ parent_entity = CommitComments class IssueCommentReactions(ReactionStream): """ API docs: https://docs.github.com/en/rest/reference/reactions#list-reactions-for-an-issue-comment """ parent_entity = Comments class IssueReactions(ReactionStream): """ API docs: https://docs.github.com/en/rest/reference/reactions#list-reactions-for-an-issue """ parent_entity = Issues parent_key = "number" class PullRequestCommentReactions(ReactionStream): """ API docs: https://docs.github.com/en/rest/reference/reactions#list-reactions-for-a-pull-request-review-comment """ parent_entity = ReviewComments class Deployments(SemiIncrementalGithubStream): """ API docs: https://docs.github.com/en/rest/reference/deployments#list-deployments """ def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice['repository']}/deployments" class ProjectColumns(GithubStream): """ API docs: https://docs.github.com/en/rest/reference/projects#list-project-columns """ cursor_field = "updated_at" def __init__(self, parent: HttpStream, start_date: str, **kwargs): super().__init__(**kwargs) self.parent = parent self._start_date = start_date def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"projects/{stream_slice['project_id']}/columns" def 
stream_slices( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None ) -> Iterable[Optional[Mapping[str, Any]]]: parent_stream_slices = self.parent.stream_slices( sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_state=stream_state ) for stream_slice in parent_stream_slices: parent_records = self.parent.read_records( sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state ) for record in parent_records: yield {"repository": record["repository"], "project_id": record["id"]} def read_records( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_slice: Mapping[str, Any] = None, stream_state: Mapping[str, Any] = None, ) -> Iterable[Mapping[str, Any]]: starting_point = self.get_starting_point(stream_state=stream_state, stream_slice=stream_slice) for record in super().read_records( sync_mode=sync_mode, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state ): if record[self.cursor_field] > starting_point: yield record def get_starting_point(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any]) -> str: if stream_state: repository = stream_slice["repository"] project_id = str(stream_slice["project_id"]) stream_state_value = stream_state.get(repository, {}).get(project_id, {}).get(self.cursor_field) if stream_state_value: return max(self._start_date, stream_state_value) return self._start_date def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]): repository = latest_record["repository"] project_id = str(latest_record["project_id"]) updated_state = latest_record[self.cursor_field] stream_state_value = current_stream_state.get(repository, {}).get(project_id, {}).get(self.cursor_field) if stream_state_value: updated_state = max(updated_state, stream_state_value) current_stream_state.setdefault(repository, {}).setdefault(project_id, 
{})[self.cursor_field] = updated_state return current_stream_state def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record = super().transform(record=record, stream_slice=stream_slice) record["project_id"] = stream_slice["project_id"] return record class ProjectCards(GithubStream): """ API docs: https://docs.github.com/en/rest/reference/projects#list-project-cards """ cursor_field = "updated_at" def __init__(self, parent: HttpStream, start_date: str, **kwargs): super().__init__(**kwargs) self.parent = parent self._start_date = start_date def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"projects/columns/{stream_slice['column_id']}/cards" def stream_slices( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None ) -> Iterable[Optional[Mapping[str, Any]]]: parent_stream_slices = self.parent.stream_slices( sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_state=stream_state ) for stream_slice in parent_stream_slices: parent_records = self.parent.read_records( sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state ) for record in parent_records: yield {"repository": record["repository"], "project_id": record["project_id"], "column_id": record["id"]} def read_records( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_slice: Mapping[str, Any] = None, stream_state: Mapping[str, Any] = None, ) -> Iterable[Mapping[str, Any]]: starting_point = self.get_starting_point(stream_state=stream_state, stream_slice=stream_slice) for record in super().read_records( sync_mode=sync_mode, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state ): if record[self.cursor_field] > starting_point: yield record def get_starting_point(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any]) -> str: if stream_state: repository = 
stream_slice["repository"] project_id = str(stream_slice["project_id"]) column_id = str(stream_slice["column_id"]) stream_state_value = stream_state.get(repository, {}).get(project_id, {}).get(column_id, {}).get(self.cursor_field) if stream_state_value: return max(self._start_date, stream_state_value) return self._start_date def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]): repository = latest_record["repository"] project_id = str(latest_record["project_id"]) column_id = str(latest_record["column_id"]) updated_state = latest_record[self.cursor_field] stream_state_value = current_stream_state.get(repository, {}).get(project_id, {}).get(column_id, {}).get(self.cursor_field) if stream_state_value: updated_state = max(updated_state, stream_state_value) current_stream_state.setdefault(repository, {}).setdefault(project_id, {}).setdefault(column_id, {})[ self.cursor_field ] = updated_state return current_stream_state def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]: record = super().transform(record=record, stream_slice=stream_slice) record["project_id"] = stream_slice["project_id"] record["column_id"] = stream_slice["column_id"] return record class Workflows(GithubStream): """ Get all workflows of a GitHub repository API documentation: https://docs.github.com/en/rest/reference/actions#workflows """ def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice['repository']}/actions/workflows" def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]: response = response.json().get("workflows") for record in response: yield record class WorkflowRuns(GithubStream): """ Get all workflows of a GitHub repository API documentation: https://docs.github.com/en/rest/reference/actions#list-workflow-runs-for-a-repository """ def path(self, 
stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"repos/{stream_slice['repository']}/actions/runs" def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]: response = response.json().get("workflow_runs") for record in response: yield record
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Allows a model to self-chat on a given task.
"""
from parlai.core.params import ParlaiParser
from parlai.core.agents import create_agent, create_agent_from_model_file
from parlai.core.worlds import create_task
from parlai.utils.world_logging import WorldLogger
from parlai.utils.misc import TimeLogger
from parlai.core.script import ParlaiScript, register_script
import parlai.utils.logging as logging

import math
import json
import random


def setup_args(parser=None):
    """Build (or extend) the argument parser for the self-chat script."""
    if parser is None:
        parser = ParlaiParser(True, True, 'Generate self-chats of a model')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('-d', '--display-examples', type='bool', default=True)
    parser.add_argument(
        '--display-ignore-fields',
        type=str,
        default='label_candidates,text_candidates',
        help='Do not display these fields',
    )
    parser.add_argument(
        '-st',
        '--selfchat-task',
        type='bool',
        default=True,
        help='Create a self chat version of the task',
    )
    parser.add_argument(
        '--num-self-chats', type=int, default=1, help='Number of self chats to run'
    )
    parser.add_argument(
        '--selfchat-max-turns',
        type=int,
        default=6,
        help='The number of dialogue turns before self chat ends',
    )
    parser.add_argument(
        '--seed-messages-from-task',
        action='store_true',
        help='Automatically seed conversation with messages from task dataset.',
    )
    parser.add_argument(
        '--outfile', type=str, default=None, help='File to save self chat logs'
    )
    parser.add_argument(
        '--save-format',
        type=str,
        default='conversations',
        choices=['conversations', 'parlai'],
        help='Format to save logs in. conversations is a jsonl format, parlai is a text format.',
    )
    parser.add_argument(
        '-pmf',
        '--partner-model-file',
        default=None,
        help='Define a different partner for self chat',
    )
    parser.add_argument(
        '--partner-opt-file',
        default=None,
        help='Path to file containing opts to override for partner',
    )
    parser.set_defaults(interactive_mode=True, task='self_chat')
    WorldLogger.add_cmdline_args(parser)
    return parser


def _run_self_chat_episode(opt, world, world_logger):
    """Run one self-chat episode of `selfchat_max_turns` turns and log it."""
    bsz = opt.get('batchsize', 1)
    num_turns = opt['selfchat_max_turns']
    num_parleys = math.ceil(num_turns / bsz)
    for _ in range(num_parleys):
        world.parley()
        world_logger.log(world)

        if opt['display_examples']:
            print(world.display())

    if opt['display_examples']:
        print('-- end of episode --')

    world.reset()
    world_logger.reset_world()  # flush this episode


def self_chat(opt):
    """Run self-chats between two agents and save the conversation logs."""
    random.seed(opt['seed'])
    partner = opt['partner_model_file']
    partner_opt_file = opt.get('partner_opt_file')

    # Create agents
    agent1 = create_agent(opt, requireModelExists=True)
    agent1.opt.log("Agent 1 Opt")
    if partner is None:
        # Self chat with same model
        agent2 = agent1.clone()
    else:
        # Self chat with different models
        if partner_opt_file:
            print(f"WARNING: Loading override opts from: {partner_opt_file}")
            with open(partner_opt_file) as f:
                partner_opt = json.load(f)
        else:
            partner_opt = {}
        partner_opt['interactive_mode'] = opt.get('interactive_mode', True)
        # BUGFIX: the f-string below reused double quotes for the subscript
        # (partner_opt["interactive_mode"]), which is a SyntaxError on
        # Python < 3.12; use single quotes inside the expression.
        print(
            f"WARNING: Setting partner interactive mode to: {partner_opt['interactive_mode']}"
        )
        agent2 = create_agent_from_model_file(partner, partner_opt)
    agent2.opt.log("Agent 2 Opt")

    # Set IDs
    agent1.id = agent1.id + "_1"
    agent2.id = agent2.id + "_2"

    model_id = agent1.id + "_" + agent2.id

    world = create_task(opt, user_agents=[agent1, agent2])

    # Set up world logging
    logger = WorldLogger(opt)
    log_time = TimeLogger()

    # Run some self chats.
    for i in range(opt['num_self_chats']):
        _run_self_chat_episode(opt, world, logger)
        report = world.report()
        text, report = log_time.log(i + 1, opt['num_self_chats'], report)
        logging.info(text)

    # Save chats
    if opt['outfile'] is None:
        outfile = '/tmp/{}_selfchat'.format(model_id)
    else:
        outfile = opt['outfile']

    if opt['save_format'] == 'conversations' and hasattr(world, 'write'):
        # use self chat specific world to write conversation
        # this might be useful for logging extra contextual
        # information (like personas)
        world.write(logger, outfile)
    else:
        # use default logger write function
        logger.write(outfile, world, opt['save_format'])

    return logger.get_logs()


@register_script('self_chat')
class SelfChat(ParlaiScript):
    @classmethod
    def setup_args(cls):
        return setup_args()

    def run(self):
        return self_chat(self.opt)


if __name__ == '__main__':
    SelfChat.main()
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Allows a model to self-chat on a given task.
"""
from parlai.core.params import ParlaiParser
from parlai.core.agents import create_agent, create_agent_from_model_file
from parlai.core.worlds import create_task
from parlai.utils.world_logging import WorldLogger
from parlai.utils.misc import TimeLogger
from parlai.core.script import ParlaiScript, register_script
import parlai.utils.logging as logging

import math
import json
import random


def setup_args(parser=None):
    """Set up the command-line arguments for self-chat generation."""
    if parser is None:
        parser = ParlaiParser(True, True, 'Generate self-chats of a model')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('-d', '--display-examples', type='bool', default=True)
    parser.add_argument(
        '--display-ignore-fields',
        type=str,
        default='label_candidates,text_candidates',
        help='Do not display these fields',
    )
    parser.add_argument(
        '-st',
        '--selfchat-task',
        type='bool',
        default=True,
        help='Create a self chat version of the task',
    )
    parser.add_argument(
        '--num-self-chats', type=int, default=1, help='Number of self chats to run'
    )
    parser.add_argument(
        '--selfchat-max-turns',
        type=int,
        default=6,
        help='The number of dialogue turns before self chat ends',
    )
    parser.add_argument(
        '--seed-messages-from-task',
        action='store_true',
        help='Automatically seed conversation with messages from task dataset.',
    )
    parser.add_argument(
        '--outfile', type=str, default=None, help='File to save self chat logs'
    )
    parser.add_argument(
        '--save-format',
        type=str,
        default='conversations',
        choices=['conversations', 'parlai'],
        help='Format to save logs in. conversations is a jsonl format, parlai is a text format.',
    )
    parser.add_argument(
        '-pmf',
        '--partner-model-file',
        default=None,
        help='Define a different partner for self chat',
    )
    parser.add_argument(
        '--partner-opt-file',
        default=None,
        help='Path to file containing opts to override for partner',
    )
    parser.set_defaults(interactive_mode=True, task='self_chat')
    WorldLogger.add_cmdline_args(parser)
    return parser


def _run_self_chat_episode(opt, world, world_logger):
    """Drive a single episode of self-chat, logging every parley."""
    batch_size = opt.get('batchsize', 1)
    max_turns = opt['selfchat_max_turns']
    show = opt['display_examples']

    # Each parley advances every conversation in the batch by one turn.
    for _ in range(math.ceil(max_turns / batch_size)):
        world.parley()
        world_logger.log(world)
        if show:
            print(world.display())

    if show:
        print('-- end of episode --')

    world.reset()
    world_logger.reset_world()  # flush this episode


def self_chat(opt):
    """Generate self-chats between two agents and persist the logs."""
    random.seed(opt['seed'])
    partner = opt['partner_model_file']
    partner_opt_file = opt.get('partner_opt_file')

    # Create agents
    agent1 = create_agent(opt, requireModelExists=True)
    agent1.opt.log("Agent 1 Opt")
    if partner is None:
        # Self chat with same model
        agent2 = agent1.clone()
    else:
        # Self chat with different models
        if partner_opt_file:
            print(f"WARNING: Loading override opts from: {partner_opt_file}")
            with open(partner_opt_file) as f:
                partner_opt = json.load(f)
        else:
            partner_opt = {}
        partner_opt['interactive_mode'] = opt.get('interactive_mode', True)
        print(
            f"WARNING: Setting partner interactive mode to: {partner_opt['interactive_mode']}"
        )
        agent2 = create_agent_from_model_file(partner, partner_opt)
    agent2.opt.log("Agent 2 Opt")

    # Set IDs
    agent1.id = agent1.id + "_1"
    agent2.id = agent2.id + "_2"
    model_id = agent1.id + "_" + agent2.id

    world = create_task(opt, user_agents=[agent1, agent2])

    # Set up world logging
    logger = WorldLogger(opt)
    log_time = TimeLogger()

    # Run some self chats.
    num_chats = opt['num_self_chats']
    for chat_idx in range(num_chats):
        _run_self_chat_episode(opt, world, logger)
        report = world.report()
        text, report = log_time.log(chat_idx + 1, num_chats, report)
        logging.info(text)

    # Save chats
    if opt['outfile'] is None:
        outfile = f'/tmp/{model_id}_selfchat'
    else:
        outfile = opt['outfile']

    if opt['save_format'] == 'conversations' and hasattr(world, 'write'):
        # use self chat specific world to write conversation
        # this might be useful for logging extra contextual
        # information (like personas)
        world.write(logger, outfile)
    else:
        # use default logger write function
        logger.write(outfile, world, opt['save_format'])

    return logger.get_logs()


@register_script('self_chat')
class SelfChat(ParlaiScript):
    @classmethod
    def setup_args(cls):
        return setup_args()

    def run(self):
        return self_chat(self.opt)


if __name__ == '__main__':
    SelfChat.main()
import asyncio
import logging
from datetime import datetime
from typing import List

from heisenbridge.command_parse import CommandParser
from heisenbridge.private_room import PrivateRoom


class NetworkRoom:
    # placeholder for type annotations; the real class lives elsewhere
    pass


class ChannelRoom(PrivateRoom):
    """
    An IRC channel bridged to a Matrix room.

    Extends :class:`PrivateRoom` with channel-specific commands (MODE, NAMES,
    BANS, OP/DEOP, VOICE/DEVOICE) and IRC event handlers that keep the Matrix
    room membership, topic and notices in sync with the channel.
    """

    key: str
    names_buffer: List[str]
    bans_buffer: List[str]

    def init(self) -> None:
        """Register channel commands and reset per-channel state."""
        super().init()

        self.key = None

        cmd = CommandParser(prog="MODE", description="send MODE command")
        cmd.add_argument("args", nargs="*", help="MODE command arguments")
        self.commands.register(cmd, self.cmd_mode)

        cmd = CommandParser(prog="NAMES", description="resynchronize channel members")
        self.commands.register(cmd, self.cmd_names)

        cmd = CommandParser(prog="BANS", description="show channel ban list")
        self.commands.register(cmd, self.cmd_bans)

        cmd = CommandParser(prog="OP", description="op someone")
        cmd.add_argument("nick", help="nick to target")
        self.commands.register(cmd, self.cmd_op)

        cmd = CommandParser(prog="DEOP", description="deop someone")
        cmd.add_argument("nick", help="nick to target")
        self.commands.register(cmd, self.cmd_deop)

        cmd = CommandParser(prog="VOICE", description="voice someone")
        cmd.add_argument("nick", help="nick to target")
        self.commands.register(cmd, self.cmd_voice)

        cmd = CommandParser(prog="DEVOICE", description="devoice someone")
        cmd.add_argument("nick", help="nick to target")
        self.commands.register(cmd, self.cmd_devoice)

        # accumulate multi-message NAMES / ban-list replies until the end event
        self.names_buffer = []
        self.bans_buffer = []

    def from_config(self, config: dict) -> None:
        """Restore room state from a persisted config dict; raises on missing keys."""
        if "name" not in config:
            raise Exception("No name key in config for ChatRoom")

        if "network" not in config:
            raise Exception("No network key in config for ChatRoom")

        self.name = config["name"]
        self.network_name = config["network"]

        if "key" in config:
            self.key = config["key"]

    def to_config(self) -> dict:
        """Serialize the persistent subset of room state."""
        return {"name": self.name, "network": self.network_name, "key": self.key}

    @staticmethod
    def create(network: NetworkRoom, name: str) -> "ChannelRoom":
        """Create a new ChannelRoom and schedule creation of its Matrix room."""
        logging.debug(f"ChannelRoom.create(network='{network.name}', name='{name}'")
        room = ChannelRoom(None, network.user_id, network.serv, [network.serv.user_id, network.user_id])
        room.name = name.lower()
        room.network = network
        room.network_name = network.name
        asyncio.ensure_future(room._create_mx())
        return room

    async def _create_mx(self):
        """Create the backing Matrix room, register it and start the event queue."""
        # handle !room names properly
        visible_name = self.name
        if visible_name.startswith("!"):
            visible_name = "!" + visible_name[6:]

        self.id = await self.network.serv.create_room(
            f"{visible_name} ({self.network.name})",
            "",
            [self.network.user_id],
        )
        self.serv.register_room(self)
        await self.save()
        # start event queue now that we have an id
        self._queue.start()

    def is_valid(self) -> bool:
        # a channel room is only valid while its owner is still in it
        if not self.in_room(self.user_id):
            return False

        return super().is_valid()

    async def cleanup(self) -> None:
        """Part the IRC channel (if connected) and drop the network's reference."""
        if self.network:
            if self.network.conn and self.network.conn.connected:
                self.network.conn.part(self.name)

            if self.name in self.network.rooms:
                del self.network.rooms[self.name]

    async def cmd_mode(self, args) -> None:
        self.network.conn.mode(self.name, " ".join(args.args))

    async def cmd_modes(self, args) -> None:
        self.network.conn.mode(self.name, "")

    async def cmd_names(self, args) -> None:
        self.network.conn.names(self.name)

    async def cmd_bans(self, args) -> None:
        self.network.conn.mode(self.name, "+b")

    async def cmd_op(self, args) -> None:
        self.network.conn.mode(self.name, f"+o {args.nick}")

    async def cmd_deop(self, args) -> None:
        self.network.conn.mode(self.name, f"-o {args.nick}")

    async def cmd_voice(self, args) -> None:
        self.network.conn.mode(self.name, f"+v {args.nick}")

    async def cmd_devoice(self, args) -> None:
        self.network.conn.mode(self.name, f"-v {args.nick}")

    async def cmd_topic(self, args) -> None:
        self.network.conn.topic(self.name, " ".join(args.text))

    def on_pubmsg(self, conn, event):
        # channel messages are relayed exactly like private messages
        self.on_privmsg(conn, event)

    def on_pubnotice(self, conn, event):
        self.on_privnotice(conn, event)

    def on_namreply(self, conn, event) -> None:
        # NAMES replies arrive in chunks; buffer until RPL_ENDOFNAMES
        self.names_buffer.extend(event.arguments[2].split())

    def _add_puppet(self, nick):
        """Ensure a puppet Matrix user exists for *nick* and join it to the room."""
        irc_user_id = self.serv.irc_user_id(self.network.name, nick)

        self.ensure_irc_user_id(self.network.name, nick)
        self.invite(irc_user_id)
        self.join(irc_user_id)

    def _remove_puppet(self, user_id):
        # never remove the appservice or the room owner
        if user_id == self.serv.user_id or user_id == self.user_id:
            return

        self.leave(user_id)

    def on_endofnames(self, conn, event) -> None:
        """Reconcile Matrix room membership against a completed NAMES listing."""
        to_remove = list(self.members)
        to_add = []
        names = list(self.names_buffer)
        self.names_buffer = []
        modes = {}

        for nick in names:
            nick, mode = self.serv.strip_nick(nick)

            if mode:
                if mode not in modes:
                    modes[mode] = []

                modes[mode].append(nick)

            # ignore us
            if nick == conn.real_nickname:
                continue

            # convert to mx id, check if we already have them
            irc_user_id = self.serv.irc_user_id(self.network.name, nick)

            # make sure this user is not removed from room
            if irc_user_id in to_remove:
                to_remove.remove(irc_user_id)
                continue

            # if this user is not in room, add to invite list
            if not self.in_room(irc_user_id):
                to_add.append((irc_user_id, nick))

        # never remove us or appservice
        if self.serv.user_id in to_remove:
            to_remove.remove(self.serv.user_id)
        if self.user_id in to_remove:
            to_remove.remove(self.user_id)

        self.send_notice(
            "Synchronizing members:"
            + f" got {len(names)} from server,"
            + f" {len(self.members)} in room,"
            + f" {len(to_add)} will be invited and {len(to_remove)} removed."
        )

        # known common mode names
        modenames = {
            "~": "owner",
            "&": "admin",
            "@": "op",
            "%": "half-op",
            "+": "voice",
        }

        # show modes from top to bottom
        for mode, name in modenames.items():
            if mode in modes:
                # FIX: the f-string previously reused the enclosing double quote
                # ({", ".join(...)}), which is a SyntaxError before Python 3.12
                self.send_notice(f"Users with {name} ({mode}): {', '.join(modes[mode])}")
                del modes[mode]

        # show unknown modes
        # FIX: same quote-reuse SyntaxError here (and a stray double quote after {mode})
        for mode, nicks in modes.items():
            self.send_notice(f"Users with '{mode}': {', '.join(nicks)}")

        # FIXME: this floods the event queue if there's a lot of people
        for (irc_user_id, nick) in to_add:
            self._add_puppet(nick)

        for irc_user_id in to_remove:
            self._remove_puppet(irc_user_id)

    def on_join(self, conn, event) -> None:
        # we don't need to sync ourself
        if conn.real_nickname == event.source.nick:
            self.send_notice("Joined channel.")

            # sync channel modes/key on join
            self.network.conn.mode(self.name, "")
            return

        # convert to mx id, check if we already have them
        irc_user_id = self.serv.irc_user_id(self.network_name, event.source.nick)
        if irc_user_id in self.members:
            return

        # ensure, append, invite and join
        self._add_puppet(event.source.nick)

    def on_quit(self, conn, event) -> None:
        # a quit looks like a part from this channel's point of view
        self.on_part(conn, event)

    def on_part(self, conn, event) -> None:
        # we don't need to sync ourself
        if conn.real_nickname == event.source.nick:
            return

        irc_user_id = self.serv.irc_user_id(self.network_name, event.source.nick)
        if irc_user_id not in self.members:
            return

        self._remove_puppet(irc_user_id)

    def update_key(self, modes):
        """Track the channel key (+k/-k) from a MODE argument list and persist changes."""
        # update channel key
        if modes[0].startswith("-") and modes[0].find("k") > -1:
            if self.key is not None:
                self.key = None
                asyncio.ensure_future(self.save())
        elif modes[0].startswith("+"):
            key_pos = modes[0].find("k")
            if key_pos > -1:
                # NOTE(review): indexes the argument list with the position of "k"
                # inside the mode string; only lines up when "k" is the sole
                # parameterized mode — verify against the IRC library's event shape
                key = modes[key_pos]
                if self.key != key:
                    self.key = key
                    asyncio.ensure_future(self.save())

    def on_mode(self, conn, event) -> None:
        modes = list(event.arguments)

        self.send_notice("{} set modes {}".format(event.source.nick, " ".join(modes)))
        self.update_key(modes)

    def on_notopic(self, conn, event) -> None:
        self.set_topic("")

    def on_currenttopic(self, conn, event) -> None:
        self.set_topic(event.arguments[1])

    def on_topic(self, conn, event) -> None:
        self.send_notice("{} changed the topic".format(event.source.nick))
        self.set_topic(event.arguments[0])

    def on_kick(self, conn, event) -> None:
        target_user_id = self.serv.irc_user_id(self.network.name, event.arguments[0])
        self.kick(target_user_id, f"Kicked by {event.source.nick}: {event.arguments[1]}")
        if target_user_id in self.members:
            self.members.remove(target_user_id)

    def on_banlist(self, conn, event) -> None:
        # ban-list replies arrive one entry at a time; buffer until the end event
        parts = list(event.arguments)
        parts.pop(0)
        logging.info(parts)
        self.bans_buffer.append(parts)

    def on_endofbanlist(self, conn, event) -> None:
        bans = self.bans_buffer
        self.bans_buffer = []

        self.send_notice("Current channel bans:")
        for ban in bans:
            # [mask, setter, timestamp]
            bantime = datetime.utcfromtimestamp(int(ban[2])).strftime("%c %Z")
            self.send_notice(f"\t{ban[0]} set by {ban[1]} at {bantime}")

    def on_channelmodeis(self, conn, event) -> None:
        modes = list(event.arguments)
        modes.pop(0)

        # FIX: quote-reuse SyntaxError ({" ".join(modes)}) corrected to single quotes
        self.send_notice(f"Current channel modes: {' '.join(modes)}")
        self.update_key(modes)

    def on_channelcreate(self, conn, event) -> None:
        created = datetime.utcfromtimestamp(int(event.arguments[1])).strftime("%c %Z")
        self.send_notice(f"Channel was created at {created}")
import asyncio
import logging
from datetime import datetime
from typing import List

from heisenbridge.command_parse import CommandParser
from heisenbridge.private_room import PrivateRoom


class NetworkRoom:
    # placeholder for type annotations; the real class lives elsewhere
    pass


class ChannelRoom(PrivateRoom):
    """
    An IRC channel bridged to a Matrix room.

    Extends :class:`PrivateRoom` with channel-specific commands (MODE, NAMES,
    BANS, OP/DEOP, VOICE/DEVOICE) and IRC event handlers that keep the Matrix
    room membership, topic and notices in sync with the channel.
    """

    key: str
    names_buffer: List[str]
    bans_buffer: List[str]

    def init(self) -> None:
        """Register channel commands and reset per-channel state."""
        super().init()

        self.key = None

        cmd = CommandParser(prog="MODE", description="send MODE command")
        cmd.add_argument("args", nargs="*", help="MODE command arguments")
        self.commands.register(cmd, self.cmd_mode)

        cmd = CommandParser(prog="NAMES", description="resynchronize channel members")
        self.commands.register(cmd, self.cmd_names)

        cmd = CommandParser(prog="BANS", description="show channel ban list")
        self.commands.register(cmd, self.cmd_bans)

        cmd = CommandParser(prog="OP", description="op someone")
        cmd.add_argument("nick", help="nick to target")
        self.commands.register(cmd, self.cmd_op)

        cmd = CommandParser(prog="DEOP", description="deop someone")
        cmd.add_argument("nick", help="nick to target")
        self.commands.register(cmd, self.cmd_deop)

        cmd = CommandParser(prog="VOICE", description="voice someone")
        cmd.add_argument("nick", help="nick to target")
        self.commands.register(cmd, self.cmd_voice)

        cmd = CommandParser(prog="DEVOICE", description="devoice someone")
        cmd.add_argument("nick", help="nick to target")
        self.commands.register(cmd, self.cmd_devoice)

        # accumulate multi-message NAMES / ban-list replies until the end event
        self.names_buffer = []
        self.bans_buffer = []

    def from_config(self, config: dict) -> None:
        """Restore room state from a persisted config dict; raises on missing keys."""
        if "name" not in config:
            raise Exception("No name key in config for ChatRoom")

        if "network" not in config:
            raise Exception("No network key in config for ChatRoom")

        self.name = config["name"]
        self.network_name = config["network"]

        if "key" in config:
            self.key = config["key"]

    def to_config(self) -> dict:
        """Serialize the persistent subset of room state."""
        return {"name": self.name, "network": self.network_name, "key": self.key}

    @staticmethod
    def create(network: NetworkRoom, name: str) -> "ChannelRoom":
        """Create a new ChannelRoom and schedule creation of its Matrix room."""
        logging.debug(f"ChannelRoom.create(network='{network.name}', name='{name}'")
        room = ChannelRoom(None, network.user_id, network.serv, [network.serv.user_id, network.user_id])
        room.name = name.lower()
        room.network = network
        room.network_name = network.name
        asyncio.ensure_future(room._create_mx())
        return room

    async def _create_mx(self):
        """Create the backing Matrix room, register it and start the event queue."""
        # handle !room names properly
        visible_name = self.name
        if visible_name.startswith("!"):
            visible_name = "!" + visible_name[6:]

        self.id = await self.network.serv.create_room(
            f"{visible_name} ({self.network.name})",
            "",
            [self.network.user_id],
        )
        self.serv.register_room(self)
        await self.save()
        # start event queue now that we have an id
        self._queue.start()

    def is_valid(self) -> bool:
        # a channel room is only valid while its owner is still in it
        if not self.in_room(self.user_id):
            return False

        return super().is_valid()

    async def cleanup(self) -> None:
        """Part the IRC channel (if connected) and drop the network's reference."""
        if self.network:
            if self.network.conn and self.network.conn.connected:
                self.network.conn.part(self.name)

            if self.name in self.network.rooms:
                del self.network.rooms[self.name]

    async def cmd_mode(self, args) -> None:
        self.network.conn.mode(self.name, " ".join(args.args))

    async def cmd_modes(self, args) -> None:
        self.network.conn.mode(self.name, "")

    async def cmd_names(self, args) -> None:
        self.network.conn.names(self.name)

    async def cmd_bans(self, args) -> None:
        self.network.conn.mode(self.name, "+b")

    async def cmd_op(self, args) -> None:
        self.network.conn.mode(self.name, f"+o {args.nick}")

    async def cmd_deop(self, args) -> None:
        self.network.conn.mode(self.name, f"-o {args.nick}")

    async def cmd_voice(self, args) -> None:
        self.network.conn.mode(self.name, f"+v {args.nick}")

    async def cmd_devoice(self, args) -> None:
        self.network.conn.mode(self.name, f"-v {args.nick}")

    async def cmd_topic(self, args) -> None:
        self.network.conn.topic(self.name, " ".join(args.text))

    def on_pubmsg(self, conn, event):
        # channel messages are relayed exactly like private messages
        self.on_privmsg(conn, event)

    def on_pubnotice(self, conn, event):
        self.on_privnotice(conn, event)

    def on_namreply(self, conn, event) -> None:
        # NAMES replies arrive in chunks; buffer until RPL_ENDOFNAMES
        self.names_buffer.extend(event.arguments[2].split())

    def _add_puppet(self, nick):
        """Ensure a puppet Matrix user exists for *nick* and join it to the room."""
        irc_user_id = self.serv.irc_user_id(self.network.name, nick)

        self.ensure_irc_user_id(self.network.name, nick)
        self.invite(irc_user_id)
        self.join(irc_user_id)

    def _remove_puppet(self, user_id):
        # never remove the appservice or the room owner
        if user_id == self.serv.user_id or user_id == self.user_id:
            return

        self.leave(user_id)

    def on_endofnames(self, conn, event) -> None:
        """Reconcile Matrix room membership against a completed NAMES listing."""
        to_remove = list(self.members)
        to_add = []
        names = list(self.names_buffer)
        self.names_buffer = []
        modes = {}

        for nick in names:
            nick, mode = self.serv.strip_nick(nick)

            if mode:
                if mode not in modes:
                    modes[mode] = []

                modes[mode].append(nick)

            # ignore us
            if nick == conn.real_nickname:
                continue

            # convert to mx id, check if we already have them
            irc_user_id = self.serv.irc_user_id(self.network.name, nick)

            # make sure this user is not removed from room
            if irc_user_id in to_remove:
                to_remove.remove(irc_user_id)
                continue

            # if this user is not in room, add to invite list
            if not self.in_room(irc_user_id):
                to_add.append((irc_user_id, nick))

        # never remove us or appservice
        if self.serv.user_id in to_remove:
            to_remove.remove(self.serv.user_id)
        if self.user_id in to_remove:
            to_remove.remove(self.user_id)

        self.send_notice(
            "Synchronizing members:"
            + f" got {len(names)} from server,"
            + f" {len(self.members)} in room,"
            + f" {len(to_add)} will be invited and {len(to_remove)} removed."
        )

        # known common mode names
        modenames = {
            "~": "owner",
            "&": "admin",
            "@": "op",
            "%": "half-op",
            "+": "voice",
        }

        # show modes from top to bottom
        for mode, name in modenames.items():
            if mode in modes:
                self.send_notice(f"Users with {name} ({mode}): {', '.join(modes[mode])}")
                del modes[mode]

        # show unknown modes
        for mode, nicks in modes.items():
            self.send_notice(f"Users with '{mode}': {', '.join(nicks)}")

        # FIXME: this floods the event queue if there's a lot of people
        for (irc_user_id, nick) in to_add:
            self._add_puppet(nick)

        for irc_user_id in to_remove:
            self._remove_puppet(irc_user_id)

    def on_join(self, conn, event) -> None:
        # we don't need to sync ourself
        if conn.real_nickname == event.source.nick:
            self.send_notice("Joined channel.")

            # sync channel modes/key on join
            self.network.conn.mode(self.name, "")
            return

        # convert to mx id, check if we already have them
        irc_user_id = self.serv.irc_user_id(self.network_name, event.source.nick)
        if irc_user_id in self.members:
            return

        # ensure, append, invite and join
        self._add_puppet(event.source.nick)

    def on_quit(self, conn, event) -> None:
        # a quit looks like a part from this channel's point of view
        self.on_part(conn, event)

    def on_part(self, conn, event) -> None:
        # we don't need to sync ourself
        if conn.real_nickname == event.source.nick:
            return

        irc_user_id = self.serv.irc_user_id(self.network_name, event.source.nick)
        if irc_user_id not in self.members:
            return

        self._remove_puppet(irc_user_id)

    def update_key(self, modes):
        """Track the channel key (+k/-k) from a MODE argument list and persist changes."""
        # update channel key
        if modes[0].startswith("-") and modes[0].find("k") > -1:
            if self.key is not None:
                self.key = None
                asyncio.ensure_future(self.save())
        elif modes[0].startswith("+"):
            key_pos = modes[0].find("k")
            if key_pos > -1:
                # NOTE(review): indexes the argument list with the position of "k"
                # inside the mode string; only lines up when "k" is the sole
                # parameterized mode — verify against the IRC library's event shape
                key = modes[key_pos]
                if self.key != key:
                    self.key = key
                    asyncio.ensure_future(self.save())

    def on_mode(self, conn, event) -> None:
        modes = list(event.arguments)

        self.send_notice("{} set modes {}".format(event.source.nick, " ".join(modes)))
        self.update_key(modes)

    def on_notopic(self, conn, event) -> None:
        self.set_topic("")

    def on_currenttopic(self, conn, event) -> None:
        self.set_topic(event.arguments[1])

    def on_topic(self, conn, event) -> None:
        self.send_notice("{} changed the topic".format(event.source.nick))
        self.set_topic(event.arguments[0])

    def on_kick(self, conn, event) -> None:
        target_user_id = self.serv.irc_user_id(self.network.name, event.arguments[0])
        self.kick(target_user_id, f"Kicked by {event.source.nick}: {event.arguments[1]}")
        if target_user_id in self.members:
            self.members.remove(target_user_id)

    def on_banlist(self, conn, event) -> None:
        # ban-list replies arrive one entry at a time; buffer until the end event
        parts = list(event.arguments)
        parts.pop(0)
        logging.info(parts)
        self.bans_buffer.append(parts)

    def on_endofbanlist(self, conn, event) -> None:
        bans = self.bans_buffer
        self.bans_buffer = []

        self.send_notice("Current channel bans:")
        for ban in bans:
            # [mask, setter, timestamp]
            bantime = datetime.utcfromtimestamp(int(ban[2])).strftime("%c %Z")
            self.send_notice(f"\t{ban[0]} set by {ban[1]} at {bantime}")

    def on_channelmodeis(self, conn, event) -> None:
        modes = list(event.arguments)
        modes.pop(0)

        self.send_notice(f"Current channel modes: {' '.join(modes)}")
        self.update_key(modes)

    def on_channelcreate(self, conn, event) -> None:
        created = datetime.utcfromtimestamp(int(event.arguments[1])).strftime("%c %Z")
        self.send_notice(f"Channel was created at {created}")
import multiprocessing

import telegram
from flask import (
    abort,
    Flask,
    jsonify,
    redirect,
    request,
)

from spotigram.authorization import retrieve_token_info


def user_link(user):
    """Render a Spotify user id as a Markdown profile link."""
    return f'[{user}](https://open.spotify.com/user/{user})'


def _build_app_and_bot_for(bot_cls):
    """
    Build the Flask app, Telegram bot and update queue for *bot_cls*.

    Routes:
      * ``POST /``          — health-check style JSON response
      * ``GET /``           — redirect to the bot's Telegram page
      * ``GET /callback``   — Spotify OAuth callback; notifies the chat on success
      * ``POST /hook/<token>`` — Telegram webhook; enqueues updates for processing

    Returns the ``(app, bot, update_queue)`` triple.
    """
    app = Flask(__name__)
    bot = bot_cls.bot()
    update_queue = multiprocessing.Queue()

    @app.route('/', methods=['POST'])
    def index():
        return jsonify(status=200)

    @app.route('/', methods=['GET'])
    def redirect_to_bot():
        return redirect("https://telegram.me/random_genre_bot", code=302)

    @app.route('/callback', methods=['GET'])
    def callback():
        token_info, context = retrieve_token_info(
            request.args.get('state'), request.args.get('code')
        )

        if not token_info:
            abort(400, 'Request not valid. Try getting a new login link.')

        bot.send_message(
            chat_id=context['chat_id'],
            # FIX: context["user_id"] reused the enclosing double quote inside the
            # f-string, a SyntaxError before Python 3.12 — use single quotes
            text=f"User {user_link(context['user_id'])} logged in.",
            parse_mode=telegram.ParseMode.MARKDOWN,
            disable_web_page_preview=True,
        )
        return redirect("https://telegram.me/random_genre_bot", code=302)

    @app.route('/hook/' + bot_cls.TOKEN, methods=['POST'])
    def webhook():
        update = telegram.Update.de_json(request.get_json(force=True), bot)
        update_queue.put(update)
        return "OK"

    return app, bot, update_queue
import multiprocessing

import telegram
from flask import (
    abort,
    Flask,
    jsonify,
    redirect,
    request,
)

from spotigram.authorization import retrieve_token_info


def user_link(user):
    """Render a Spotify user id as a Markdown profile link."""
    return f'[{user}](https://open.spotify.com/user/{user})'


def _build_app_and_bot_for(bot_cls):
    """
    Build the Flask app, Telegram bot and update queue for *bot_cls*.

    Routes:
      * ``POST /``          — health-check style JSON response
      * ``GET /``           — redirect to the bot's Telegram page
      * ``GET /callback``   — Spotify OAuth callback; notifies the chat on success
      * ``POST /hook/<token>`` — Telegram webhook; enqueues updates for processing

    Returns the ``(app, bot, update_queue)`` triple.
    """
    app = Flask(__name__)
    bot = bot_cls.bot()
    update_queue = multiprocessing.Queue()

    @app.route('/', methods=['POST'])
    def index():
        return jsonify(status=200)

    @app.route('/', methods=['GET'])
    def redirect_to_bot():
        return redirect("https://telegram.me/random_genre_bot", code=302)

    @app.route('/callback', methods=['GET'])
    def callback():
        # exchange the OAuth state/code for a token; context carries chat metadata
        token_info, context = retrieve_token_info(
            request.args.get('state'), request.args.get('code')
        )

        if not token_info:
            abort(400, 'Request not valid. Try getting a new login link.')

        bot.send_message(
            chat_id=context['chat_id'],
            text=f"User {user_link(context['user_id'])} logged in.",
            parse_mode=telegram.ParseMode.MARKDOWN,
            disable_web_page_preview=True,
        )
        return redirect("https://telegram.me/random_genre_bot", code=302)

    @app.route('/hook/' + bot_cls.TOKEN, methods=['POST'])
    def webhook():
        # Telegram pushes updates here; they are queued for the worker process
        update = telegram.Update.de_json(request.get_json(force=True), bot)
        update_queue.put(update)
        return "OK"

    return app, bot, update_queue
import re

import pandas as pd
from shutil import copyfile

from chinese_shadowing.config import path_data
from chinese_shadowing.config import path_temporary

if __name__ == '__main__':
    # Import an Anki "HSK 1-6" notes export: clean the note fields, extract the
    # HSK level and sentence audio, copy the audio files into path_data under
    # index-based names, and write the cleaned table to notes.csv.
    folder_name = 'HSK 1-6 2012'
    media_location = path_temporary / folder_name
    path_notes = path_temporary.joinpath('notes').with_suffix('.txt')

    columns = [
        'simplified',
        'traditional',
        'pinyin',
        'pinyin_with_number',
        'meaning',
        'part_of_speech',
        'audio',
        'homophone',
        'homograph',
        'sentence_simplified',
        'sentence_traditional',
        'sentence_simplified_cloze',
        'sentence_traditional_cloze',
        'sentence_pinyin',
        'sentence_traditional_pinyin_with_number',
        'sentence_meaning',
        'sentence_audio',
        'sentence_image',
        'tag'
    ]

    keep = [
        'sentence_simplified',
        'sentence_traditional',
        'sentence_pinyin',
        'sentence_meaning',
        'sentence_audio',
        'tag'
    ]

    df = pd.read_csv(path_notes, index_col=0, sep='\t', names=columns)[keep]
    df.dropna(inplace=True)
    df = df.reset_index(drop=True)

    def clean_html(raw_html: str):
        # strip HTML tags left over from the Anki card markup
        cleanr = re.compile('<.*?>')
        cleantext = re.sub(cleanr, '', str(raw_html))
        return cleantext

    df = df.applymap(clean_html)

    def extract_hsk_level(raw_string):
        # tags look like "HSK3" etc.; keep only the level digit
        return re.search('([1-6])', raw_string).group(1)

    df['hsk'] = df['tag'].apply(extract_hsk_level)
    df.drop(columns=['tag'], inplace=True)

    def extract_filename(raw_string):
        # FIX: raw string so "\[" is a literal bracket, not an invalid escape
        return re.search(r'\[sound:(.+?)\]', raw_string).group(1)

    df['sentence_audio'] = df['sentence_audio'].apply(extract_filename)

    df['audio_file'] = df.index
    df['audio_file'] = df['audio_file'].apply(lambda s: str(s) + '.mp3')
    df['old_audio_file'] = df['sentence_audio']
    df['old_audio_file'] = df['old_audio_file'].apply(lambda s: s.replace('_1393816261464', ''))
    df.drop(columns=['sentence_audio'], inplace=True)
    df.columns = [column.replace('sentence_', '') for column in list(df.columns)]

    for index, row in df.iterrows():
        try:
            copyfile(media_location / row['old_audio_file'], path_data / row['audio_file'])
        except FileNotFoundError:
            # FIX: the f-string reused the enclosing single quote around
            # 'old_audio_file' (SyntaxError before Python 3.12)
            print(f'FileNotFoundError: {row["old_audio_file"]}')

    df.drop(columns=['old_audio_file'], inplace=True)

    df.to_csv(path_data/'notes.csv', encoding='utf-8')
    print(df)
import re

import pandas as pd
from shutil import copyfile

from chinese_shadowing.config import path_data
from chinese_shadowing.config import path_temporary

if __name__ == '__main__':
    # Import an Anki "HSK 1-6" notes export: clean the note fields, extract the
    # HSK level and sentence audio, copy the audio files into path_data under
    # index-based names, and write the cleaned table to notes.csv.
    folder_name = 'HSK 1-6 2012'
    media_location = path_temporary / folder_name
    path_notes = path_temporary.joinpath('notes').with_suffix('.txt')

    columns = [
        'simplified',
        'traditional',
        'pinyin',
        'pinyin_with_number',
        'meaning',
        'part_of_speech',
        'audio',
        'homophone',
        'homograph',
        'sentence_simplified',
        'sentence_traditional',
        'sentence_simplified_cloze',
        'sentence_traditional_cloze',
        'sentence_pinyin',
        'sentence_traditional_pinyin_with_number',
        'sentence_meaning',
        'sentence_audio',
        'sentence_image',
        'tag'
    ]

    keep = [
        'sentence_simplified',
        'sentence_traditional',
        'sentence_pinyin',
        'sentence_meaning',
        'sentence_audio',
        'tag'
    ]

    df = pd.read_csv(path_notes, index_col=0, sep='\t', names=columns)[keep]
    df.dropna(inplace=True)
    df = df.reset_index(drop=True)

    def clean_html(raw_html: str):
        # strip HTML tags left over from the Anki card markup
        cleanr = re.compile('<.*?>')
        cleantext = re.sub(cleanr, '', str(raw_html))
        return cleantext

    df = df.applymap(clean_html)

    def extract_hsk_level(raw_string):
        # tags look like "HSK3" etc.; keep only the level digit
        return re.search('([1-6])', raw_string).group(1)

    df['hsk'] = df['tag'].apply(extract_hsk_level)
    df.drop(columns=['tag'], inplace=True)

    def extract_filename(raw_string):
        # FIX: raw string so "\[" is a literal bracket, not an invalid escape
        # sequence (a DeprecationWarning, and a SyntaxWarning on newer Pythons)
        return re.search(r'\[sound:(.+?)\]', raw_string).group(1)

    df['sentence_audio'] = df['sentence_audio'].apply(extract_filename)

    df['audio_file'] = df.index
    df['audio_file'] = df['audio_file'].apply(lambda s: str(s) + '.mp3')
    df['old_audio_file'] = df['sentence_audio']
    df['old_audio_file'] = df['old_audio_file'].apply(lambda s: s.replace('_1393816261464', ''))
    df.drop(columns=['sentence_audio'], inplace=True)
    df.columns = [column.replace('sentence_', '') for column in list(df.columns)]

    for index, row in df.iterrows():
        try:
            copyfile(media_location / row['old_audio_file'], path_data / row['audio_file'])
        except FileNotFoundError:
            print(f'FileNotFoundError: {row["old_audio_file"]}')

    df.drop(columns=['old_audio_file'], inplace=True)

    df.to_csv(path_data/'notes.csv', encoding='utf-8')
    print(df)
# # Copyright (c) 2021 Airbyte, Inc., all rights reserved. # import json import os import random import re import shutil import socket import string import subprocess import sys import threading import time from typing import Any, Dict, List from normalization.destination_type import DestinationType from normalization.transform_config.transform import TransformConfig NORMALIZATION_TEST_TARGET = "NORMALIZATION_TEST_TARGET" NORMALIZATION_TEST_MSSQL_DB_PORT = "NORMALIZATION_TEST_MSSQL_DB_PORT" NORMALIZATION_TEST_MYSQL_DB_PORT = "NORMALIZATION_TEST_MYSQL_DB_PORT" NORMALIZATION_TEST_POSTGRES_DB_PORT = "NORMALIZATION_TEST_POSTGRES_DB_PORT" class DbtIntegrationTest(object): def __init__(self): self.target_schema = "test_normalization" self.container_prefix = f"test_normalization_db_{self.random_string(3)}" self.db_names = [] @staticmethod def generate_random_string(prefix: str) -> str: return prefix + DbtIntegrationTest.random_string(5) @staticmethod def random_string(length: int) -> str: return "".join(random.choice(string.ascii_lowercase) for i in range(length)) def set_target_schema(self, target_schema: str): self.target_schema = target_schema def setup_db(self, destinations_to_test: List[str]): if DestinationType.POSTGRES.value in destinations_to_test: self.setup_postgres_db() if DestinationType.MYSQL.value in destinations_to_test: self.setup_mysql_db() if DestinationType.MSSQL.value in destinations_to_test: self.setup_mssql_db() def setup_postgres_db(self): start_db = True if os.getenv(NORMALIZATION_TEST_POSTGRES_DB_PORT): port = int(os.getenv(NORMALIZATION_TEST_POSTGRES_DB_PORT)) start_db = False else: port = self.find_free_port() config = { "host": "localhost", "username": "integration-tests", "password": "integration-tests", "port": port, "database": "postgres", "schema": self.target_schema, } if start_db: self.db_names.append("postgres") print("Starting localhost postgres container for tests") commands = [ "docker", "run", "--rm", "--name", 
f"{self.container_prefix}_postgres", "-e", f"POSTGRES_USER={config["username"]}", "-e", f"POSTGRES_PASSWORD={config["password"]}", "-p", f"{config["port"]}:5432", "-d", "marcosmarxm/postgres-ssl:dev", "-c", "ssl=on", "-c", "ssl_cert_file=/var/lib/postgresql/server.crt", "-c", "ssl_key_file=/var/lib/postgresql/server.key", ] print("Executing: ", " ".join(commands)) subprocess.call(commands) print("....Waiting for Postgres DB to start...15 sec") time.sleep(15) if not os.path.exists("../secrets"): os.makedirs("../secrets") with open("../secrets/postgres.json", "w") as fh: fh.write(json.dumps(config)) def setup_mysql_db(self): start_db = True if os.getenv(NORMALIZATION_TEST_MYSQL_DB_PORT): port = int(os.getenv(NORMALIZATION_TEST_MYSQL_DB_PORT)) start_db = False else: port = self.find_free_port() config = { "host": "localhost", "port": port, "database": self.target_schema, "username": "root", "password": "", } if start_db: self.db_names.append("mysql") print("Starting localhost mysql container for tests") commands = [ "docker", "run", "--rm", "--name", f"{self.container_prefix}_mysql", "-e", "MYSQL_ALLOW_EMPTY_PASSWORD=yes", "-e", "MYSQL_INITDB_SKIP_TZINFO=yes", "-e", f"MYSQL_DATABASE={config["database"]}", "-p", f"{config["port"]}:3306", "-d", "mysql", ] print("Executing: ", " ".join(commands)) subprocess.call(commands) print("....Waiting for MySQL DB to start...15 sec") time.sleep(15) if not os.path.exists("../secrets"): os.makedirs("../secrets") with open("../secrets/mysql.json", "w") as fh: fh.write(json.dumps(config)) def setup_mssql_db(self): start_db = True if os.getenv(NORMALIZATION_TEST_MSSQL_DB_PORT): port = int(os.getenv(NORMALIZATION_TEST_MSSQL_DB_PORT)) start_db = False else: port = self.find_free_port() config = { "host": "localhost", "username": "SA", "password": "MyStr0ngP@ssw0rd", "port": port, "database": self.target_schema, "schema": self.target_schema, } if start_db: self.db_names.append("mssql") print("Starting localhost MS SQL Server container for 
tests") command_start_container = [ "docker", "run", "--rm", "--name", f"{self.container_prefix}_mssql", "-h", f"{self.container_prefix}_mssql", "-e", "ACCEPT_EULA='Y'", "-e", f"SA_PASSWORD='{config["password"]}'", "-e", "MSSQL_PID='Standard'", "-p", f"{config["port"]}:1433", "-d", "mcr.microsoft.com/mssql/server:2019-GA-ubuntu-16.04", ] # cmds & parameters cmd_start_container = " ".join(command_start_container) wait_sec = 30 # run the docker container print("Executing: ", cmd_start_container) subprocess.check_call(cmd_start_container, shell=True) # wait for service is available print(f"....Waiting for MS SQL Server to start...{wait_sec} sec") time.sleep(wait_sec) # Run additional commands to prepare the table command_create_db = [ "docker", "exec", f"{self.container_prefix}_mssql", "/opt/mssql-tools/bin/sqlcmd", "-S", config["host"], "-U", config["username"], "-P", config["password"], "-Q", f"CREATE DATABASE [{config["database"]}]", ] # create test db print("Executing: ", " ".join(command_create_db)) subprocess.call(command_create_db) if not os.path.exists("../secrets"): os.makedirs("../secrets") with open("../secrets/mssql.json", "w") as fh: fh.write(json.dumps(config)) @staticmethod def find_free_port(): """ Find an unused port to create a database listening on localhost to run destination-postgres """ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(("", 0)) addr = s.getsockname() s.close() return addr[1] def tear_down_db(self): for db_name in self.db_names: print(f"Stopping localhost {db_name} container for tests") try: subprocess.call(["docker", "kill", f"{self.container_prefix}_{db_name}"]) except Exception as e: print(f"WARN: Exception while shutting down {db_name}: {e}") @staticmethod def change_current_test_dir(request): # This makes the test run whether it is executed from the tests folder (with pytest/gradle) # or from the base-normalization folder (through pycharm) integration_tests_dir = os.path.join(request.fspath.dirname, 
"integration_tests") if os.path.exists(integration_tests_dir): os.chdir(integration_tests_dir) else: os.chdir(request.fspath.dirname) def generate_profile_yaml_file(self, destination_type: DestinationType, test_root_dir: str) -> Dict[str, Any]: """ Each destination requires different settings to connect to. This step generates the adequate profiles.yml as described here: https://docs.getdbt.com/reference/profiles.yml """ config_generator = TransformConfig() profiles_config = config_generator.read_json_config(f"../secrets/{destination_type.value.lower()}.json") # Adapt credential file to look like destination config.json if destination_type.value == DestinationType.BIGQUERY.value: credentials = profiles_config["basic_bigquery_config"] profiles_config = { "credentials_json": json.dumps(credentials), "dataset_id": self.target_schema, "project_id": credentials["project_id"], } elif destination_type.value == DestinationType.MYSQL.value: profiles_config["database"] = self.target_schema else: profiles_config["schema"] = self.target_schema profiles_yaml = config_generator.transform(destination_type, profiles_config) config_generator.write_yaml_config(test_root_dir, profiles_yaml, "profiles.yml") return profiles_config @staticmethod def run_destination_process(message_file: str, test_root_dir: str, commands: List[str]): print("Executing: ", " ".join(commands)) with open(os.path.join(test_root_dir, "destination_output.log"), "ab") as f: process = subprocess.Popen(commands, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) def writer(): if os.path.exists(message_file): with open(message_file, "rb") as input_data: while True: line = input_data.readline() if not line: break process.stdin.write(line) process.stdin.close() thread = threading.Thread(target=writer) thread.start() for line in iter(process.stdout.readline, b""): f.write(line) sys.stdout.write(line.decode("utf-8")) thread.join() process.wait() return process.returncode == 0 @staticmethod def 
get_normalization_image(destination_type: DestinationType) -> str: if DestinationType.MSSQL.value == destination_type.value: return "airbyte/normalization-mssql:dev" elif DestinationType.MYSQL.value == destination_type.value: return "airbyte/normalization-mysql:dev" elif DestinationType.ORACLE.value == destination_type.value: return "airbyte/normalization-oracle:dev" else: return "airbyte/normalization:dev" def dbt_run(self, destination_type: DestinationType, test_root_dir: str): """ Run the dbt CLI to perform transformations on the test raw data in the destination """ normalization_image: str = self.get_normalization_image(destination_type) # Perform sanity check on dbt project settings assert self.run_check_dbt_command(normalization_image, "debug", test_root_dir) assert self.run_check_dbt_command(normalization_image, "deps", test_root_dir) final_sql_files = os.path.join(test_root_dir, "final") shutil.rmtree(final_sql_files, ignore_errors=True) # Compile dbt models files into destination sql dialect, then run the transformation queries assert self.run_check_dbt_command(normalization_image, "run", test_root_dir) @staticmethod def run_check_dbt_command(normalization_image: str, command: str, cwd: str) -> bool: """ Run dbt subprocess while checking and counting for "ERROR", "FAIL" or "WARNING" printed in its outputs """ error_count = 0 commands = [ "docker", "run", "--rm", "--init", "-v", f"{cwd}:/workspace", "-v", f"{cwd}/build:/build", "-v", f"{cwd}/final:/build/run/airbyte_utils/models/generated", "-v", "/tmp:/tmp", "--network", "host", "--entrypoint", "/usr/local/bin/dbt", "-i", normalization_image, command, "--profiles-dir=/workspace", "--project-dir=/workspace", ] print("Executing: ", " ".join(commands)) print(f"Equivalent to: dbt {command} --profiles-dir={cwd} --project-dir={cwd}") with open(os.path.join(cwd, "dbt_output.log"), "ab") as f: process = subprocess.Popen(commands, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ) for line 
in iter(lambda: process.stdout.readline(), b""): f.write(line) str_line = line.decode("utf-8") sys.stdout.write(str_line) # keywords to match lines as signaling errors if "ERROR" in str_line or "FAIL" in str_line or "WARNING" in str_line: # exception keywords in lines to ignore as errors (such as summary or expected warnings) is_exception = False for except_clause in [ "Done.", # DBT Summary "PASS=", # DBT Summary "Nothing to do.", # When no schema/data tests are setup "Configuration paths exist in your dbt_project.yml", # When no cte / view are generated "Error loading config file: .dockercfg: $HOME is not defined", # ignore warning "depends on a node named 'disabled_test' which was not found", # Tests throwing warning because it is disabled ]: if except_clause in str_line: is_exception = True break if not is_exception: # count lines signaling an error/failure/warning error_count += 1 process.wait() message = ( f"{' '.join(commands)}\n\tterminated with return code {process.returncode} " f"with {error_count} 'Error/Warning/Fail' mention(s)." 
) print(message) assert error_count == 0, message assert process.returncode == 0, message if error_count > 0: return False return process.returncode == 0 @staticmethod def copy_replace(src, dst, pattern=None, replace_value=None): """ Copies a file from src to dst replacing pattern by replace_value Parameters ---------- src : string Path to the source filename to copy from dst : string Path to the output filename to copy to pattern list of Patterns to replace inside the src file replace_value list of Values to replace by in the dst file """ file1 = open(src, "r") if isinstance(src, str) else src file2 = open(dst, "w") if isinstance(dst, str) else dst pattern = [pattern] if isinstance(pattern, str) else pattern replace_value = [replace_value] if isinstance(replace_value, str) else replace_value if replace_value and pattern: if len(replace_value) != len(pattern): raise Exception("Invalid parameters: pattern and replace_value" " have different sizes.") rules = [(re.compile(regex, re.IGNORECASE), value) for regex, value in zip(pattern, replace_value)] else: rules = [] for line in file1: if rules: for rule in rules: line = re.sub(rule[0], rule[1], line) file2.write(line) if isinstance(src, str): file1.close() if isinstance(dst, str): file2.close() @staticmethod def get_test_targets() -> List[str]: """ Returns a list of destinations to run tests on. if the environment variable NORMALIZATION_TEST_TARGET is set with a comma separated list of destination names, then the tests are run only on that subsets of destinations Otherwise tests are run against all destinations """ if os.getenv(NORMALIZATION_TEST_TARGET): target_str = os.getenv(NORMALIZATION_TEST_TARGET) return [d.value for d in {DestinationType.from_string(s) for s in target_str.split(",")}] else: return [d.value for d in DestinationType]
# # Copyright (c) 2021 Airbyte, Inc., all rights reserved. # import json import os import random import re import shutil import socket import string import subprocess import sys import threading import time from typing import Any, Dict, List from normalization.destination_type import DestinationType from normalization.transform_config.transform import TransformConfig NORMALIZATION_TEST_TARGET = "NORMALIZATION_TEST_TARGET" NORMALIZATION_TEST_MSSQL_DB_PORT = "NORMALIZATION_TEST_MSSQL_DB_PORT" NORMALIZATION_TEST_MYSQL_DB_PORT = "NORMALIZATION_TEST_MYSQL_DB_PORT" NORMALIZATION_TEST_POSTGRES_DB_PORT = "NORMALIZATION_TEST_POSTGRES_DB_PORT" class DbtIntegrationTest(object): def __init__(self): self.target_schema = "test_normalization" self.container_prefix = f"test_normalization_db_{self.random_string(3)}" self.db_names = [] @staticmethod def generate_random_string(prefix: str) -> str: return prefix + DbtIntegrationTest.random_string(5) @staticmethod def random_string(length: int) -> str: return "".join(random.choice(string.ascii_lowercase) for i in range(length)) def set_target_schema(self, target_schema: str): self.target_schema = target_schema def setup_db(self, destinations_to_test: List[str]): if DestinationType.POSTGRES.value in destinations_to_test: self.setup_postgres_db() if DestinationType.MYSQL.value in destinations_to_test: self.setup_mysql_db() if DestinationType.MSSQL.value in destinations_to_test: self.setup_mssql_db() def setup_postgres_db(self): start_db = True if os.getenv(NORMALIZATION_TEST_POSTGRES_DB_PORT): port = int(os.getenv(NORMALIZATION_TEST_POSTGRES_DB_PORT)) start_db = False else: port = self.find_free_port() config = { "host": "localhost", "username": "integration-tests", "password": "integration-tests", "port": port, "database": "postgres", "schema": self.target_schema, } if start_db: self.db_names.append("postgres") print("Starting localhost postgres container for tests") commands = [ "docker", "run", "--rm", "--name", 
f"{self.container_prefix}_postgres", "-e", f"POSTGRES_USER={config['username']}", "-e", f"POSTGRES_PASSWORD={config['password']}", "-p", f"{config['port']}:5432", "-d", "marcosmarxm/postgres-ssl:dev", "-c", "ssl=on", "-c", "ssl_cert_file=/var/lib/postgresql/server.crt", "-c", "ssl_key_file=/var/lib/postgresql/server.key", ] print("Executing: ", " ".join(commands)) subprocess.call(commands) print("....Waiting for Postgres DB to start...15 sec") time.sleep(15) if not os.path.exists("../secrets"): os.makedirs("../secrets") with open("../secrets/postgres.json", "w") as fh: fh.write(json.dumps(config)) def setup_mysql_db(self): start_db = True if os.getenv(NORMALIZATION_TEST_MYSQL_DB_PORT): port = int(os.getenv(NORMALIZATION_TEST_MYSQL_DB_PORT)) start_db = False else: port = self.find_free_port() config = { "host": "localhost", "port": port, "database": self.target_schema, "username": "root", "password": "", } if start_db: self.db_names.append("mysql") print("Starting localhost mysql container for tests") commands = [ "docker", "run", "--rm", "--name", f"{self.container_prefix}_mysql", "-e", "MYSQL_ALLOW_EMPTY_PASSWORD=yes", "-e", "MYSQL_INITDB_SKIP_TZINFO=yes", "-e", f"MYSQL_DATABASE={config['database']}", "-p", f"{config['port']}:3306", "-d", "mysql", ] print("Executing: ", " ".join(commands)) subprocess.call(commands) print("....Waiting for MySQL DB to start...15 sec") time.sleep(15) if not os.path.exists("../secrets"): os.makedirs("../secrets") with open("../secrets/mysql.json", "w") as fh: fh.write(json.dumps(config)) def setup_mssql_db(self): start_db = True if os.getenv(NORMALIZATION_TEST_MSSQL_DB_PORT): port = int(os.getenv(NORMALIZATION_TEST_MSSQL_DB_PORT)) start_db = False else: port = self.find_free_port() config = { "host": "localhost", "username": "SA", "password": "MyStr0ngP@ssw0rd", "port": port, "database": self.target_schema, "schema": self.target_schema, } if start_db: self.db_names.append("mssql") print("Starting localhost MS SQL Server container for 
tests") command_start_container = [ "docker", "run", "--rm", "--name", f"{self.container_prefix}_mssql", "-h", f"{self.container_prefix}_mssql", "-e", "ACCEPT_EULA='Y'", "-e", f"SA_PASSWORD='{config['password']}'", "-e", "MSSQL_PID='Standard'", "-p", f"{config['port']}:1433", "-d", "mcr.microsoft.com/mssql/server:2019-GA-ubuntu-16.04", ] # cmds & parameters cmd_start_container = " ".join(command_start_container) wait_sec = 30 # run the docker container print("Executing: ", cmd_start_container) subprocess.check_call(cmd_start_container, shell=True) # wait for service is available print(f"....Waiting for MS SQL Server to start...{wait_sec} sec") time.sleep(wait_sec) # Run additional commands to prepare the table command_create_db = [ "docker", "exec", f"{self.container_prefix}_mssql", "/opt/mssql-tools/bin/sqlcmd", "-S", config["host"], "-U", config["username"], "-P", config["password"], "-Q", f"CREATE DATABASE [{config['database']}]", ] # create test db print("Executing: ", " ".join(command_create_db)) subprocess.call(command_create_db) if not os.path.exists("../secrets"): os.makedirs("../secrets") with open("../secrets/mssql.json", "w") as fh: fh.write(json.dumps(config)) @staticmethod def find_free_port(): """ Find an unused port to create a database listening on localhost to run destination-postgres """ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(("", 0)) addr = s.getsockname() s.close() return addr[1] def tear_down_db(self): for db_name in self.db_names: print(f"Stopping localhost {db_name} container for tests") try: subprocess.call(["docker", "kill", f"{self.container_prefix}_{db_name}"]) except Exception as e: print(f"WARN: Exception while shutting down {db_name}: {e}") @staticmethod def change_current_test_dir(request): # This makes the test run whether it is executed from the tests folder (with pytest/gradle) # or from the base-normalization folder (through pycharm) integration_tests_dir = os.path.join(request.fspath.dirname, 
"integration_tests") if os.path.exists(integration_tests_dir): os.chdir(integration_tests_dir) else: os.chdir(request.fspath.dirname) def generate_profile_yaml_file(self, destination_type: DestinationType, test_root_dir: str) -> Dict[str, Any]: """ Each destination requires different settings to connect to. This step generates the adequate profiles.yml as described here: https://docs.getdbt.com/reference/profiles.yml """ config_generator = TransformConfig() profiles_config = config_generator.read_json_config(f"../secrets/{destination_type.value.lower()}.json") # Adapt credential file to look like destination config.json if destination_type.value == DestinationType.BIGQUERY.value: credentials = profiles_config["basic_bigquery_config"] profiles_config = { "credentials_json": json.dumps(credentials), "dataset_id": self.target_schema, "project_id": credentials["project_id"], } elif destination_type.value == DestinationType.MYSQL.value: profiles_config["database"] = self.target_schema else: profiles_config["schema"] = self.target_schema profiles_yaml = config_generator.transform(destination_type, profiles_config) config_generator.write_yaml_config(test_root_dir, profiles_yaml, "profiles.yml") return profiles_config @staticmethod def run_destination_process(message_file: str, test_root_dir: str, commands: List[str]): print("Executing: ", " ".join(commands)) with open(os.path.join(test_root_dir, "destination_output.log"), "ab") as f: process = subprocess.Popen(commands, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) def writer(): if os.path.exists(message_file): with open(message_file, "rb") as input_data: while True: line = input_data.readline() if not line: break process.stdin.write(line) process.stdin.close() thread = threading.Thread(target=writer) thread.start() for line in iter(process.stdout.readline, b""): f.write(line) sys.stdout.write(line.decode("utf-8")) thread.join() process.wait() return process.returncode == 0 @staticmethod def 
get_normalization_image(destination_type: DestinationType) -> str: if DestinationType.MSSQL.value == destination_type.value: return "airbyte/normalization-mssql:dev" elif DestinationType.MYSQL.value == destination_type.value: return "airbyte/normalization-mysql:dev" elif DestinationType.ORACLE.value == destination_type.value: return "airbyte/normalization-oracle:dev" else: return "airbyte/normalization:dev" def dbt_run(self, destination_type: DestinationType, test_root_dir: str): """ Run the dbt CLI to perform transformations on the test raw data in the destination """ normalization_image: str = self.get_normalization_image(destination_type) # Perform sanity check on dbt project settings assert self.run_check_dbt_command(normalization_image, "debug", test_root_dir) assert self.run_check_dbt_command(normalization_image, "deps", test_root_dir) final_sql_files = os.path.join(test_root_dir, "final") shutil.rmtree(final_sql_files, ignore_errors=True) # Compile dbt models files into destination sql dialect, then run the transformation queries assert self.run_check_dbt_command(normalization_image, "run", test_root_dir) @staticmethod def run_check_dbt_command(normalization_image: str, command: str, cwd: str) -> bool: """ Run dbt subprocess while checking and counting for "ERROR", "FAIL" or "WARNING" printed in its outputs """ error_count = 0 commands = [ "docker", "run", "--rm", "--init", "-v", f"{cwd}:/workspace", "-v", f"{cwd}/build:/build", "-v", f"{cwd}/final:/build/run/airbyte_utils/models/generated", "-v", "/tmp:/tmp", "--network", "host", "--entrypoint", "/usr/local/bin/dbt", "-i", normalization_image, command, "--profiles-dir=/workspace", "--project-dir=/workspace", ] print("Executing: ", " ".join(commands)) print(f"Equivalent to: dbt {command} --profiles-dir={cwd} --project-dir={cwd}") with open(os.path.join(cwd, "dbt_output.log"), "ab") as f: process = subprocess.Popen(commands, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ) for line 
in iter(lambda: process.stdout.readline(), b""): f.write(line) str_line = line.decode("utf-8") sys.stdout.write(str_line) # keywords to match lines as signaling errors if "ERROR" in str_line or "FAIL" in str_line or "WARNING" in str_line: # exception keywords in lines to ignore as errors (such as summary or expected warnings) is_exception = False for except_clause in [ "Done.", # DBT Summary "PASS=", # DBT Summary "Nothing to do.", # When no schema/data tests are setup "Configuration paths exist in your dbt_project.yml", # When no cte / view are generated "Error loading config file: .dockercfg: $HOME is not defined", # ignore warning "depends on a node named 'disabled_test' which was not found", # Tests throwing warning because it is disabled ]: if except_clause in str_line: is_exception = True break if not is_exception: # count lines signaling an error/failure/warning error_count += 1 process.wait() message = ( f"{' '.join(commands)}\n\tterminated with return code {process.returncode} " f"with {error_count} 'Error/Warning/Fail' mention(s)." 
) print(message) assert error_count == 0, message assert process.returncode == 0, message if error_count > 0: return False return process.returncode == 0 @staticmethod def copy_replace(src, dst, pattern=None, replace_value=None): """ Copies a file from src to dst replacing pattern by replace_value Parameters ---------- src : string Path to the source filename to copy from dst : string Path to the output filename to copy to pattern list of Patterns to replace inside the src file replace_value list of Values to replace by in the dst file """ file1 = open(src, "r") if isinstance(src, str) else src file2 = open(dst, "w") if isinstance(dst, str) else dst pattern = [pattern] if isinstance(pattern, str) else pattern replace_value = [replace_value] if isinstance(replace_value, str) else replace_value if replace_value and pattern: if len(replace_value) != len(pattern): raise Exception("Invalid parameters: pattern and replace_value" " have different sizes.") rules = [(re.compile(regex, re.IGNORECASE), value) for regex, value in zip(pattern, replace_value)] else: rules = [] for line in file1: if rules: for rule in rules: line = re.sub(rule[0], rule[1], line) file2.write(line) if isinstance(src, str): file1.close() if isinstance(dst, str): file2.close() @staticmethod def get_test_targets() -> List[str]: """ Returns a list of destinations to run tests on. if the environment variable NORMALIZATION_TEST_TARGET is set with a comma separated list of destination names, then the tests are run only on that subsets of destinations Otherwise tests are run against all destinations """ if os.getenv(NORMALIZATION_TEST_TARGET): target_str = os.getenv(NORMALIZATION_TEST_TARGET) return [d.value for d in {DestinationType.from_string(s) for s in target_str.split(",")}] else: return [d.value for d in DestinationType]
# ---------------------------------------------------------------------------# # DRACOON API Python examples # Perform bulk edit permissions on data rooms one can manage # Requires dracoon package # Author: Octavio Simone, 03.08.2021 # ---------------------------------------------------------------------------# from dracoon import core, nodes import sys import getpass # replace with client id from OAuth app - for Cloud you can use dracoon_legacy_scripting if enabled in apps clientID = 'xxxxx' # replace with client secret - dracoon_legacy_scripting has no secret, it can be omitted as parameter clientSecret = 'xxxxxx' baseURL = 'https://dracoon.team' # replace with own DRACOON url USER_ID = 99 # create object to authenticate and send requests - client secret is optional (e.g. for use with dracoon_legacy_scripting) my_dracoon = core.Dracoon(clientID, clientSecret) my_dracoon.set_URLs(baseURL) # get user login credentials (basic, AD possible) RO_user = input('Username: ') RO_password = getpass.getpass('Password: ') # try to authenticate - exit if request fails (timeout, connection error..) 
try: login_response = my_dracoon.basic_auth(RO_user, RO_password) except core.requests.exceptions.RequestException as e: raise SystemExit(e) # authenticate or exit if authentication fails if login_response.status_code == 200: print('Login successful: ' + str(login_response.status_code)) else: print(login_response.status_code) if login_response.json()["error"] and login_response.json()["error_description"]: print(login_response.json()["error"]) print(login_response.json()["error_description"]) else: print(login_response.text) sys.exit() # exit script if login not successful # create request to get search for all folders in a parent room r = nodes.search_nodes('*', parentID=0, depthLevel=-1, offset=0, filter='type:eq:room') # perform request with authenticated DRACOON instance try: room_response = my_dracoon.get(r) except core.requests.exceptions.RequestException as e: raise SystemExit(e) room_list = [] # if call successful, check populate list if room_response.status_code == 200: # add rooms to list if manage permissions are available for room in room_response.json()['items']: if room["permissions"]["manage"] == True: room_list.append(room) #get total amount of rooms total_rooms = room_response.json()['range']['total'] #in case rooms exceed 500 items, get reamining rooms via offset if total_rooms > 500: for offset in range(500, total_rooms, 500): r = nodes.search_nodes('*', parentID=0, depthLevel=-1, offset=offset, filter='type:eq:room') room_response = my_dracoon.get(r) for room in room_response.json()['items']: if room["permissions"]["manage"] == True: room_list.append(room) print(f"{len(room_list)} rooms to process.") # set default retention period to 30 days for each room for room in room_list: params = { "items":[ { "id": USER_ID, "permissions": { "manage": True, "read": True, "create": True, "change": True, "delete": True, "manageDownloadShare": True, "manageUploadShare": True, "readRecycleBin": True, "restoreRecycleBin": True, "deleteRecycleBin": True } } ] } 
r = nodes.update_room_users(nodeID=room["id"], params=params) config_response = my_dracoon.put(r) if config_response.status_code == 200: print(f'Successfully added user with id {USER_ID} as admin to room {room['name']}') else: print(f'Error setting admin permission in room {room['name']}') print(f'Details: {config_response.text}') print(f'Status: {config_response.status_code}') continue
# ---------------------------------------------------------------------------# # DRACOON API Python examples # Perform bulk edit permissions on data rooms one can manage # Requires dracoon package # Author: Octavio Simone, 03.08.2021 # ---------------------------------------------------------------------------# from dracoon import core, nodes import sys import getpass # replace with client id from OAuth app - for Cloud you can use dracoon_legacy_scripting if enabled in apps clientID = 'xxxxx' # replace with client secret - dracoon_legacy_scripting has no secret, it can be omitted as parameter clientSecret = 'xxxxxx' baseURL = 'https://dracoon.team' # replace with own DRACOON url USER_ID = 99 # create object to authenticate and send requests - client secret is optional (e.g. for use with dracoon_legacy_scripting) my_dracoon = core.Dracoon(clientID, clientSecret) my_dracoon.set_URLs(baseURL) # get user login credentials (basic, AD possible) RO_user = input('Username: ') RO_password = getpass.getpass('Password: ') # try to authenticate - exit if request fails (timeout, connection error..) 
try: login_response = my_dracoon.basic_auth(RO_user, RO_password) except core.requests.exceptions.RequestException as e: raise SystemExit(e) # authenticate or exit if authentication fails if login_response.status_code == 200: print('Login successful: ' + str(login_response.status_code)) else: print(login_response.status_code) if login_response.json()["error"] and login_response.json()["error_description"]: print(login_response.json()["error"]) print(login_response.json()["error_description"]) else: print(login_response.text) sys.exit() # exit script if login not successful # create request to get search for all folders in a parent room r = nodes.search_nodes('*', parentID=0, depthLevel=-1, offset=0, filter='type:eq:room') # perform request with authenticated DRACOON instance try: room_response = my_dracoon.get(r) except core.requests.exceptions.RequestException as e: raise SystemExit(e) room_list = [] # if call successful, check populate list if room_response.status_code == 200: # add rooms to list if manage permissions are available for room in room_response.json()['items']: if room["permissions"]["manage"] == True: room_list.append(room) #get total amount of rooms total_rooms = room_response.json()['range']['total'] #in case rooms exceed 500 items, get reamining rooms via offset if total_rooms > 500: for offset in range(500, total_rooms, 500): r = nodes.search_nodes('*', parentID=0, depthLevel=-1, offset=offset, filter='type:eq:room') room_response = my_dracoon.get(r) for room in room_response.json()['items']: if room["permissions"]["manage"] == True: room_list.append(room) print(f"{len(room_list)} rooms to process.") # set default retention period to 30 days for each room for room in room_list: params = { "items":[ { "id": USER_ID, "permissions": { "manage": True, "read": True, "create": True, "change": True, "delete": True, "manageDownloadShare": True, "manageUploadShare": True, "readRecycleBin": True, "restoreRecycleBin": True, "deleteRecycleBin": True } } ] } 
r = nodes.update_room_users(nodeID=room["id"], params=params) config_response = my_dracoon.put(r) if config_response.status_code == 200: print(f'Successfully added user with id {USER_ID} as admin to room {room["name"]}') else: print(f'Error setting admin permission in room {room["name"]}') print(f'Details: {config_response.text}') print(f'Status: {config_response.status_code}') continue
import h3 import json from fuzzywuzzy import fuzz from pyspark.sql.functions import udf from pyspark.sql.types import ArrayType, DoubleType, FloatType, IntegerType, StringType, StructField, StructType from shapely.geometry import shape DEFAULT_RESOLUTION = 11 # Get H3 index for coordinates pair @udf(returnType=StringType()) def get_h3_index(lat: str, lng: str, resolution): try: # noinspection PyUnresolvedReferences return h3.geo_to_h3(float(lat), float(lng), resolution) except TypeError: return None # Get number of H3 cells in between to given cells of the same resolution @udf(returnType=IntegerType()) def get_h3_distance(h1: str, h2: str, default_value): try: # noinspection PyUnresolvedReferences return h3.h3_distance(h1, h2) except TypeError: return None except h3.H3ValueError: return default_value # Get parent index for a specific resolution @udf(returnType=StringType()) def h3_to_parent(h3_index: str, resolution: int): # noinspection PyUnresolvedReferences return h3.h3_to_parent(h3_index, resolution) # Create a GeoJSON string based on an array of coordinates @udf(returnType=StringType()) def create_geo_json_based_on_coordinates(coordinates: [[float]]): if not coordinates or len(coordinates) < 1: return last_index = len(coordinates) - 1 geo_json_type = 'Polygon' if (coordinates[0][0] != coordinates[last_index][0]) or (coordinates[0][1] != coordinates[last_index][1]): geo_json_type = 'LineString' geo_json_coordinates = [coordinates] if geo_json_type == 'Polygon' else coordinates geo_json = json.dumps(dict(type=geo_json_type, coordinates=geo_json_coordinates)) if shape(json.loads(geo_json)).is_valid: return geo_json return None # Get the centroid of a GeoJSON string @udf(returnType=StructType([ StructField(name='latitude', dataType=FloatType()), StructField(name='longitude', dataType=FloatType()) ])) def get_centroid_of_geo_json(geo_json: str): if geo_json: geo_json = json.loads(geo_json) # noinspection PyBroadException try: centroid = shape(geo_json).centroid 
return dict(latitude=centroid.y, longitude=centroid.x) except Exception: return # Get a similarity score for two strings @udf(returnType=IntegerType()) def get_string_distance(compare_str: str, base_str: str, alternative_base_str: str): if base_str: return fuzz.token_set_ratio(base_str, compare_str) return fuzz.token_set_ratio(alternative_base_str, compare_str) # Map a list of key-value-pairs to a list of strings @udf(returnType=ArrayType(elementType=StringType())) def concat_list_of_key_value_pairs(tags): return list(map(lambda t: f'{t['key']}={t['value']}', tags)) # Calculate the confidence of a Google result based on the name and H3 distance""" @udf(returnType=DoubleType()) def get_confidence_based_h3_and_name_distance(h3_distance: int, name_distance: int, max_h3_distance: int): def get_h3_confidence(d): if d <= 25: return 1 return 1 - d / max_h3_distance if d < max_h3_distance else 0 def get_name_confidence(d): return d / 100 h3_confidence = get_h3_confidence(h3_distance) if h3_distance else None name_confidence = get_name_confidence(name_distance) return (h3_confidence * (2 / 3) + name_confidence * (1 / 3)) if h3_confidence else name_confidence # Based on the confidence build the POI id using the H3 index and OSM id @udf(returnType=StringType()) def build_poi_id_based_on_confidence(confidence, h3_index_google, h3_index_osm, osm_id): if confidence and confidence >= 0.9: return f'{h3_index_google}_{osm_id}' return f'{h3_index_osm}_{osm_id}'
import h3 import json from fuzzywuzzy import fuzz from pyspark.sql.functions import udf from pyspark.sql.types import ArrayType, DoubleType, FloatType, IntegerType, StringType, StructField, StructType from shapely.geometry import shape DEFAULT_RESOLUTION = 11 # Get H3 index for coordinates pair @udf(returnType=StringType()) def get_h3_index(lat: str, lng: str, resolution): try: # noinspection PyUnresolvedReferences return h3.geo_to_h3(float(lat), float(lng), resolution) except TypeError: return None # Get number of H3 cells in between to given cells of the same resolution @udf(returnType=IntegerType()) def get_h3_distance(h1: str, h2: str, default_value): try: # noinspection PyUnresolvedReferences return h3.h3_distance(h1, h2) except TypeError: return None except h3.H3ValueError: return default_value # Get parent index for a specific resolution @udf(returnType=StringType()) def h3_to_parent(h3_index: str, resolution: int): # noinspection PyUnresolvedReferences return h3.h3_to_parent(h3_index, resolution) # Create a GeoJSON string based on an array of coordinates @udf(returnType=StringType()) def create_geo_json_based_on_coordinates(coordinates: [[float]]): if not coordinates or len(coordinates) < 1: return last_index = len(coordinates) - 1 geo_json_type = 'Polygon' if (coordinates[0][0] != coordinates[last_index][0]) or (coordinates[0][1] != coordinates[last_index][1]): geo_json_type = 'LineString' geo_json_coordinates = [coordinates] if geo_json_type == 'Polygon' else coordinates geo_json = json.dumps(dict(type=geo_json_type, coordinates=geo_json_coordinates)) if shape(json.loads(geo_json)).is_valid: return geo_json return None # Get the centroid of a GeoJSON string @udf(returnType=StructType([ StructField(name='latitude', dataType=FloatType()), StructField(name='longitude', dataType=FloatType()) ])) def get_centroid_of_geo_json(geo_json: str): if geo_json: geo_json = json.loads(geo_json) # noinspection PyBroadException try: centroid = shape(geo_json).centroid 
return dict(latitude=centroid.y, longitude=centroid.x) except Exception: return # Get a similarity score for two strings @udf(returnType=IntegerType()) def get_string_distance(compare_str: str, base_str: str, alternative_base_str: str): if base_str: return fuzz.token_set_ratio(base_str, compare_str) return fuzz.token_set_ratio(alternative_base_str, compare_str) # Map a list of key-value-pairs to a list of strings @udf(returnType=ArrayType(elementType=StringType())) def concat_list_of_key_value_pairs(tags): return list(map(lambda t: f'{t["key"]}={t["value"]}', tags)) # Calculate the confidence of a Google result based on the name and H3 distance""" @udf(returnType=DoubleType()) def get_confidence_based_h3_and_name_distance(h3_distance: int, name_distance: int, max_h3_distance: int): def get_h3_confidence(d): if d <= 25: return 1 return 1 - d / max_h3_distance if d < max_h3_distance else 0 def get_name_confidence(d): return d / 100 h3_confidence = get_h3_confidence(h3_distance) if h3_distance else None name_confidence = get_name_confidence(name_distance) return (h3_confidence * (2 / 3) + name_confidence * (1 / 3)) if h3_confidence else name_confidence # Based on the confidence build the POI id using the H3 index and OSM id @udf(returnType=StringType()) def build_poi_id_based_on_confidence(confidence, h3_index_google, h3_index_osm, osm_id): if confidence and confidence >= 0.9: return f'{h3_index_google}_{osm_id}' return f'{h3_index_osm}_{osm_id}'
"""Kamereon API.""" import logging from json import dumps as json_dumps from typing import Any from typing import cast from typing import Dict from typing import List from typing import Optional from warnings import warn import aiohttp from marshmallow.schema import Schema from . import models from . import schemas _LOGGER = logging.getLogger(__name__) _KCA_GET_ENDPOINTS: Dict[str, Any] = { "": {"version": 2}, "battery-status": {"version": 2}, "charge-history": {"version": 1}, "charge-mode": {"version": 1}, "charges": {"version": 1}, "charging-settings": {"version": 1}, "cockpit": {"version": 2}, "hvac-history": {"version": 1}, "hvac-sessions": {"version": 1}, "hvac-status": {"version": 1}, "hvac-settings": {"version": 1}, "location": {"version": 1}, "lock-status": {"version": 1}, "notification-settings": {"version": 1}, } _KCA_POST_ENDPOINTS: Dict[str, Any] = { "actions/charge-mode": {"version": 1, "type": "ChargeMode"}, "actions/charge-schedule": {"version": 2, "type": "ChargeSchedule"}, "actions/charging-start": {"version": 1, "type": "ChargingStart"}, "actions/hvac-schedule": {"version": 2, "type": "HvacSchedule"}, "actions/hvac-start": {"version": 1, "type": "HvacStart"}, } # Deprecated from 0.1.8 - kept for compatibility DATA_ENDPOINTS = _KCA_GET_ENDPOINTS ACTION_ENDPOINTS = _KCA_POST_ENDPOINTS def get_commerce_url(root_url: str) -> str: """Get the Kamereon base commerce url.""" return f"{root_url}/commerce/v1" def get_person_url(root_url: str, person_id: str) -> str: """Get the url to the person.""" return f"{get_commerce_url(root_url)}/persons/{person_id}" def get_account_url(root_url: str, account_id: str) -> str: """Get the url to the account.""" return f"{get_commerce_url(root_url)}/accounts/{account_id}" def get_car_adapter_url(root_url: str, account_id: str, version: int, vin: str) -> str: """Get the url to the car adapter.""" account_url = get_account_url(root_url, account_id) return f"{account_url}/kamereon/kca/car-adapter/v{version}/cars/{vin}" def 
get_contracts_url(root_url: str, account_id: str, vin: str) -> str: """Get the url to the car contracts.""" account_url = get_account_url(root_url, account_id) return f"{account_url}/vehicles/{vin}/contracts" def get_required_contracts(endpoint: str) -> str: # pragma: no cover """Get the required contracts for the specified endpoint.""" # "Deprecated in 0.1.3, contract codes are country-specific" # " and can't be used to guess requirements." warn("This method is deprecated.", DeprecationWarning, stacklevel=2) return "" def has_required_contracts( contracts: List[models.KamereonVehicleContract], endpoint: str ) -> bool: """Check if vehicle has contract for endpoint.""" # "Deprecated in 0.1.3, contract codes are country-specific" # " and can't be used to guess requirements." warn("This method is deprecated.", DeprecationWarning, stacklevel=2) return True # pragma: no cover async def request( websession: aiohttp.ClientSession, method: str, url: str, api_key: str, gigya_jwt: str, params: Dict[str, str], json: Optional[Dict[str, Any]] = None, schema: Optional[Schema] = None, *, wrap_array_in: Optional[str] = None, ) -> models.KamereonResponse: """Process Kamereon HTTP request.""" schema = schema or schemas.KamereonResponseSchema headers = { "Content-type": "application/vnd.api+json", "apikey": api_key, "x-gigya-id_token": gigya_jwt, } async with websession.request( method, url, headers=headers, params=params, json=json, ) as http_response: response_text = await http_response.text() if json: _LOGGER.debug( "Send Kamereon %s request to %s with body: %s", method, http_response.url, json_dumps(json), ) _LOGGER.debug( "Received Kamereon response %s on %s to %s: %s", http_response.status, method, http_response.url, response_text, ) # Some endpoints return arrays instead of objects. # These need to be wrapped in an object. 
if response_text.startswith("[") and wrap_array_in: response_text = f'{{'{wrap_array_in}': {response_text}}}' kamereon_response: models.KamereonResponse = schema.loads(response_text) # Check for Kamereon error kamereon_response.raise_for_error_code() # Check for HTTP error http_response.raise_for_status() return kamereon_response async def get_person( websession: aiohttp.ClientSession, root_url: str, api_key: str, gigya_jwt: str, country: str, person_id: str, ) -> models.KamereonPersonResponse: """GET to /persons/{person_id}.""" url = get_person_url(root_url, person_id) params = {"country": country} return cast( models.KamereonPersonResponse, await request( websession, "GET", url, api_key, gigya_jwt, params=params, schema=schemas.KamereonPersonResponseSchema, ), ) async def get_vehicle_contracts( websession: aiohttp.ClientSession, root_url: str, api_key: str, gigya_jwt: str, country: str, locale: str, account_id: str, vin: str, ) -> models.KamereonVehicleContractsResponse: """GET to /accounts/{accountId}/vehicles/{vin}/contracts.""" url = get_contracts_url(root_url, account_id, vin) params = { "country": country, "locale": locale, "brand": "RENAULT", "connectedServicesContracts": "true", "warranty": "true", "warrantyMaintenanceContracts": "true", } return cast( models.KamereonVehicleContractsResponse, await request( websession, "GET", url, api_key, gigya_jwt, params=params, schema=schemas.KamereonVehicleContractsResponseSchema, wrap_array_in="contractList", ), ) async def get_account_vehicles( websession: aiohttp.ClientSession, root_url: str, api_key: str, gigya_jwt: str, country: str, account_id: str, ) -> models.KamereonVehiclesResponse: """GET to /accounts/{account_id}/vehicles.""" url = f"{get_account_url(root_url, account_id)}/vehicles" params = {"country": country} return cast( models.KamereonVehiclesResponse, await request( websession, "GET", url, api_key, gigya_jwt, params=params, schema=schemas.KamereonVehiclesResponseSchema, ), ) async def 
get_vehicle_details( websession: aiohttp.ClientSession, root_url: str, api_key: str, gigya_jwt: str, country: str, account_id: str, vin: str, ) -> models.KamereonVehicleDetailsResponse: """GET to /accounts/{account_id}/vehicles/{vin}/details.""" url = f"{get_account_url(root_url, account_id)}/vehicles/{vin}/details" params = {"country": country} return cast( models.KamereonVehicleDetailsResponse, await request( websession, "GET", url, api_key, gigya_jwt, params=params, schema=schemas.KamereonVehicleDetailsResponseSchema, ), ) async def get_vehicle_data( websession: aiohttp.ClientSession, root_url: str, api_key: str, gigya_jwt: str, country: str, account_id: str, vin: str, endpoint: str, endpoint_version: Optional[int] = None, params: Optional[Dict[str, str]] = None, ) -> models.KamereonVehicleDataResponse: """GET to /v{endpoint_version}/cars/{vin}/{endpoint}.""" endpoint_details = _KCA_GET_ENDPOINTS[endpoint] car_adapter_url = get_car_adapter_url( root_url=root_url, account_id=account_id, version=endpoint_version or int(endpoint_details["version"]), vin=vin, ) url = f"{car_adapter_url}/{endpoint}" if endpoint else car_adapter_url params = params or {} params["country"] = country return cast( models.KamereonVehicleDataResponse, await request( websession, "GET", url, api_key, gigya_jwt, params=params, schema=schemas.KamereonVehicleDataResponseSchema, ), ) async def set_vehicle_action( websession: aiohttp.ClientSession, root_url: str, api_key: str, gigya_jwt: str, country: str, account_id: str, vin: str, endpoint: str, attributes: Dict[str, Any], endpoint_version: Optional[int] = None, data_type: Optional[Dict[str, Any]] = None, ) -> models.KamereonVehicleDataResponse: """POST to /v{endpoint_version}/cars/{vin}/{endpoint}.""" if "/" not in endpoint: # Deprecated in 0.1.8 warn( f"You should use the full endpoint: actions/{endpoint}.", DeprecationWarning, stacklevel=2, ) endpoint = f"actions/{endpoint}" endpoint_details = _KCA_POST_ENDPOINTS[endpoint] car_adapter_url = 
get_car_adapter_url( root_url=root_url, account_id=account_id, version=endpoint_version or int(endpoint_details["version"]), vin=vin, ) url = f"{car_adapter_url}/{endpoint}" params = {"country": country} json = { "data": { "type": data_type or endpoint_details["type"], "attributes": attributes, } } return cast( models.KamereonVehicleDataResponse, await request( websession, "POST", url, api_key, gigya_jwt, params, json, schemas.KamereonVehicleDataResponseSchema, ), )
"""Kamereon API.""" import logging from json import dumps as json_dumps from typing import Any from typing import cast from typing import Dict from typing import List from typing import Optional from warnings import warn import aiohttp from marshmallow.schema import Schema from . import models from . import schemas _LOGGER = logging.getLogger(__name__) _KCA_GET_ENDPOINTS: Dict[str, Any] = { "": {"version": 2}, "battery-status": {"version": 2}, "charge-history": {"version": 1}, "charge-mode": {"version": 1}, "charges": {"version": 1}, "charging-settings": {"version": 1}, "cockpit": {"version": 2}, "hvac-history": {"version": 1}, "hvac-sessions": {"version": 1}, "hvac-status": {"version": 1}, "hvac-settings": {"version": 1}, "location": {"version": 1}, "lock-status": {"version": 1}, "notification-settings": {"version": 1}, } _KCA_POST_ENDPOINTS: Dict[str, Any] = { "actions/charge-mode": {"version": 1, "type": "ChargeMode"}, "actions/charge-schedule": {"version": 2, "type": "ChargeSchedule"}, "actions/charging-start": {"version": 1, "type": "ChargingStart"}, "actions/hvac-schedule": {"version": 2, "type": "HvacSchedule"}, "actions/hvac-start": {"version": 1, "type": "HvacStart"}, } # Deprecated from 0.1.8 - kept for compatibility DATA_ENDPOINTS = _KCA_GET_ENDPOINTS ACTION_ENDPOINTS = _KCA_POST_ENDPOINTS def get_commerce_url(root_url: str) -> str: """Get the Kamereon base commerce url.""" return f"{root_url}/commerce/v1" def get_person_url(root_url: str, person_id: str) -> str: """Get the url to the person.""" return f"{get_commerce_url(root_url)}/persons/{person_id}" def get_account_url(root_url: str, account_id: str) -> str: """Get the url to the account.""" return f"{get_commerce_url(root_url)}/accounts/{account_id}" def get_car_adapter_url(root_url: str, account_id: str, version: int, vin: str) -> str: """Get the url to the car adapter.""" account_url = get_account_url(root_url, account_id) return f"{account_url}/kamereon/kca/car-adapter/v{version}/cars/{vin}" def 
get_contracts_url(root_url: str, account_id: str, vin: str) -> str: """Get the url to the car contracts.""" account_url = get_account_url(root_url, account_id) return f"{account_url}/vehicles/{vin}/contracts" def get_required_contracts(endpoint: str) -> str: # pragma: no cover """Get the required contracts for the specified endpoint.""" # "Deprecated in 0.1.3, contract codes are country-specific" # " and can't be used to guess requirements." warn("This method is deprecated.", DeprecationWarning, stacklevel=2) return "" def has_required_contracts( contracts: List[models.KamereonVehicleContract], endpoint: str ) -> bool: """Check if vehicle has contract for endpoint.""" # "Deprecated in 0.1.3, contract codes are country-specific" # " and can't be used to guess requirements." warn("This method is deprecated.", DeprecationWarning, stacklevel=2) return True # pragma: no cover async def request( websession: aiohttp.ClientSession, method: str, url: str, api_key: str, gigya_jwt: str, params: Dict[str, str], json: Optional[Dict[str, Any]] = None, schema: Optional[Schema] = None, *, wrap_array_in: Optional[str] = None, ) -> models.KamereonResponse: """Process Kamereon HTTP request.""" schema = schema or schemas.KamereonResponseSchema headers = { "Content-type": "application/vnd.api+json", "apikey": api_key, "x-gigya-id_token": gigya_jwt, } async with websession.request( method, url, headers=headers, params=params, json=json, ) as http_response: response_text = await http_response.text() if json: _LOGGER.debug( "Send Kamereon %s request to %s with body: %s", method, http_response.url, json_dumps(json), ) _LOGGER.debug( "Received Kamereon response %s on %s to %s: %s", http_response.status, method, http_response.url, response_text, ) # Some endpoints return arrays instead of objects. # These need to be wrapped in an object. 
if response_text.startswith("[") and wrap_array_in: response_text = f'{{"{wrap_array_in}": {response_text}}}' kamereon_response: models.KamereonResponse = schema.loads(response_text) # Check for Kamereon error kamereon_response.raise_for_error_code() # Check for HTTP error http_response.raise_for_status() return kamereon_response async def get_person( websession: aiohttp.ClientSession, root_url: str, api_key: str, gigya_jwt: str, country: str, person_id: str, ) -> models.KamereonPersonResponse: """GET to /persons/{person_id}.""" url = get_person_url(root_url, person_id) params = {"country": country} return cast( models.KamereonPersonResponse, await request( websession, "GET", url, api_key, gigya_jwt, params=params, schema=schemas.KamereonPersonResponseSchema, ), ) async def get_vehicle_contracts( websession: aiohttp.ClientSession, root_url: str, api_key: str, gigya_jwt: str, country: str, locale: str, account_id: str, vin: str, ) -> models.KamereonVehicleContractsResponse: """GET to /accounts/{accountId}/vehicles/{vin}/contracts.""" url = get_contracts_url(root_url, account_id, vin) params = { "country": country, "locale": locale, "brand": "RENAULT", "connectedServicesContracts": "true", "warranty": "true", "warrantyMaintenanceContracts": "true", } return cast( models.KamereonVehicleContractsResponse, await request( websession, "GET", url, api_key, gigya_jwt, params=params, schema=schemas.KamereonVehicleContractsResponseSchema, wrap_array_in="contractList", ), ) async def get_account_vehicles( websession: aiohttp.ClientSession, root_url: str, api_key: str, gigya_jwt: str, country: str, account_id: str, ) -> models.KamereonVehiclesResponse: """GET to /accounts/{account_id}/vehicles.""" url = f"{get_account_url(root_url, account_id)}/vehicles" params = {"country": country} return cast( models.KamereonVehiclesResponse, await request( websession, "GET", url, api_key, gigya_jwt, params=params, schema=schemas.KamereonVehiclesResponseSchema, ), ) async def 
get_vehicle_details( websession: aiohttp.ClientSession, root_url: str, api_key: str, gigya_jwt: str, country: str, account_id: str, vin: str, ) -> models.KamereonVehicleDetailsResponse: """GET to /accounts/{account_id}/vehicles/{vin}/details.""" url = f"{get_account_url(root_url, account_id)}/vehicles/{vin}/details" params = {"country": country} return cast( models.KamereonVehicleDetailsResponse, await request( websession, "GET", url, api_key, gigya_jwt, params=params, schema=schemas.KamereonVehicleDetailsResponseSchema, ), ) async def get_vehicle_data( websession: aiohttp.ClientSession, root_url: str, api_key: str, gigya_jwt: str, country: str, account_id: str, vin: str, endpoint: str, endpoint_version: Optional[int] = None, params: Optional[Dict[str, str]] = None, ) -> models.KamereonVehicleDataResponse: """GET to /v{endpoint_version}/cars/{vin}/{endpoint}.""" endpoint_details = _KCA_GET_ENDPOINTS[endpoint] car_adapter_url = get_car_adapter_url( root_url=root_url, account_id=account_id, version=endpoint_version or int(endpoint_details["version"]), vin=vin, ) url = f"{car_adapter_url}/{endpoint}" if endpoint else car_adapter_url params = params or {} params["country"] = country return cast( models.KamereonVehicleDataResponse, await request( websession, "GET", url, api_key, gigya_jwt, params=params, schema=schemas.KamereonVehicleDataResponseSchema, ), ) async def set_vehicle_action( websession: aiohttp.ClientSession, root_url: str, api_key: str, gigya_jwt: str, country: str, account_id: str, vin: str, endpoint: str, attributes: Dict[str, Any], endpoint_version: Optional[int] = None, data_type: Optional[Dict[str, Any]] = None, ) -> models.KamereonVehicleDataResponse: """POST to /v{endpoint_version}/cars/{vin}/{endpoint}.""" if "/" not in endpoint: # Deprecated in 0.1.8 warn( f"You should use the full endpoint: actions/{endpoint}.", DeprecationWarning, stacklevel=2, ) endpoint = f"actions/{endpoint}" endpoint_details = _KCA_POST_ENDPOINTS[endpoint] car_adapter_url = 
get_car_adapter_url( root_url=root_url, account_id=account_id, version=endpoint_version or int(endpoint_details["version"]), vin=vin, ) url = f"{car_adapter_url}/{endpoint}" params = {"country": country} json = { "data": { "type": data_type or endpoint_details["type"], "attributes": attributes, } } return cast( models.KamereonVehicleDataResponse, await request( websession, "POST", url, api_key, gigya_jwt, params, json, schemas.KamereonVehicleDataResponseSchema, ), )
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
from abc import ABCMeta
from collections import defaultdict
from logging import FileHandler

import torch.nn as nn

from mmcv.runner.dist_utils import master_only
from mmcv.utils.logging import get_logger, logger_initialized, print_log


class BaseModule(nn.Module, metaclass=ABCMeta):
    """Base module for all modules in openmmlab.

    ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional
    functionality of parameter initialization. Compared with
    ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes.

        - ``init_cfg``: the config to control the initialization.
        - ``init_weights``: The function of parameter
            initialization and recording initialization
            information.
        - ``_params_init_info``: Used to track the parameter
            initialization information. This attribute only
            exists during executing the ``init_weights``.

    Args:
        init_cfg (dict, optional): Initialization config dict.
    """

    def __init__(self, init_cfg=None):
        """Initialize BaseModule, inherited from `torch.nn.Module`"""

        # NOTE init_cfg can be defined in different levels, but init_cfg
        # in low levels has a higher priority.

        super(BaseModule, self).__init__()
        # define default value of init_cfg instead of hard code
        # in init_weights() function
        self._is_init = False

        self.init_cfg = copy.deepcopy(init_cfg)

        # Backward compatibility in derived classes
        # if pretrained is not None:
        #     warnings.warn('DeprecationWarning: pretrained is a deprecated \
        #         key, please consider using init_cfg')
        #     self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)

    @property
    def is_init(self):
        # Whether init_weights() has already completed for this module.
        return self._is_init

    def init_weights(self):
        """Initialize the weights."""

        is_top_level_module = False
        # check if it is top-level module
        if not hasattr(self, '_params_init_info'):
            # The `_params_init_info` is used to record the initialization
            # information of the parameters
            # the key should be the obj:`nn.Parameter` of model and the value
            # should be a dict containing
            # - init_info (str): The string that describes the initialization.
            # - tmp_mean_value (FloatTensor): The mean of the parameter,
            #       which indicates whether the parameter has been modified.
            # this attribute would be deleted after all parameters
            # is initialized.
            self._params_init_info = defaultdict(dict)
            is_top_level_module = True

            # Initialize the `_params_init_info`,
            # When detecting the `tmp_mean_value` of
            # the corresponding parameter is changed, update related
            # initialization information
            for name, param in self.named_parameters():
                self._params_init_info[param][
                    'init_info'] = f'The value is the same before and ' \
                                   f'after calling `init_weights` ' \
                                   f'of {self.__class__.__name__} '
                self._params_init_info[param][
                    'tmp_mean_value'] = param.data.mean()

            # pass `params_init_info` to all submodules
            # All submodules share the same `params_init_info`,
            # so it will be updated when parameters are
            # modified at any level of the model.
            for sub_module in self.modules():
                sub_module._params_init_info = self._params_init_info

        # Get the initialized logger, if not exist,
        # create a logger named `mmcv`
        logger_names = list(logger_initialized.keys())
        logger_name = logger_names[0] if logger_names else 'mmcv'

        # imported here to avoid a circular import with mmcv.cnn
        from ..cnn import initialize
        from ..cnn.utils.weight_init import update_init_info
        module_name = self.__class__.__name__
        if not self._is_init:
            if self.init_cfg:
                print_log(
                    f'initialize {module_name} with init_cfg {self.init_cfg}',
                    logger=logger_name)
                initialize(self, self.init_cfg)
                if isinstance(self.init_cfg, dict):
                    # prevent the parameters of
                    # the pre-trained model
                    # from being overwritten by
                    # the `init_weights`
                    if self.init_cfg['type'] == 'Pretrained':
                        return

            for m in self.children():
                if hasattr(m, 'init_weights'):
                    m.init_weights()
                    # users may overload the `init_weights`
                    update_init_info(
                        m,
                        init_info=f'Initialized by '
                        f'user-defined `init_weights`'
                        f' in {m.__class__.__name__} ')

            self._is_init = True
        else:
            warnings.warn(f'init_weights of {self.__class__.__name__} has '
                          f'been called more than once.')

        if is_top_level_module:
            self._dump_init_info(logger_name)

            for sub_module in self.modules():
                del sub_module._params_init_info

    @master_only
    def _dump_init_info(self, logger_name):
        """Dump the initialization information to a file named
        `initialization.log.json` in workdir.

        Args:
            logger_name (str): The name of logger.
        """

        logger = get_logger(logger_name)

        with_file_handler = False
        # dump the information to the logger file if there is a `FileHandler`
        for handler in logger.handlers:
            if isinstance(handler, FileHandler):
                handler.stream.write(
                    'Name of parameter - Initialization information\n')
                for name, param in self.named_parameters():
                    handler.stream.write(
                        f'\n{name} - {param.shape}: '
                        f"\n{self._params_init_info[param]['init_info']} \n")
                handler.stream.flush()
                with_file_handler = True
        if not with_file_handler:
            for name, param in self.named_parameters():
                print_log(
                    f'\n{name} - {param.shape}: '
                    f"\n{self._params_init_info[param]['init_info']} \n ",
                    logger=logger_name)

    def __repr__(self):
        s = super().__repr__()
        if self.init_cfg:
            s += f'\ninit_cfg={self.init_cfg}'
        return s


class Sequential(BaseModule, nn.Sequential):
    """Sequential module in openmmlab.

    Args:
        init_cfg (dict, optional): Initialization config dict.
    """

    def __init__(self, *args, init_cfg=None):
        BaseModule.__init__(self, init_cfg)
        nn.Sequential.__init__(self, *args)


class ModuleList(BaseModule, nn.ModuleList):
    """ModuleList in openmmlab.

    Args:
        modules (iterable, optional): an iterable of modules to add.
        init_cfg (dict, optional): Initialization config dict.
    """

    def __init__(self, modules=None, init_cfg=None):
        BaseModule.__init__(self, init_cfg)
        nn.ModuleList.__init__(self, modules)


class ModuleDict(BaseModule, nn.ModuleDict):
    """ModuleDict in openmmlab.

    Args:
        modules (dict, optional): a mapping (dictionary) of (string: module)
            or an iterable of key-value pairs of type (string, module).
        init_cfg (dict, optional): Initialization config dict.
    """

    def __init__(self, modules=None, init_cfg=None):
        BaseModule.__init__(self, init_cfg)
        nn.ModuleDict.__init__(self, modules)
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
from abc import ABCMeta
from collections import defaultdict
from logging import FileHandler

import torch.nn as nn

from mmcv.runner.dist_utils import master_only
from mmcv.utils.logging import get_logger, logger_initialized, print_log


class BaseModule(nn.Module, metaclass=ABCMeta):
    """Base module for all modules in openmmlab.

    ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional
    functionality of parameter initialization. Compared with
    ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes.

        - ``init_cfg``: the config to control the initialization.
        - ``init_weights``: The function of parameter
            initialization and recording initialization
            information.
        - ``_params_init_info``: Used to track the parameter
            initialization information. This attribute only
            exists during executing the ``init_weights``.

    Args:
        init_cfg (dict, optional): Initialization config dict.
    """

    def __init__(self, init_cfg=None):
        """Initialize BaseModule, inherited from `torch.nn.Module`"""

        # NOTE init_cfg can be defined in different levels, but init_cfg
        # in low levels has a higher priority.

        super(BaseModule, self).__init__()
        # define default value of init_cfg instead of hard code
        # in init_weights() function
        self._is_init = False

        self.init_cfg = copy.deepcopy(init_cfg)

        # Backward compatibility in derived classes
        # if pretrained is not None:
        #     warnings.warn('DeprecationWarning: pretrained is a deprecated \
        #         key, please consider using init_cfg')
        #     self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)

    @property
    def is_init(self):
        # Whether init_weights() has already completed for this module.
        return self._is_init

    def init_weights(self):
        """Initialize the weights."""

        is_top_level_module = False
        # check if it is top-level module
        if not hasattr(self, '_params_init_info'):
            # The `_params_init_info` is used to record the initialization
            # information of the parameters
            # the key should be the obj:`nn.Parameter` of model and the value
            # should be a dict containing
            # - init_info (str): The string that describes the initialization.
            # - tmp_mean_value (FloatTensor): The mean of the parameter,
            #       which indicates whether the parameter has been modified.
            # this attribute would be deleted after all parameters
            # is initialized.
            self._params_init_info = defaultdict(dict)
            is_top_level_module = True

            # Initialize the `_params_init_info`,
            # When detecting the `tmp_mean_value` of
            # the corresponding parameter is changed, update related
            # initialization information
            for name, param in self.named_parameters():
                self._params_init_info[param][
                    'init_info'] = f'The value is the same before and ' \
                                   f'after calling `init_weights` ' \
                                   f'of {self.__class__.__name__} '
                self._params_init_info[param][
                    'tmp_mean_value'] = param.data.mean()

            # pass `params_init_info` to all submodules
            # All submodules share the same `params_init_info`,
            # so it will be updated when parameters are
            # modified at any level of the model.
            for sub_module in self.modules():
                sub_module._params_init_info = self._params_init_info

        # Get the initialized logger, if not exist,
        # create a logger named `mmcv`
        logger_names = list(logger_initialized.keys())
        logger_name = logger_names[0] if logger_names else 'mmcv'

        # imported here to avoid a circular import with mmcv.cnn
        from ..cnn import initialize
        from ..cnn.utils.weight_init import update_init_info
        module_name = self.__class__.__name__
        if not self._is_init:
            if self.init_cfg:
                print_log(
                    f'initialize {module_name} with init_cfg {self.init_cfg}',
                    logger=logger_name)
                initialize(self, self.init_cfg)
                if isinstance(self.init_cfg, dict):
                    # prevent the parameters of
                    # the pre-trained model
                    # from being overwritten by
                    # the `init_weights`
                    if self.init_cfg['type'] == 'Pretrained':
                        return

            for m in self.children():
                if hasattr(m, 'init_weights'):
                    m.init_weights()
                    # users may overload the `init_weights`
                    update_init_info(
                        m,
                        init_info=f'Initialized by '
                        f'user-defined `init_weights`'
                        f' in {m.__class__.__name__} ')

            self._is_init = True
        else:
            warnings.warn(f'init_weights of {self.__class__.__name__} has '
                          f'been called more than once.')

        if is_top_level_module:
            self._dump_init_info(logger_name)

            for sub_module in self.modules():
                del sub_module._params_init_info

    @master_only
    def _dump_init_info(self, logger_name):
        """Dump the initialization information to a file named
        `initialization.log.json` in workdir.

        Args:
            logger_name (str): The name of logger.
        """

        logger = get_logger(logger_name)

        with_file_handler = False
        # dump the information to the logger file if there is a `FileHandler`
        for handler in logger.handlers:
            if isinstance(handler, FileHandler):
                handler.stream.write(
                    'Name of parameter - Initialization information\n')
                for name, param in self.named_parameters():
                    handler.stream.write(
                        f'\n{name} - {param.shape}: '
                        f"\n{self._params_init_info[param]['init_info']} \n")
                handler.stream.flush()
                with_file_handler = True
        if not with_file_handler:
            for name, param in self.named_parameters():
                print_log(
                    f'\n{name} - {param.shape}: '
                    f"\n{self._params_init_info[param]['init_info']} \n ",
                    logger=logger_name)

    def __repr__(self):
        s = super().__repr__()
        if self.init_cfg:
            s += f'\ninit_cfg={self.init_cfg}'
        return s


class Sequential(BaseModule, nn.Sequential):
    """Sequential module in openmmlab.

    Args:
        init_cfg (dict, optional): Initialization config dict.
    """

    def __init__(self, *args, init_cfg=None):
        BaseModule.__init__(self, init_cfg)
        nn.Sequential.__init__(self, *args)


class ModuleList(BaseModule, nn.ModuleList):
    """ModuleList in openmmlab.

    Args:
        modules (iterable, optional): an iterable of modules to add.
        init_cfg (dict, optional): Initialization config dict.
    """

    def __init__(self, modules=None, init_cfg=None):
        BaseModule.__init__(self, init_cfg)
        nn.ModuleList.__init__(self, modules)


class ModuleDict(BaseModule, nn.ModuleDict):
    """ModuleDict in openmmlab.

    Args:
        modules (dict, optional): a mapping (dictionary) of (string: module)
            or an iterable of key-value pairs of type (string, module).
        init_cfg (dict, optional): Initialization config dict.
    """

    def __init__(self, modules=None, init_cfg=None):
        BaseModule.__init__(self, init_cfg)
        nn.ModuleDict.__init__(self, modules)
import argparse
import glob
import importlib
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import gym
import stable_baselines3 as sb3  # noqa: F401
import torch as th  # noqa: F401
import yaml
from sb3_contrib import QRDQN, TQC
from stable_baselines3 import A2C, DDPG, DQN, PPO, SAC, TD3
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.sb2_compat.rmsprop_tf_like import RMSpropTFLike  # noqa: F401
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecEnv, VecFrameStack, VecNormalize

# For custom activation fn
from torch import nn as nn  # noqa: F401 pylint: disable=unused-import

ALGOS = {
    "a2c": A2C,
    "ddpg": DDPG,
    "dqn": DQN,
    "ppo": PPO,
    "sac": SAC,
    "td3": TD3,
    # SB3 Contrib,
    "qrdqn": QRDQN,
    "tqc": TQC,
}


def flatten_dict_observations(env: gym.Env) -> gym.Env:
    """Flatten a Dict observation space, falling back to the legacy wrapper
    name on older gym versions."""
    assert isinstance(env.observation_space, gym.spaces.Dict)
    try:
        return gym.wrappers.FlattenObservation(env)
    except AttributeError:
        keys = env.observation_space.spaces.keys()
        return gym.wrappers.FlattenDictWrapper(env, dict_keys=list(keys))


def get_wrapper_class(hyperparams: Dict[str, Any]) -> Optional[Callable[[gym.Env], gym.Env]]:
    """
    Get one or more Gym environment wrapper class specified as a hyper parameter
    "env_wrapper".
    e.g.
    env_wrapper: gym_minigrid.wrappers.FlatObsWrapper

    for multiple, specify a list:

    env_wrapper:
        - utils.wrappers.PlotActionWrapper
        - utils.wrappers.TimeFeatureWrapper

    :param hyperparams:
    :return: maybe a callable to wrap the environment
        with one or multiple gym.Wrapper
    """

    def get_module_name(wrapper_name):
        return ".".join(wrapper_name.split(".")[:-1])

    def get_class_name(wrapper_name):
        return wrapper_name.split(".")[-1]

    if "env_wrapper" in hyperparams.keys():
        wrapper_name = hyperparams.get("env_wrapper")

        if wrapper_name is None:
            return None

        if not isinstance(wrapper_name, list):
            wrapper_names = [wrapper_name]
        else:
            wrapper_names = wrapper_name

        wrapper_classes = []
        wrapper_kwargs = []
        # Handle multiple wrappers
        for wrapper_name in wrapper_names:
            # Handle keyword arguments
            if isinstance(wrapper_name, dict):
                assert len(wrapper_name) == 1, (
                    "You have an error in the formatting "
                    f"of your YAML file near {wrapper_name}. "
                    "You should check the indentation."
                )
                wrapper_dict = wrapper_name
                wrapper_name = list(wrapper_dict.keys())[0]
                kwargs = wrapper_dict[wrapper_name]
            else:
                kwargs = {}
            wrapper_module = importlib.import_module(get_module_name(wrapper_name))
            wrapper_class = getattr(wrapper_module, get_class_name(wrapper_name))
            wrapper_classes.append(wrapper_class)
            wrapper_kwargs.append(kwargs)

        def wrap_env(env: gym.Env) -> gym.Env:
            """
            :param env:
            :return:
            """
            for wrapper_class, kwargs in zip(wrapper_classes, wrapper_kwargs):
                env = wrapper_class(env, **kwargs)
            return env

        return wrap_env
    else:
        return None


def get_callback_list(hyperparams: Dict[str, Any]) -> List[BaseCallback]:
    """
    Get one or more Callback class specified as a hyper-parameter
    "callback".
    e.g.
    callback: stable_baselines3.common.callbacks.CheckpointCallback

    for multiple, specify a list:

    callback:
        - utils.callbacks.PlotActionWrapper
        - stable_baselines3.common.callbacks.CheckpointCallback

    :param hyperparams:
    :return:
    """

    def get_module_name(callback_name):
        return ".".join(callback_name.split(".")[:-1])

    def get_class_name(callback_name):
        return callback_name.split(".")[-1]

    callbacks = []

    if "callback" in hyperparams.keys():
        callback_name = hyperparams.get("callback")

        if callback_name is None:
            return callbacks

        if not isinstance(callback_name, list):
            callback_names = [callback_name]
        else:
            callback_names = callback_name

        # Handle multiple wrappers
        for callback_name in callback_names:
            # Handle keyword arguments
            if isinstance(callback_name, dict):
                assert len(callback_name) == 1, (
                    "You have an error in the formatting "
                    f"of your YAML file near {callback_name}. "
                    "You should check the indentation."
                )
                callback_dict = callback_name
                callback_name = list(callback_dict.keys())[0]
                kwargs = callback_dict[callback_name]
            else:
                kwargs = {}
            callback_module = importlib.import_module(get_module_name(callback_name))
            callback_class = getattr(callback_module, get_class_name(callback_name))
            callbacks.append(callback_class(**kwargs))

    return callbacks


def create_test_env(
    env_id: str,
    n_envs: int = 1,
    stats_path: Optional[str] = None,
    seed: int = 0,
    log_dir: Optional[str] = None,
    should_render: bool = True,
    hyperparams: Optional[Dict[str, Any]] = None,
    env_kwargs: Optional[Dict[str, Any]] = None,
) -> VecEnv:
    """
    Create environment for testing a trained agent

    :param env_id:
    :param n_envs: number of processes
    :param stats_path: path to folder containing saved running averaged
    :param seed: Seed for random number generator
    :param log_dir: Where to log rewards
    :param should_render: For Pybullet env, display the GUI
    :param hyperparams: Additional hyperparams (ex: n_stack)
    :param env_kwargs: Optional keyword argument to pass to the env constructor
    :return:
    """
    # Avoid circular import
    from utils.exp_manager import ExperimentManager

    # Normalize before use: hyperparams defaults to None but is read below
    # (and inside get_wrapper_class).
    hyperparams = {} if hyperparams is None else hyperparams

    # Create the environment and wrap it if necessary
    env_wrapper = get_wrapper_class(hyperparams)

    if "env_wrapper" in hyperparams.keys():
        del hyperparams["env_wrapper"]

    vec_env_kwargs = {}
    vec_env_cls = DummyVecEnv
    if n_envs > 1 or (ExperimentManager.is_bullet(env_id) and should_render):
        # HACK: force SubprocVecEnv for Bullet env
        # as Pybullet envs does not follow gym.render() interface
        vec_env_cls = SubprocVecEnv
        # start_method = 'spawn' for thread safe

    env = make_vec_env(
        env_id,
        n_envs=n_envs,
        monitor_dir=log_dir,
        seed=seed,
        wrapper_class=env_wrapper,
        env_kwargs=env_kwargs,
        vec_env_cls=vec_env_cls,
        vec_env_kwargs=vec_env_kwargs,
    )

    # Load saved stats for normalizing input and rewards
    # And optionally stack frames
    if stats_path is not None:
        if hyperparams["normalize"]:
            print("Loading running average")
            print(f"with params: {hyperparams['normalize_kwargs']}")
            path_ = os.path.join(stats_path, "vecnormalize.pkl")
            if os.path.exists(path_):
                env = VecNormalize.load(path_, env)
                # Deactivate training and reward normalization
                env.training = False
                env.norm_reward = False
            else:
                raise ValueError(f"VecNormalize stats {path_} not found")

        n_stack = hyperparams.get("frame_stack", 0)
        if n_stack > 0:
            print(f"Stacking {n_stack} frames")
            env = VecFrameStack(env, n_stack)
    return env


def linear_schedule(initial_value: Union[float, str]) -> Callable[[float], float]:
    """
    Linear learning rate schedule.

    :param initial_value: (float or str)
    :return: (function)
    """
    if isinstance(initial_value, str):
        initial_value = float(initial_value)

    def func(progress_remaining: float) -> float:
        """
        Progress will decrease from 1 (beginning) to 0
        :param progress_remaining: (float)
        :return: (float)
        """
        return progress_remaining * initial_value

    return func


def get_trained_models(log_folder: str) -> Dict[str, Tuple[str, str]]:
    """
    :param log_folder: Root log folder
    :return: Dict representing the trained agents
    """
    trained_models = {}
    for algo in os.listdir(log_folder):
        if not os.path.isdir(os.path.join(log_folder, algo)):
            continue
        for env_id in os.listdir(os.path.join(log_folder, algo)):
            # Retrieve env name
            env_id = env_id.split("_")[0]
            trained_models[f"{algo}-{env_id}"] = (algo, env_id)
    return trained_models


def get_latest_run_id(log_path: str, env_id: str) -> int:
    """
    Returns the latest run number for the given log name and log path,
    by finding the greatest number in the directories.

    :param log_path: path to log folder
    :param env_id:
    :return: latest run number
    """
    max_run_id = 0
    for path in glob.glob(os.path.join(log_path, env_id + "_[0-9]*")):
        file_name = os.path.basename(path)
        ext = file_name.split("_")[-1]
        if env_id == "_".join(file_name.split("_")[:-1]) and ext.isdigit() and int(ext) > max_run_id:
            max_run_id = int(ext)
    return max_run_id


def get_saved_hyperparams(
    stats_path: str,
    norm_reward: bool = False,
    test_mode: bool = False,
) -> Tuple[Dict[str, Any], str]:
    """
    :param stats_path:
    :param norm_reward:
    :param test_mode:
    :return:
    """
    hyperparams = {}
    if not os.path.isdir(stats_path):
        stats_path = None
    else:
        config_file = os.path.join(stats_path, "config.yml")
        if os.path.isfile(config_file):
            # Load saved hyperparameters
            with open(os.path.join(stats_path, "config.yml"), "r") as f:
                hyperparams = yaml.load(f, Loader=yaml.UnsafeLoader)  # pytype: disable=module-attr
            hyperparams["normalize"] = hyperparams.get("normalize", False)
        else:
            obs_rms_path = os.path.join(stats_path, "obs_rms.pkl")
            hyperparams["normalize"] = os.path.isfile(obs_rms_path)

        # Load normalization params
        if hyperparams["normalize"]:
            if isinstance(hyperparams["normalize"], str):
                # SECURITY: eval of a string from the saved config —
                # only load stats folders you trust.
                normalize_kwargs = eval(hyperparams["normalize"])
                if test_mode:
                    normalize_kwargs["norm_reward"] = norm_reward
            else:
                normalize_kwargs = {"norm_obs": hyperparams["normalize"], "norm_reward": norm_reward}
            hyperparams["normalize_kwargs"] = normalize_kwargs
    return hyperparams, stats_path


class StoreDict(argparse.Action):
    """
    Custom argparse action for storing dict.

    In: args1:0.0 args2:"dict(a=1)"
    Out: {'args1': 0.0, arg2: dict(a=1)}
    """

    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        self._nargs = nargs
        super(StoreDict, self).__init__(option_strings, dest, nargs=nargs, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        arg_dict = {}
        for arguments in values:
            key = arguments.split(":")[0]
            value = ":".join(arguments.split(":")[1:])
            # Evaluate the string as python code
            # SECURITY: eval of command-line input — intended for trusted
            # local CLI use only.
            arg_dict[key] = eval(value)
        setattr(namespace, self.dest, arg_dict)
import argparse
import glob
import importlib
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import gym
import stable_baselines3 as sb3  # noqa: F401
import torch as th  # noqa: F401
import yaml
from sb3_contrib import QRDQN, TQC
from stable_baselines3 import A2C, DDPG, DQN, PPO, SAC, TD3
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.sb2_compat.rmsprop_tf_like import RMSpropTFLike  # noqa: F401
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecEnv, VecFrameStack, VecNormalize

# For custom activation fn
from torch import nn as nn  # noqa: F401 pylint: disable=unused-import

# Registry mapping CLI algorithm names to SB3/SB3-Contrib classes.
ALGOS = {
    "a2c": A2C,
    "ddpg": DDPG,
    "dqn": DQN,
    "ppo": PPO,
    "sac": SAC,
    "td3": TD3,
    # SB3 Contrib,
    "qrdqn": QRDQN,
    "tqc": TQC,
}


def flatten_dict_observations(env: gym.Env) -> gym.Env:
    """
    Wrap an env with a Dict observation space so observations become flat arrays.

    Falls back to the legacy ``FlattenDictWrapper`` for old gym versions that
    do not provide ``FlattenObservation``.
    """
    assert isinstance(env.observation_space, gym.spaces.Dict)
    try:
        return gym.wrappers.FlattenObservation(env)
    except AttributeError:
        keys = env.observation_space.spaces.keys()
        return gym.wrappers.FlattenDictWrapper(env, dict_keys=list(keys))


def get_wrapper_class(hyperparams: Dict[str, Any]) -> Optional[Callable[[gym.Env], gym.Env]]:
    """
    Get one or more Gym environment wrapper class specified as a hyper parameter
    "env_wrapper".
    e.g.
    env_wrapper: gym_minigrid.wrappers.FlatObsWrapper

    for multiple, specify a list:

    env_wrapper:
        - utils.wrappers.PlotActionWrapper
        - utils.wrappers.TimeFeatureWrapper

    :param hyperparams:
    :return: maybe a callable to wrap the environment
        with one or multiple gym.Wrapper
    """

    def get_module_name(wrapper_name):
        return ".".join(wrapper_name.split(".")[:-1])

    def get_class_name(wrapper_name):
        return wrapper_name.split(".")[-1]

    if "env_wrapper" in hyperparams.keys():
        wrapper_name = hyperparams.get("env_wrapper")

        if wrapper_name is None:
            return None

        if not isinstance(wrapper_name, list):
            wrapper_names = [wrapper_name]
        else:
            wrapper_names = wrapper_name

        wrapper_classes = []
        wrapper_kwargs = []
        # Handle multiple wrappers
        for wrapper_name in wrapper_names:
            # Handle keyword arguments: a dict entry means {wrapper_name: kwargs}
            if isinstance(wrapper_name, dict):
                assert len(wrapper_name) == 1, (
                    "You have an error in the formatting "
                    f"of your YAML file near {wrapper_name}. "
                    "You should check the indentation."
                )
                wrapper_dict = wrapper_name
                wrapper_name = list(wrapper_dict.keys())[0]
                kwargs = wrapper_dict[wrapper_name]
            else:
                kwargs = {}
            wrapper_module = importlib.import_module(get_module_name(wrapper_name))
            wrapper_class = getattr(wrapper_module, get_class_name(wrapper_name))
            wrapper_classes.append(wrapper_class)
            wrapper_kwargs.append(kwargs)

        def wrap_env(env: gym.Env) -> gym.Env:
            """
            Apply every configured wrapper, in order.

            :param env:
            :return:
            """
            for wrapper_class, kwargs in zip(wrapper_classes, wrapper_kwargs):
                env = wrapper_class(env, **kwargs)
            return env

        return wrap_env
    else:
        return None


def get_callback_list(hyperparams: Dict[str, Any]) -> List[BaseCallback]:
    """
    Get one or more Callback class specified as a hyper-parameter
    "callback".
    e.g.
    callback: stable_baselines3.common.callbacks.CheckpointCallback

    for multiple, specify a list:

    callback:
        - utils.callbacks.PlotActionWrapper
        - stable_baselines3.common.callbacks.CheckpointCallback

    :param hyperparams:
    :return:
    """

    def get_module_name(callback_name):
        return ".".join(callback_name.split(".")[:-1])

    def get_class_name(callback_name):
        return callback_name.split(".")[-1]

    callbacks = []

    if "callback" in hyperparams.keys():
        callback_name = hyperparams.get("callback")

        if callback_name is None:
            return callbacks

        if not isinstance(callback_name, list):
            callback_names = [callback_name]
        else:
            callback_names = callback_name

        # Handle multiple wrappers
        for callback_name in callback_names:
            # Handle keyword arguments: a dict entry means {callback_name: kwargs}
            if isinstance(callback_name, dict):
                assert len(callback_name) == 1, (
                    "You have an error in the formatting "
                    f"of your YAML file near {callback_name}. "
                    "You should check the indentation."
                )
                callback_dict = callback_name
                callback_name = list(callback_dict.keys())[0]
                kwargs = callback_dict[callback_name]
            else:
                kwargs = {}
            callback_module = importlib.import_module(get_module_name(callback_name))
            callback_class = getattr(callback_module, get_class_name(callback_name))
            callbacks.append(callback_class(**kwargs))

    return callbacks


def create_test_env(
    env_id: str,
    n_envs: int = 1,
    stats_path: Optional[str] = None,
    seed: int = 0,
    log_dir: Optional[str] = None,
    should_render: bool = True,
    hyperparams: Optional[Dict[str, Any]] = None,
    env_kwargs: Optional[Dict[str, Any]] = None,
) -> VecEnv:
    """
    Create environment for testing a trained agent

    :param env_id:
    :param n_envs: number of processes
    :param stats_path: path to folder containing saved running averaged
    :param seed: Seed for random number generator
    :param log_dir: Where to log rewards
    :param should_render: For Pybullet env, display the GUI
    :param hyperparams: Additional hyperparams (ex: n_stack)
    :param env_kwargs: Optional keyword argument to pass to the env constructor
    :return:
    """
    # Avoid circular import
    from utils.exp_manager import ExperimentManager

    # FIX: default hyperparams *before* it is used. The original called
    # get_wrapper_class(hyperparams) first, so the documented hyperparams=None
    # default crashed with AttributeError on `None.keys()`.
    hyperparams = {} if hyperparams is None else hyperparams

    # Create the environment and wrap it if necessary
    env_wrapper = get_wrapper_class(hyperparams)
    if "env_wrapper" in hyperparams.keys():
        del hyperparams["env_wrapper"]

    vec_env_kwargs = {}
    vec_env_cls = DummyVecEnv
    # NOTE: the trailing `and False` deliberately disables the Bullet/render
    # part of this condition, so SubprocVecEnv is currently never selected
    # by it; only n_envs > 1 could matter (and is also short-circuited off).
    if n_envs > 1 or (ExperimentManager.is_bullet(env_id) and should_render) and False:
        # HACK: force SubprocVecEnv for Bullet env
        # as Pybullet envs does not follow gym.render() interface
        vec_env_cls = SubprocVecEnv
        # start_method = 'spawn' for thread safe

    env = make_vec_env(
        env_id,
        n_envs=n_envs,
        monitor_dir=log_dir,
        seed=seed,
        wrapper_class=env_wrapper,
        env_kwargs=env_kwargs,
        vec_env_cls=vec_env_cls,
        vec_env_kwargs=vec_env_kwargs,
    )

    # Load saved stats for normalizing input and rewards
    # And optionally stack frames
    if stats_path is not None:
        # assumes hyperparams came from get_saved_hyperparams (has "normalize")
        if hyperparams["normalize"]:
            print("Loading running average")
            print(f"with params: {hyperparams['normalize_kwargs']}")
            path_ = os.path.join(stats_path, "vecnormalize.pkl")
            if os.path.exists(path_):
                env = VecNormalize.load(path_, env)
                # Deactivate training and reward normalization
                env.training = False
                env.norm_reward = False
            else:
                raise ValueError(f"VecNormalize stats {path_} not found")

        n_stack = hyperparams.get("frame_stack", 0)
        if n_stack > 0:
            print(f"Stacking {n_stack} frames")
            env = VecFrameStack(env, n_stack)
    return env


def linear_schedule(initial_value: Union[float, str]) -> Callable[[float], float]:
    """
    Linear learning rate schedule.

    :param initial_value: (float or str)
    :return: (function) schedule mapping remaining progress to a learning rate
    """
    if isinstance(initial_value, str):
        initial_value = float(initial_value)

    def func(progress_remaining: float) -> float:
        """
        Progress will decrease from 1 (beginning) to 0.

        :param progress_remaining: (float)
        :return: (float) current learning rate
        """
        return progress_remaining * initial_value

    return func


def get_trained_models(log_folder: str) -> Dict[str, Tuple[str, str]]:
    """
    Index every trained agent found under ``log_folder``.

    Expects the layout ``<log_folder>/<algo>/<env_id>_<run_id>/``.

    :param log_folder: Root log folder
    :return: Dict mapping ``"<algo>-<env_id>"`` to the ``(algo, env_id)`` pair
    """
    trained_models = {}
    for algo in os.listdir(log_folder):
        if not os.path.isdir(os.path.join(log_folder, algo)):
            continue
        for env_id in os.listdir(os.path.join(log_folder, algo)):
            # Retrieve env name: run folders are named "<env_id>_<run_id>"
            env_id = env_id.split("_")[0]
            trained_models[f"{algo}-{env_id}"] = (algo, env_id)
    return trained_models


def get_latest_run_id(log_path: str, env_id: str) -> int:
    """
    Returns the latest run number for the given log name and log path,
    by finding the greatest number in the directories.

    :param log_path: path to log folder
    :param env_id:
    :return: latest run number (0 when no matching run folder exists)
    """
    max_run_id = 0
    for path in glob.glob(os.path.join(log_path, env_id + "_[0-9]*")):
        file_name = os.path.basename(path)
        ext = file_name.split("_")[-1]
        # Only count folders whose prefix matches env_id exactly and whose
        # suffix is purely numeric (e.g. "CartPole-v1_12").
        if env_id == "_".join(file_name.split("_")[:-1]) and ext.isdigit() and int(ext) > max_run_id:
            max_run_id = int(ext)
    return max_run_id


def get_saved_hyperparams(
    stats_path: str,
    norm_reward: bool = False,
    test_mode: bool = False,
) -> Tuple[Dict[str, Any], str]:
    """
    Load the hyperparameters saved alongside a trained agent and derive the
    VecNormalize settings (``normalize_kwargs``) used at training time.

    :param stats_path: folder expected to hold ``config.yml`` / ``obs_rms.pkl``
    :param norm_reward: whether rewards should be normalized at load time
    :param test_mode: when True, force ``norm_reward`` into the saved kwargs
    :return: ``(hyperparams, stats_path)`` -- ``stats_path`` becomes None when
        the folder does not exist
    """
    hyperparams = {}
    if not os.path.isdir(stats_path):
        stats_path = None
    else:
        config_file = os.path.join(stats_path, "config.yml")
        if os.path.isfile(config_file):
            # Load saved hyperparameters
            # NOTE(review): UnsafeLoader executes arbitrary YAML tags -- only
            # acceptable because config.yml is produced locally by training runs.
            with open(os.path.join(stats_path, "config.yml"), "r") as f:
                hyperparams = yaml.load(f, Loader=yaml.UnsafeLoader)  # pytype: disable=module-attr
            hyperparams["normalize"] = hyperparams.get("normalize", False)
        else:
            # Legacy layout: infer normalization from the presence of obs_rms.pkl
            obs_rms_path = os.path.join(stats_path, "obs_rms.pkl")
            hyperparams["normalize"] = os.path.isfile(obs_rms_path)

        # Load normalization params
        if hyperparams["normalize"]:
            if isinstance(hyperparams["normalize"], str):
                # NOTE(review): eval of a config-supplied string such as
                # "dict(norm_obs=True)" -- trusted local config only.
                normalize_kwargs = eval(hyperparams["normalize"])
                if test_mode:
                    normalize_kwargs["norm_reward"] = norm_reward
            else:
                normalize_kwargs = {"norm_obs": hyperparams["normalize"], "norm_reward": norm_reward}
            hyperparams["normalize_kwargs"] = normalize_kwargs
    return hyperparams, stats_path


class StoreDict(argparse.Action):
    """
    Custom argparse action for storing dict.

    In: args1:0.0 args2:"dict(a=1)"
    Out: {'args1': 0.0, arg2: dict(a=1)}
    """

    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        self._nargs = nargs
        super(StoreDict, self).__init__(option_strings, dest, nargs=nargs, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # Each value looks like "key:python_expr"; split on the first ":" only
        # so the expression may itself contain colons.
        arg_dict = {}
        for arguments in values:
            key = arguments.split(":")[0]
            value = ":".join(arguments.split(":")[1:])
            # Evaluate the string as python code
            # NOTE(review): eval on a command-line value -- acceptable for a
            # local training CLI, never for untrusted input.
            arg_dict[key] = eval(value)
        setattr(namespace, self.dest, arg_dict)
import os
import sys
import time
from collections import namedtuple

import pendulum

from dagster import check, seven
from dagster.core.definitions.run_request import InstigatorType
from dagster.core.definitions.sensor_definition import DefaultSensorStatus, SensorExecutionData
from dagster.core.errors import DagsterError
from dagster.core.host_representation import PipelineSelector
from dagster.core.instance import DagsterInstance
from dagster.core.scheduler.instigation import (
    InstigatorState,
    InstigatorStatus,
    SensorInstigatorData,
    TickData,
    TickStatus,
)
from dagster.core.storage.pipeline_run import PipelineRun, PipelineRunStatus, RunsFilter
from dagster.core.storage.tags import RUN_KEY_TAG, check_tags
from dagster.core.telemetry import SENSOR_RUN_CREATED, hash_name, log_action
from dagster.core.workspace import IWorkspace
from dagster.utils import merge_dicts
from dagster.utils.error import serializable_error_info_from_exc_info

# Minimum seconds between sensor-evaluation loop iterations.
MIN_INTERVAL_LOOP_TIME = 5

FINISHED_TICK_STATES = [TickStatus.SKIPPED, TickStatus.SUCCESS, TickStatus.FAILURE]


class DagsterSensorDaemonError(DagsterError):
    """Error when running the SensorDaemon"""


class SkippedSensorRun(namedtuple("SkippedSensorRun", "run_key existing_run")):
    """Placeholder for runs that are skipped during the run_key idempotence check"""


class SensorLaunchContext:
    """Tracks tick state across one sensor evaluation; persists it on exit."""

    def __init__(self, external_sensor, state, tick, instance, logger):
        self._external_sensor = external_sensor
        self._instance = instance
        self._logger = logger
        self._state = state
        self._tick = tick
        self._should_update_cursor_on_failure = False

    @property
    def status(self):
        return self._tick.status

    @property
    def logger(self):
        return self._logger

    @property
    def run_count(self):
        return len(self._tick.run_ids)

    def update_state(self, status, **kwargs):
        # Pop the special-cased kwargs; anything left is forwarded to with_status.
        skip_reason = kwargs.get("skip_reason")
        cursor = kwargs.get("cursor")
        origin_run_id = kwargs.get("origin_run_id")
        if "skip_reason" in kwargs:
            del kwargs["skip_reason"]
        if "cursor" in kwargs:
            del kwargs["cursor"]
        if "origin_run_id" in kwargs:
            del kwargs["origin_run_id"]
        if kwargs:
            check.inst_param(status, "status", TickStatus)
        if status:
            self._tick = self._tick.with_status(status=status, **kwargs)
        if skip_reason:
            self._tick = self._tick.with_reason(skip_reason=skip_reason)
        if cursor:
            self._tick = self._tick.with_cursor(cursor)
        if origin_run_id:
            self._tick = self._tick.with_origin_run(origin_run_id)

    def add_run(self, run_id, run_key=None):
        self._tick = self._tick.with_run(run_id, run_key)

    def set_should_update_cursor_on_failure(self, should_update_cursor_on_failure: bool):
        self._should_update_cursor_on_failure = should_update_cursor_on_failure

    def _write(self):
        self._instance.update_tick(self._tick)

        if self._tick.status not in FINISHED_TICK_STATES:
            return

        # On failure we normally keep the previous cursor/run key, unless a run
        # status sensor explicitly asked to advance anyway (side effects already
        # happened and must not be repeated).
        should_update_cursor_and_last_run_key = (
            self._tick.status != TickStatus.FAILURE
        ) or self._should_update_cursor_on_failure

        last_run_key = (
            self._state.instigator_data.last_run_key if self._state.instigator_data else None
        )
        if self._tick.run_keys and should_update_cursor_and_last_run_key:
            last_run_key = self._tick.run_keys[-1]

        cursor = self._state.instigator_data.cursor if self._state.instigator_data else None
        if should_update_cursor_and_last_run_key:
            cursor = self._tick.cursor

        self._instance.update_instigator_state(
            self._state.with_data(
                SensorInstigatorData(
                    last_tick_timestamp=self._tick.timestamp,
                    last_run_key=last_run_key,
                    min_interval=self._external_sensor.min_interval_seconds,
                    cursor=cursor,
                )
            )
        )

    def __enter__(self):
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        if exception_type and isinstance(exception_value, KeyboardInterrupt):
            return

        # Log the error if the failure wasn't an interrupt or the daemon generator stopping
        if exception_value and not isinstance(exception_value, GeneratorExit):
            error_data = serializable_error_info_from_exc_info(sys.exc_info())
            self.update_state(TickStatus.FAILURE, error=error_data)

        self._write()

        self._instance.purge_ticks(
            self._state.instigator_origin_id,
            tick_status=TickStatus.SKIPPED,
            before=pendulum.now("UTC").subtract(days=7).timestamp(),  # keep the last 7 days
        )


def _check_for_debug_crash(debug_crash_flags, key):
    # Test-only hook: deliver a configured signal to this process to simulate
    # a daemon crash at a specific point in the tick lifecycle.
    if not debug_crash_flags:
        return

    kill_signal = debug_crash_flags.get(key)
    if not kill_signal:
        return

    os.kill(os.getpid(), kill_signal)
    time.sleep(10)
    raise Exception("Process didn't terminate after sending crash signal")


RELOAD_WORKSPACE = 60


def execute_sensor_iteration_loop(instance, workspace, logger, until=None):
    """
    Helper function that performs sensor evaluations on a tighter loop, while reusing grpc locations
    within a given daemon interval.  Rather than relying on the daemon machinery to run the
    iteration loop every 30 seconds, sensors are continuously evaluated, every 5 seconds. We rely on
    each sensor definition's min_interval to check that sensor evaluations are spaced appropriately.
    """
    workspace_loaded_time = pendulum.now("UTC").timestamp()

    workspace_iteration = 0
    start_time = pendulum.now("UTC").timestamp()
    while True:
        start_time = pendulum.now("UTC").timestamp()
        if until and start_time >= until:
            # provide a way of organically ending the loop to support test environment
            break

        if start_time - workspace_loaded_time > RELOAD_WORKSPACE:
            workspace.cleanup()
            workspace_loaded_time = pendulum.now("UTC").timestamp()
            workspace_iteration = 0

        yield from execute_sensor_iteration(
            instance, logger, workspace, log_verbose_checks=(workspace_iteration == 0)
        )
        loop_duration = pendulum.now("UTC").timestamp() - start_time
        sleep_time = max(0, MIN_INTERVAL_LOOP_TIME - loop_duration)
        time.sleep(sleep_time)
        yield
        workspace_iteration += 1


def execute_sensor_iteration(
    instance, logger, workspace, log_verbose_checks=True, debug_crash_flags=None
):
    """Evaluate every running sensor once, yielding between units of work."""
    check.inst_param(workspace, "workspace", IWorkspace)
    check.inst_param(instance, "instance", DagsterInstance)

    workspace_snapshot = {
        location_entry.origin: location_entry
        for location_entry in workspace.get_workspace_snapshot().values()
    }

    all_sensor_states = {
        sensor_state.origin.get_id(): sensor_state
        for sensor_state in instance.all_instigator_state(instigator_type=InstigatorType.SENSOR)
    }

    sensors = {}
    for location_entry in workspace_snapshot.values():
        repo_location = location_entry.repository_location
        if repo_location:
            for repo in repo_location.get_repositories().values():
                for sensor in repo.get_external_sensors():
                    origin_id = sensor.get_external_origin().get_id()
                    if sensor.get_current_instigator_state(
                        all_sensor_states.get(origin_id)
                    ).is_running:
                        sensors[origin_id] = sensor
        elif location_entry.load_error and log_verbose_checks:
            logger.warning(
                f"Could not load location {location_entry.origin.location_name} to check for sensors due to the following error: {location_entry.load_error}"
            )

    if log_verbose_checks:
        # Warn about sensor states whose location/repository/sensor can no
        # longer be resolved in the current workspace.
        unloadable_sensor_states = {
            origin_id: sensor_state
            for origin_id, sensor_state in all_sensor_states.items()
            if origin_id not in sensors and sensor_state.status == InstigatorStatus.RUNNING
        }

        for sensor_state in unloadable_sensor_states.values():
            sensor_name = sensor_state.origin.instigator_name
            repo_location_origin = (
                sensor_state.origin.external_repository_origin.repository_location_origin
            )

            repo_location_name = repo_location_origin.location_name
            repo_name = sensor_state.origin.external_repository_origin.repository_name
            if (
                repo_location_origin not in workspace_snapshot
                or not workspace_snapshot[repo_location_origin].repository_location
            ):
                logger.warning(
                    f"Sensor {sensor_name} was started from a location "
                    f"{repo_location_name} that can no longer be found in the workspace, or has "
                    "metadata that has changed since the sensor was started. You can turn off "
                    "this sensor in the Dagit UI from the Status tab."
                )
            elif not workspace_snapshot[repo_location_origin].repository_location.has_repository(
                repo_name
            ):
                logger.warning(
                    f"Could not find repository {repo_name} in location {repo_location_name} to "
                    + f"run sensor {sensor_name}. If this repository no longer exists, you can "
                    + "turn off the sensor in the Dagit UI from the Status tab.",
                )
            else:
                logger.warning(
                    f"Could not find sensor {sensor_name} in repository {repo_name}. If this "
                    "sensor no longer exists, you can turn it off in the Dagit UI from the "
                    "Status tab.",
                )

    if not sensors:
        if log_verbose_checks:
            logger.info("Not checking for any runs since no sensors have been started.")
        yield
        return

    now = pendulum.now("UTC")
    for external_sensor in sensors.values():
        sensor_name = external_sensor.name
        sensor_debug_crash_flags = debug_crash_flags.get(sensor_name) if debug_crash_flags else None
        error_info = None
        try:
            sensor_state = all_sensor_states.get(external_sensor.get_external_origin().get_id())
            if not sensor_state:
                # Sensor is running purely via its default status; materialize a state row.
                assert external_sensor.default_status == DefaultSensorStatus.RUNNING
                sensor_state = InstigatorState(
                    external_sensor.get_external_origin(),
                    InstigatorType.SENSOR,
                    InstigatorStatus.AUTOMATICALLY_RUNNING,
                    SensorInstigatorData(min_interval=external_sensor.min_interval_seconds),
                )
                instance.add_instigator_state(sensor_state)
            elif _is_under_min_interval(sensor_state, external_sensor, now):
                continue

            tick = instance.create_tick(
                TickData(
                    instigator_origin_id=sensor_state.instigator_origin_id,
                    instigator_name=sensor_state.instigator_name,
                    instigator_type=InstigatorType.SENSOR,
                    status=TickStatus.STARTED,
                    timestamp=now.timestamp(),
                )
            )

            _check_for_debug_crash(sensor_debug_crash_flags, "TICK_CREATED")

            with SensorLaunchContext(
                external_sensor, sensor_state, tick, instance, logger
            ) as tick_context:
                _check_for_debug_crash(sensor_debug_crash_flags, "TICK_HELD")
                yield from _evaluate_sensor(
                    tick_context,
                    instance,
                    workspace,
                    external_sensor,
                    sensor_state,
                    sensor_debug_crash_flags,
                )
        except Exception:
            error_info = serializable_error_info_from_exc_info(sys.exc_info())
            logger.error(
                "Sensor daemon caught an error for sensor {sensor_name} : {error_info}".format(
                    sensor_name=external_sensor.name,
                    error_info=error_info.to_string(),
                )
            )
        yield error_info


def _evaluate_sensor(
    context,
    instance,
    workspace,
    external_sensor,
    state,
    sensor_debug_crash_flags=None,
):
    """Run one sensor evaluation and launch/skip runs based on its result."""
    context.logger.info(f"Checking for new runs for sensor: {external_sensor.name}")

    sensor_origin = external_sensor.get_external_origin()
    repository_handle = external_sensor.handle.repository_handle
    repo_location = workspace.get_location(
        sensor_origin.external_repository_origin.repository_location_origin.location_name
    )

    sensor_runtime_data = repo_location.get_external_sensor_execution_data(
        instance,
        repository_handle,
        external_sensor.name,
        state.instigator_data.last_tick_timestamp if state.instigator_data else None,
        state.instigator_data.last_run_key if state.instigator_data else None,
        state.instigator_data.cursor if state.instigator_data else None,
    )

    yield

    assert isinstance(sensor_runtime_data, SensorExecutionData)
    if not sensor_runtime_data.run_requests:
        if sensor_runtime_data.pipeline_run_reactions:
            for pipeline_run_reaction in sensor_runtime_data.pipeline_run_reactions:
                origin_run_id = pipeline_run_reaction.pipeline_run.run_id
                if pipeline_run_reaction.error:
                    context.logger.error(
                        f"Got a reaction request for run {origin_run_id} but execution errorred: {pipeline_run_reaction.error}"
                    )
                    context.update_state(
                        TickStatus.FAILURE,
                        cursor=sensor_runtime_data.cursor,
                        error=pipeline_run_reaction.error,
                    )
                    # Since run status sensors have side effects that we don't want to repeat,
                    # we still want to update the cursor, even though the tick failed
                    context.set_should_update_cursor_on_failure(True)
                else:
                    # log to the original pipeline run
                    message = (
                        f'Sensor "{external_sensor.name}" acted on run status '
                        f"{pipeline_run_reaction.pipeline_run.status.value} of run {origin_run_id}."
                    )
                    instance.report_engine_event(
                        message=message, pipeline_run=pipeline_run_reaction.pipeline_run
                    )
                    context.logger.info(
                        f"Completed a reaction request for run {origin_run_id}: {message}"
                    )
                    context.update_state(
                        TickStatus.SUCCESS,
                        cursor=sensor_runtime_data.cursor,
                        origin_run_id=origin_run_id,
                    )
        elif sensor_runtime_data.skip_message:
            context.logger.info(
                f"Sensor {external_sensor.name} skipped: {sensor_runtime_data.skip_message}"
            )
            context.update_state(
                TickStatus.SKIPPED,
                skip_reason=sensor_runtime_data.skip_message,
                cursor=sensor_runtime_data.cursor,
            )
        else:
            context.logger.info(f"No run requests returned for {external_sensor.name}, skipping")
            context.update_state(TickStatus.SKIPPED, cursor=sensor_runtime_data.cursor)

        yield
        return

    skipped_runs = []
    for run_request in sensor_runtime_data.run_requests:
        target_data = external_sensor.get_target_data(run_request.job_name)

        pipeline_selector = PipelineSelector(
            location_name=repo_location.name,
            repository_name=sensor_origin.external_repository_origin.repository_name,
            pipeline_name=target_data.pipeline_name,
            solid_selection=target_data.solid_selection,
        )
        external_pipeline = repo_location.get_external_pipeline(pipeline_selector)
        run = _get_or_create_sensor_run(
            context,
            instance,
            repo_location,
            external_sensor,
            external_pipeline,
            run_request,
            target_data,
        )

        if isinstance(run, SkippedSensorRun):
            skipped_runs.append(run)
            yield
            continue

        _check_for_debug_crash(sensor_debug_crash_flags, "RUN_CREATED")

        error_info = None

        try:
            context.logger.info(
                "Launching run for {sensor_name}".format(sensor_name=external_sensor.name)
            )
            instance.submit_run(run.run_id, workspace)
            context.logger.info(
                "Completed launch of run {run_id} for {sensor_name}".format(
                    run_id=run.run_id, sensor_name=external_sensor.name
                )
            )
        except Exception:
            error_info = serializable_error_info_from_exc_info(sys.exc_info())
            context.logger.error(
                f"Run {run.run_id} created successfully but failed to launch: "
                f"{str(error_info)}"
            )

        yield error_info

        _check_for_debug_crash(sensor_debug_crash_flags, "RUN_LAUNCHED")
        context.add_run(run_id=run.run_id, run_key=run_request.run_key)

    if skipped_runs:
        run_keys = [skipped.run_key for skipped in skipped_runs]
        skipped_count = len(skipped_runs)
        context.logger.info(
            # FIX: the original nested double quotes inside a double-quoted
            # f-string ({"run" if ...}), a SyntaxError before Python 3.12.
            f"Skipping {skipped_count} {'run' if skipped_count == 1 else 'runs'} for sensor "
            f"{external_sensor.name} already completed with run keys: {seven.json.dumps(run_keys)}"
        )

    if context.run_count:
        context.update_state(TickStatus.SUCCESS, cursor=sensor_runtime_data.cursor)
    else:
        context.update_state(TickStatus.SKIPPED, cursor=sensor_runtime_data.cursor)

    yield


def _is_under_min_interval(state, external_sensor, now):
    # True when not enough time has elapsed since the last tick for this sensor.
    if not state.instigator_data:
        return False

    if not state.instigator_data.last_tick_timestamp:
        return False

    if not external_sensor.min_interval_seconds:
        return False

    elapsed = now.timestamp() - state.instigator_data.last_tick_timestamp
    return elapsed < external_sensor.min_interval_seconds


def _get_or_create_sensor_run(
    context, instance, repo_location, external_sensor, external_pipeline, run_request, target_data
):
    """Idempotently resolve a run for a run_request, honoring its run_key."""
    if not run_request.run_key:
        return _create_sensor_run(
            instance, repo_location, external_sensor, external_pipeline, run_request, target_data
        )

    existing_runs = instance.get_runs(
        RunsFilter(
            tags=merge_dicts(
                PipelineRun.tags_for_sensor(external_sensor),
                {RUN_KEY_TAG: run_request.run_key},
            )
        )
    )

    if len(existing_runs):
        run = existing_runs[0]
        if run.status != PipelineRunStatus.NOT_STARTED:
            # A run already exists and was launched for this time period,
            # but the daemon must have crashed before the tick could be put
            # into a SUCCESS state
            context.logger.info(f"Skipping run for {run_request.run_key}, found {run.run_id}.")
            return SkippedSensorRun(run_key=run_request.run_key, existing_run=run)
        else:
            context.logger.info(
                f"Run {run.run_id} already created with the run key "
                f"`{run_request.run_key}` for {external_sensor.name}"
            )
            return run

    context.logger.info(f"Creating new run for {external_sensor.name}")

    return _create_sensor_run(
        instance, repo_location, external_sensor, external_pipeline, run_request, target_data
    )


def _create_sensor_run(
    instance, repo_location, external_sensor, external_pipeline, run_request, target_data
):
    """Create (but do not launch) a NOT_STARTED run for a sensor run request."""
    from dagster.daemon.daemon import get_telemetry_daemon_session_id

    external_execution_plan = repo_location.get_external_execution_plan(
        external_pipeline,
        run_request.run_config,
        target_data.mode,
        step_keys_to_execute=None,
        known_state=None,
        instance=instance,
    )
    execution_plan_snapshot = external_execution_plan.execution_plan_snapshot

    pipeline_tags = external_pipeline.tags or {}
    check_tags(pipeline_tags, "pipeline_tags")
    tags = merge_dicts(
        merge_dicts(pipeline_tags, run_request.tags),
        PipelineRun.tags_for_sensor(external_sensor),
    )
    if run_request.run_key:
        tags[RUN_KEY_TAG] = run_request.run_key

    log_action(
        instance,
        SENSOR_RUN_CREATED,
        metadata={
            "DAEMON_SESSION_ID": get_telemetry_daemon_session_id(),
            "SENSOR_NAME_HASH": hash_name(external_sensor.name),
            "pipeline_name_hash": hash_name(external_pipeline.name),
            "repo_hash": hash_name(repo_location.name),
        },
    )

    return instance.create_run(
        pipeline_name=target_data.pipeline_name,
        run_id=None,
        run_config=run_request.run_config,
        mode=target_data.mode,
        solids_to_execute=external_pipeline.solids_to_execute,
        step_keys_to_execute=None,
        status=PipelineRunStatus.NOT_STARTED,
        solid_selection=target_data.solid_selection,
        root_run_id=None,
        parent_run_id=None,
        tags=tags,
        pipeline_snapshot=external_pipeline.pipeline_snapshot,
        execution_plan_snapshot=execution_plan_snapshot,
        parent_pipeline_snapshot=external_pipeline.parent_pipeline_snapshot,
        external_pipeline_origin=external_pipeline.get_external_origin(),
        pipeline_code_origin=external_pipeline.get_python_origin(),
    )
import os import sys import time from collections import namedtuple import pendulum from dagster import check, seven from dagster.core.definitions.run_request import InstigatorType from dagster.core.definitions.sensor_definition import DefaultSensorStatus, SensorExecutionData from dagster.core.errors import DagsterError from dagster.core.host_representation import PipelineSelector from dagster.core.instance import DagsterInstance from dagster.core.scheduler.instigation import ( InstigatorState, InstigatorStatus, SensorInstigatorData, TickData, TickStatus, ) from dagster.core.storage.pipeline_run import PipelineRun, PipelineRunStatus, RunsFilter from dagster.core.storage.tags import RUN_KEY_TAG, check_tags from dagster.core.telemetry import SENSOR_RUN_CREATED, hash_name, log_action from dagster.core.workspace import IWorkspace from dagster.utils import merge_dicts from dagster.utils.error import serializable_error_info_from_exc_info MIN_INTERVAL_LOOP_TIME = 5 FINISHED_TICK_STATES = [TickStatus.SKIPPED, TickStatus.SUCCESS, TickStatus.FAILURE] class DagsterSensorDaemonError(DagsterError): """Error when running the SensorDaemon""" class SkippedSensorRun(namedtuple("SkippedSensorRun", "run_key existing_run")): """Placeholder for runs that are skipped during the run_key idempotence check""" class SensorLaunchContext: def __init__(self, external_sensor, state, tick, instance, logger): self._external_sensor = external_sensor self._instance = instance self._logger = logger self._state = state self._tick = tick self._should_update_cursor_on_failure = False @property def status(self): return self._tick.status @property def logger(self): return self._logger @property def run_count(self): return len(self._tick.run_ids) def update_state(self, status, **kwargs): skip_reason = kwargs.get("skip_reason") cursor = kwargs.get("cursor") origin_run_id = kwargs.get("origin_run_id") if "skip_reason" in kwargs: del kwargs["skip_reason"] if "cursor" in kwargs: del kwargs["cursor"] if 
"origin_run_id" in kwargs: del kwargs["origin_run_id"] if kwargs: check.inst_param(status, "status", TickStatus) if status: self._tick = self._tick.with_status(status=status, **kwargs) if skip_reason: self._tick = self._tick.with_reason(skip_reason=skip_reason) if cursor: self._tick = self._tick.with_cursor(cursor) if origin_run_id: self._tick = self._tick.with_origin_run(origin_run_id) def add_run(self, run_id, run_key=None): self._tick = self._tick.with_run(run_id, run_key) def set_should_update_cursor_on_failure(self, should_update_cursor_on_failure: bool): self._should_update_cursor_on_failure = should_update_cursor_on_failure def _write(self): self._instance.update_tick(self._tick) if self._tick.status not in FINISHED_TICK_STATES: return should_update_cursor_and_last_run_key = ( self._tick.status != TickStatus.FAILURE ) or self._should_update_cursor_on_failure last_run_key = ( self._state.instigator_data.last_run_key if self._state.instigator_data else None ) if self._tick.run_keys and should_update_cursor_and_last_run_key: last_run_key = self._tick.run_keys[-1] cursor = self._state.instigator_data.cursor if self._state.instigator_data else None if should_update_cursor_and_last_run_key: cursor = self._tick.cursor self._instance.update_instigator_state( self._state.with_data( SensorInstigatorData( last_tick_timestamp=self._tick.timestamp, last_run_key=last_run_key, min_interval=self._external_sensor.min_interval_seconds, cursor=cursor, ) ) ) def __enter__(self): return self def __exit__(self, exception_type, exception_value, traceback): if exception_type and isinstance(exception_value, KeyboardInterrupt): return # Log the error if the failure wasn't an interrupt or the daemon generator stopping if exception_value and not isinstance(exception_value, GeneratorExit): error_data = serializable_error_info_from_exc_info(sys.exc_info()) self.update_state(TickStatus.FAILURE, error=error_data) self._write() self._instance.purge_ticks( self._state.instigator_origin_id, 
tick_status=TickStatus.SKIPPED, before=pendulum.now("UTC").subtract(days=7).timestamp(), # keep the last 7 days ) def _check_for_debug_crash(debug_crash_flags, key): if not debug_crash_flags: return kill_signal = debug_crash_flags.get(key) if not kill_signal: return os.kill(os.getpid(), kill_signal) time.sleep(10) raise Exception("Process didn't terminate after sending crash signal") RELOAD_WORKSPACE = 60 def execute_sensor_iteration_loop(instance, workspace, logger, until=None): """ Helper function that performs sensor evaluations on a tighter loop, while reusing grpc locations within a given daemon interval. Rather than relying on the daemon machinery to run the iteration loop every 30 seconds, sensors are continuously evaluated, every 5 seconds. We rely on each sensor definition's min_interval to check that sensor evaluations are spaced appropriately. """ workspace_loaded_time = pendulum.now("UTC").timestamp() workspace_iteration = 0 start_time = pendulum.now("UTC").timestamp() while True: start_time = pendulum.now("UTC").timestamp() if until and start_time >= until: # provide a way of organically ending the loop to support test environment break if start_time - workspace_loaded_time > RELOAD_WORKSPACE: workspace.cleanup() workspace_loaded_time = pendulum.now("UTC").timestamp() workspace_iteration = 0 yield from execute_sensor_iteration( instance, logger, workspace, log_verbose_checks=(workspace_iteration == 0) ) loop_duration = pendulum.now("UTC").timestamp() - start_time sleep_time = max(0, MIN_INTERVAL_LOOP_TIME - loop_duration) time.sleep(sleep_time) yield workspace_iteration += 1 def execute_sensor_iteration( instance, logger, workspace, log_verbose_checks=True, debug_crash_flags=None ): check.inst_param(workspace, "workspace", IWorkspace) check.inst_param(instance, "instance", DagsterInstance) workspace_snapshot = { location_entry.origin: location_entry for location_entry in workspace.get_workspace_snapshot().values() } all_sensor_states = { 
sensor_state.origin.get_id(): sensor_state for sensor_state in instance.all_instigator_state(instigator_type=InstigatorType.SENSOR) } sensors = {} for location_entry in workspace_snapshot.values(): repo_location = location_entry.repository_location if repo_location: for repo in repo_location.get_repositories().values(): for sensor in repo.get_external_sensors(): origin_id = sensor.get_external_origin().get_id() if sensor.get_current_instigator_state( all_sensor_states.get(origin_id) ).is_running: sensors[origin_id] = sensor elif location_entry.load_error and log_verbose_checks: logger.warning( f"Could not load location {location_entry.origin.location_name} to check for sensors due to the following error: {location_entry.load_error}" ) if log_verbose_checks: unloadable_sensor_states = { origin_id: sensor_state for origin_id, sensor_state in all_sensor_states.items() if origin_id not in sensors and sensor_state.status == InstigatorStatus.RUNNING } for sensor_state in unloadable_sensor_states.values(): sensor_name = sensor_state.origin.instigator_name repo_location_origin = ( sensor_state.origin.external_repository_origin.repository_location_origin ) repo_location_name = repo_location_origin.location_name repo_name = sensor_state.origin.external_repository_origin.repository_name if ( repo_location_origin not in workspace_snapshot or not workspace_snapshot[repo_location_origin].repository_location ): logger.warning( f"Sensor {sensor_name} was started from a location " f"{repo_location_name} that can no longer be found in the workspace, or has " "metadata that has changed since the sensor was started. You can turn off " "this sensor in the Dagit UI from the Status tab." ) elif not workspace_snapshot[repo_location_origin].repository_location.has_repository( repo_name ): logger.warning( f"Could not find repository {repo_name} in location {repo_location_name} to " + f"run sensor {sensor_name}. 
If this repository no longer exists, you can " + "turn off the sensor in the Dagit UI from the Status tab.", ) else: logger.warning( f"Could not find sensor {sensor_name} in repository {repo_name}. If this " "sensor no longer exists, you can turn it off in the Dagit UI from the " "Status tab.", ) if not sensors: if log_verbose_checks: logger.info("Not checking for any runs since no sensors have been started.") yield return now = pendulum.now("UTC") for external_sensor in sensors.values(): sensor_name = external_sensor.name sensor_debug_crash_flags = debug_crash_flags.get(sensor_name) if debug_crash_flags else None error_info = None try: sensor_state = all_sensor_states.get(external_sensor.get_external_origin().get_id()) if not sensor_state: assert external_sensor.default_status == DefaultSensorStatus.RUNNING sensor_state = InstigatorState( external_sensor.get_external_origin(), InstigatorType.SENSOR, InstigatorStatus.AUTOMATICALLY_RUNNING, SensorInstigatorData(min_interval=external_sensor.min_interval_seconds), ) instance.add_instigator_state(sensor_state) elif _is_under_min_interval(sensor_state, external_sensor, now): continue tick = instance.create_tick( TickData( instigator_origin_id=sensor_state.instigator_origin_id, instigator_name=sensor_state.instigator_name, instigator_type=InstigatorType.SENSOR, status=TickStatus.STARTED, timestamp=now.timestamp(), ) ) _check_for_debug_crash(sensor_debug_crash_flags, "TICK_CREATED") with SensorLaunchContext( external_sensor, sensor_state, tick, instance, logger ) as tick_context: _check_for_debug_crash(sensor_debug_crash_flags, "TICK_HELD") yield from _evaluate_sensor( tick_context, instance, workspace, external_sensor, sensor_state, sensor_debug_crash_flags, ) except Exception: error_info = serializable_error_info_from_exc_info(sys.exc_info()) logger.error( "Sensor daemon caught an error for sensor {sensor_name} : {error_info}".format( sensor_name=external_sensor.name, error_info=error_info.to_string(), ) ) yield 
error_info def _evaluate_sensor( context, instance, workspace, external_sensor, state, sensor_debug_crash_flags=None, ): context.logger.info(f"Checking for new runs for sensor: {external_sensor.name}") sensor_origin = external_sensor.get_external_origin() repository_handle = external_sensor.handle.repository_handle repo_location = workspace.get_location( sensor_origin.external_repository_origin.repository_location_origin.location_name ) sensor_runtime_data = repo_location.get_external_sensor_execution_data( instance, repository_handle, external_sensor.name, state.instigator_data.last_tick_timestamp if state.instigator_data else None, state.instigator_data.last_run_key if state.instigator_data else None, state.instigator_data.cursor if state.instigator_data else None, ) yield assert isinstance(sensor_runtime_data, SensorExecutionData) if not sensor_runtime_data.run_requests: if sensor_runtime_data.pipeline_run_reactions: for pipeline_run_reaction in sensor_runtime_data.pipeline_run_reactions: origin_run_id = pipeline_run_reaction.pipeline_run.run_id if pipeline_run_reaction.error: context.logger.error( f"Got a reaction request for run {origin_run_id} but execution errorred: {pipeline_run_reaction.error}" ) context.update_state( TickStatus.FAILURE, cursor=sensor_runtime_data.cursor, error=pipeline_run_reaction.error, ) # Since run status sensors have side effects that we don't want to repeat, # we still want to update the cursor, even though the tick failed context.set_should_update_cursor_on_failure(True) else: # log to the original pipeline run message = ( f'Sensor "{external_sensor.name}" acted on run status ' f"{pipeline_run_reaction.pipeline_run.status.value} of run {origin_run_id}." 
) instance.report_engine_event( message=message, pipeline_run=pipeline_run_reaction.pipeline_run ) context.logger.info( f"Completed a reaction request for run {origin_run_id}: {message}" ) context.update_state( TickStatus.SUCCESS, cursor=sensor_runtime_data.cursor, origin_run_id=origin_run_id, ) elif sensor_runtime_data.skip_message: context.logger.info( f"Sensor {external_sensor.name} skipped: {sensor_runtime_data.skip_message}" ) context.update_state( TickStatus.SKIPPED, skip_reason=sensor_runtime_data.skip_message, cursor=sensor_runtime_data.cursor, ) else: context.logger.info(f"No run requests returned for {external_sensor.name}, skipping") context.update_state(TickStatus.SKIPPED, cursor=sensor_runtime_data.cursor) yield return skipped_runs = [] for run_request in sensor_runtime_data.run_requests: target_data = external_sensor.get_target_data(run_request.job_name) pipeline_selector = PipelineSelector( location_name=repo_location.name, repository_name=sensor_origin.external_repository_origin.repository_name, pipeline_name=target_data.pipeline_name, solid_selection=target_data.solid_selection, ) external_pipeline = repo_location.get_external_pipeline(pipeline_selector) run = _get_or_create_sensor_run( context, instance, repo_location, external_sensor, external_pipeline, run_request, target_data, ) if isinstance(run, SkippedSensorRun): skipped_runs.append(run) yield continue _check_for_debug_crash(sensor_debug_crash_flags, "RUN_CREATED") error_info = None try: context.logger.info( "Launching run for {sensor_name}".format(sensor_name=external_sensor.name) ) instance.submit_run(run.run_id, workspace) context.logger.info( "Completed launch of run {run_id} for {sensor_name}".format( run_id=run.run_id, sensor_name=external_sensor.name ) ) except Exception: error_info = serializable_error_info_from_exc_info(sys.exc_info()) context.logger.error( f"Run {run.run_id} created successfully but failed to launch: " f"{str(error_info)}" ) yield error_info 
_check_for_debug_crash(sensor_debug_crash_flags, "RUN_LAUNCHED") context.add_run(run_id=run.run_id, run_key=run_request.run_key) if skipped_runs: run_keys = [skipped.run_key for skipped in skipped_runs] skipped_count = len(skipped_runs) context.logger.info( f"Skipping {skipped_count} {'run' if skipped_count == 1 else 'runs'} for sensor " f"{external_sensor.name} already completed with run keys: {seven.json.dumps(run_keys)}" ) if context.run_count: context.update_state(TickStatus.SUCCESS, cursor=sensor_runtime_data.cursor) else: context.update_state(TickStatus.SKIPPED, cursor=sensor_runtime_data.cursor) yield def _is_under_min_interval(state, external_sensor, now): if not state.instigator_data: return False if not state.instigator_data.last_tick_timestamp: return False if not external_sensor.min_interval_seconds: return False elapsed = now.timestamp() - state.instigator_data.last_tick_timestamp return elapsed < external_sensor.min_interval_seconds def _get_or_create_sensor_run( context, instance, repo_location, external_sensor, external_pipeline, run_request, target_data ): if not run_request.run_key: return _create_sensor_run( instance, repo_location, external_sensor, external_pipeline, run_request, target_data ) existing_runs = instance.get_runs( RunsFilter( tags=merge_dicts( PipelineRun.tags_for_sensor(external_sensor), {RUN_KEY_TAG: run_request.run_key}, ) ) ) if len(existing_runs): run = existing_runs[0] if run.status != PipelineRunStatus.NOT_STARTED: # A run already exists and was launched for this time period, # but the daemon must have crashed before the tick could be put # into a SUCCESS state context.logger.info(f"Skipping run for {run_request.run_key}, found {run.run_id}.") return SkippedSensorRun(run_key=run_request.run_key, existing_run=run) else: context.logger.info( f"Run {run.run_id} already created with the run key " f"`{run_request.run_key}` for {external_sensor.name}" ) return run context.logger.info(f"Creating new run for {external_sensor.name}") 
return _create_sensor_run( instance, repo_location, external_sensor, external_pipeline, run_request, target_data ) def _create_sensor_run( instance, repo_location, external_sensor, external_pipeline, run_request, target_data ): from dagster.daemon.daemon import get_telemetry_daemon_session_id external_execution_plan = repo_location.get_external_execution_plan( external_pipeline, run_request.run_config, target_data.mode, step_keys_to_execute=None, known_state=None, instance=instance, ) execution_plan_snapshot = external_execution_plan.execution_plan_snapshot pipeline_tags = external_pipeline.tags or {} check_tags(pipeline_tags, "pipeline_tags") tags = merge_dicts( merge_dicts(pipeline_tags, run_request.tags), PipelineRun.tags_for_sensor(external_sensor), ) if run_request.run_key: tags[RUN_KEY_TAG] = run_request.run_key log_action( instance, SENSOR_RUN_CREATED, metadata={ "DAEMON_SESSION_ID": get_telemetry_daemon_session_id(), "SENSOR_NAME_HASH": hash_name(external_sensor.name), "pipeline_name_hash": hash_name(external_pipeline.name), "repo_hash": hash_name(repo_location.name), }, ) return instance.create_run( pipeline_name=target_data.pipeline_name, run_id=None, run_config=run_request.run_config, mode=target_data.mode, solids_to_execute=external_pipeline.solids_to_execute, step_keys_to_execute=None, status=PipelineRunStatus.NOT_STARTED, solid_selection=target_data.solid_selection, root_run_id=None, parent_run_id=None, tags=tags, pipeline_snapshot=external_pipeline.pipeline_snapshot, execution_plan_snapshot=execution_plan_snapshot, parent_pipeline_snapshot=external_pipeline.parent_pipeline_snapshot, external_pipeline_origin=external_pipeline.get_external_origin(), pipeline_code_origin=external_pipeline.get_python_origin(), )
import argparse import asyncio import copy import json from argparse import Namespace from contextlib import ExitStack from typing import Optional, Sequence, Dict import os from .... import __default_host__, __resources_path__ from ....enums import replace_enum_to_str from ....importer import ImportExtensions from ....jaml.helper import complete_path from ....logging.logger import JinaLogger class DaemonClient: """ Jina Daemon client. :param host: the host address of ``jinad`` instance :param port: the port number of ``jinad`` instance :param logger: Jinalogger to log information. :param timeout: stop waiting for a response after a given number of seconds with the timeout parameter. """ kind = 'pea' # select from pea/pod, TODO: enum def __init__( self, host: str, port: int, logger: 'JinaLogger' = None, timeout: int = 5, **kwargs, ): self.logger = logger or JinaLogger(host) self.timeout = timeout # for now it is http. but it can be https or unix socket or fd # TODO: for https, the jinad server would need a tls certificate. # no changes would be required in terms of how the api gets invoked, # as requests does ssl verfication. 
we'd need to add some exception handling logic though base_url = f'{host}:{port}' rest_url = f'http://{base_url}' self.alive_api = f'{rest_url}/' self.upload_api = f'{rest_url}/workspaces' self.upload_api_arg = 'files' # this is defined in Daemon API upload interface if self.kind == 'pea': self.store_api = f'{rest_url}/peas' elif self.kind == 'pod': self.store_api = f'{rest_url}/pods' else: raise ValueError(f'{self.kind} is not supported') self.logstream_api = f'ws://{base_url}/logstream' @property def is_alive(self) -> bool: """ Return True if ``jinad`` is alive at remote :return: True if ``jinad`` is alive at remote else false """ with ImportExtensions(required=True): import requests try: r = requests.get(url=self.alive_api, timeout=self.timeout) return r.status_code == requests.codes.ok except requests.exceptions.RequestException as ex: self.logger.error(f'remote manager is not alive: {ex!r}') return False def get_status(self, identity: str) -> Dict: """Get status of the remote Pea / Pod :param identity: UUID string based identity for the Pea :raises: requests.exceptions.RequestException :return: json response of the remote Pea / Pod status :rtype: Dict """ with ImportExtensions(required=True): import requests try: r = requests.get(url=f'{self.store_api}/{identity}', timeout=self.timeout) rj = r.json() if r.status_code == 200: return rj raise requests.exceptions.RequestException(rj) except requests.exceptions.RequestException as ex: self.logger.error(f'can\'t get status of {self.kind}: {ex!r}') def upload( self, dependencies: Sequence[str], workspace_id: Optional[str] = None ) -> str: """Upload local file dependencies to remote server by extracting from the pea_args :param dependencies: file dependencies :type dependencies: Sequence[str] :param workspace_id: Workspace to which the files will get uploaded, defaults to None :type workspace_id: str :raises: requests.exceptions.RequestException :return: json response for upload :rtype: str """ import requests with 
ExitStack() as file_stack: files = [ ( self.upload_api_arg, file_stack.enter_context(open(complete_path(f), 'rb')), ) for f in dependencies ] if files: try: self.logger.info(f'uploading {len(files)} file(s): {dependencies}') r = requests.post( url=self.upload_api, files=files, data={'workspace_id': workspace_id} if workspace_id else None, timeout=self.timeout, ) rj = r.json() if r.status_code == 201: return rj else: raise requests.exceptions.RequestException(rj) except requests.exceptions.RequestException as ex: self.logger.error(f'fail to upload as {ex!r}') def create(self, args: 'Namespace') -> Optional[str]: """Create a remote Pea / Pod :param args: the arguments for remote Pea :type args: Namespace :raises: requests.exceptions.RequestException :return: the identity of the spawned Pea / Pod :rtype: Optional[str] """ with ImportExtensions(required=True): import requests try: payload = replace_enum_to_str(vars(self._mask_args(args))) # set timeout to None if args.timeout_ready is -1 (wait forever) r = requests.post( url=self.store_api, json=payload, timeout=args.timeout_ready if args.timeout_ready != -1 else None, ) rj = r.json() if r.status_code == 201: return rj elif r.status_code == 400: # known internal error rj_body = '\n'.join(j for j in rj['body']) self.logger.error(f'{rj['detail']}\n{rj_body}') elif r.status_code == 422: self.logger.error( 'your payload is not correct, please follow the error message and double check' ) raise requests.exceptions.RequestException(rj) except requests.exceptions.RequestException as ex: self.logger.error(f'fail to create as {ex!r}') async def logstream(self, workspace_id: str, log_id: str): """Websocket log stream from remote Pea / Pod :param workspace_id: the identity of the workspace :type workspace_id: str :param log_id: the identity of that Pea / Pod :type log_id: str """ with ImportExtensions(required=True): import websockets remote_log_config = os.path.join(__resources_path__, 'logging.remote.yml') all_remote_loggers = 
{} try: async with websockets.connect( f'{self.logstream_api}/{workspace_id}/{log_id}' ) as websocket: async for log_line in websocket: try: ll = json.loads(log_line) name = ll['name'] if name not in all_remote_loggers: all_remote_loggers[name] = JinaLogger( context=ll['host'], log_config=remote_log_config ) all_remote_loggers[name].info( '{host} {name} {type} {message}'.format_map(ll) ) except json.decoder.JSONDecodeError: continue except websockets.exceptions.ConnectionClosedOK: self.logger.warning(f'log streaming is disconnected') except websockets.exceptions.WebSocketException as e: self.logger.error( f'log streaming is disabled, you won\'t see logs on the remote\n Reason: {e!r}' ) except asyncio.CancelledError: self.logger.info(f'log streaming is cancelled') finally: for l in all_remote_loggers.values(): l.close() def delete(self, remote_id: str, **kwargs) -> bool: """ Delete a remote pea/pod :param remote_id: the identity of that pea/pod :param kwargs: keyword arguments :return: True if the deletion is successful """ with ImportExtensions(required=True): import requests try: url = f'{self.store_api}/{remote_id}' r = requests.delete(url=url, timeout=self.timeout) return r.status_code == 200 except requests.exceptions.RequestException as ex: self.logger.error(f'fail to delete {remote_id} as {ex!r}') return False def _mask_args(self, args: 'argparse.Namespace'): _args = copy.deepcopy(args) # reset the runtime to ZEDRuntime or ContainerRuntime if _args.runtime_cls == 'JinadRuntime': if _args.uses.startswith('docker://'): _args.runtime_cls = 'ContainerRuntime' else: _args.runtime_cls = 'ZEDRuntime' # reset the host default host # TODO:/NOTE this prevents jumping from remote to another remote (Han: 2021.1.17) _args.host = __default_host__ _args.log_config = '' # do not use local log_config _args.upload_files = [] # reset upload files _args.noblock_on_start = False # wait until start success changes = [] for k, v in vars(_args).items(): if v != getattr(args, k): 
changes.append(f'{k:>30s}: {str(getattr(args, k)):30s} -> {str(v):30s}') if changes: changes = [ 'note the following arguments have been masked or altered for remote purpose:' ] + changes self.logger.warning('\n'.join(changes)) return _args class PeaDaemonClient(DaemonClient): """Pea API, we might have different endpoints for peas & pods later""" kind = 'pea' class PodDaemonClient(DaemonClient): """Pod API, we might have different endpoints for peas & pods later""" kind = 'pod'
import argparse import asyncio import copy import json from argparse import Namespace from contextlib import ExitStack from typing import Optional, Sequence, Dict import os from .... import __default_host__, __resources_path__ from ....enums import replace_enum_to_str from ....importer import ImportExtensions from ....jaml.helper import complete_path from ....logging.logger import JinaLogger class DaemonClient: """ Jina Daemon client. :param host: the host address of ``jinad`` instance :param port: the port number of ``jinad`` instance :param logger: Jinalogger to log information. :param timeout: stop waiting for a response after a given number of seconds with the timeout parameter. """ kind = 'pea' # select from pea/pod, TODO: enum def __init__( self, host: str, port: int, logger: 'JinaLogger' = None, timeout: int = 5, **kwargs, ): self.logger = logger or JinaLogger(host) self.timeout = timeout # for now it is http. but it can be https or unix socket or fd # TODO: for https, the jinad server would need a tls certificate. # no changes would be required in terms of how the api gets invoked, # as requests does ssl verfication. 
we'd need to add some exception handling logic though base_url = f'{host}:{port}' rest_url = f'http://{base_url}' self.alive_api = f'{rest_url}/' self.upload_api = f'{rest_url}/workspaces' self.upload_api_arg = 'files' # this is defined in Daemon API upload interface if self.kind == 'pea': self.store_api = f'{rest_url}/peas' elif self.kind == 'pod': self.store_api = f'{rest_url}/pods' else: raise ValueError(f'{self.kind} is not supported') self.logstream_api = f'ws://{base_url}/logstream' @property def is_alive(self) -> bool: """ Return True if ``jinad`` is alive at remote :return: True if ``jinad`` is alive at remote else false """ with ImportExtensions(required=True): import requests try: r = requests.get(url=self.alive_api, timeout=self.timeout) return r.status_code == requests.codes.ok except requests.exceptions.RequestException as ex: self.logger.error(f'remote manager is not alive: {ex!r}') return False def get_status(self, identity: str) -> Dict: """Get status of the remote Pea / Pod :param identity: UUID string based identity for the Pea :raises: requests.exceptions.RequestException :return: json response of the remote Pea / Pod status :rtype: Dict """ with ImportExtensions(required=True): import requests try: r = requests.get(url=f'{self.store_api}/{identity}', timeout=self.timeout) rj = r.json() if r.status_code == 200: return rj raise requests.exceptions.RequestException(rj) except requests.exceptions.RequestException as ex: self.logger.error(f'can\'t get status of {self.kind}: {ex!r}') def upload( self, dependencies: Sequence[str], workspace_id: Optional[str] = None ) -> str: """Upload local file dependencies to remote server by extracting from the pea_args :param dependencies: file dependencies :type dependencies: Sequence[str] :param workspace_id: Workspace to which the files will get uploaded, defaults to None :type workspace_id: str :raises: requests.exceptions.RequestException :return: json response for upload :rtype: str """ import requests with 
ExitStack() as file_stack: files = [ ( self.upload_api_arg, file_stack.enter_context(open(complete_path(f), 'rb')), ) for f in dependencies ] if files: try: self.logger.info(f'uploading {len(files)} file(s): {dependencies}') r = requests.post( url=self.upload_api, files=files, data={'workspace_id': workspace_id} if workspace_id else None, timeout=self.timeout, ) rj = r.json() if r.status_code == 201: return rj else: raise requests.exceptions.RequestException(rj) except requests.exceptions.RequestException as ex: self.logger.error(f'fail to upload as {ex!r}') def create(self, args: 'Namespace') -> Optional[str]: """Create a remote Pea / Pod :param args: the arguments for remote Pea :type args: Namespace :raises: requests.exceptions.RequestException :return: the identity of the spawned Pea / Pod :rtype: Optional[str] """ with ImportExtensions(required=True): import requests try: payload = replace_enum_to_str(vars(self._mask_args(args))) # set timeout to None if args.timeout_ready is -1 (wait forever) r = requests.post( url=self.store_api, json=payload, timeout=args.timeout_ready if args.timeout_ready != -1 else None, ) rj = r.json() if r.status_code == 201: return rj elif r.status_code == 400: # known internal error rj_body = '\n'.join(j for j in rj['body']) self.logger.error(f'{rj["detail"]}\n{rj_body}') elif r.status_code == 422: self.logger.error( 'your payload is not correct, please follow the error message and double check' ) raise requests.exceptions.RequestException(rj) except requests.exceptions.RequestException as ex: self.logger.error(f'fail to create as {ex!r}') async def logstream(self, workspace_id: str, log_id: str): """Websocket log stream from remote Pea / Pod :param workspace_id: the identity of the workspace :type workspace_id: str :param log_id: the identity of that Pea / Pod :type log_id: str """ with ImportExtensions(required=True): import websockets remote_log_config = os.path.join(__resources_path__, 'logging.remote.yml') all_remote_loggers = 
{} try: async with websockets.connect( f'{self.logstream_api}/{workspace_id}/{log_id}' ) as websocket: async for log_line in websocket: try: ll = json.loads(log_line) name = ll['name'] if name not in all_remote_loggers: all_remote_loggers[name] = JinaLogger( context=ll['host'], log_config=remote_log_config ) all_remote_loggers[name].info( '{host} {name} {type} {message}'.format_map(ll) ) except json.decoder.JSONDecodeError: continue except websockets.exceptions.ConnectionClosedOK: self.logger.warning(f'log streaming is disconnected') except websockets.exceptions.WebSocketException as e: self.logger.error( f'log streaming is disabled, you won\'t see logs on the remote\n Reason: {e!r}' ) except asyncio.CancelledError: self.logger.info(f'log streaming is cancelled') finally: for l in all_remote_loggers.values(): l.close() def delete(self, remote_id: str, **kwargs) -> bool: """ Delete a remote pea/pod :param remote_id: the identity of that pea/pod :param kwargs: keyword arguments :return: True if the deletion is successful """ with ImportExtensions(required=True): import requests try: url = f'{self.store_api}/{remote_id}' r = requests.delete(url=url, timeout=self.timeout) return r.status_code == 200 except requests.exceptions.RequestException as ex: self.logger.error(f'fail to delete {remote_id} as {ex!r}') return False def _mask_args(self, args: 'argparse.Namespace'): _args = copy.deepcopy(args) # reset the runtime to ZEDRuntime or ContainerRuntime if _args.runtime_cls == 'JinadRuntime': if _args.uses.startswith('docker://'): _args.runtime_cls = 'ContainerRuntime' else: _args.runtime_cls = 'ZEDRuntime' # reset the host default host # TODO:/NOTE this prevents jumping from remote to another remote (Han: 2021.1.17) _args.host = __default_host__ _args.log_config = '' # do not use local log_config _args.upload_files = [] # reset upload files _args.noblock_on_start = False # wait until start success changes = [] for k, v in vars(_args).items(): if v != getattr(args, k): 
changes.append(f'{k:>30s}: {str(getattr(args, k)):30s} -> {str(v):30s}') if changes: changes = [ 'note the following arguments have been masked or altered for remote purpose:' ] + changes self.logger.warning('\n'.join(changes)) return _args class PeaDaemonClient(DaemonClient): """Pea API, we might have different endpoints for peas & pods later""" kind = 'pea' class PodDaemonClient(DaemonClient): """Pod API, we might have different endpoints for peas & pods later""" kind = 'pod'
"""AWS/EKS implementation of the cluster provider interface.

Creates, updates, connects to, and destroys EKS clusters via CloudFormation
stacks, and wires up kubeconfig / IAM access for them.
"""
import datetime
import re
import sys
import time
import types

import yaml
from botocore.exceptions import ClientError

from ..cluster.object import AWSCluster
from ..eks.service import DEFAULT_KUBERNETES_VERSION, SUPPORTED_KUBERNETES_VERSIONS
from ..exceptions import AwsClusterSharePermissionError, AwsPermissionsError, ClusterDestroyError, OrchestrateException
from ..node_groups import ALL_NODE_GROUP_TYPES, NODE_GROUP_TYPE_CPU, NODE_GROUP_TYPE_GPU, NODE_GROUP_TYPE_SYSTEM
from ..paths import get_executable_path
from ..provider.constants import Provider
from ..provider.interface import ProviderInterface


def is_cuda_gpu_instance_type(instance_type):
    """Return True when the EC2 instance family (text before the '.') ships CUDA GPUs."""
    prefix, _ = instance_type.split('.', 1)
    return prefix in ('p4d', 'p3', 'p3dn', 'p2', 'g4dn', 'g3')


def catch_aws_permissions_errors(func):
    """Wrap *func* so AWS authorization failures surface as AwsPermissionsError."""
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except ClientError as e:
            code = e.response['Error']['Code']
            http_status_code = e.response['ResponseMetadata']['HTTPStatusCode']
            if http_status_code == 403 or code in ('AccessDeniedException', 'UnauthorizedOperation'):
                raise AwsPermissionsError(e) from e
            raise
    return wrapper


def make_role_config_map(node_instance_role_arn, cluster_access_role_arn, cluster_access_role_name):
    """Build the kube-system/aws-auth ConfigMap mapping IAM roles to k8s groups."""
    map_roles = [
        {
            "rolearn": node_instance_role_arn,
            "username": "system:node:{{EC2PrivateDNSName}}",
            "groups": ["system:bootstrappers", "system:nodes"],
        },
        {
            "rolearn": cluster_access_role_arn,
            "username": cluster_access_role_name,
            "groups": ["system:masters"],
        },
    ]
    return {
        "apiVersion": "v1",
        "kind": "ConfigMap",
        "metadata": {
            "name": "aws-auth",
            "namespace": "kube-system",
        },
        "data": {
            "mapRoles": yaml.dump(map_roles),
        },
    }


class AwsService(ProviderInterface):
    """ProviderInterface implementation backed by AWS (EKS + CloudFormation)."""

    def __init__(self, services, aws_services):
        super().__init__(services)
        self.aws_services = aws_services

    def __getattribute__(self, name):
        # Transparently wrap every bound method so AWS permission errors are
        # normalized to AwsPermissionsError without decorating each method.
        attr = super().__getattribute__(name)
        if isinstance(attr, types.MethodType):
            attr = catch_aws_permissions_errors(attr)
        return attr

    def describe_kubernetes_cluster(self, cluster_name):
        """Return the EKS 'cluster' description dict, raising if it does not exist."""
        try:
            return self.aws_services.eks_service.describe_cluster(cluster_name=cluster_name)['cluster']
        except self.aws_services.eks_service.client.exceptions.ResourceNotFoundException as e:
            raise OrchestrateException(
                f"We cannot find an EKS cluster named '{cluster_name}' using your current AWS credentials."
                " Did someone delete this cluster?"
            ) from e

    def validate_cluster_options(self, cluster_name, node_groups_config, kubernetes_version):
        """Sanity-check the requested cluster name, node groups and k8s version.

        NOTE: these checks use ``assert`` and are therefore stripped under
        ``python -O``.
        """
        if kubernetes_version:
            assert kubernetes_version in SUPPORTED_KUBERNETES_VERSIONS, (
                'Unsupported kubernetes version for EKS:'
                f' {kubernetes_version}. Must be one of: {SUPPORTED_KUBERNETES_VERSIONS}'
            )
        cpu_nodes_config = node_groups_config.get(NODE_GROUP_TYPE_CPU)
        gpu_nodes_config = node_groups_config.get(NODE_GROUP_TYPE_GPU)
        assert cpu_nodes_config or gpu_nodes_config, (
            "Looks like your cluster config file is not"
            " asking us to spin up any CPU or GPU machines."
        )
        name_regex = '^[a-zA-Z][-a-zA-Z0-9]*$'
        assert cluster_name and re.match(name_regex, cluster_name), \
            'Cluster names for AWS must match the regex: /' + name_regex + '/'
        if gpu_nodes_config:
            gpu_instance_type = gpu_nodes_config['instance_type']
            assert is_cuda_gpu_instance_type(gpu_instance_type), (
                f"GPUs are not supported on the instance type ({gpu_instance_type})"
            )

    def _handle_stack_event(self, _, event):
        """Print CloudFormation stack events as they stream; failures go to stderr."""
        resource_status = event["ResourceStatus"]
        logical_id = event["LogicalResourceId"]
        # Single quotes for the nested lookups: reusing the enclosing quote
        # character inside an f-string is a SyntaxError before Python 3.12.
        print(f"{resource_status} {event['ResourceType']} {logical_id} {event['PhysicalResourceId']}")
        if resource_status.endswith("_FAILED"):
            print(f"Error {resource_status}: {logical_id}: {event['ResourceStatusReason']}", file=sys.stderr)

    def get_node_groups(self, options):
        """Normalize the per-type node group configs, defaulting missing ones to {}."""
        return {
            node_group_type: options.get(node_group_type) or {}
            for node_group_type in ALL_NODE_GROUP_TYPES
        }

    def _create_or_update_kubernetes_cluster(self, options, update):
        """Create (update=False) or update (update=True) an EKS cluster from *options*.

        Returns an AWSCluster object for the resulting cluster.
        """
        # Timezone-aware "now"; utcnow() is deprecated and was naive until .replace()'d.
        start_time = datetime.datetime.now(datetime.timezone.utc)
        cluster_name = options['cluster_name']
        kubernetes_version = options.get('kubernetes_version') or DEFAULT_KUBERNETES_VERSION
        node_groups = self.get_node_groups(options)
        self.validate_cluster_options(cluster_name, node_groups, kubernetes_version)
        aws_options = options.get('aws') or {}
        additional_policies = aws_options.get('additional_policies') or []
        common_kwargs = dict(
            cluster_name=cluster_name,
            system_node_config=node_groups[NODE_GROUP_TYPE_SYSTEM],
            cpu_node_config=node_groups[NODE_GROUP_TYPE_CPU],
            gpu_node_config=node_groups[NODE_GROUP_TYPE_GPU],
            key_name=self.aws_services.ec2_service.ensure_key_pair_for_cluster(cluster_name).name,
            kubernetes_version=kubernetes_version,
        )
        if update:
            eks_cluster_stack = self.aws_services.cloudformation_service.update_eks_cluster_stack(
                event_handler=self._handle_stack_event,
                **common_kwargs,
            )
        else:
            try:
                eks_cluster_stack = self.aws_services.cloudformation_service.ensure_eks_cluster_stack(
                    **common_kwargs,
                )
                self.aws_services.cloudformation_service.wait_for_stack_create_complete(
                    eks_cluster_stack.name,
                    event_handler=self._handle_stack_event,
                    after=start_time,
                )
            except Exception:
                print("*" * 50)
                print("ERROR: encountered an error creating EKS cluster; tearing down resources")
                print("*" * 50)
                # TODO(dan): can we catch something more fine-grained here?
                self.aws_services.cloudformation_service.ensure_eks_cluster_stack_deleted(
                    cluster_name,
                    self._handle_stack_event,
                )
                # Bare raise preserves the original traceback.
                raise
        eks_cluster_stack.reload()
        eks_cluster_stack_outputs = {
            o['OutputKey']: o['OutputValue']
            for o in eks_cluster_stack.outputs
        }
        node_instance_role_arn = eks_cluster_stack_outputs["NodeInstanceRoleArn"]
        for policy_arn in additional_policies:
            self.aws_services.iam_service.attach_policy(node_instance_role_arn, policy_arn)

        # NOTE(taylor): no reason to update the autoscaler role stack yet, just create it if it doesn't already exist
        eks_cluster = self.aws_services.eks_service.describe_cluster(cluster_name)
        self.aws_services.iam_service.ensure_eks_oidc_provider(eks_cluster)
        eks_cluster_autoscaler_role_stack = (
            self.aws_services.cloudformation_service.ensure_eks_cluster_autoscaler_role_stack(
                cluster_name=cluster_name,
                cluster_oidc_provider_url=eks_cluster["cluster"]["identity"]["oidc"]["issuer"],
            )
        )
        self.aws_services.cloudformation_service.wait_for_stack_create_complete(
            eks_cluster_autoscaler_role_stack.name,
            event_handler=self._handle_stack_event,
            after=start_time,
        )
        if not update:
            # On first creation, connect without the shared access role (the
            # aws-auth mapping does not exist yet) and install the config map.
            self._connect_kubernetes_cluster(cluster_name=cluster_name, ignore_role=True)
            self.test_kubernetes_cluster(cluster_name=cluster_name, ignore_role=True)
            # NOTE(taylor): no reason to update the aws-auth config map yet
            role_arn = eks_cluster_stack_outputs["ClusterAccessRoleArn"]
            role_name = eks_cluster_stack_outputs["ClusterAccessRoleName"]
            role_config_map = make_role_config_map(
                node_instance_role_arn=node_instance_role_arn,
                cluster_access_role_arn=role_arn,
                cluster_access_role_name=role_name,
            )
            self.services.kubernetes_service.ensure_config_map(role_config_map)
            self._disconnect_kubernetes_cluster(cluster_name=cluster_name)
        print('Testing your kubernetes configuration, you may see an error below but we should be able to resolve it...')
        self._connect_kubernetes_cluster(cluster_name=cluster_name)
        print('Successfully tested your kubernetes configuration, if you saw any errors above you may ignore them...')
        self._test_cluster_access_role(cluster_name=cluster_name, retries=3)
        # Note(Nakul): We disconnect and reconnect to solve an intermittent issue where the kubernetes python client
        # ends up with an empty api key. This is a temporary fix while we resolve the bug. This solves the issue by
        # reloading the key from the config file a second time which I found out works simply by some trial and error.
        self._disconnect_kubernetes_cluster(cluster_name=cluster_name)
        self._connect_kubernetes_cluster(cluster_name=cluster_name)
        self.test_kubernetes_cluster(cluster_name=cluster_name)
        self.services.kubernetes_service.ensure_plugins(cluster_name, Provider.AWS)
        print(self._node_access_instructions(cluster_name))
        return self.create_cluster_object(
            services=self.services,
            name=cluster_name,
            registry=None,
        )

    def create_kubernetes_cluster(self, options):
        """Create a new EKS cluster described by *options*."""
        return self._create_or_update_kubernetes_cluster(options, update=False)

    def update_kubernetes_cluster(self, options):
        """Update an existing EKS cluster described by *options*."""
        return self._create_or_update_kubernetes_cluster(options, update=True)

    def _test_cluster_access_role(self, cluster_name, retries=0, wait_time=5):
        """Assume the cluster access role, retrying up to *retries* times.

        Raises AwsClusterSharePermissionError once all attempts fail.
        """
        cluster_access_role_arn = self.aws_services.iam_service.get_cluster_access_role_arn(cluster_name)
        for try_number in range(retries + 1):
            try:
                self.aws_services.sts_service.assume_role(role_arn=cluster_access_role_arn)
            except ClientError as ce:
                if try_number >= retries:
                    raise AwsClusterSharePermissionError(
                        f"You do not have permission to use the role '{cluster_access_role_arn}' for accessing this cluster.\n"
                        "Please read the SigOpt documentation for sharing clusters: "
                        "https://app.sigopt.com/docs/orchestrate/deep_dive#cluster_sharing"
                    ) from ce
                time.sleep(wait_time)
            else:
                # Stop retrying as soon as assume_role succeeds; previously the
                # loop kept re-calling STS even after a successful attempt.
                return

    def _connect_kubernetes_cluster(self, cluster_name, ignore_role=False):
        """Write a kubeconfig for *cluster_name* so kubectl-style access works."""
        kubeconfig = self.create_kubeconfig(cluster_name, ignore_role)
        self.services.kubernetes_service.write_config(
            cluster_name=cluster_name,
            data=kubeconfig,
        )

    def test_kubernetes_cluster(self, cluster_name, ignore_role=False):
        """Verify cluster access (and, unless ignore_role, the shared access role)."""
        if not ignore_role:
            self._test_cluster_access_role(cluster_name=cluster_name, retries=3)
        self.services.kubernetes_service.test_config()

    def _disconnect_kubernetes_cluster(self, cluster_name):
        """Remove the locally written kubeconfig for *cluster_name*."""
        self.services.kubernetes_service.ensure_config_deleted(cluster_name=cluster_name)

    def create_kubeconfig(self, cluster_name, ignore_role=False):
        """Build a kubeconfig dict for *cluster_name* using aws-iam-authenticator."""
        cluster = self.describe_kubernetes_cluster(cluster_name)
        if ignore_role:
            cluster_access_role_arn = None
        else:
            cluster_access_role_arn = self.aws_services.iam_service.get_cluster_access_role_arn(cluster_name)
        # TODO(alexandra): optional role_arn is NOT the role ARN used to create the cluster
        # See Step 2 of https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html
        kubeconfig = self.services.resource_service.load_yaml("eks", "kubeconfig.yml")
        kubeconfig["clusters"][0]["cluster"] = {
            "server": cluster["endpoint"],
            "certificate-authority-data": cluster["certificateAuthority"]["data"],
        }
        command_args = ["token", "-i", cluster_name]
        if cluster_access_role_arn:
            command_args.extend(["-r", cluster_access_role_arn])
        user = {
            "exec": {
                "apiVersion": "client.authentication.k8s.io/v1alpha1",
                "command": get_executable_path("aws-iam-authenticator"),
                "args": command_args,
            },
        }
        kubeconfig["users"][0]["user"] = user
        return kubeconfig

    def destroy_kubernetes_cluster(self, cluster_name):
        """Tear down the cluster's stacks, key pair, kubeconfig and IAM wiring."""
        self.services.kubernetes_service.ensure_config_deleted(cluster_name)
        self.aws_services.ec2_service.ensure_key_pair_for_cluster_deleted(cluster_name)
        try:
            instance_role_arn = self.aws_services.cloudformation_service.get_node_instance_role_arn(cluster_name)
            if instance_role_arn:
                instance_role = self.aws_services.iam_service.get_role_from_arn(instance_role_arn)
                for policy in instance_role.attached_policies.all():
                    instance_role.detach_policy(PolicyArn=policy.arn)
        except ClientError:
            # Best-effort: the stack (and role) may already be gone.
            pass
        try:
            eks_cluster = self.aws_services.eks_service.describe_cluster(cluster_name)
            self.aws_services.iam_service.ensure_eks_oidc_provider_deleted(eks_cluster)
        except self.aws_services.eks_service.client.exceptions.ResourceNotFoundException:
            pass
        try:
            self.aws_services.cloudformation_service.ensure_eks_cluster_autoscaler_role_stack_deleted(
                cluster_name,
                event_handler=self._handle_stack_event,
            )
            self.aws_services.cloudformation_service.ensure_eks_cluster_stack_deleted(
                cluster_name,
                event_handler=self._handle_stack_event,
            )
        except Exception as e:
            raise ClusterDestroyError from e

    def _node_access_instructions(self, cluster_name):
        """Return human-readable ssh instructions pointing at the cluster's key pair."""
        filename = self.aws_services.ec2_service.key_pair_location(cluster_name)
        # The key pair path was previously dropped from the message (literal
        # "(unknown)" placeholders); interpolate it so the instructions work.
        return (
            '*Optional:'
            '\n\tTo ssh into any ec2 node in your cluster, use the username `ec2-user` with the key pair located at:'
            f'\n\t\t{filename}'
            '\n\tExample:'
            f'\n\t\tssh -i {filename} ec2-user@<node_dns_name>'
            '\n\tYou may be required to change security groups on your ec2 instances'
            '\n\tInstructions: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html'
        )

    def create_cluster_object(self, services, name, registry):
        """Wrap the provisioned cluster in an AWSCluster domain object."""
        return AWSCluster(
            services=services,
            name=name,
            registry=registry,
        )
"""AWS/EKS implementation of the cluster provider interface.

Creates, updates, connects to, and destroys EKS clusters via CloudFormation
stacks, and wires up kubeconfig / IAM access for them.
"""
import datetime
import re
import sys
import time
import types

import yaml
from botocore.exceptions import ClientError

from ..cluster.object import AWSCluster
from ..eks.service import DEFAULT_KUBERNETES_VERSION, SUPPORTED_KUBERNETES_VERSIONS
from ..exceptions import AwsClusterSharePermissionError, AwsPermissionsError, ClusterDestroyError, OrchestrateException
from ..node_groups import ALL_NODE_GROUP_TYPES, NODE_GROUP_TYPE_CPU, NODE_GROUP_TYPE_GPU, NODE_GROUP_TYPE_SYSTEM
from ..paths import get_executable_path
from ..provider.constants import Provider
from ..provider.interface import ProviderInterface


def is_cuda_gpu_instance_type(instance_type):
    """Return True when the EC2 instance family (text before the '.') ships CUDA GPUs."""
    prefix, _ = instance_type.split('.', 1)
    return prefix in ('p4d', 'p3', 'p3dn', 'p2', 'g4dn', 'g3')


def catch_aws_permissions_errors(func):
    """Wrap *func* so AWS authorization failures surface as AwsPermissionsError."""
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except ClientError as e:
            code = e.response['Error']['Code']
            http_status_code = e.response['ResponseMetadata']['HTTPStatusCode']
            if http_status_code == 403 or code in ('AccessDeniedException', 'UnauthorizedOperation'):
                raise AwsPermissionsError(e) from e
            raise
    return wrapper


def make_role_config_map(node_instance_role_arn, cluster_access_role_arn, cluster_access_role_name):
    """Build the kube-system/aws-auth ConfigMap mapping IAM roles to k8s groups."""
    map_roles = [
        {
            "rolearn": node_instance_role_arn,
            "username": "system:node:{{EC2PrivateDNSName}}",
            "groups": ["system:bootstrappers", "system:nodes"],
        },
        {
            "rolearn": cluster_access_role_arn,
            "username": cluster_access_role_name,
            "groups": ["system:masters"],
        },
    ]
    return {
        "apiVersion": "v1",
        "kind": "ConfigMap",
        "metadata": {
            "name": "aws-auth",
            "namespace": "kube-system",
        },
        "data": {
            "mapRoles": yaml.dump(map_roles),
        },
    }


class AwsService(ProviderInterface):
    """ProviderInterface implementation backed by AWS (EKS + CloudFormation)."""

    def __init__(self, services, aws_services):
        super().__init__(services)
        self.aws_services = aws_services

    def __getattribute__(self, name):
        # Transparently wrap every bound method so AWS permission errors are
        # normalized to AwsPermissionsError without decorating each method.
        attr = super().__getattribute__(name)
        if isinstance(attr, types.MethodType):
            attr = catch_aws_permissions_errors(attr)
        return attr

    def describe_kubernetes_cluster(self, cluster_name):
        """Return the EKS 'cluster' description dict, raising if it does not exist."""
        try:
            return self.aws_services.eks_service.describe_cluster(cluster_name=cluster_name)['cluster']
        except self.aws_services.eks_service.client.exceptions.ResourceNotFoundException as e:
            raise OrchestrateException(
                f"We cannot find an EKS cluster named '{cluster_name}' using your current AWS credentials."
                " Did someone delete this cluster?"
            ) from e

    def validate_cluster_options(self, cluster_name, node_groups_config, kubernetes_version):
        """Sanity-check the requested cluster name, node groups and k8s version.

        NOTE: these checks use ``assert`` and are therefore stripped under
        ``python -O``.
        """
        if kubernetes_version:
            assert kubernetes_version in SUPPORTED_KUBERNETES_VERSIONS, (
                'Unsupported kubernetes version for EKS:'
                f' {kubernetes_version}. Must be one of: {SUPPORTED_KUBERNETES_VERSIONS}'
            )
        cpu_nodes_config = node_groups_config.get(NODE_GROUP_TYPE_CPU)
        gpu_nodes_config = node_groups_config.get(NODE_GROUP_TYPE_GPU)
        assert cpu_nodes_config or gpu_nodes_config, (
            "Looks like your cluster config file is not"
            " asking us to spin up any CPU or GPU machines."
        )
        name_regex = '^[a-zA-Z][-a-zA-Z0-9]*$'
        assert cluster_name and re.match(name_regex, cluster_name), \
            'Cluster names for AWS must match the regex: /' + name_regex + '/'
        if gpu_nodes_config:
            gpu_instance_type = gpu_nodes_config['instance_type']
            assert is_cuda_gpu_instance_type(gpu_instance_type), (
                f"GPUs are not supported on the instance type ({gpu_instance_type})"
            )

    def _handle_stack_event(self, _, event):
        """Print CloudFormation stack events as they stream; failures go to stderr."""
        resource_status = event["ResourceStatus"]
        logical_id = event["LogicalResourceId"]
        print(f"{resource_status} {event['ResourceType']} {logical_id} {event['PhysicalResourceId']}")
        if resource_status.endswith("_FAILED"):
            print(f"Error {resource_status}: {logical_id}: {event['ResourceStatusReason']}", file=sys.stderr)

    def get_node_groups(self, options):
        """Normalize the per-type node group configs, defaulting missing ones to {}."""
        return {
            node_group_type: options.get(node_group_type) or {}
            for node_group_type in ALL_NODE_GROUP_TYPES
        }

    def _create_or_update_kubernetes_cluster(self, options, update):
        """Create (update=False) or update (update=True) an EKS cluster from *options*.

        Returns an AWSCluster object for the resulting cluster.
        """
        # Timezone-aware "now"; utcnow() is deprecated and was naive until .replace()'d.
        start_time = datetime.datetime.now(datetime.timezone.utc)
        cluster_name = options['cluster_name']
        kubernetes_version = options.get('kubernetes_version') or DEFAULT_KUBERNETES_VERSION
        node_groups = self.get_node_groups(options)
        self.validate_cluster_options(cluster_name, node_groups, kubernetes_version)
        aws_options = options.get('aws') or {}
        additional_policies = aws_options.get('additional_policies') or []
        common_kwargs = dict(
            cluster_name=cluster_name,
            system_node_config=node_groups[NODE_GROUP_TYPE_SYSTEM],
            cpu_node_config=node_groups[NODE_GROUP_TYPE_CPU],
            gpu_node_config=node_groups[NODE_GROUP_TYPE_GPU],
            key_name=self.aws_services.ec2_service.ensure_key_pair_for_cluster(cluster_name).name,
            kubernetes_version=kubernetes_version,
        )
        if update:
            eks_cluster_stack = self.aws_services.cloudformation_service.update_eks_cluster_stack(
                event_handler=self._handle_stack_event,
                **common_kwargs,
            )
        else:
            try:
                eks_cluster_stack = self.aws_services.cloudformation_service.ensure_eks_cluster_stack(
                    **common_kwargs,
                )
                self.aws_services.cloudformation_service.wait_for_stack_create_complete(
                    eks_cluster_stack.name,
                    event_handler=self._handle_stack_event,
                    after=start_time,
                )
            except Exception:
                print("*" * 50)
                print("ERROR: encountered an error creating EKS cluster; tearing down resources")
                print("*" * 50)
                # TODO(dan): can we catch something more fine-grained here?
                self.aws_services.cloudformation_service.ensure_eks_cluster_stack_deleted(
                    cluster_name,
                    self._handle_stack_event,
                )
                # Bare raise preserves the original traceback.
                raise
        eks_cluster_stack.reload()
        eks_cluster_stack_outputs = {
            o['OutputKey']: o['OutputValue']
            for o in eks_cluster_stack.outputs
        }
        node_instance_role_arn = eks_cluster_stack_outputs["NodeInstanceRoleArn"]
        for policy_arn in additional_policies:
            self.aws_services.iam_service.attach_policy(node_instance_role_arn, policy_arn)

        # NOTE(taylor): no reason to update the autoscaler role stack yet, just create it if it doesn't already exist
        eks_cluster = self.aws_services.eks_service.describe_cluster(cluster_name)
        self.aws_services.iam_service.ensure_eks_oidc_provider(eks_cluster)
        eks_cluster_autoscaler_role_stack = (
            self.aws_services.cloudformation_service.ensure_eks_cluster_autoscaler_role_stack(
                cluster_name=cluster_name,
                cluster_oidc_provider_url=eks_cluster["cluster"]["identity"]["oidc"]["issuer"],
            )
        )
        self.aws_services.cloudformation_service.wait_for_stack_create_complete(
            eks_cluster_autoscaler_role_stack.name,
            event_handler=self._handle_stack_event,
            after=start_time,
        )
        if not update:
            # On first creation, connect without the shared access role (the
            # aws-auth mapping does not exist yet) and install the config map.
            self._connect_kubernetes_cluster(cluster_name=cluster_name, ignore_role=True)
            self.test_kubernetes_cluster(cluster_name=cluster_name, ignore_role=True)
            # NOTE(taylor): no reason to update the aws-auth config map yet
            role_arn = eks_cluster_stack_outputs["ClusterAccessRoleArn"]
            role_name = eks_cluster_stack_outputs["ClusterAccessRoleName"]
            role_config_map = make_role_config_map(
                node_instance_role_arn=node_instance_role_arn,
                cluster_access_role_arn=role_arn,
                cluster_access_role_name=role_name,
            )
            self.services.kubernetes_service.ensure_config_map(role_config_map)
            self._disconnect_kubernetes_cluster(cluster_name=cluster_name)
        print('Testing your kubernetes configuration, you may see an error below but we should be able to resolve it...')
        self._connect_kubernetes_cluster(cluster_name=cluster_name)
        print('Successfully tested your kubernetes configuration, if you saw any errors above you may ignore them...')
        self._test_cluster_access_role(cluster_name=cluster_name, retries=3)
        # Note(Nakul): We disconnect and reconnect to solve an intermittent issue where the kubernetes python client
        # ends up with an empty api key. This is a temporary fix while we resolve the bug. This solves the issue by
        # reloading the key from the config file a second time which I found out works simply by some trial and error.
        self._disconnect_kubernetes_cluster(cluster_name=cluster_name)
        self._connect_kubernetes_cluster(cluster_name=cluster_name)
        self.test_kubernetes_cluster(cluster_name=cluster_name)
        self.services.kubernetes_service.ensure_plugins(cluster_name, Provider.AWS)
        print(self._node_access_instructions(cluster_name))
        return self.create_cluster_object(
            services=self.services,
            name=cluster_name,
            registry=None,
        )

    def create_kubernetes_cluster(self, options):
        """Create a new EKS cluster described by *options*."""
        return self._create_or_update_kubernetes_cluster(options, update=False)

    def update_kubernetes_cluster(self, options):
        """Update an existing EKS cluster described by *options*."""
        return self._create_or_update_kubernetes_cluster(options, update=True)

    def _test_cluster_access_role(self, cluster_name, retries=0, wait_time=5):
        """Assume the cluster access role, retrying up to *retries* times.

        Raises AwsClusterSharePermissionError once all attempts fail.
        """
        cluster_access_role_arn = self.aws_services.iam_service.get_cluster_access_role_arn(cluster_name)
        for try_number in range(retries + 1):
            try:
                self.aws_services.sts_service.assume_role(role_arn=cluster_access_role_arn)
            except ClientError as ce:
                if try_number >= retries:
                    raise AwsClusterSharePermissionError(
                        f"You do not have permission to use the role '{cluster_access_role_arn}' for accessing this cluster.\n"
                        "Please read the SigOpt documentation for sharing clusters: "
                        "https://app.sigopt.com/docs/orchestrate/deep_dive#cluster_sharing"
                    ) from ce
                time.sleep(wait_time)
            else:
                # Stop retrying as soon as assume_role succeeds; previously the
                # loop kept re-calling STS even after a successful attempt.
                return

    def _connect_kubernetes_cluster(self, cluster_name, ignore_role=False):
        """Write a kubeconfig for *cluster_name* so kubectl-style access works."""
        kubeconfig = self.create_kubeconfig(cluster_name, ignore_role)
        self.services.kubernetes_service.write_config(
            cluster_name=cluster_name,
            data=kubeconfig,
        )

    def test_kubernetes_cluster(self, cluster_name, ignore_role=False):
        """Verify cluster access (and, unless ignore_role, the shared access role)."""
        if not ignore_role:
            self._test_cluster_access_role(cluster_name=cluster_name, retries=3)
        self.services.kubernetes_service.test_config()

    def _disconnect_kubernetes_cluster(self, cluster_name):
        """Remove the locally written kubeconfig for *cluster_name*."""
        self.services.kubernetes_service.ensure_config_deleted(cluster_name=cluster_name)

    def create_kubeconfig(self, cluster_name, ignore_role=False):
        """Build a kubeconfig dict for *cluster_name* using aws-iam-authenticator."""
        cluster = self.describe_kubernetes_cluster(cluster_name)
        if ignore_role:
            cluster_access_role_arn = None
        else:
            cluster_access_role_arn = self.aws_services.iam_service.get_cluster_access_role_arn(cluster_name)
        # TODO(alexandra): optional role_arn is NOT the role ARN used to create the cluster
        # See Step 2 of https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html
        kubeconfig = self.services.resource_service.load_yaml("eks", "kubeconfig.yml")
        kubeconfig["clusters"][0]["cluster"] = {
            "server": cluster["endpoint"],
            "certificate-authority-data": cluster["certificateAuthority"]["data"],
        }
        command_args = ["token", "-i", cluster_name]
        if cluster_access_role_arn:
            command_args.extend(["-r", cluster_access_role_arn])
        user = {
            "exec": {
                "apiVersion": "client.authentication.k8s.io/v1alpha1",
                "command": get_executable_path("aws-iam-authenticator"),
                "args": command_args,
            },
        }
        kubeconfig["users"][0]["user"] = user
        return kubeconfig

    def destroy_kubernetes_cluster(self, cluster_name):
        """Tear down the cluster's stacks, key pair, kubeconfig and IAM wiring."""
        self.services.kubernetes_service.ensure_config_deleted(cluster_name)
        self.aws_services.ec2_service.ensure_key_pair_for_cluster_deleted(cluster_name)
        try:
            instance_role_arn = self.aws_services.cloudformation_service.get_node_instance_role_arn(cluster_name)
            if instance_role_arn:
                instance_role = self.aws_services.iam_service.get_role_from_arn(instance_role_arn)
                for policy in instance_role.attached_policies.all():
                    instance_role.detach_policy(PolicyArn=policy.arn)
        except ClientError:
            # Best-effort: the stack (and role) may already be gone.
            pass
        try:
            eks_cluster = self.aws_services.eks_service.describe_cluster(cluster_name)
            self.aws_services.iam_service.ensure_eks_oidc_provider_deleted(eks_cluster)
        except self.aws_services.eks_service.client.exceptions.ResourceNotFoundException:
            pass
        try:
            self.aws_services.cloudformation_service.ensure_eks_cluster_autoscaler_role_stack_deleted(
                cluster_name,
                event_handler=self._handle_stack_event,
            )
            self.aws_services.cloudformation_service.ensure_eks_cluster_stack_deleted(
                cluster_name,
                event_handler=self._handle_stack_event,
            )
        except Exception as e:
            raise ClusterDestroyError from e

    def _node_access_instructions(self, cluster_name):
        """Return human-readable ssh instructions pointing at the cluster's key pair."""
        filename = self.aws_services.ec2_service.key_pair_location(cluster_name)
        # The key pair path was previously dropped from the message (literal
        # "(unknown)" placeholders); interpolate it so the instructions work.
        return (
            '*Optional:'
            '\n\tTo ssh into any ec2 node in your cluster, use the username `ec2-user` with the key pair located at:'
            f'\n\t\t{filename}'
            '\n\tExample:'
            f'\n\t\tssh -i {filename} ec2-user@<node_dns_name>'
            '\n\tYou may be required to change security groups on your ec2 instances'
            '\n\tInstructions: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html'
        )

    def create_cluster_object(self, services, name, registry):
        """Wrap the provisioned cluster in an AWSCluster domain object."""
        return AWSCluster(
            services=services,
            name=name,
            registry=registry,
        )
# type: ignore
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.

import sys
import pathlib
import configparser

BASE_DIR = pathlib.Path(__file__).resolve().parent.parent.parent

# parse setup.cfg to gather metadata info (reduce redundancy of static info)
config = configparser.ConfigParser()
config.read(BASE_DIR.joinpath("setup.cfg").as_posix())
try:
    metadata = config["metadata"]
except KeyError:
    raise KeyError("cannot run sphinx if setup.cfg is missing [metadata] section")

title = metadata["name"].title().replace("_", " ").replace("-", " ")
title_filename = title.replace(" ", "")

sys.path.insert(0, BASE_DIR.joinpath("src").as_posix())

# -- Project information -----------------------------------------------------

project = title
# Single quotes for the nested lookup: reusing the enclosing quote character
# inside an f-string is a SyntaxError before Python 3.12.
copyright = f"2020, {metadata['author']}"
author = metadata["author"]

# The short X.Y version
version = metadata["version"]
# The full version, including alpha/beta/rc tags
release = metadata["version"]

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.viewcode"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"

# The master toctree document.
master_doc = "index"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Sphinx >= 5 warns on language = None and expects e.g. "en";
# kept as-is to avoid changing build behavior.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
# exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    "description": metadata["description"],
    "github_user": "modist-io",
    "github_repo": "modist-api",
    "github_type": "star",
    "page_width": "1000px",
    "sidebar_width": "220px",
    "sidebar_collapse": True,
    "fixed_sidebar": True,
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
# NOTE(review): this was `f"doc"`, an f-string with no placeholders; a project
# prefix (e.g. title_filename) may have been intended — confirm before changing.
htmlhelp_basename = "doc"

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    "papersize": "letterpaper",
    "pointsize": "10pt",
    "preamble": "",
    "figure_align": "htbp",
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        f"{title_filename}.tex",
        f"{title} Documentation",
        metadata["author"],
        "manual",
    )
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        master_doc,
        metadata["name"].replace("_", "").replace("-", ""),
        f"{title} Documentation",
        [author],
        1,
    )
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        title_filename,
        f"{title} Documentation",
        author,
        title_filename,
        metadata["description"],
        "Miscellaneous",
    )
]

# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]

# -- Extension configuration -------------------------------------------------

# -- Options for intersphinx extension ---------------------------------------

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"python": ("https://docs.python.org/3.7/", None)}
# type: ignore
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.

import sys
import pathlib
import configparser

BASE_DIR = pathlib.Path(__file__).resolve().parent.parent.parent

# parse setup.cfg to gather metadata info (reduce redundancy of static info)
config = configparser.ConfigParser()
config.read(BASE_DIR.joinpath("setup.cfg").as_posix())
try:
    metadata = config["metadata"]
except KeyError:
    raise KeyError("cannot run sphinx if setup.cfg is missing [metadata] section")

title = metadata["name"].title().replace("_", " ").replace("-", " ")
title_filename = title.replace(" ", "")

sys.path.insert(0, BASE_DIR.joinpath("src").as_posix())

# -- Project information -----------------------------------------------------

project = title
copyright = f"2020, {metadata['author']}"
author = metadata["author"]

# The short X.Y version
version = metadata["version"]
# The full version, including alpha/beta/rc tags
release = metadata["version"]

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.viewcode"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"

# The master toctree document.
master_doc = "index"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Sphinx >= 5 warns on language = None and expects e.g. "en";
# kept as-is to avoid changing build behavior.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
# exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    "description": metadata["description"],
    "github_user": "modist-io",
    "github_repo": "modist-api",
    "github_type": "star",
    "page_width": "1000px",
    "sidebar_width": "220px",
    "sidebar_collapse": True,
    "fixed_sidebar": True,
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
# NOTE(review): this was `f"doc"`, an f-string with no placeholders; a project
# prefix (e.g. title_filename) may have been intended — confirm before changing.
htmlhelp_basename = "doc"

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    "papersize": "letterpaper",
    "pointsize": "10pt",
    "preamble": "",
    "figure_align": "htbp",
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        f"{title_filename}.tex",
        f"{title} Documentation",
        metadata["author"],
        "manual",
    )
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        master_doc,
        metadata["name"].replace("_", "").replace("-", ""),
        f"{title} Documentation",
        [author],
        1,
    )
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        title_filename,
        f"{title} Documentation",
        author,
        title_filename,
        metadata["description"],
        "Miscellaneous",
    )
]

# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]

# -- Extension configuration -------------------------------------------------

# -- Options for intersphinx extension ---------------------------------------

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"python": ("https://docs.python.org/3.7/", None)}
from datetime import datetime
from typing import Dict, List, Union
from urllib.parse import quote

from .woql_library import WOQLLib
from .woql_query import WOQLQuery

# Maps WOQL primitive type names to the Python type used to validate values
# in WOQLObj._check_prop.
WOQLTYPE_TO_PYTYPE = {
    "string": str,
    "boolean": bool,
    "integer": int,
    "decimal": float,
    "dateTime": datetime,
}


class WOQLClass:
    def __init__(
        self,
        obj_id: str,
        label: str = None,
        description: str = None,
        obj_property: dict = None,
    ):
        """The WOQLClass constructor.

        Parameters
        ----------
        obj_id: str
            Object id for which the instance is created.
        label: str, optional
            Optional label to be given for the instance.
        description: str, optional
            Optional description to be given for the instance.
        obj_property: dict, optional
            Optional properties which can be provided during initialization.

        Examples
        --------
        >>> woql_object_prop_des = WOQLClass(
                "Journey",
                label="Journey Object",
                description="A car Journey object.",
                obj_property={
                    "Duration": {
                        "type": "dateTime",
                        "label": "Journey Duration",
                        "description": "Journey duration in minutes.",
                    }
                },
            )
        """
        self.id = obj_id
        self._label = label
        self._description = description
        if obj_property is not None:
            self._property = obj_property
        else:
            self._property = {}
        # Build the underlying doctype query; property definitions are
        # chained onto it below.
        self.query_obj = WOQLQuery().doctype(
            obj_id, label=label, description=description
        )
        if obj_property is not None:
            for key, item in obj_property.items():
                self.query_obj = self.query_obj.property(
                    key, item.get("type"), item.get("label"), item.get("description")
                )

    def __str__(self):
        """Returns the id of the instance

        Returns
        -------
        str
        """
        return self.id

    def __eq__(self, other):
        """To check if the object is equal to the given object.

        NOTE(review): defining __eq__ without __hash__ makes instances
        unhashable — confirm that WOQLClass is never used as a dict key.

        Parameters
        ----------
        other: WOQLClass
            The other object which has to be compared with.

        Returns
        -------
        bool
        """
        if isinstance(other, WOQLClass):
            return self.id == other.id
        else:
            return False

    @property
    def label(self) -> str:
        """Returns the label of the instance.

        Returns
        -------
        str
        """
        if self._label is None:
            return ""
        return self._label

    @label.setter
    def label(self, label: str):
        """Sets the label for this instance.

        Parameters
        ----------
        label: str
            Label which needs to be set.
        """
        self.query_obj = self.query_obj.label(label)
        self._label = label

    @property
    def description(self) -> str:
        """Returns the description of the instance.

        Returns
        -------
        str
        """
        if self._description is None:
            return ""
        return self._description

    @description.setter
    def description(self, description: str):
        """Sets the description for this instance.

        Parameters
        ----------
        description: str
            description which needs to be set.
        """
        self.query_obj = self.query_obj.description(description)
        self._description = description

    def add_property(
        self,
        pro_id: str,
        property_type: Union[str, "WOQLClass"],
        label: str = None,
        description: str = None,
    ):
        """Adds the given properties to the instance.

        Parameters
        ----------
        pro_id: str
            Property id
        property_type: str (or) WOQLClass
            Properties to be given in json or as WOQLClass instance.
        label: str, optional
            Optional label to be given for the instance.
        description: str, optional
            Optional description to be given for the instance.

        Returns
        -------
        WOQLClass
        """
        if isinstance(property_type, str):
            self.query_obj = self.query_obj.property(
                pro_id, property_type, label, description
            )
        elif isinstance(property_type, WOQLClass):
            # Object-valued property: the query stores the class id, while
            # self._property keeps the WOQLClass instance itself.
            self.query_obj = self.query_obj.property(
                pro_id, property_type.id, label, description
            )
        else:
            raise ValueError("property_type needs to be either string or WOQLClass")
        self._property[pro_id] = {
            "type": property_type,
            "label": label,
            "description": description,
        }
        return self

    def to_dict(
        self,
    ):
        """Returns the query_object as a dict.

        Returns
        -------
        dict
        """
        return self.query_obj.to_dict()

    def to_json(self):
        """Returns the query_object as a json.

        Returns
        -------
        str
        """
        return self.query_obj.to_json()


class WOQLObj:
    def __init__(
        self,
        obj_id: str,
        obj_type: WOQLClass,
        label: str = None,
        description: str = None,
        obj_property: dict = None,
    ):
        """The WOQLObj constructor.

        Parameters
        ----------
        obj_id: str
            Object id for which the instance is created.
        obj_type: WOQLClass
            WOQLClass instance which has the properties.
        label: str, optional
            Optional label to be given for the instance.
        description: str, optional
            Optional description to be given for the instance.
        obj_property: dict, optional
            Optional properties which can be provided during initialization.

        Examples
        --------
        >>> my_id = "my_journey"
        >>> my_label = "My Journey"
        >>> my_des = "This is my journey to work"
        >>> my_prop = {"Duration": {"value": 30}}
        >>> journey_class = WOQLClass("Journey")
        >>> journey_class.add_property(
        ...     "Duration",
        ...     "integer",
        ...     label="Journey Duration",
        ...     description="Journey duration in minutes.",
        ... )
        <terminusdb_client.woqlquery.smart_query.WOQLClass object at 0x1085618d0>
        >>> woql_obj = WOQLObj(my_id, journey_class, my_label, my_des, obj_property=my_prop)
        """
        self.id = obj_id
        self._type = obj_type
        self.woql_id = self._idgen()
        self.label = label
        self.description = description
        self.query_obj = WOQLQuery().insert(
            self.woql_id, obj_type.id, label=label, description=description
        )
        if obj_property is not None:
            # Validate each supplied value against the class schema before
            # chaining it onto the insert query.
            for pro_id, prop in obj_property.items():
                prop_val = prop.get("value")
                self._check_prop(pro_id, prop_val)
                self.query_obj = self.query_obj.property(
                    pro_id, prop_val, prop.get("label"), prop.get("description")
                )
            self._property = obj_property
        else:
            self._property = {}

    def __str__(self):
        """Returns the id of the instance

        Returns
        -------
        str
        """
        return self.id

    def _idgen(self) -> str:
        """Returns the id which is generated in the below format.

        doc:{[WOQLClass_type_id]_[WOQLObj_id]}

        Returns
        -------
        str
        """
        # mimic what a idgen would do in the back end
        # TODO: quote the ids to make it url friendly
        return f"doc:{quote(self._type.id)}_{quote(self.id)}"

    def _check_prop(self, pro_id: str, pro_value):
        """Check if the given property's value belongs to the correct data type.

        Parameters
        ----------
        pro_id: str
            Property id
        pro_value
            Property value

        Returns
        -------
        none or raises exception
        """
        prop = self._type._property.get(pro_id)
        if prop is None:
            raise ValueError(f"No {pro_id} property in {self._type.id}")
        if isinstance(pro_value, WOQLObj):
            if pro_value._type != prop["type"]:
                # FIX: the original nested double quotes inside a
                # double-quoted f-string (f"...{prop["type"]}...") — a
                # SyntaxError on Python < 3.12. Single quotes restore the
                # intended lookup with identical runtime output.
                raise ValueError(
                    f"{pro_id} property in {self._type.id} is of type {prop['type']} not {pro_value._type}"
                )
        else:
            if not isinstance(pro_value, WOQLTYPE_TO_PYTYPE[prop["type"]]):
                raise ValueError(
                    f"{pro_id} property in {self._type.id} is of type {prop['type']} not {type(pro_value)}"
                )

    def add_property(
        self, pro_id: str, pro_value, label: str = None, description: str = None
    ):
        """Adds the given properties to the instance.

        Parameters
        ----------
        pro_id: str
            Property id
        pro_value: any
            Property value which needs to be added.
        label: str, optional
            Optional label to be given for the instance.
        description: str, optional
            Optional description to be given for the instance.

        Returns
        -------
        WOQLObj
        """
        # check if the pro_value matches the property of the self._type
        self._check_prop(pro_id, pro_value)
        # add new prop in self._property
        self._property[pro_id] = {
            "value": pro_value,
            "label": label,
            "description": description,
        }
        # add to query_obj
        self.query_obj = self.query_obj.property(pro_id, pro_value, label, description)
        return self

    def to_dict(
        self,
    ):
        """Returns the query_object as a dict.

        Returns
        -------
        dict
        """
        return self.query_obj.to_dict()

    def to_json(self):
        """Returns the query_object as a json.

        Returns
        -------
        str
        """
        return self.query_obj.to_json()


class TerminusDB:
    def __init__(
        self,
        server_url: str,
        db_id: str,
        key: str = "root",
        account: str = "admin",
        user: str = "admin",
        db_label: str = None,
        db_description: str = None,
        **kwargs,
    ):
        """The TerminusDB constructor.

        Parameters
        ----------
        server_url: str
            The url of which the TerminusDB server is running.
        db_id: str
            Unique identifier of the database.
        key: str, optional, default = "root"
        account: str, optional, default = "admin"
            ID of the organization in which to create the DB (defaults to 'admin')
        user: str, optional, default = "admin"
        db_label: str, optional, default = None
            Optional label to be given for the db.
        db_description: str, optional, default = None
            Optional description to be given for the db.
        **kwargs
            Configuration options used to construct a :class:`ConnectionConfig` instance.
            Passing insecure=True will skip HTTPS certificate checking.
        """
        # Imported here to avoid a circular import with the client package.
        from ..woqlclient import WOQLClient

        self._client = WOQLClient(server_url, **kwargs)
        self._client.connect(key=key, account=account, user=user)
        existing = self._client.get_database(db_id, self._client.account())
        self.classes: Dict[str, WOQLClass] = {}
        if not existing:
            self._client.create_database(db_id, account, db_label, db_description)
        else:
            self._client.set_db(db_id)
            # get all classes from db and store them
            cls_result = WOQLLib().classes().execute(self._client)
            for item in cls_result["bindings"]:
                class_id = item["Class ID"].split("#")[-1]
                class_name = item["Class Name"]["@value"]
                class_des = item["Description"]["@value"]
                self.classes[class_id] = WOQLClass(class_id, class_name, class_des)
            # get all properties from db and add to classes
            prop_result = WOQLLib().property().execute(self._client)
            for item in prop_result["bindings"]:
                prop_domain = item["Property Domain"].split("#")[-1]
                prop_id = item["Property ID"].split("#")[-1]
                prop_name = item["Property Name"]["@value"]
                prop_des = item["Property Description"]["@value"]
                # NOTE(review): prop_type is read from "Property Domain",
                # which makes it identical to prop_domain — likely intended
                # to be the property's range. Behavior preserved; confirm.
                prop_type = item["Property Domain"].split("#")[-1]
                if item["Property Type"]["@value"] == "Object":
                    prop_type = self.classes[prop_type]
                self.classes[prop_domain].add_property(
                    prop_id, prop_type, prop_name, prop_des
                )

    def add_class(self, obj: Union[WOQLClass, List[WOQLClass]]):
        """Adds one or more WOQLClass types.

        Parameters
        ----------
        obj: WOQLClass or a list of WOQLClass instances
        """
        if isinstance(obj, WOQLClass):
            self.classes[obj.id] = obj
            return obj.query_obj.execute(self._client)
        elif isinstance(obj, list):
            for item in obj:
                self.classes[item.id] = item
            # NOTE(review): passes WOQLClass objects (not their query_obj)
            # into woql_and — confirm woql_and accepts them.
            return WOQLQuery().woql_and(*obj).execute(self._client)
        else:
            raise ValueError(
                "object(s) added need to be WOQLClass object or a list of WOQLClass objects."
            )

    def add_object(self, obj: Union[WOQLObj, List[WOQLObj]]):
        """Adds one or more WOQLObj.

        Parameters
        ----------
        obj: WOQLObj or a list of WOQLObj instances

        Returns
        -------
        """
        if isinstance(obj, WOQLObj):
            # check if class is in db
            if obj._type.id in self.classes:
                return obj.query_obj.execute(self._client)
            else:
                raise ValueError("Class of object(s) is not in the schema.")
        elif isinstance(obj, list):
            # NOTE(review): this stores WOQLObj instances in self.classes
            # (a Dict[str, WOQLClass]) and skips the schema check done in
            # the single-object branch — looks unintended; confirm.
            for item in obj:
                self.classes[item.id] = item
            return WOQLQuery().woql_and(*obj).execute(self._client)
        else:
            raise ValueError(
                "Object(s) added need to be WOQLClass object or a list of WOQLClass objects."
            )

    def run(self, query: Union[WOQLQuery, Dict]):
        """Runs a query either in WOQLQuery format or json_ld in dictionary presentation

        Parameters
        ----------
        query: WOQLQuery or Dict
            Query which has to be executed.

        Returns
        -------
        The output of the query.
        """
        return self._client.query(query)
from datetime import datetime
from typing import Dict, List, Union
from urllib.parse import quote

from .woql_library import WOQLLib
from .woql_query import WOQLQuery

# Maps WOQL primitive type names to the Python type used to validate values
# in WOQLObj._check_prop.
WOQLTYPE_TO_PYTYPE = {
    "string": str,
    "boolean": bool,
    "integer": int,
    "decimal": float,
    "dateTime": datetime,
}


class WOQLClass:
    def __init__(
        self,
        obj_id: str,
        label: str = None,
        description: str = None,
        obj_property: dict = None,
    ):
        """The WOQLClass constructor.

        Parameters
        ----------
        obj_id: str
            Object id for which the instance is created.
        label: str, optional
            Optional label to be given for the instance.
        description: str, optional
            Optional description to be given for the instance.
        obj_property: dict, optional
            Optional properties which can be provided during initialization.

        Examples
        --------
        >>> woql_object_prop_des = WOQLClass(
                "Journey",
                label="Journey Object",
                description="A car Journey object.",
                obj_property={
                    "Duration": {
                        "type": "dateTime",
                        "label": "Journey Duration",
                        "description": "Journey duration in minutes.",
                    }
                },
            )
        """
        self.id = obj_id
        self._label = label
        self._description = description
        if obj_property is not None:
            self._property = obj_property
        else:
            self._property = {}
        # Build the underlying doctype query; property definitions are
        # chained onto it below.
        self.query_obj = WOQLQuery().doctype(
            obj_id, label=label, description=description
        )
        if obj_property is not None:
            for key, item in obj_property.items():
                self.query_obj = self.query_obj.property(
                    key, item.get("type"), item.get("label"), item.get("description")
                )

    def __str__(self):
        """Returns the id of the instance

        Returns
        -------
        str
        """
        return self.id

    def __eq__(self, other):
        """To check if the object is equal to the given object.

        NOTE(review): defining __eq__ without __hash__ makes instances
        unhashable — confirm that WOQLClass is never used as a dict key.

        Parameters
        ----------
        other: WOQLClass
            The other object which has to be compared with.

        Returns
        -------
        bool
        """
        if isinstance(other, WOQLClass):
            return self.id == other.id
        else:
            return False

    @property
    def label(self) -> str:
        """Returns the label of the instance.

        Returns
        -------
        str
        """
        if self._label is None:
            return ""
        return self._label

    @label.setter
    def label(self, label: str):
        """Sets the label for this instance.

        Parameters
        ----------
        label: str
            Label which needs to be set.
        """
        self.query_obj = self.query_obj.label(label)
        self._label = label

    @property
    def description(self) -> str:
        """Returns the description of the instance.

        Returns
        -------
        str
        """
        if self._description is None:
            return ""
        return self._description

    @description.setter
    def description(self, description: str):
        """Sets the description for this instance.

        Parameters
        ----------
        description: str
            description which needs to be set.
        """
        self.query_obj = self.query_obj.description(description)
        self._description = description

    def add_property(
        self,
        pro_id: str,
        property_type: Union[str, "WOQLClass"],
        label: str = None,
        description: str = None,
    ):
        """Adds the given properties to the instance.

        Parameters
        ----------
        pro_id: str
            Property id
        property_type: str (or) WOQLClass
            Properties to be given in json or as WOQLClass instance.
        label: str, optional
            Optional label to be given for the instance.
        description: str, optional
            Optional description to be given for the instance.

        Returns
        -------
        WOQLClass
        """
        if isinstance(property_type, str):
            self.query_obj = self.query_obj.property(
                pro_id, property_type, label, description
            )
        elif isinstance(property_type, WOQLClass):
            # Object-valued property: the query stores the class id, while
            # self._property keeps the WOQLClass instance itself.
            self.query_obj = self.query_obj.property(
                pro_id, property_type.id, label, description
            )
        else:
            raise ValueError("property_type needs to be either string or WOQLClass")
        self._property[pro_id] = {
            "type": property_type,
            "label": label,
            "description": description,
        }
        return self

    def to_dict(
        self,
    ):
        """Returns the query_object as a dict.

        Returns
        -------
        dict
        """
        return self.query_obj.to_dict()

    def to_json(self):
        """Returns the query_object as a json.

        Returns
        -------
        str
        """
        return self.query_obj.to_json()


class WOQLObj:
    def __init__(
        self,
        obj_id: str,
        obj_type: WOQLClass,
        label: str = None,
        description: str = None,
        obj_property: dict = None,
    ):
        """The WOQLObj constructor.

        Parameters
        ----------
        obj_id: str
            Object id for which the instance is created.
        obj_type: WOQLClass
            WOQLClass instance which has the properties.
        label: str, optional
            Optional label to be given for the instance.
        description: str, optional
            Optional description to be given for the instance.
        obj_property: dict, optional
            Optional properties which can be provided during initialization.

        Examples
        --------
        >>> my_id = "my_journey"
        >>> my_label = "My Journey"
        >>> my_des = "This is my journey to work"
        >>> my_prop = {"Duration": {"value": 30}}
        >>> journey_class = WOQLClass("Journey")
        >>> journey_class.add_property(
        ...     "Duration",
        ...     "integer",
        ...     label="Journey Duration",
        ...     description="Journey duration in minutes.",
        ... )
        <terminusdb_client.woqlquery.smart_query.WOQLClass object at 0x1085618d0>
        >>> woql_obj = WOQLObj(my_id, journey_class, my_label, my_des, obj_property=my_prop)
        """
        self.id = obj_id
        self._type = obj_type
        self.woql_id = self._idgen()
        self.label = label
        self.description = description
        self.query_obj = WOQLQuery().insert(
            self.woql_id, obj_type.id, label=label, description=description
        )
        if obj_property is not None:
            # Validate each supplied value against the class schema before
            # chaining it onto the insert query.
            for pro_id, prop in obj_property.items():
                prop_val = prop.get("value")
                self._check_prop(pro_id, prop_val)
                self.query_obj = self.query_obj.property(
                    pro_id, prop_val, prop.get("label"), prop.get("description")
                )
            self._property = obj_property
        else:
            self._property = {}

    def __str__(self):
        """Returns the id of the instance

        Returns
        -------
        str
        """
        return self.id

    def _idgen(self) -> str:
        """Returns the id which is generated in the below format.

        doc:{[WOQLClass_type_id]_[WOQLObj_id]}

        Returns
        -------
        str
        """
        # mimic what a idgen would do in the back end
        # TODO: quote the ids to make it url friendly
        return f"doc:{quote(self._type.id)}_{quote(self.id)}"

    def _check_prop(self, pro_id: str, pro_value):
        """Check if the given property's value belongs to the correct data type.

        Parameters
        ----------
        pro_id: str
            Property id
        pro_value
            Property value

        Returns
        -------
        none or raises exception
        """
        prop = self._type._property.get(pro_id)
        if prop is None:
            raise ValueError(f"No {pro_id} property in {self._type.id}")
        if isinstance(pro_value, WOQLObj):
            # Object-valued property: compare the value's class against the
            # declared property type (stored by WOQLClass.add_property).
            if pro_value._type != prop["type"]:
                raise ValueError(
                    f"{pro_id} property in {self._type.id} is of type {prop['type']} not {pro_value._type}"
                )
        else:
            # Primitive property: map the WOQL type name to a Python type.
            if not isinstance(pro_value, WOQLTYPE_TO_PYTYPE[prop["type"]]):
                raise ValueError(
                    f"{pro_id} property in {self._type.id} is of type {prop['type']} not {type(pro_value)}"
                )

    def add_property(
        self, pro_id: str, pro_value, label: str = None, description: str = None
    ):
        """Adds the given properties to the instance.

        Parameters
        ----------
        pro_id: str
            Property id
        pro_value: any
            Property value which needs to be added.
        label: str, optional
            Optional label to be given for the instance.
        description: str, optional
            Optional description to be given for the instance.

        Returns
        -------
        WOQLObj
        """
        # check if the pro_value matches the property of the self._type
        self._check_prop(pro_id, pro_value)
        # add new prop in self._property
        self._property[pro_id] = {
            "value": pro_value,
            "label": label,
            "description": description,
        }
        # add to query_obj
        self.query_obj = self.query_obj.property(pro_id, pro_value, label, description)
        return self

    def to_dict(
        self,
    ):
        """Returns the query_object as a dict.

        Returns
        -------
        dict
        """
        return self.query_obj.to_dict()

    def to_json(self):
        """Returns the query_object as a json.

        Returns
        -------
        str
        """
        return self.query_obj.to_json()


class TerminusDB:
    def __init__(
        self,
        server_url: str,
        db_id: str,
        key: str = "root",
        account: str = "admin",
        user: str = "admin",
        db_label: str = None,
        db_description: str = None,
        **kwargs,
    ):
        """The TerminusDB constructor.

        Parameters
        ----------
        server_url: str
            The url of which the TerminusDB server is running.
        db_id: str
            Unique identifier of the database.
        key: str, optional, default = "root"
        account: str, optional, default = "admin"
            ID of the organization in which to create the DB (defaults to 'admin')
        user: str, optional, default = "admin"
        db_label: str, optional, default = None
            Optional label to be given for the db.
        db_description: str, optional, default = None
            Optional description to be given for the db.
        **kwargs
            Configuration options used to construct a :class:`ConnectionConfig` instance.
            Passing insecure=True will skip HTTPS certificate checking.
        """
        # Imported here to avoid a circular import with the client package.
        from ..woqlclient import WOQLClient

        self._client = WOQLClient(server_url, **kwargs)
        self._client.connect(key=key, account=account, user=user)
        existing = self._client.get_database(db_id, self._client.account())
        self.classes: Dict[str, WOQLClass] = {}
        if not existing:
            self._client.create_database(db_id, account, db_label, db_description)
        else:
            self._client.set_db(db_id)
            # get all classes from db and store them
            cls_result = WOQLLib().classes().execute(self._client)
            for item in cls_result["bindings"]:
                class_id = item["Class ID"].split("#")[-1]
                class_name = item["Class Name"]["@value"]
                class_des = item["Description"]["@value"]
                self.classes[class_id] = WOQLClass(class_id, class_name, class_des)
            # get all properties from db and add to classes
            prop_result = WOQLLib().property().execute(self._client)
            for item in prop_result["bindings"]:
                prop_domain = item["Property Domain"].split("#")[-1]
                prop_id = item["Property ID"].split("#")[-1]
                prop_name = item["Property Name"]["@value"]
                prop_des = item["Property Description"]["@value"]
                # NOTE(review): prop_type is read from "Property Domain",
                # which makes it identical to prop_domain — likely intended
                # to be the property's range. Confirm against WOQLLib output.
                prop_type = item["Property Domain"].split("#")[-1]
                if item["Property Type"]["@value"] == "Object":
                    prop_type = self.classes[prop_type]
                self.classes[prop_domain].add_property(
                    prop_id, prop_type, prop_name, prop_des
                )

    def add_class(self, obj: Union[WOQLClass, List[WOQLClass]]):
        """Adds one or more WOQLClass types.

        Parameters
        ----------
        obj: WOQLClass or a list of WOQLClass instances
        """
        if isinstance(obj, WOQLClass):
            self.classes[obj.id] = obj
            return obj.query_obj.execute(self._client)
        elif isinstance(obj, list):
            for item in obj:
                self.classes[item.id] = item
            # NOTE(review): passes WOQLClass objects (not their query_obj)
            # into woql_and — confirm woql_and accepts them.
            return WOQLQuery().woql_and(*obj).execute(self._client)
        else:
            raise ValueError(
                "object(s) added need to be WOQLClass object or a list of WOQLClass objects."
            )

    def add_object(self, obj: Union[WOQLObj, List[WOQLObj]]):
        """Adds one or more WOQLObj.

        Parameters
        ----------
        obj: WOQLObj or a list of WOQLObj instances

        Returns
        -------
        """
        if isinstance(obj, WOQLObj):
            # check if class is in db
            if obj._type.id in self.classes:
                return obj.query_obj.execute(self._client)
            else:
                raise ValueError("Class of object(s) is not in the schema.")
        elif isinstance(obj, list):
            # NOTE(review): this stores WOQLObj instances in self.classes
            # (a Dict[str, WOQLClass]) and skips the schema check done in
            # the single-object branch — looks unintended; confirm.
            for item in obj:
                self.classes[item.id] = item
            return WOQLQuery().woql_and(*obj).execute(self._client)
        else:
            raise ValueError(
                "Object(s) added need to be WOQLClass object or a list of WOQLClass objects."
            )

    def run(self, query: Union[WOQLQuery, Dict]):
        """Runs a query either in WOQLQuery format or json_ld in dictionary presentation

        Parameters
        ----------
        query: WOQLQuery or Dict
            Query which has to be executed.

        Returns
        -------
        The output of the query.
        """
        return self._client.query(query)
import os
import shutil
import subprocess

import yaml
from typing import List, Optional


def get_unity_executable_path():
    """Locate the Unity editor binary for $UNITY_VERSION.

    Checks the bokken CI image location first, then the local Unity Hub
    install. Raises KeyError if UNITY_VERSION is unset, FileNotFoundError
    if neither executable exists.
    """
    UNITY_VERSION = os.environ["UNITY_VERSION"]
    BOKKEN_UNITY = f"/Users/bokken/{UNITY_VERSION}/Unity.app/Contents/MacOS/Unity"
    HUB_UNITY = (
        f"/Applications/Unity/Hub/Editor/{UNITY_VERSION}/Unity.app/Contents/MacOS/Unity"
    )
    if os.path.exists(BOKKEN_UNITY):
        return BOKKEN_UNITY
    if os.path.exists(HUB_UNITY):
        return HUB_UNITY

    raise FileNotFoundError("Can't find bokken or hub executables")


def get_base_path():
    # We might need to do some more work here if the working directory ever changes
    # E.g. take the full path and back out the main module main.
    # But for now, this should work
    return os.getcwd()


def get_base_output_path():
    """"
    Returns the artifact folder to use for yamato jobs.
    """
    return os.path.join(get_base_path(), "artifacts")


def run_standalone_build(
    base_path: str,
    verbose: bool = False,
    output_path: str = None,
    scene_path: str = None,
    # NOTE(review): this default is evaluated once at import time (it calls
    # get_base_output_path() then), not per call — confirm that is intended.
    log_output_path: str = f"{get_base_output_path()}/standalone_build.txt",
) -> int:
    """
    Run BuildStandalonePlayerOSX test to produce a player. The location defaults to
    artifacts/standalone_build/testPlayer.
    """
    unity_exe = get_unity_executable_path()
    print(f"Running BuildStandalonePlayerOSX via {unity_exe}")
    test_args = [
        unity_exe,
        "-projectPath",
        f"{base_path}/Project",
        "-batchmode",
        "-executeMethod",
        "Unity.MLAgents.StandaloneBuildTest.BuildStandalonePlayerOSX",
    ]

    os.makedirs(os.path.dirname(log_output_path), exist_ok=True)
    subprocess.run(["touch", log_output_path])
    test_args += ["-logfile", log_output_path]

    if output_path is not None:
        output_path = os.path.join(get_base_output_path(), output_path)
        test_args += ["--mlagents-build-output-path", output_path]
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
    if scene_path is not None:
        test_args += ["--mlagents-build-scene-path", scene_path]
    # FIX: the original nested double quotes inside a double-quoted f-string
    # (f"{" ".join(...)}"), a SyntaxError on Python < 3.12. Single quotes
    # produce the identical output.
    print(f"{' '.join(test_args)} ...")

    timeout = 30 * 60  # 30 minutes, just in case
    res: subprocess.CompletedProcess = subprocess.run(test_args, timeout=timeout)

    # Copy the default build name into the artifacts folder.
    if output_path is None and res.returncode == 0:
        shutil.move(
            os.path.join(base_path, "Project", "testPlayer.app"),
            os.path.join(get_base_output_path(), "testPlayer.app"),
        )

    # Print if we fail or want verbosity.
    if verbose or res.returncode != 0:
        subprocess.run(["cat", log_output_path])

    return res.returncode


def init_venv(
    mlagents_python_version: str = None, extra_packages: Optional[List[str]] = None
) -> str:
    """
    Set up the virtual environment, and return the venv path.
    :param mlagents_python_version: The version of mlagents python package to install.
        If None, will do a local install, otherwise will install from pypi
    :return:
    """
    # Use a different venv path for different versions
    venv_path = "venv"
    if mlagents_python_version:
        venv_path += "_" + mlagents_python_version

    # Set up the venv and install mlagents
    subprocess.check_call(f"python -m venv {venv_path}", shell=True)
    pip_commands = [
        "--upgrade pip",
        "--upgrade setuptools",
        # TODO build these and publish to internal pypi
        "~/tensorflow_pkg/tensorflow-2.0.0-cp37-cp37m-macosx_10_14_x86_64.whl",
    ]
    if mlagents_python_version:
        # install from pypi
        pip_commands += [
            f"mlagents=={mlagents_python_version}",
            f"gym-unity=={mlagents_python_version}",
        ]
    else:
        # Local install
        pip_commands += ["-e ./ml-agents-envs", "-e ./ml-agents", "-e ./gym-unity"]
    if extra_packages:
        pip_commands += extra_packages

    for cmd in pip_commands:
        # shell=True is needed to "source" the venv activation script; all
        # inputs here are CI-controlled, not user-supplied.
        subprocess.check_call(
            f"source {venv_path}/bin/activate; python -m pip install -q {cmd}",
            shell=True,
        )
    return venv_path


def checkout_csharp_version(csharp_version):
    """
    Checks out the specific git revision (usually a tag) for the C# package and Project.
    If csharp_version is None, no changes are made.
    :param csharp_version:
    :return:
    """
    if csharp_version is None:
        return

    csharp_tag = f"com.unity.ml-agents_{csharp_version}"
    csharp_dirs = ["com.unity.ml-agents", "Project"]
    for csharp_dir in csharp_dirs:
        subprocess.check_call(f"rm -rf {csharp_dir}", shell=True)
        subprocess.check_call(f"git checkout {csharp_tag} -- {csharp_dir}", shell=True)


def undo_git_checkout():
    """
    Clean up the git working directory.
    """
    subprocess.check_call("git reset HEAD .", shell=True)
    subprocess.check_call("git checkout -- .", shell=True)
    # Ensure the cache isn't polluted with old compiled assemblies.
    # (Plain literal: the original used an f-string with no placeholders.)
    subprocess.check_call("rm -rf Project/Library", shell=True)


def override_config_file(src_path, dest_path, **kwargs):
    """
    Override settings in a trainer config file. For example,
        override_config_file(src_path, dest_path, max_steps=42)
    will copy the config file at src_path to dest_path, but override the max_steps field to 42 for all brains.
    """
    with open(src_path) as f:
        configs = yaml.safe_load(f)

    behavior_configs = configs["behaviors"]
    for config in behavior_configs.values():
        config.update(**kwargs)

    with open(dest_path, "w") as f:
        yaml.dump(configs, f)


def override_legacy_config_file(python_version, src_path, dest_path, **kwargs):
    """
    Override settings in a trainer config file, using an old version of the src_path. For example,
        override_config_file("0.16.0", src_path, dest_path, max_steps=42)
    will sync the file at src_path from version 0.16.0, copy it to dest_path, and override the
    max_steps field to 42 for all brains.
    """
    # Sync the old version of the file
    python_tag = f"python-packages_{python_version}"
    subprocess.check_call(f"git checkout {python_tag} -- {src_path}", shell=True)

    with open(src_path) as f:
        configs = yaml.safe_load(f)

    for config in configs.values():
        config.update(**kwargs)

    with open(dest_path, "w") as f:
        yaml.dump(configs, f)
import os import shutil import subprocess import yaml from typing import List, Optional def get_unity_executable_path(): UNITY_VERSION = os.environ["UNITY_VERSION"] BOKKEN_UNITY = f"/Users/bokken/{UNITY_VERSION}/Unity.app/Contents/MacOS/Unity" HUB_UNITY = ( f"/Applications/Unity/Hub/Editor/{UNITY_VERSION}/Unity.app/Contents/MacOS/Unity" ) if os.path.exists(BOKKEN_UNITY): return BOKKEN_UNITY if os.path.exists(HUB_UNITY): return HUB_UNITY raise FileNotFoundError("Can't find bokken or hub executables") def get_base_path(): # We might need to do some more work here if the working directory ever changes # E.g. take the full path and back out the main module main. # But for now, this should work return os.getcwd() def get_base_output_path(): """" Returns the artifact folder to use for yamato jobs. """ return os.path.join(get_base_path(), "artifacts") def run_standalone_build( base_path: str, verbose: bool = False, output_path: str = None, scene_path: str = None, log_output_path: str = f"{get_base_output_path()}/standalone_build.txt", ) -> int: """ Run BuildStandalonePlayerOSX test to produce a player. The location defaults to artifacts/standalone_build/testPlayer. 
""" unity_exe = get_unity_executable_path() print(f"Running BuildStandalonePlayerOSX via {unity_exe}") test_args = [ unity_exe, "-projectPath", f"{base_path}/Project", "-batchmode", "-executeMethod", "Unity.MLAgents.StandaloneBuildTest.BuildStandalonePlayerOSX", ] os.makedirs(os.path.dirname(log_output_path), exist_ok=True) subprocess.run(["touch", log_output_path]) test_args += ["-logfile", log_output_path] if output_path is not None: output_path = os.path.join(get_base_output_path(), output_path) test_args += ["--mlagents-build-output-path", output_path] os.makedirs(os.path.dirname(output_path), exist_ok=True) if scene_path is not None: test_args += ["--mlagents-build-scene-path", scene_path] print(f"{' '.join(test_args)} ...") timeout = 30 * 60 # 30 minutes, just in case res: subprocess.CompletedProcess = subprocess.run(test_args, timeout=timeout) # Copy the default build name into the artifacts folder. if output_path is None and res.returncode == 0: shutil.move( os.path.join(base_path, "Project", "testPlayer.app"), os.path.join(get_base_output_path(), "testPlayer.app"), ) # Print if we fail or want verbosity. if verbose or res.returncode != 0: subprocess.run(["cat", log_output_path]) return res.returncode def init_venv( mlagents_python_version: str = None, extra_packages: Optional[List[str]] = None ) -> str: """ Set up the virtual environment, and return the venv path. :param mlagents_python_version: The version of mlagents python packcage to install. 
If None, will do a local install, otherwise will install from pypi :return: """ # Use a different venv path for different versions venv_path = "venv" if mlagents_python_version: venv_path += "_" + mlagents_python_version # Set up the venv and install mlagents subprocess.check_call(f"python -m venv {venv_path}", shell=True) pip_commands = [ "--upgrade pip", "--upgrade setuptools", # TODO build these and publish to internal pypi "~/tensorflow_pkg/tensorflow-2.0.0-cp37-cp37m-macosx_10_14_x86_64.whl", ] if mlagents_python_version: # install from pypi pip_commands += [ f"mlagents=={mlagents_python_version}", f"gym-unity=={mlagents_python_version}", ] else: # Local install pip_commands += ["-e ./ml-agents-envs", "-e ./ml-agents", "-e ./gym-unity"] if extra_packages: pip_commands += extra_packages for cmd in pip_commands: subprocess.check_call( f"source {venv_path}/bin/activate; python -m pip install -q {cmd}", shell=True, ) return venv_path def checkout_csharp_version(csharp_version): """ Checks out the specific git revision (usually a tag) for the C# package and Project. If csharp_version is None, no changes are made. :param csharp_version: :return: """ if csharp_version is None: return csharp_tag = f"com.unity.ml-agents_{csharp_version}" csharp_dirs = ["com.unity.ml-agents", "Project"] for csharp_dir in csharp_dirs: subprocess.check_call(f"rm -rf {csharp_dir}", shell=True) subprocess.check_call(f"git checkout {csharp_tag} -- {csharp_dir}", shell=True) def undo_git_checkout(): """ Clean up the git working directory. """ subprocess.check_call("git reset HEAD .", shell=True) subprocess.check_call("git checkout -- .", shell=True) # Ensure the cache isn't polluted with old compiled assemblies. subprocess.check_call(f"rm -rf Project/Library", shell=True) def override_config_file(src_path, dest_path, **kwargs): """ Override settings in a trainer config file. 
For example, override_config_file(src_path, dest_path, max_steps=42) will copy the config file at src_path to dest_path, but override the max_steps field to 42 for all brains. """ with open(src_path) as f: configs = yaml.safe_load(f) behavior_configs = configs["behaviors"] for config in behavior_configs.values(): config.update(**kwargs) with open(dest_path, "w") as f: yaml.dump(configs, f) def override_legacy_config_file(python_version, src_path, dest_path, **kwargs): """ Override settings in a trainer config file, using an old version of the src_path. For example, override_config_file("0.16.0", src_path, dest_path, max_steps=42) will sync the file at src_path from version 0.16.0, copy it to dest_path, and override the max_steps field to 42 for all brains. """ # Sync the old version of the file python_tag = f"python-packages_{python_version}" subprocess.check_call(f"git checkout {python_tag} -- {src_path}", shell=True) with open(src_path) as f: configs = yaml.safe_load(f) for config in configs.values(): config.update(**kwargs) with open(dest_path, "w") as f: yaml.dump(configs, f)
import mysql.connector def main(): connection = mysql.connector.connect( host='lahman.csw1rmup8ri6.us-east-1.rds.amazonaws.com', user='python', passwd='python', db='lahmansbaseballdb' ) cursor = connection.cursor(prepared=True) query = """SELECT p.nameFirst, p.nameLast, b.HR, t.name AS team, b.yearID FROM batting b JOIN people p ON p.playerID = b.playerID JOIN teams t ON t.ID = b.team_ID WHERE b.yearID = %s ORDER BY b.HR DESC LIMIT 5;""" checking = True while checking: year_id = int(input('Enter a year (0 to quit): ')) if year_id == 0: break cursor.execute(query, [year_id]) results = cursor.fetchall() for i, result in enumerate(results , 1): row = dict(zip(cursor.column_names, result)) name = f"{row["nameFirst"]} {row["nameLast"]}" print(f"{i}. {name}: {row["HR"]}") cursor.close() connection.close() main()
import mysql.connector def main(): connection = mysql.connector.connect( host='lahman.csw1rmup8ri6.us-east-1.rds.amazonaws.com', user='python', passwd='python', db='lahmansbaseballdb' ) cursor = connection.cursor(prepared=True) query = """SELECT p.nameFirst, p.nameLast, b.HR, t.name AS team, b.yearID FROM batting b JOIN people p ON p.playerID = b.playerID JOIN teams t ON t.ID = b.team_ID WHERE b.yearID = %s ORDER BY b.HR DESC LIMIT 5;""" checking = True while checking: year_id = int(input('Enter a year (0 to quit): ')) if year_id == 0: break cursor.execute(query, [year_id]) results = cursor.fetchall() for i, result in enumerate(results , 1): row = dict(zip(cursor.column_names, result)) name = f"{row['nameFirst']} {row['nameLast']}" print(f"{i}. {name}: {row['HR']}") cursor.close() connection.close() main()
import argparse import json parser = argparse.ArgumentParser(description="Configuration Generator") parser.add_argument( "-f", "--file-location", help="The configuration location to be stored in. Must be " "a json file. Example: 'bfb11.json'", type=argparse.FileType("w+"), default="config.json", ) args = parser.parse_args() # Prepare variables config = {"info": None, "videoID": None, "token": None, "characters": {}} alphabeeet = "abcdefghijklmnopqrstuvwxyz" # Functions # noinspection PyUnboundLocalVariable def ask(q, default=None): """ Quick and easy ask function :param q: Question to ask :param default: Default answer, leave blank to set as mandatory :return: str(reply) """ resolve = False while not resolve: t = input(f'{q} [{'mandatory' if default is None else default}]: ') if t == "": if default is None: print( "This is a mandatory question! Please at least write something." ) else: t = default resolve = True else: resolve = True return t def choice(text: str, ch: list): """ Quick and easy choice asker :param text: Question to ask user :param ch: list: choices :return: str(reply) """ resolve = False chs = "/".join(ch) while not resolve: t = ask(f"{text} [{chs}]") if t.lower() not in ch: print( "Please pick an option in square brackets and write it exactly as prompted." ) else: return t # 7 years old played at this time o_O # Main print("Hello and welcome to this quick and easy config file generator.") print("You will be asked a few questions. Please answer them all.") input("Press Return or Enter to continue... ") print( "First, please give me a short description of the configuration. (i.e. 
bfb4)" ) config["info"] = ask( "This will help to distinguish between configurations when sharing it.") print( 'Next, please give me the video URL or ID (the part after "/watch?v=" or "youtu.be/", looks like "vL2oRzWrSgE")' ) # Could be ['video_id'], ['https:youtube.com', 'watch?v=Video_ID'] or ['https:youtu.be','fkjdsjkfgadsfk'] p = ask("Video ID").replace("//", "").split("/") if len(p) == 1: config["videoID"] = p[0] else: config["videoID"] = p[1].replace("watch?v=", "") print( "Next, please give me your google api token (due to an earlier incident of someone stealing ny api token, you " "must create your own api token to use this service. Instructions will be in" "https://github.com/kcomain/bfbVoteCounter/wiki/Getting-your-Google-API-token" ) config["token"] = ask("Google API Token") print( "Then, please give me the mode. There will be 2 valid modes, auto and custom.\n" "- Auto: this mode will be the mode you want to use if you're going to use this in a normal eliminating video.\n" "- Custom: this mode will most likely be used in favorite character voting screens. More inputs are needed." ) mode = choice("Which mode do you want to use?", ["auto", "custom"]) if mode == "auto": # Fun part print("Now, I need you to give me the characters.") count = 0 while True: name = ask( f"Character name: ([{alphabeeet[count]}]) (Press return/enter to skip)", "") if name == "": break config["characters"][alphabeeet[count]] = name count += 1 # print(f'DEBUG: C: {count} ALPH: {alphabeeet[count]}') else: # Not-so-fun part print( "Now, I need you to give me the characters and the text inside of their voting square brackets." ) while True: texte = ask("Text in square brackets:", "Press return/enter to skip") if texte == "Press return/enter to skip": break config["characters"][texte] = ask(f"Character name (of [{texte}]): ") print( "Lastly, please give me the seconds until voting ends. Typically it will be 48 hours aka 172800 seconds.\n" 'REMEMBER IT IS SECONDS AND ONLY SECONDS!!! 
YOU DON\'T NEED TO WRITE LIKE "172800s", JUST WRITE IT LIKE 172800.\n' "Pick 0 if you want to disable deadlines.") config["deadline"] = int(ask(f"Seconds until deadline: ", 172800)) print( "Alright, here's your configuration. \nFor your convenience, it has been saved to config.json or the specifiec " "one automatically. Then, you only need to run python3/python counter.py to start the counting process." ) print("-----------------") print(config) print("-----------------") json.dump(config, args.file_location, indent=4)
import argparse import json parser = argparse.ArgumentParser(description="Configuration Generator") parser.add_argument( "-f", "--file-location", help="The configuration location to be stored in. Must be " "a json file. Example: 'bfb11.json'", type=argparse.FileType("w+"), default="config.json", ) args = parser.parse_args() # Prepare variables config = {"info": None, "videoID": None, "token": None, "characters": {}} alphabeeet = "abcdefghijklmnopqrstuvwxyz" # Functions # noinspection PyUnboundLocalVariable def ask(q, default=None): """ Quick and easy ask function :param q: Question to ask :param default: Default answer, leave blank to set as mandatory :return: str(reply) """ resolve = False while not resolve: t = input(f'{q} [{"mandatory" if default is None else default}]: ') if t == "": if default is None: print( "This is a mandatory question! Please at least write something." ) else: t = default resolve = True else: resolve = True return t def choice(text: str, ch: list): """ Quick and easy choice asker :param text: Question to ask user :param ch: list: choices :return: str(reply) """ resolve = False chs = "/".join(ch) while not resolve: t = ask(f"{text} [{chs}]") if t.lower() not in ch: print( "Please pick an option in square brackets and write it exactly as prompted." ) else: return t # 7 years old played at this time o_O # Main print("Hello and welcome to this quick and easy config file generator.") print("You will be asked a few questions. Please answer them all.") input("Press Return or Enter to continue... ") print( "First, please give me a short description of the configuration. (i.e. 
bfb4)" ) config["info"] = ask( "This will help to distinguish between configurations when sharing it.") print( 'Next, please give me the video URL or ID (the part after "/watch?v=" or "youtu.be/", looks like "vL2oRzWrSgE")' ) # Could be ['video_id'], ['https:youtube.com', 'watch?v=Video_ID'] or ['https:youtu.be','fkjdsjkfgadsfk'] p = ask("Video ID").replace("//", "").split("/") if len(p) == 1: config["videoID"] = p[0] else: config["videoID"] = p[1].replace("watch?v=", "") print( "Next, please give me your google api token (due to an earlier incident of someone stealing ny api token, you " "must create your own api token to use this service. Instructions will be in" "https://github.com/kcomain/bfbVoteCounter/wiki/Getting-your-Google-API-token" ) config["token"] = ask("Google API Token") print( "Then, please give me the mode. There will be 2 valid modes, auto and custom.\n" "- Auto: this mode will be the mode you want to use if you're going to use this in a normal eliminating video.\n" "- Custom: this mode will most likely be used in favorite character voting screens. More inputs are needed." ) mode = choice("Which mode do you want to use?", ["auto", "custom"]) if mode == "auto": # Fun part print("Now, I need you to give me the characters.") count = 0 while True: name = ask( f"Character name: ([{alphabeeet[count]}]) (Press return/enter to skip)", "") if name == "": break config["characters"][alphabeeet[count]] = name count += 1 # print(f'DEBUG: C: {count} ALPH: {alphabeeet[count]}') else: # Not-so-fun part print( "Now, I need you to give me the characters and the text inside of their voting square brackets." ) while True: texte = ask("Text in square brackets:", "Press return/enter to skip") if texte == "Press return/enter to skip": break config["characters"][texte] = ask(f"Character name (of [{texte}]): ") print( "Lastly, please give me the seconds until voting ends. Typically it will be 48 hours aka 172800 seconds.\n" 'REMEMBER IT IS SECONDS AND ONLY SECONDS!!! 
YOU DON\'T NEED TO WRITE LIKE "172800s", JUST WRITE IT LIKE 172800.\n' "Pick 0 if you want to disable deadlines.") config["deadline"] = int(ask(f"Seconds until deadline: ", 172800)) print( "Alright, here's your configuration. \nFor your convenience, it has been saved to config.json or the specifiec " "one automatically. Then, you only need to run python3/python counter.py to start the counting process." ) print("-----------------") print(config) print("-----------------") json.dump(config, args.file_location, indent=4)
"Самотестирование сети" import speedtest import sys from colorama import init from rich.panel import Panel from rich.style import Style as STL from rich.console import Console init(autoreset=True) console2 = Console() def nettest(): with console2.status("[cyan]Ожидайте, идёт самотестирование сети..."): servers = [] threads = None try: s = speedtest.Speedtest() except Exception: console2.print("[bold red]Нет сети\n\nВыход") sys.exit('Bad nettest. Нет сети') s.get_servers(servers) s.get_best_server() s.download(threads=threads) s.upload(threads=threads) a = s.results.dict() d = round(a.get("download") / 1_000_000, 2) u = round(a.get("upload") / 1_000_000, 2) p = round(a.get("ping")) v4 = a.get("client") # Скорость загрузки. try: if d < 3: d = f"Download: [bold red]{d}[/bold red] Мбит/с" elif 3 <= d <= 5.5: d = f"Download: [yellow]{d}[/yellow] Мбит/с" elif d > 5.5: d = f"Download: [bold green]{d}[/bold green] Мбит/с" except: d = "Download: [bold red]Сбой[/bold red]" # Скорость выгрузки. try: if u < 0.8: u = f"Upload: [bold red]{u}[/bold red] Мбит/с" elif 0.8 <= u <= 1.5: u = f"Upload: [yellow]{u}[/yellow] Мбит/с" elif u > 1.5: u = f"Upload: [bold green]{u}[/bold green] Мбит/с" except: u = "Upload: [bold red]Сбой[/bold red]" # Ping. try: if p >= 250: p = f"Ping: [bold red]{p}[/bold red] мс" elif 60 <= p < 250: p = f"Ping: [yellow]{p}[/yellow] мс" elif p < 60: p = f"Ping: [bold green]{p}[/bold green] мс" except: p = "Ping: [bold red]Сбой[/bold red]" # Результат. console2.print(Panel.fit(f"{d}\n{u}\n{p}\n\nВаш ip: {v4.get("ip")}\nПровайдер: {v4.get("isp")}\nЛокация: {v4.get("country")}", title="🌐 Тест сети:", style=STL(color="cyan"))) console2.log("[cyan]--> завершен")
"Самотестирование сети" import speedtest import sys from colorama import init from rich.panel import Panel from rich.style import Style as STL from rich.console import Console init(autoreset=True) console2 = Console() def nettest(): with console2.status("[cyan]Ожидайте, идёт самотестирование сети..."): servers = [] threads = None try: s = speedtest.Speedtest() except Exception: console2.print("[bold red]Нет сети\n\nВыход") sys.exit('Bad nettest. Нет сети') s.get_servers(servers) s.get_best_server() s.download(threads=threads) s.upload(threads=threads) a = s.results.dict() d = round(a.get("download") / 1_000_000, 2) u = round(a.get("upload") / 1_000_000, 2) p = round(a.get("ping")) v4 = a.get("client") # Скорость загрузки. try: if d < 3: d = f"Download: [bold red]{d}[/bold red] Мбит/с" elif 3 <= d <= 5.5: d = f"Download: [yellow]{d}[/yellow] Мбит/с" elif d > 5.5: d = f"Download: [bold green]{d}[/bold green] Мбит/с" except: d = "Download: [bold red]Сбой[/bold red]" # Скорость выгрузки. try: if u < 0.8: u = f"Upload: [bold red]{u}[/bold red] Мбит/с" elif 0.8 <= u <= 1.5: u = f"Upload: [yellow]{u}[/yellow] Мбит/с" elif u > 1.5: u = f"Upload: [bold green]{u}[/bold green] Мбит/с" except: u = "Upload: [bold red]Сбой[/bold red]" # Ping. try: if p >= 250: p = f"Ping: [bold red]{p}[/bold red] мс" elif 60 <= p < 250: p = f"Ping: [yellow]{p}[/yellow] мс" elif p < 60: p = f"Ping: [bold green]{p}[/bold green] мс" except: p = "Ping: [bold red]Сбой[/bold red]" # Результат. console2.print(Panel.fit(f"{d}\n{u}\n{p}\n\nВаш ip: {v4.get('ip')}\nПровайдер: {v4.get('isp')}\nЛокация: {v4.get('country')}", title="🌐 Тест сети:", style=STL(color="cyan"))) console2.log("[cyan]--> завершен")
# Task Inference based meta-rl algorithm using Gaussian mixture models and gated Recurrent units (TIGR) import os import numpy as np import click import json import torch import copy from rlkit.envs import ENVS from rlkit.envs.wrappers import NormalizedBoxEnv from rlkit.torch.sac.policies import TanhGaussianPolicy from rlkit.torch.networks import Mlp, FlattenMlp from rlkit.launchers.launcher_util import setup_logger import rlkit.torch.pytorch_util as ptu from configs.default import default_config from tigr.task_inference.prediction_networks import DecoderMDP from tigr.sac import PolicyTrainer from tigr.stacked_replay_buffer import StackedReplayBuffer from tigr.rollout_worker import RolloutCoordinator from tigr.agent_module import Agent, ScriptedPolicyAgent from tigr.training_algorithm import TrainingAlgorithm from tigr.task_inference.true_gmm_inference import DecoupledEncoder from tigr.trainer.true_gmm_trainer import AugmentedTrainer from torch.utils.tensorboard import SummaryWriter import vis_utils.tb_logging as TB def experiment(variant): # optional GPU mode ptu.set_gpu_mode(variant['util_params']['use_gpu'], variant['util_params']['gpu_id']) torch.set_num_threads(1) # Important: Gru and Conv only work with trajectory encoding if variant['algo_params']['encoder_type'] in ['gru'] and variant['algo_params']['encoding_mode'] != 'trajectory': print(f'\nInformation: Setting encoding mode to trajectory since encoder type ' f'"{variant['algo_params']['encoder_type']}" doesn\'t work with ' f'"{variant['algo_params']['encoding_mode']}"!\n') variant['algo_params']['encoding_mode'] = 'trajectory' elif variant['algo_params']['encoder_type'] in ['transformer', 'conv'] and variant['algo_params']['encoding_mode'] != 'transitionSharedY': print(f'\nInformation: Setting encoding mode to trajectory since encoder type ' f'"{variant['algo_params']['encoder_type']}" doesn\'t work with ' f'"{variant['algo_params']['encoding_mode']}"!\n') variant['algo_params']['encoding_mode'] = 
'transitionSharedY' # Seeding if(variant['algo_params']['use_fixed_seeding']): torch.manual_seed(variant['algo_params']['seed']) np.random.seed(variant['algo_params']['seed']) # create logging directory experiment_log_dir = setup_logger(variant['env_name'], variant=variant, exp_id=variant['util_params']['exp_name'], base_log_dir=variant['util_params']['base_log_dir'], snapshot_mode='gap', snapshot_gap=variant['algo_params']['snapshot_gap']) # Create tensorboard writer and reset values TB.TENSORBOARD_LOGGER = SummaryWriter(log_dir=os.path.join(experiment_log_dir, 'tensorboard')) TB.LOG_INTERVAL = variant['util_params']['tb_log_interval'] TB.TRAINING_LOG_STEP = 0 TB.AUGMENTATION_LOG_STEP = 0 TB.TI_LOG_STEP = 0 TB.DEBUG_LOG_STEP = 0 # create multi-task environment and sample tasks env = ENVS[variant['env_name']](**variant['env_params']) if variant['env_params']['use_normalized_env']: env = NormalizedBoxEnv(env) obs_dim = int(np.prod(env.observation_space.shape)) action_dim = int(np.prod(env.action_space.shape)) reward_dim = 1 tasks = list(range(len(env.tasks))) train_tasks = list(range(len(env.train_tasks))) test_tasks = tasks[-variant['env_params']['n_eval_tasks']:] # Dump task dict as json name2number = None if hasattr(env, 'name2number'): name2number = env.name2number with open(os.path.join(experiment_log_dir, 'task_dict.json'), 'w') as f: json.dump(name2number, f) # instantiate networks net_complex_enc_dec = variant['reconstruction_params']['net_complex_enc_dec'] latent_dim = variant['algo_params']['latent_size'] time_steps = variant['algo_params']['time_steps'] num_classes = variant['reconstruction_params']['num_classes'] # encoder used: single transitions or trajectories if variant['algo_params']['encoding_mode'] == 'transitionSharedY': encoder_input_dim = obs_dim + action_dim + reward_dim + obs_dim shared_dim = int(encoder_input_dim * net_complex_enc_dec) # dimension of shared encoder output elif variant['algo_params']['encoding_mode'] == 'trajectory': 
encoder_input_dim = time_steps * (obs_dim + action_dim + reward_dim + obs_dim) shared_dim = int(encoder_input_dim / time_steps * net_complex_enc_dec) # dimension of shared encoder output else: raise NotImplementedError encoder = DecoupledEncoder( shared_dim, encoder_input_dim, latent_dim, num_classes, time_steps, encoding_mode=variant['algo_params']['encoding_mode'], timestep_combination=variant['algo_params']['timestep_combination'], encoder_type=variant['algo_params']['encoder_type'] ) decoder = DecoderMDP( action_dim, obs_dim, reward_dim, latent_dim, net_complex_enc_dec, variant['env_params']['state_reconstruction_clip'], ) M = variant['algo_params']['sac_layer_size'] qf1 = FlattenMlp( input_size=(obs_dim + latent_dim) + action_dim, output_size=1, hidden_sizes=[M, M, M], ) qf2 = FlattenMlp( input_size=(obs_dim + latent_dim) + action_dim, output_size=1, hidden_sizes=[M, M, M], ) target_qf1 = FlattenMlp( input_size=(obs_dim + latent_dim) + action_dim, output_size=1, hidden_sizes=[M, M, M], ) target_qf2 = FlattenMlp( input_size=(obs_dim + latent_dim) + action_dim, output_size=1, hidden_sizes=[M, M, M], ) policy = TanhGaussianPolicy( obs_dim=(obs_dim + latent_dim), action_dim=action_dim, latent_dim=latent_dim, hidden_sizes=[M, M, M], ) alpha_net = Mlp( hidden_sizes=[latent_dim * 10], input_size=latent_dim, output_size=1 ) networks = {'encoder': encoder, 'decoder': decoder, 'qf1': qf1, 'qf2': qf2, 'target_qf1': target_qf1, 'target_qf2': target_qf2, 'policy': policy, 'alpha_net': alpha_net} replay_buffer = StackedReplayBuffer( variant['algo_params']['max_replay_buffer_size'], time_steps, obs_dim, action_dim, latent_dim, variant['algo_params']['permute_samples'], variant['algo_params']['encoding_mode'], variant['algo_params']['sampling_mode'] ) replay_buffer_augmented = StackedReplayBuffer( variant['algo_params']['max_replay_buffer_size'], time_steps, obs_dim, action_dim, latent_dim, variant['algo_params']['permute_samples'], variant['algo_params']['encoding_mode'], 
variant['algo_params']['sampling_mode'] ) # optionally load pre-trained weights if variant['path_to_weights'] is not None: itr = variant['showcase_itr'] path = variant['path_to_weights'] for name, net in networks.items(): try: net.load_state_dict(torch.load(os.path.join(path, name + '_itr_' + str(itr) + '.pth'), map_location='cpu')) except Exception as e: print(f'Loading weights for net {name} failed. Skipping.') print(f'Loaded weights "{variant['path_to_weights']}"') if os.path.exists(os.path.join(variant['path_to_weights'], 'stats_dict.json')): with open(os.path.join(variant['path_to_weights'], 'stats_dict.json'), 'r') as f: # Copy so not both changed during updates d = npify_dict(json.load(f)) replay_buffer.stats_dict = d replay_buffer_augmented.stats_dict = copy.deepcopy(d) else: if variant['algo_params']['use_data_normalization']: raise ValueError('WARNING: No stats dict for replay buffer was found. ' 'Stats dict is required for the algorithm to work properly!') #Agent agent_class = ScriptedPolicyAgent if variant['env_params']['scripted_policy'] else Agent agent = agent_class( encoder, policy ) # Rollout Coordinator rollout_coordinator = RolloutCoordinator( env, variant['env_name'], variant['env_params'], variant['train_or_showcase'], agent, replay_buffer, variant['algo_params']['batch_size_rollout'], time_steps, variant['algo_params']['max_path_length'], variant['algo_params']['permute_samples'], variant['algo_params']['encoding_mode'], variant['util_params']['use_multiprocessing'], variant['algo_params']['use_data_normalization'], variant['util_params']['num_workers'], variant['util_params']['gpu_id'], variant['env_params']['scripted_policy'] ) reconstruction_trainer = AugmentedTrainer( encoder, decoder, replay_buffer, None, variant['algo_params']['batch_size_reconstruction'], num_classes, latent_dim, time_steps, variant['reconstruction_params']['lr_decoder'], variant['reconstruction_params']['lr_encoder'], variant['reconstruction_params']['alpha_kl_z'], 
variant['reconstruction_params']['beta_euclid'], variant['reconstruction_params']['gamma_sparsity'], variant['reconstruction_params']['regularization_lambda'], variant['reconstruction_params']['use_state_diff'], variant['env_params']['state_reconstruction_clip'], variant['algo_params']['use_data_normalization'], variant['reconstruction_params']['train_val_percent'], variant['reconstruction_params']['eval_interval'], variant['reconstruction_params']['early_stopping_threshold'], experiment_log_dir, variant['reconstruction_params']['use_regularization_loss'], use_PCGrad = variant['PCGrad_params']['use_PCGrad'], PCGrad_option = variant['PCGrad_params']['PCGrad_option'], optimizer_class = torch.optim.Adam, ) # PolicyTrainer policy_trainer = PolicyTrainer( policy, qf1, qf2, target_qf1, target_qf2, alpha_net, encoder, replay_buffer, replay_buffer_augmented, variant['algo_params']['batch_size_policy'], action_dim, 'tree_sampling', variant['algo_params']['use_data_normalization'], use_automatic_entropy_tuning=variant['algo_params']['automatic_entropy_tuning'], target_entropy_factor=variant['algo_params']['target_entropy_factor'], alpha=variant['algo_params']['sac_alpha'], use_PCGrad=variant['PCGrad_params']['use_PCGrad'], PCGrad_option=variant['PCGrad_params']['PCGrad_option'] ) algorithm = TrainingAlgorithm( replay_buffer, replay_buffer_augmented, rollout_coordinator, reconstruction_trainer, policy_trainer, agent, networks, train_tasks, test_tasks, variant['task_distribution'], latent_dim, num_classes, variant['algo_params']['use_data_normalization'], variant['algo_params']['num_train_epochs'], variant['showcase_itr'] if variant['path_to_weights'] is not None else 0, variant['algo_params']['num_training_steps_reconstruction'], variant['algo_params']['num_training_steps_policy'], variant['algo_params']['num_train_tasks_per_episode'], variant['algo_params']['num_transitions_per_episode'], variant['algo_params']['augmented_start_percentage'], 
variant['algo_params']['augmented_every'], variant['algo_params']['augmented_rollout_length'], variant['algo_params']['augmented_rollout_batch_size'], variant['algo_params']['num_eval_trajectories'], variant['algo_params']['test_evaluation_every'], variant['algo_params']['num_showcase'], experiment_log_dir, name2number ) if ptu.gpu_enabled(): algorithm.to() # debugging triggers a lot of printing and logs to a debug directory DEBUG = variant['util_params']['debug'] PLOT = variant['util_params']['plot'] os.environ['DEBUG'] = str(int(DEBUG)) os.environ['PLOT'] = str(int(PLOT)) # create temp folder if not os.path.exists(variant['reconstruction_params']['temp_folder']): os.makedirs(variant['reconstruction_params']['temp_folder']) # run the algorithm if variant['train_or_showcase'] == 'train': algorithm.train() algorithm.showcase_task_inference() elif variant['train_or_showcase'] == 'showcase_all': algorithm.showcase_all() elif variant['train_or_showcase'] == 'showcase_task_inference': algorithm.showcase_task_inference() elif variant['train_or_showcase'] == 'showcase_non_stationary_env': algorithm.showcase_non_stationary_env() def npify_dict(d: dict): for k, v in d.items(): if type(v) is dict: d[k] = npify_dict(v) else: d[k] = np.asarray(v) return d def deep_update_dict(fr, to): ''' update dict of dicts with new values ''' # assume dicts have same keys for k, v in fr.items(): if type(v) is dict: deep_update_dict(v, to[k]) else: to[k] = v return to @click.command() @click.argument('config', default=None) @click.option('--name', default='') @click.option('--ti_option', default='') @click.option('--gpu', default=None) @click.option('--num_workers', default=None) @click.option('--use_mp', is_flag=True, default=None) def click_main(config, name, ti_option, gpu, use_mp, num_workers): main(config, name, ti_option, gpu, use_mp, num_workers) def main(config=None, name='', ti_option='', gpu=None, use_mp=None, num_workers=None): variant = default_config if config: with 
open(os.path.join(config)) as f: exp_params = json.load(f) variant = deep_update_dict(exp_params, variant) # Only set values from input if they are actually inputted variant['inference_option'] = variant['inference_option'] if ti_option == '' else ti_option variant['util_params']['exp_name'] = f'{os.path.splitext(os.path.split(config)[1])[0].replace('-', '_') if config is not None else 'default'}_' + variant['inference_option'] + (f'_{name}' if name != '' else f'') variant['util_params']['use_gpu'] = variant['util_params']['use_gpu'] if gpu != '' else False variant['util_params']['gpu_id'] = variant['util_params']['gpu_id'] if gpu is None else gpu variant['util_params']['use_multiprocessing'] = variant['util_params']['use_multiprocessing'] if use_mp is None else use_mp variant['util_params']['num_workers'] = variant['util_params']['num_workers'] if num_workers is None else int(num_workers) experiment(variant) if __name__ == "__main__": click_main()
# Task Inference based meta-rl algorithm using Gaussian mixture models and gated Recurrent units (TIGR) import os import numpy as np import click import json import torch import copy from rlkit.envs import ENVS from rlkit.envs.wrappers import NormalizedBoxEnv from rlkit.torch.sac.policies import TanhGaussianPolicy from rlkit.torch.networks import Mlp, FlattenMlp from rlkit.launchers.launcher_util import setup_logger import rlkit.torch.pytorch_util as ptu from configs.default import default_config from tigr.task_inference.prediction_networks import DecoderMDP from tigr.sac import PolicyTrainer from tigr.stacked_replay_buffer import StackedReplayBuffer from tigr.rollout_worker import RolloutCoordinator from tigr.agent_module import Agent, ScriptedPolicyAgent from tigr.training_algorithm import TrainingAlgorithm from tigr.task_inference.true_gmm_inference import DecoupledEncoder from tigr.trainer.true_gmm_trainer import AugmentedTrainer from torch.utils.tensorboard import SummaryWriter import vis_utils.tb_logging as TB def experiment(variant): # optional GPU mode ptu.set_gpu_mode(variant['util_params']['use_gpu'], variant['util_params']['gpu_id']) torch.set_num_threads(1) # Important: Gru and Conv only work with trajectory encoding if variant['algo_params']['encoder_type'] in ['gru'] and variant['algo_params']['encoding_mode'] != 'trajectory': print(f'\nInformation: Setting encoding mode to trajectory since encoder type ' f'"{variant["algo_params"]["encoder_type"]}" doesn\'t work with ' f'"{variant["algo_params"]["encoding_mode"]}"!\n') variant['algo_params']['encoding_mode'] = 'trajectory' elif variant['algo_params']['encoder_type'] in ['transformer', 'conv'] and variant['algo_params']['encoding_mode'] != 'transitionSharedY': print(f'\nInformation: Setting encoding mode to trajectory since encoder type ' f'"{variant["algo_params"]["encoder_type"]}" doesn\'t work with ' f'"{variant["algo_params"]["encoding_mode"]}"!\n') variant['algo_params']['encoding_mode'] = 
'transitionSharedY' # Seeding if(variant['algo_params']['use_fixed_seeding']): torch.manual_seed(variant['algo_params']['seed']) np.random.seed(variant['algo_params']['seed']) # create logging directory experiment_log_dir = setup_logger(variant['env_name'], variant=variant, exp_id=variant['util_params']['exp_name'], base_log_dir=variant['util_params']['base_log_dir'], snapshot_mode='gap', snapshot_gap=variant['algo_params']['snapshot_gap']) # Create tensorboard writer and reset values TB.TENSORBOARD_LOGGER = SummaryWriter(log_dir=os.path.join(experiment_log_dir, 'tensorboard')) TB.LOG_INTERVAL = variant['util_params']['tb_log_interval'] TB.TRAINING_LOG_STEP = 0 TB.AUGMENTATION_LOG_STEP = 0 TB.TI_LOG_STEP = 0 TB.DEBUG_LOG_STEP = 0 # create multi-task environment and sample tasks env = ENVS[variant['env_name']](**variant['env_params']) if variant['env_params']['use_normalized_env']: env = NormalizedBoxEnv(env) obs_dim = int(np.prod(env.observation_space.shape)) action_dim = int(np.prod(env.action_space.shape)) reward_dim = 1 tasks = list(range(len(env.tasks))) train_tasks = list(range(len(env.train_tasks))) test_tasks = tasks[-variant['env_params']['n_eval_tasks']:] # Dump task dict as json name2number = None if hasattr(env, 'name2number'): name2number = env.name2number with open(os.path.join(experiment_log_dir, 'task_dict.json'), 'w') as f: json.dump(name2number, f) # instantiate networks net_complex_enc_dec = variant['reconstruction_params']['net_complex_enc_dec'] latent_dim = variant['algo_params']['latent_size'] time_steps = variant['algo_params']['time_steps'] num_classes = variant['reconstruction_params']['num_classes'] # encoder used: single transitions or trajectories if variant['algo_params']['encoding_mode'] == 'transitionSharedY': encoder_input_dim = obs_dim + action_dim + reward_dim + obs_dim shared_dim = int(encoder_input_dim * net_complex_enc_dec) # dimension of shared encoder output elif variant['algo_params']['encoding_mode'] == 'trajectory': 
encoder_input_dim = time_steps * (obs_dim + action_dim + reward_dim + obs_dim) shared_dim = int(encoder_input_dim / time_steps * net_complex_enc_dec) # dimension of shared encoder output else: raise NotImplementedError encoder = DecoupledEncoder( shared_dim, encoder_input_dim, latent_dim, num_classes, time_steps, encoding_mode=variant['algo_params']['encoding_mode'], timestep_combination=variant['algo_params']['timestep_combination'], encoder_type=variant['algo_params']['encoder_type'] ) decoder = DecoderMDP( action_dim, obs_dim, reward_dim, latent_dim, net_complex_enc_dec, variant['env_params']['state_reconstruction_clip'], ) M = variant['algo_params']['sac_layer_size'] qf1 = FlattenMlp( input_size=(obs_dim + latent_dim) + action_dim, output_size=1, hidden_sizes=[M, M, M], ) qf2 = FlattenMlp( input_size=(obs_dim + latent_dim) + action_dim, output_size=1, hidden_sizes=[M, M, M], ) target_qf1 = FlattenMlp( input_size=(obs_dim + latent_dim) + action_dim, output_size=1, hidden_sizes=[M, M, M], ) target_qf2 = FlattenMlp( input_size=(obs_dim + latent_dim) + action_dim, output_size=1, hidden_sizes=[M, M, M], ) policy = TanhGaussianPolicy( obs_dim=(obs_dim + latent_dim), action_dim=action_dim, latent_dim=latent_dim, hidden_sizes=[M, M, M], ) alpha_net = Mlp( hidden_sizes=[latent_dim * 10], input_size=latent_dim, output_size=1 ) networks = {'encoder': encoder, 'decoder': decoder, 'qf1': qf1, 'qf2': qf2, 'target_qf1': target_qf1, 'target_qf2': target_qf2, 'policy': policy, 'alpha_net': alpha_net} replay_buffer = StackedReplayBuffer( variant['algo_params']['max_replay_buffer_size'], time_steps, obs_dim, action_dim, latent_dim, variant['algo_params']['permute_samples'], variant['algo_params']['encoding_mode'], variant['algo_params']['sampling_mode'] ) replay_buffer_augmented = StackedReplayBuffer( variant['algo_params']['max_replay_buffer_size'], time_steps, obs_dim, action_dim, latent_dim, variant['algo_params']['permute_samples'], variant['algo_params']['encoding_mode'], 
variant['algo_params']['sampling_mode'] ) # optionally load pre-trained weights if variant['path_to_weights'] is not None: itr = variant['showcase_itr'] path = variant['path_to_weights'] for name, net in networks.items(): try: net.load_state_dict(torch.load(os.path.join(path, name + '_itr_' + str(itr) + '.pth'), map_location='cpu')) except Exception as e: print(f'Loading weights for net {name} failed. Skipping.') print(f'Loaded weights "{variant["path_to_weights"]}"') if os.path.exists(os.path.join(variant['path_to_weights'], 'stats_dict.json')): with open(os.path.join(variant['path_to_weights'], 'stats_dict.json'), 'r') as f: # Copy so not both changed during updates d = npify_dict(json.load(f)) replay_buffer.stats_dict = d replay_buffer_augmented.stats_dict = copy.deepcopy(d) else: if variant['algo_params']['use_data_normalization']: raise ValueError('WARNING: No stats dict for replay buffer was found. ' 'Stats dict is required for the algorithm to work properly!') #Agent agent_class = ScriptedPolicyAgent if variant['env_params']['scripted_policy'] else Agent agent = agent_class( encoder, policy ) # Rollout Coordinator rollout_coordinator = RolloutCoordinator( env, variant['env_name'], variant['env_params'], variant['train_or_showcase'], agent, replay_buffer, variant['algo_params']['batch_size_rollout'], time_steps, variant['algo_params']['max_path_length'], variant['algo_params']['permute_samples'], variant['algo_params']['encoding_mode'], variant['util_params']['use_multiprocessing'], variant['algo_params']['use_data_normalization'], variant['util_params']['num_workers'], variant['util_params']['gpu_id'], variant['env_params']['scripted_policy'] ) reconstruction_trainer = AugmentedTrainer( encoder, decoder, replay_buffer, None, variant['algo_params']['batch_size_reconstruction'], num_classes, latent_dim, time_steps, variant['reconstruction_params']['lr_decoder'], variant['reconstruction_params']['lr_encoder'], variant['reconstruction_params']['alpha_kl_z'], 
variant['reconstruction_params']['beta_euclid'], variant['reconstruction_params']['gamma_sparsity'], variant['reconstruction_params']['regularization_lambda'], variant['reconstruction_params']['use_state_diff'], variant['env_params']['state_reconstruction_clip'], variant['algo_params']['use_data_normalization'], variant['reconstruction_params']['train_val_percent'], variant['reconstruction_params']['eval_interval'], variant['reconstruction_params']['early_stopping_threshold'], experiment_log_dir, variant['reconstruction_params']['use_regularization_loss'], use_PCGrad = variant['PCGrad_params']['use_PCGrad'], PCGrad_option = variant['PCGrad_params']['PCGrad_option'], optimizer_class = torch.optim.Adam, ) # PolicyTrainer policy_trainer = PolicyTrainer( policy, qf1, qf2, target_qf1, target_qf2, alpha_net, encoder, replay_buffer, replay_buffer_augmented, variant['algo_params']['batch_size_policy'], action_dim, 'tree_sampling', variant['algo_params']['use_data_normalization'], use_automatic_entropy_tuning=variant['algo_params']['automatic_entropy_tuning'], target_entropy_factor=variant['algo_params']['target_entropy_factor'], alpha=variant['algo_params']['sac_alpha'], use_PCGrad=variant['PCGrad_params']['use_PCGrad'], PCGrad_option=variant['PCGrad_params']['PCGrad_option'] ) algorithm = TrainingAlgorithm( replay_buffer, replay_buffer_augmented, rollout_coordinator, reconstruction_trainer, policy_trainer, agent, networks, train_tasks, test_tasks, variant['task_distribution'], latent_dim, num_classes, variant['algo_params']['use_data_normalization'], variant['algo_params']['num_train_epochs'], variant['showcase_itr'] if variant['path_to_weights'] is not None else 0, variant['algo_params']['num_training_steps_reconstruction'], variant['algo_params']['num_training_steps_policy'], variant['algo_params']['num_train_tasks_per_episode'], variant['algo_params']['num_transitions_per_episode'], variant['algo_params']['augmented_start_percentage'], 
variant['algo_params']['augmented_every'], variant['algo_params']['augmented_rollout_length'], variant['algo_params']['augmented_rollout_batch_size'], variant['algo_params']['num_eval_trajectories'], variant['algo_params']['test_evaluation_every'], variant['algo_params']['num_showcase'], experiment_log_dir, name2number ) if ptu.gpu_enabled(): algorithm.to() # debugging triggers a lot of printing and logs to a debug directory DEBUG = variant['util_params']['debug'] PLOT = variant['util_params']['plot'] os.environ['DEBUG'] = str(int(DEBUG)) os.environ['PLOT'] = str(int(PLOT)) # create temp folder if not os.path.exists(variant['reconstruction_params']['temp_folder']): os.makedirs(variant['reconstruction_params']['temp_folder']) # run the algorithm if variant['train_or_showcase'] == 'train': algorithm.train() algorithm.showcase_task_inference() elif variant['train_or_showcase'] == 'showcase_all': algorithm.showcase_all() elif variant['train_or_showcase'] == 'showcase_task_inference': algorithm.showcase_task_inference() elif variant['train_or_showcase'] == 'showcase_non_stationary_env': algorithm.showcase_non_stationary_env() def npify_dict(d: dict): for k, v in d.items(): if type(v) is dict: d[k] = npify_dict(v) else: d[k] = np.asarray(v) return d def deep_update_dict(fr, to): ''' update dict of dicts with new values ''' # assume dicts have same keys for k, v in fr.items(): if type(v) is dict: deep_update_dict(v, to[k]) else: to[k] = v return to @click.command() @click.argument('config', default=None) @click.option('--name', default='') @click.option('--ti_option', default='') @click.option('--gpu', default=None) @click.option('--num_workers', default=None) @click.option('--use_mp', is_flag=True, default=None) def click_main(config, name, ti_option, gpu, use_mp, num_workers): main(config, name, ti_option, gpu, use_mp, num_workers) def main(config=None, name='', ti_option='', gpu=None, use_mp=None, num_workers=None): variant = default_config if config: with 
open(os.path.join(config)) as f: exp_params = json.load(f) variant = deep_update_dict(exp_params, variant) # Only set values from input if they are actually inputted variant['inference_option'] = variant['inference_option'] if ti_option == '' else ti_option variant['util_params']['exp_name'] = f'{os.path.splitext(os.path.split(config)[1])[0].replace("-", "_") if config is not None else "default"}_' + variant['inference_option'] + (f'_{name}' if name != '' else f'') variant['util_params']['use_gpu'] = variant['util_params']['use_gpu'] if gpu != '' else False variant['util_params']['gpu_id'] = variant['util_params']['gpu_id'] if gpu is None else gpu variant['util_params']['use_multiprocessing'] = variant['util_params']['use_multiprocessing'] if use_mp is None else use_mp variant['util_params']['num_workers'] = variant['util_params']['num_workers'] if num_workers is None else int(num_workers) experiment(variant) if __name__ == "__main__": click_main()
# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import dataclasses import functools import typing from typing import List import tenacity from mergify_engine import constants from mergify_engine import context from mergify_engine import exceptions from mergify_engine import github_types from mergify_engine import gitter from mergify_engine.clients import http from mergify_engine.dashboard.user_tokens import UserTokensUser @dataclasses.dataclass class DuplicateAlreadyExists(Exception): reason: str @dataclasses.dataclass class DuplicateUnexpectedError(Exception): reason: str @dataclasses.dataclass class DuplicateNeedRetry(exceptions.EngineNeedRetry): reason: str @dataclasses.dataclass class DuplicateNotNeeded(Exception): reason: str @dataclasses.dataclass class DuplicateFailed(Exception): reason: str @dataclasses.dataclass class DuplicateWithMergeFailure(Exception): reason: str GIT_MESSAGE_TO_EXCEPTION = { "Updates were rejected because the tip of your current branch is behind": DuplicateNeedRetry, "Aborting commit due to empty commit message": DuplicateNotNeeded, "reference already exists": DuplicateAlreadyExists, "You may want to first integrate the remote changes": DuplicateAlreadyExists, "is a merge but no -m option was given": DuplicateWithMergeFailure, "is not a commit and a branch": DuplicateFailed, "couldn't find remote ref": DuplicateFailed, } @functools.total_ordering class CommitOrderingKey: def __init__(self, obj: 
github_types.CachedGitHubBranchCommit) -> None: self.obj = obj @staticmethod def order_commit( c1: github_types.CachedGitHubBranchCommit, c2: github_types.CachedGitHubBranchCommit, ) -> int: if c1["sha"] == c2["sha"]: return 0 for p_sha in c1["parents"]: if c2["sha"] == p_sha: return 1 return -1 def __lt__(self, other: "CommitOrderingKey") -> bool: return ( isinstance(other, CommitOrderingKey) and self.order_commit(self.obj, other.obj) < 0 ) def __eq__(self, other: typing.Any) -> bool: return ( isinstance(other, CommitOrderingKey) and self.order_commit(self.obj, other.obj) == 0 ) def is_base_branch_merge_commit( commit: github_types.CachedGitHubBranchCommit, base_branch: github_types.GitHubRefType, ) -> bool: return ( commit["commit_message"].startswith(f"Merge branch '{base_branch}'") and len(commit["parents"]) == 2 ) async def _get_commits_without_base_branch_merge( ctxt: context.Context, ) -> typing.List[github_types.CachedGitHubBranchCommit]: base_branch = ctxt.pull["base"]["ref"] return list( filter( lambda c: not is_base_branch_merge_commit(c, base_branch), sorted(await ctxt.commits, key=CommitOrderingKey), ) ) async def _get_commits_to_cherrypick( ctxt: context.Context, merge_commit: github_types.CachedGitHubBranchCommit ) -> typing.List[github_types.CachedGitHubBranchCommit]: if len(merge_commit["parents"]) == 1: # NOTE(sileht): We have a rebase+merge or squash+merge # We pick all commits until a sha is not linked with our PR out_commits: typing.List[github_types.CachedGitHubBranchCommit] = [] commit = merge_commit while True: if len(commit["parents"]) != 1: # NOTE(sileht): What is that? A merge here? 
ctxt.log.error("unhandled commit structure") return [] out_commits.insert(0, commit) parent_commit_sha = commit["parents"][0] pull_numbers = [ p["number"] async for p in ctxt.client.items( f"{ctxt.base_url}/commits/{parent_commit_sha}/pulls", api_version="groot", ) if ( p["base"]["repo"]["full_name"] == ctxt.pull["base"]["repo"]["full_name"] ) ] # Head repo can be None if deleted in the meantime if ctxt.pull["head"]["repo"] is not None: pull_numbers += [ p["number"] async for p in ctxt.client.items( f"/repos/{ctxt.pull["head"]["repo"]["full_name"]}/commits/{parent_commit_sha}/pulls", api_version="groot", ) if ( p["base"]["repo"]["full_name"] == ctxt.pull["base"]["repo"]["full_name"] ) ] if ctxt.pull["number"] not in pull_numbers: if len(out_commits) == 1: ctxt.log.debug( "Pull requests merged with one commit rebased, or squashed", ) else: ctxt.log.debug("Pull requests merged after rebase") return out_commits # Prepare next iteration commit = github_types.to_cached_github_branch_commit( typing.cast( github_types.GitHubBranchCommit, await ctxt.client.item( f"{ctxt.base_url}/commits/{parent_commit_sha}" ), ) ) elif len(merge_commit["parents"]) == 2: ctxt.log.debug("Pull request merged with merge commit") return await _get_commits_without_base_branch_merge(ctxt) elif len(merge_commit["parents"]) >= 3: raise DuplicateFailed("merge commit with more than 2 parents are unsupported") else: raise RuntimeError("merge commit with no parents") KindT = typing.Literal["backport", "copy"] def get_destination_branch_name( pull_number: github_types.GitHubPullRequestNumber, branch_name: github_types.GitHubRefType, branch_prefix: str, ) -> str: return f"mergify/{branch_prefix}/{branch_name}/pr-{pull_number}" @tenacity.retry( wait=tenacity.wait_exponential(multiplier=0.2), stop=tenacity.stop_after_attempt(5), retry=tenacity.retry_if_exception_type(DuplicateNeedRetry), reraise=True, ) async def duplicate( ctxt: context.Context, branch_name: github_types.GitHubRefType, *, title_template: 
str, body_template: str, bot_account: typing.Optional[github_types.GitHubLogin] = None, labels: typing.Optional[List[str]] = None, label_conflicts: typing.Optional[str] = None, ignore_conflicts: bool = False, assignees: typing.Optional[List[str]] = None, branch_prefix: str = "bp", ) -> typing.Optional[github_types.GitHubPullRequest]: """Duplicate a pull request. :param pull: The pull request. :type pull: py:class:mergify_engine.context.Context :param title_template: The pull request title template. :param body_template: The pull request body template. :param branch: The branch to copy to. :param labels: The list of labels to add to the created PR. :param label_conflicts: The label to add to the created PR when cherry-pick failed. :param ignore_conflicts: Whether to commit the result if the cherry-pick fails. :param assignees: The list of users to be assigned to the created PR. :param branch_prefix: the prefix of the temporary created branch """ bp_branch = get_destination_branch_name( ctxt.pull["number"], branch_name, branch_prefix ) cherry_pick_error: str = "" bot_account_user: typing.Optional[UserTokensUser] = None if bot_account is not None: user_tokens = await ctxt.repository.installation.get_user_tokens() bot_account_user = user_tokens.get_token_for(bot_account) if not bot_account_user: raise DuplicateFailed( f"User `{bot_account}` is unknown. " f"Please make sure `{bot_account}` has logged in Mergify dashboard." 
) # TODO(sileht): This can be done with the Github API only I think: # An example: # https://github.com/shiqiyang-okta/ghpick/blob/master/ghpick/cherry.py git = gitter.Gitter(ctxt.log) try: await git.init() if bot_account_user is None: token = await ctxt.client.get_access_token() await git.configure() username = "x-access-token" password = token else: await git.configure( bot_account_user["name"] or bot_account_user["login"], bot_account_user["email"], ) username = bot_account_user["oauth_access_token"] password = "" # nosec await git.setup_remote("origin", ctxt.pull["base"]["repo"], username, password) await git("fetch", "--quiet", "origin", f"pull/{ctxt.pull["number"]}/head") await git("fetch", "--quiet", "origin", ctxt.pull["base"]["ref"]) await git("fetch", "--quiet", "origin", branch_name) await git("checkout", "--quiet", "-b", bp_branch, f"origin/{branch_name}") merge_commit = github_types.to_cached_github_branch_commit( typing.cast( github_types.GitHubBranchCommit, await ctxt.client.item( f"{ctxt.base_url}/commits/{ctxt.pull["merge_commit_sha"]}" ), ) ) for commit in await _get_commits_to_cherrypick(ctxt, merge_commit): # FIXME(sileht): Github does not allow to fetch only one commit # So we have to fetch the branch since the commit date ... 
# git("fetch", "origin", "%s:refs/remotes/origin/%s-commit" % # (commit["sha"], commit["sha"]) # ) # last_commit_date = commit["commit"]["committer"]["date"] # git("fetch", "origin", ctxt.pull["base"]["ref"], # "--shallow-since='%s'" % last_commit_date) try: await git("cherry-pick", "-x", commit["sha"]) except ( gitter.GitAuthenticationFailure, gitter.GitErrorRetriable, gitter.GitFatalError, ): raise except gitter.GitError as e: # pragma: no cover for message in GIT_MESSAGE_TO_EXCEPTION.keys(): if message in e.output: raise ctxt.log.info("fail to cherry-pick %s: %s", commit["sha"], e.output) output = await git("status") cherry_pick_error += f"Cherry-pick of {commit["sha"]} has failed:\n```\n{output}```\n\n\n" if not ignore_conflicts: raise DuplicateFailed(cherry_pick_error) await git("add", "*") await git("commit", "-a", "--no-edit", "--allow-empty") await git("push", "origin", bp_branch) except gitter.GitMergifyNamespaceConflict as e: raise DuplicateUnexpectedError( "`Mergify uses `mergify/...` namespace for creating temporary branches. 
" "A branch of your repository is conflicting with this namespace\n" f"```\n{e.output}\n```\n" ) except gitter.GitAuthenticationFailure as e: if bot_account_user is None: # Need to get a new token raise DuplicateNeedRetry( f"Git reported the following error:\n```\n{e.output}\n```\n" ) else: raise DuplicateUnexpectedError( f"Git reported the following error:\n```\n{e.output}\n```\n" ) except gitter.GitErrorRetriable as e: raise DuplicateNeedRetry( f"Git reported the following error:\n```\n{e.output}\n```\n" ) except gitter.GitFatalError as e: raise DuplicateUnexpectedError( f"Git reported the following error:\n```\n{e.output}\n```\n" ) except gitter.GitError as e: # pragma: no cover for message, out_exception in GIT_MESSAGE_TO_EXCEPTION.items(): if message in e.output: raise out_exception( f"Git reported the following error:\n```\n{e.output}\n```\n" ) ctxt.log.error( "duplicate pull failed", output=e.output, returncode=e.returncode, exc_info=True, ) raise DuplicateUnexpectedError(e.output) finally: await git.cleanup() if cherry_pick_error: cherry_pick_error += ( "To fix up this pull request, you can check it out locally. 
" "See documentation: " "https://docs.github.com/en/github/" "collaborating-with-pull-requests/reviewing-changes-in-pull-requests/checking-out-pull-requests-locally" ) try: title = await ctxt.pull_request.render_template( title_template, extra_variables={"destination_branch": branch_name}, ) except context.RenderTemplateFailure as rmf: raise DuplicateFailed(f"Invalid title message: {rmf}") try: body = await ctxt.pull_request.render_template( body_template, extra_variables={ "destination_branch": branch_name, "cherry_pick_error": cherry_pick_error, }, ) except context.RenderTemplateFailure as rmf: raise DuplicateFailed(f"Invalid title message: {rmf}") try: duplicate_pr = typing.cast( github_types.GitHubPullRequest, ( await ctxt.client.post( f"{ctxt.base_url}/pulls", json={ "title": title, "body": body + "\n\n---\n\n" + constants.MERGIFY_PULL_REQUEST_DOC, "base": branch_name, "head": bp_branch, }, oauth_token=bot_account_user["oauth_access_token"] if bot_account_user else None, ) ).json(), ) except http.HTTPClientSideError as e: if e.status_code == 422: if "No commits between" in e.message: if cherry_pick_error: raise DuplicateFailed(cherry_pick_error) else: raise DuplicateNotNeeded(e.message) elif "A pull request already exists" in e.message: raise DuplicateAlreadyExists(e.message) raise effective_labels = [] if labels is not None: effective_labels.extend(labels) if cherry_pick_error and label_conflicts is not None: effective_labels.append(label_conflicts) if len(effective_labels) > 0: await ctxt.client.post( f"{ctxt.base_url}/issues/{duplicate_pr["number"]}/labels", json={"labels": effective_labels}, ) if assignees is not None and len(assignees) > 0: # NOTE(sileht): we don't have to deal with invalid assignees as GitHub # just ignore them and always return 201 await ctxt.client.post( f"{ctxt.base_url}/issues/{duplicate_pr["number"]}/assignees", json={"assignees": assignees}, ) return duplicate_pr
# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import dataclasses import functools import typing from typing import List import tenacity from mergify_engine import constants from mergify_engine import context from mergify_engine import exceptions from mergify_engine import github_types from mergify_engine import gitter from mergify_engine.clients import http from mergify_engine.dashboard.user_tokens import UserTokensUser @dataclasses.dataclass class DuplicateAlreadyExists(Exception): reason: str @dataclasses.dataclass class DuplicateUnexpectedError(Exception): reason: str @dataclasses.dataclass class DuplicateNeedRetry(exceptions.EngineNeedRetry): reason: str @dataclasses.dataclass class DuplicateNotNeeded(Exception): reason: str @dataclasses.dataclass class DuplicateFailed(Exception): reason: str @dataclasses.dataclass class DuplicateWithMergeFailure(Exception): reason: str GIT_MESSAGE_TO_EXCEPTION = { "Updates were rejected because the tip of your current branch is behind": DuplicateNeedRetry, "Aborting commit due to empty commit message": DuplicateNotNeeded, "reference already exists": DuplicateAlreadyExists, "You may want to first integrate the remote changes": DuplicateAlreadyExists, "is a merge but no -m option was given": DuplicateWithMergeFailure, "is not a commit and a branch": DuplicateFailed, "couldn't find remote ref": DuplicateFailed, } @functools.total_ordering class CommitOrderingKey: def __init__(self, obj: 
github_types.CachedGitHubBranchCommit) -> None: self.obj = obj @staticmethod def order_commit( c1: github_types.CachedGitHubBranchCommit, c2: github_types.CachedGitHubBranchCommit, ) -> int: if c1["sha"] == c2["sha"]: return 0 for p_sha in c1["parents"]: if c2["sha"] == p_sha: return 1 return -1 def __lt__(self, other: "CommitOrderingKey") -> bool: return ( isinstance(other, CommitOrderingKey) and self.order_commit(self.obj, other.obj) < 0 ) def __eq__(self, other: typing.Any) -> bool: return ( isinstance(other, CommitOrderingKey) and self.order_commit(self.obj, other.obj) == 0 ) def is_base_branch_merge_commit( commit: github_types.CachedGitHubBranchCommit, base_branch: github_types.GitHubRefType, ) -> bool: return ( commit["commit_message"].startswith(f"Merge branch '{base_branch}'") and len(commit["parents"]) == 2 ) async def _get_commits_without_base_branch_merge( ctxt: context.Context, ) -> typing.List[github_types.CachedGitHubBranchCommit]: base_branch = ctxt.pull["base"]["ref"] return list( filter( lambda c: not is_base_branch_merge_commit(c, base_branch), sorted(await ctxt.commits, key=CommitOrderingKey), ) ) async def _get_commits_to_cherrypick( ctxt: context.Context, merge_commit: github_types.CachedGitHubBranchCommit ) -> typing.List[github_types.CachedGitHubBranchCommit]: if len(merge_commit["parents"]) == 1: # NOTE(sileht): We have a rebase+merge or squash+merge # We pick all commits until a sha is not linked with our PR out_commits: typing.List[github_types.CachedGitHubBranchCommit] = [] commit = merge_commit while True: if len(commit["parents"]) != 1: # NOTE(sileht): What is that? A merge here? 
ctxt.log.error("unhandled commit structure") return [] out_commits.insert(0, commit) parent_commit_sha = commit["parents"][0] pull_numbers = [ p["number"] async for p in ctxt.client.items( f"{ctxt.base_url}/commits/{parent_commit_sha}/pulls", api_version="groot", ) if ( p["base"]["repo"]["full_name"] == ctxt.pull["base"]["repo"]["full_name"] ) ] # Head repo can be None if deleted in the meantime if ctxt.pull["head"]["repo"] is not None: pull_numbers += [ p["number"] async for p in ctxt.client.items( f"/repos/{ctxt.pull['head']['repo']['full_name']}/commits/{parent_commit_sha}/pulls", api_version="groot", ) if ( p["base"]["repo"]["full_name"] == ctxt.pull["base"]["repo"]["full_name"] ) ] if ctxt.pull["number"] not in pull_numbers: if len(out_commits) == 1: ctxt.log.debug( "Pull requests merged with one commit rebased, or squashed", ) else: ctxt.log.debug("Pull requests merged after rebase") return out_commits # Prepare next iteration commit = github_types.to_cached_github_branch_commit( typing.cast( github_types.GitHubBranchCommit, await ctxt.client.item( f"{ctxt.base_url}/commits/{parent_commit_sha}" ), ) ) elif len(merge_commit["parents"]) == 2: ctxt.log.debug("Pull request merged with merge commit") return await _get_commits_without_base_branch_merge(ctxt) elif len(merge_commit["parents"]) >= 3: raise DuplicateFailed("merge commit with more than 2 parents are unsupported") else: raise RuntimeError("merge commit with no parents") KindT = typing.Literal["backport", "copy"] def get_destination_branch_name( pull_number: github_types.GitHubPullRequestNumber, branch_name: github_types.GitHubRefType, branch_prefix: str, ) -> str: return f"mergify/{branch_prefix}/{branch_name}/pr-{pull_number}" @tenacity.retry( wait=tenacity.wait_exponential(multiplier=0.2), stop=tenacity.stop_after_attempt(5), retry=tenacity.retry_if_exception_type(DuplicateNeedRetry), reraise=True, ) async def duplicate( ctxt: context.Context, branch_name: github_types.GitHubRefType, *, title_template: 
str, body_template: str, bot_account: typing.Optional[github_types.GitHubLogin] = None, labels: typing.Optional[List[str]] = None, label_conflicts: typing.Optional[str] = None, ignore_conflicts: bool = False, assignees: typing.Optional[List[str]] = None, branch_prefix: str = "bp", ) -> typing.Optional[github_types.GitHubPullRequest]: """Duplicate a pull request. :param pull: The pull request. :type pull: py:class:mergify_engine.context.Context :param title_template: The pull request title template. :param body_template: The pull request body template. :param branch: The branch to copy to. :param labels: The list of labels to add to the created PR. :param label_conflicts: The label to add to the created PR when cherry-pick failed. :param ignore_conflicts: Whether to commit the result if the cherry-pick fails. :param assignees: The list of users to be assigned to the created PR. :param branch_prefix: the prefix of the temporary created branch """ bp_branch = get_destination_branch_name( ctxt.pull["number"], branch_name, branch_prefix ) cherry_pick_error: str = "" bot_account_user: typing.Optional[UserTokensUser] = None if bot_account is not None: user_tokens = await ctxt.repository.installation.get_user_tokens() bot_account_user = user_tokens.get_token_for(bot_account) if not bot_account_user: raise DuplicateFailed( f"User `{bot_account}` is unknown. " f"Please make sure `{bot_account}` has logged in Mergify dashboard." 
) # TODO(sileht): This can be done with the Github API only I think: # An example: # https://github.com/shiqiyang-okta/ghpick/blob/master/ghpick/cherry.py git = gitter.Gitter(ctxt.log) try: await git.init() if bot_account_user is None: token = await ctxt.client.get_access_token() await git.configure() username = "x-access-token" password = token else: await git.configure( bot_account_user["name"] or bot_account_user["login"], bot_account_user["email"], ) username = bot_account_user["oauth_access_token"] password = "" # nosec await git.setup_remote("origin", ctxt.pull["base"]["repo"], username, password) await git("fetch", "--quiet", "origin", f"pull/{ctxt.pull['number']}/head") await git("fetch", "--quiet", "origin", ctxt.pull["base"]["ref"]) await git("fetch", "--quiet", "origin", branch_name) await git("checkout", "--quiet", "-b", bp_branch, f"origin/{branch_name}") merge_commit = github_types.to_cached_github_branch_commit( typing.cast( github_types.GitHubBranchCommit, await ctxt.client.item( f"{ctxt.base_url}/commits/{ctxt.pull['merge_commit_sha']}" ), ) ) for commit in await _get_commits_to_cherrypick(ctxt, merge_commit): # FIXME(sileht): Github does not allow to fetch only one commit # So we have to fetch the branch since the commit date ... 
# git("fetch", "origin", "%s:refs/remotes/origin/%s-commit" % # (commit["sha"], commit["sha"]) # ) # last_commit_date = commit["commit"]["committer"]["date"] # git("fetch", "origin", ctxt.pull["base"]["ref"], # "--shallow-since='%s'" % last_commit_date) try: await git("cherry-pick", "-x", commit["sha"]) except ( gitter.GitAuthenticationFailure, gitter.GitErrorRetriable, gitter.GitFatalError, ): raise except gitter.GitError as e: # pragma: no cover for message in GIT_MESSAGE_TO_EXCEPTION.keys(): if message in e.output: raise ctxt.log.info("fail to cherry-pick %s: %s", commit["sha"], e.output) output = await git("status") cherry_pick_error += f"Cherry-pick of {commit['sha']} has failed:\n```\n{output}```\n\n\n" if not ignore_conflicts: raise DuplicateFailed(cherry_pick_error) await git("add", "*") await git("commit", "-a", "--no-edit", "--allow-empty") await git("push", "origin", bp_branch) except gitter.GitMergifyNamespaceConflict as e: raise DuplicateUnexpectedError( "`Mergify uses `mergify/...` namespace for creating temporary branches. 
" "A branch of your repository is conflicting with this namespace\n" f"```\n{e.output}\n```\n" ) except gitter.GitAuthenticationFailure as e: if bot_account_user is None: # Need to get a new token raise DuplicateNeedRetry( f"Git reported the following error:\n```\n{e.output}\n```\n" ) else: raise DuplicateUnexpectedError( f"Git reported the following error:\n```\n{e.output}\n```\n" ) except gitter.GitErrorRetriable as e: raise DuplicateNeedRetry( f"Git reported the following error:\n```\n{e.output}\n```\n" ) except gitter.GitFatalError as e: raise DuplicateUnexpectedError( f"Git reported the following error:\n```\n{e.output}\n```\n" ) except gitter.GitError as e: # pragma: no cover for message, out_exception in GIT_MESSAGE_TO_EXCEPTION.items(): if message in e.output: raise out_exception( f"Git reported the following error:\n```\n{e.output}\n```\n" ) ctxt.log.error( "duplicate pull failed", output=e.output, returncode=e.returncode, exc_info=True, ) raise DuplicateUnexpectedError(e.output) finally: await git.cleanup() if cherry_pick_error: cherry_pick_error += ( "To fix up this pull request, you can check it out locally. 
" "See documentation: " "https://docs.github.com/en/github/" "collaborating-with-pull-requests/reviewing-changes-in-pull-requests/checking-out-pull-requests-locally" ) try: title = await ctxt.pull_request.render_template( title_template, extra_variables={"destination_branch": branch_name}, ) except context.RenderTemplateFailure as rmf: raise DuplicateFailed(f"Invalid title message: {rmf}") try: body = await ctxt.pull_request.render_template( body_template, extra_variables={ "destination_branch": branch_name, "cherry_pick_error": cherry_pick_error, }, ) except context.RenderTemplateFailure as rmf: raise DuplicateFailed(f"Invalid title message: {rmf}") try: duplicate_pr = typing.cast( github_types.GitHubPullRequest, ( await ctxt.client.post( f"{ctxt.base_url}/pulls", json={ "title": title, "body": body + "\n\n---\n\n" + constants.MERGIFY_PULL_REQUEST_DOC, "base": branch_name, "head": bp_branch, }, oauth_token=bot_account_user["oauth_access_token"] if bot_account_user else None, ) ).json(), ) except http.HTTPClientSideError as e: if e.status_code == 422: if "No commits between" in e.message: if cherry_pick_error: raise DuplicateFailed(cherry_pick_error) else: raise DuplicateNotNeeded(e.message) elif "A pull request already exists" in e.message: raise DuplicateAlreadyExists(e.message) raise effective_labels = [] if labels is not None: effective_labels.extend(labels) if cherry_pick_error and label_conflicts is not None: effective_labels.append(label_conflicts) if len(effective_labels) > 0: await ctxt.client.post( f"{ctxt.base_url}/issues/{duplicate_pr['number']}/labels", json={"labels": effective_labels}, ) if assignees is not None and len(assignees) > 0: # NOTE(sileht): we don't have to deal with invalid assignees as GitHub # just ignore them and always return 201 await ctxt.client.post( f"{ctxt.base_url}/issues/{duplicate_pr['number']}/assignees", json={"assignees": assignees}, ) return duplicate_pr
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train FCN8s."""
import argparse
import os
import subprocess

_CACHE_DATA_URL = "./cache/data"
_CACHE_TRAIN_URL = "./cache/train"


def _parse_args():
    """Parse command-line arguments for the ModelArts FCN8s start script."""
    parser = argparse.ArgumentParser('mindspore FCN8s training')

    # url for modelarts
    parser.add_argument('--data_url', type=str, default='',
                        help='Url for modelarts')
    parser.add_argument('--train_url', type=str, default='',
                        help='Url for modelarts')

    # dataset
    parser.add_argument('--crop_size', type=int, default=512,
                        help='crop_size')
    parser.add_argument('--ignore_label', type=int, default=255,
                        help='ignore label')
    parser.add_argument('--num_classes', type=int, default=21,
                        help='number of classes')
    parser.add_argument('--model', type=str, default='FCN8s',
                        help='select model')
    parser.add_argument('--train_batch_size', type=int, default=32,
                        help='train batch size')
    parser.add_argument('--min_scale', type=float, default=0.5,
                        help='min scales of train')
    parser.add_argument('--max_scale', type=float, default=2.0,
                        help='max scales of train')
    parser.add_argument('--data_file', type=str,
                        default='vocaug_mindrecords/voctrain.mindrecord0',
                        help='path of mindrecords')

    # optimizer
    parser.add_argument('--train_epochs', type=int, default=500,
                        help='train epoch')
    parser.add_argument('--base_lr', type=float, default=0.015,
                        help='base lr')
    parser.add_argument('--loss_scale', type=float, default=1024,
                        help='loss scales')

    # model
    parser.add_argument('--ckpt_vgg16', type=str, default='',
                        help='backbone pretrain')
    parser.add_argument('--ckpt_pre_trained', type=str, default='',
                        help='model pretrain')
    parser.add_argument('--save_steps', type=int, default=330,
                        help='steps interval for saving')
    parser.add_argument('--keep_checkpoint_max', type=int, default=5,
                        help='max checkpoint for saving')
    parser.add_argument('--ckpt_dir', type=str, default='',
                        help='where ckpts saved')

    # train
    parser.add_argument('--device_target', type=str, default='Ascend',
                        choices=['Ascend', 'GPU'],
                        help='device id of GPU or Ascend. (Default: Ascend)')
    parser.add_argument('--file_name', type=str, default='fcn8s',
                        help='export file name')
    parser.add_argument('--file_format', type=str, default="AIR",
                        choices=['AIR', 'MINDIR'],
                        help='export model type')
    parser.add_argument('--filter_weight', type=str, default=False,
                        help="filter weight")

    args, _ = parser.parse_known_args()
    return args


def _train(args, train_url, data_url, ckpt_vgg16, ckpt_pre_trained):
    """Launch train.py in a subprocess and return its exit code."""
    train_file = os.path.join(
        os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "train.py")
    data_file = os.path.join(data_url, args.data_file)
    cmd = ["python", train_file,
           f"--output_path={os.path.abspath(train_url)}",
           f"--data_path={os.path.abspath(data_url)}",
           f"--crop_size={args.crop_size}",
           f"--ignore_label={args.ignore_label}",
           f"--num_classes={args.num_classes}",
           f"--model={args.model}",
           f"--train_batch_size={args.train_batch_size}",
           f"--min_scale={args.min_scale}",
           f"--max_scale={args.max_scale}",
           f"--data_file={data_file}",
           f"--train_epochs={args.train_epochs}",
           f"--base_lr={args.base_lr}",
           f"--loss_scale={args.loss_scale}",
           f"--ckpt_vgg16={ckpt_vgg16}",
           f"--ckpt_pre_trained={ckpt_pre_trained}",
           f"--save_steps={args.save_steps}",
           f"--keep_checkpoint_max={args.keep_checkpoint_max}",
           f"--ckpt_dir={os.path.abspath(train_url)}",
           f"--filter_weight={args.filter_weight}",
           f"--device_target={args.device_target}"]
    print(' '.join(cmd))
    process = subprocess.Popen(cmd, shell=False)
    return process.wait()


def _get_last_ckpt(ckpt_dir):
    """Return the path of the lexicographically last .ckpt in ckpt_dir, or None."""
    ckpt_files = [ckpt_file for ckpt_file in os.listdir(ckpt_dir)
                  if ckpt_file.endswith('.ckpt')]
    if not ckpt_files:
        print("No ckpt file found.")
        return None

    return os.path.join(ckpt_dir, sorted(ckpt_files)[-1])


def _export_air(args, ckpt_dir):
    """Export the most recent checkpoint via export.py (AIR/MINDIR)."""
    ckpt_file = _get_last_ckpt(ckpt_dir)
    if not ckpt_file:
        return
    export_file = os.path.join(
        os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "export.py")
    file_name = os.path.join(ckpt_dir, args.file_name)
    cmd = ["python", export_file,
           f"--file_format={args.file_format}",
           f"--ckpt_file={ckpt_file}",
           f"--num_classes={args.num_classes}",
           f"--file_name={file_name}",
           f"--device_target={args.device_target}"]
    # Fix: the joined string must use single quotes here; reusing the
    # f-string's double-quote delimiter is a SyntaxError on Python < 3.12.
    print(f"Start exporting AIR, cmd = {' '.join(cmd)}.")
    process = subprocess.Popen(cmd, shell=False)
    process.wait()


def main():
    """Entry point: run training (ModelArts or local) and export the model."""
    args = _parse_args()
    try:
        import moxing as mox

        # Running on ModelArts: stage data in local caches.
        os.makedirs(_CACHE_TRAIN_URL, exist_ok=True)
        os.makedirs(_CACHE_DATA_URL, exist_ok=True)
        mox.file.copy_parallel(args.data_url, _CACHE_DATA_URL)
        data_url = _CACHE_DATA_URL
        train_url = _CACHE_TRAIN_URL
        ckpt_vgg16 = os.path.join(data_url, args.ckpt_vgg16) \
            if args.ckpt_vgg16 else ""
        ckpt_pre_trained = os.path.join(data_url, args.ckpt_pre_trained) \
            if args.ckpt_pre_trained else ""
        ret = _train(args, train_url, data_url, ckpt_vgg16, ckpt_pre_trained)
        _export_air(args, train_url)
        mox.file.copy_parallel(_CACHE_TRAIN_URL, args.train_url)
    except ModuleNotFoundError:
        # Running outside ModelArts (moxing unavailable): use paths directly.
        train_url = args.train_url
        data_url = args.data_url
        # Fix: ckpt_vgg16 was never assigned in this branch, causing a
        # NameError in local (non-ModelArts) runs.
        ckpt_vgg16 = args.ckpt_vgg16
        ckpt_pre_trained = args.ckpt_pre_trained
        ret = _train(args, train_url, data_url, ckpt_vgg16, ckpt_pre_trained)
        _export_air(args, train_url)

    if ret != 0:
        exit(1)


if __name__ == '__main__':
    main()
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train FCN8s."""
import argparse
import os
import subprocess

_CACHE_DATA_URL = "./cache/data"
_CACHE_TRAIN_URL = "./cache/train"


def _parse_args():
    """Parse command-line arguments for the ModelArts FCN8s start script."""
    parser = argparse.ArgumentParser('mindspore FCN8s training')

    # url for modelarts
    parser.add_argument('--data_url', type=str, default='',
                        help='Url for modelarts')
    parser.add_argument('--train_url', type=str, default='',
                        help='Url for modelarts')

    # dataset
    parser.add_argument('--crop_size', type=int, default=512,
                        help='crop_size')
    parser.add_argument('--ignore_label', type=int, default=255,
                        help='ignore label')
    parser.add_argument('--num_classes', type=int, default=21,
                        help='number of classes')
    parser.add_argument('--model', type=str, default='FCN8s',
                        help='select model')
    parser.add_argument('--train_batch_size', type=int, default=32,
                        help='train batch size')
    parser.add_argument('--min_scale', type=float, default=0.5,
                        help='min scales of train')
    parser.add_argument('--max_scale', type=float, default=2.0,
                        help='max scales of train')
    parser.add_argument('--data_file', type=str,
                        default='vocaug_mindrecords/voctrain.mindrecord0',
                        help='path of mindrecords')

    # optimizer
    parser.add_argument('--train_epochs', type=int, default=500,
                        help='train epoch')
    parser.add_argument('--base_lr', type=float, default=0.015,
                        help='base lr')
    parser.add_argument('--loss_scale', type=float, default=1024,
                        help='loss scales')

    # model
    parser.add_argument('--ckpt_vgg16', type=str, default='',
                        help='backbone pretrain')
    parser.add_argument('--ckpt_pre_trained', type=str, default='',
                        help='model pretrain')
    parser.add_argument('--save_steps', type=int, default=330,
                        help='steps interval for saving')
    parser.add_argument('--keep_checkpoint_max', type=int, default=5,
                        help='max checkpoint for saving')
    parser.add_argument('--ckpt_dir', type=str, default='',
                        help='where ckpts saved')

    # train
    parser.add_argument('--device_target', type=str, default='Ascend',
                        choices=['Ascend', 'GPU'],
                        help='device id of GPU or Ascend. (Default: Ascend)')
    parser.add_argument('--file_name', type=str, default='fcn8s',
                        help='export file name')
    parser.add_argument('--file_format', type=str, default="AIR",
                        choices=['AIR', 'MINDIR'],
                        help='export model type')
    parser.add_argument('--filter_weight', type=str, default=False,
                        help="filter weight")

    args, _ = parser.parse_known_args()
    return args


def _train(args, train_url, data_url, ckpt_vgg16, ckpt_pre_trained):
    """Launch train.py in a subprocess and return its exit code."""
    train_file = os.path.join(
        os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "train.py")
    data_file = os.path.join(data_url, args.data_file)
    cmd = ["python", train_file,
           f"--output_path={os.path.abspath(train_url)}",
           f"--data_path={os.path.abspath(data_url)}",
           f"--crop_size={args.crop_size}",
           f"--ignore_label={args.ignore_label}",
           f"--num_classes={args.num_classes}",
           f"--model={args.model}",
           f"--train_batch_size={args.train_batch_size}",
           f"--min_scale={args.min_scale}",
           f"--max_scale={args.max_scale}",
           f"--data_file={data_file}",
           f"--train_epochs={args.train_epochs}",
           f"--base_lr={args.base_lr}",
           f"--loss_scale={args.loss_scale}",
           f"--ckpt_vgg16={ckpt_vgg16}",
           f"--ckpt_pre_trained={ckpt_pre_trained}",
           f"--save_steps={args.save_steps}",
           f"--keep_checkpoint_max={args.keep_checkpoint_max}",
           f"--ckpt_dir={os.path.abspath(train_url)}",
           f"--filter_weight={args.filter_weight}",
           f"--device_target={args.device_target}"]
    print(' '.join(cmd))
    process = subprocess.Popen(cmd, shell=False)
    return process.wait()


def _get_last_ckpt(ckpt_dir):
    """Return the path of the lexicographically last .ckpt in ckpt_dir, or None."""
    ckpt_files = [ckpt_file for ckpt_file in os.listdir(ckpt_dir)
                  if ckpt_file.endswith('.ckpt')]
    if not ckpt_files:
        print("No ckpt file found.")
        return None

    return os.path.join(ckpt_dir, sorted(ckpt_files)[-1])


def _export_air(args, ckpt_dir):
    """Export the most recent checkpoint via export.py (AIR/MINDIR)."""
    ckpt_file = _get_last_ckpt(ckpt_dir)
    if not ckpt_file:
        return
    export_file = os.path.join(
        os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "export.py")
    file_name = os.path.join(ckpt_dir, args.file_name)
    cmd = ["python", export_file,
           f"--file_format={args.file_format}",
           f"--ckpt_file={ckpt_file}",
           f"--num_classes={args.num_classes}",
           f"--file_name={file_name}",
           f"--device_target={args.device_target}"]
    print(f"Start exporting AIR, cmd = {' '.join(cmd)}.")
    process = subprocess.Popen(cmd, shell=False)
    process.wait()


def main():
    """Entry point: run training (ModelArts or local) and export the model."""
    args = _parse_args()
    try:
        import moxing as mox

        # Running on ModelArts: stage data in local caches.
        os.makedirs(_CACHE_TRAIN_URL, exist_ok=True)
        os.makedirs(_CACHE_DATA_URL, exist_ok=True)
        mox.file.copy_parallel(args.data_url, _CACHE_DATA_URL)
        data_url = _CACHE_DATA_URL
        train_url = _CACHE_TRAIN_URL
        ckpt_vgg16 = os.path.join(data_url, args.ckpt_vgg16) \
            if args.ckpt_vgg16 else ""
        ckpt_pre_trained = os.path.join(data_url, args.ckpt_pre_trained) \
            if args.ckpt_pre_trained else ""
        ret = _train(args, train_url, data_url, ckpt_vgg16, ckpt_pre_trained)
        _export_air(args, train_url)
        mox.file.copy_parallel(_CACHE_TRAIN_URL, args.train_url)
    except ModuleNotFoundError:
        # Running outside ModelArts (moxing unavailable): use paths directly.
        train_url = args.train_url
        data_url = args.data_url
        # Fix: ckpt_vgg16 was never assigned in this branch, causing a
        # NameError in local (non-ModelArts) runs.
        ckpt_vgg16 = args.ckpt_vgg16
        ckpt_pre_trained = args.ckpt_pre_trained
        ret = _train(args, train_url, data_url, ckpt_vgg16, ckpt_pre_trained)
        _export_air(args, train_url)

    if ret != 0:
        exit(1)


if __name__ == '__main__':
    main()
import codecs from functools import partial import json import os from typing import Any, List, Match, Tuple, Union # pylint: disable=unused-import from urllib.parse import urljoin, urlencode, quote as urlquote from jinja2 import Environment, PackageLoader, select_autoescape, Markup, escape import thor from redbot import __version__ from redbot.formatter import Formatter, relative_time, f_num from redbot.resource import HttpResource from redbot.speak import Note, levels, categories # pylint: disable=unused-import nl = "\n" def unicode_url_escape(url: str, safe: str) -> str: """ URL escape a unicode string. Assume that anything already encoded is to be left alone. """ # also include "~" because it doesn't need to be encoded, # but Python does anyway :/ return urlquote(url, safe + r"%~") uri_gen_delims = r":/?#[]@" uri_sub_delims = r"!$&'()*+,;=" e_url = partial(unicode_url_escape, safe=uri_gen_delims + uri_sub_delims) e_authority = partial(unicode_url_escape, safe=uri_sub_delims + r"[]:@") e_path = partial(unicode_url_escape, safe=uri_sub_delims + r":@/") e_path_seg = partial(unicode_url_escape, safe=uri_sub_delims + r":@") e_query = partial(unicode_url_escape, safe=uri_sub_delims + r":@/?") e_query_arg = partial(unicode_url_escape, safe=r"!$'()*+,:@/?") e_fragment = partial(unicode_url_escape, safe=r"!$&'()*+,;:@=/?") def e_js(instr: str) -> Markup: """ Make sure instr is safe for writing into a double-quoted JavaScript string. 
""" if not instr: return Markup("") instr = instr.replace("\\", "\\\\") instr = instr.replace('"', r"\"") instr = instr.replace("<", r"\x3c") return Markup(instr) class BaseHtmlFormatter(Formatter): """ Base class for HTML formatters.""" media_type = "text/html" def __init__(self, *args: Any, **kw: Any) -> None: Formatter.__init__(self, *args, **kw) self.templates = Environment( loader=PackageLoader("redbot.formatter"), trim_blocks=True, autoescape=select_autoescape( enabled_extensions=("html", "xml"), default_for_string=True, ), ) self.templates.filters.update( { "f_num": f_num, "relative_time": relative_time, "redbot_link": self.redbot_link, } ) self.templates.globals.update( { "formatter": self, "version": __version__, "baseuri": self.config["ui_uri"], "static": self.config["static_root"], "hcaptcha": self.config.get("hcaptcha_sitekey", "") != "" and self.config.get("hcaptcha_secret", "") != "", } ) self.start = thor.time() def feed(self, chunk: bytes) -> None: pass def start_output(self) -> None: if self.resource: uri = self.resource.request.uri or "" req_headers = self.resource.request.headers else: uri = "" req_headers = [] extra_title = " <span class='save'>" if self.kw.get("is_saved", None): extra_title += " saved " if self.resource and self.resource.check_name != "default": extra_title += f"{escape(self.resource.check_name)} response" extra_title += "</span>" extra_body_class = "" if self.kw.get("is_blank", None): extra_body_class = "blank" descend = "" if self.kw.get("descend", False): descend = "&descend=True" tpl = self.templates.get_template("response_start.html") self.output( tpl.render( html_uri=uri, test_id=self.kw.get("test_id", ""), config=Markup( json.dumps( { "redbot_uri": e_js(uri), "redbot_req_hdrs": req_headers, "redbot_version": __version__, "hcaptcha_sitekey": self.config.get( "hcaptcha_sitekey", None ), }, ensure_ascii=True, ).replace("<", "\\u003c") ), extra_js=self.format_extra(".js"), extra_title=Markup(extra_title), 
extra_body_class=extra_body_class, descend=descend, ) ) def finish_output(self) -> None: """ The bottom bits. """ self.output(self.format_extra()) tpl = self.templates.get_template("footer.html") self.output(tpl.render()) def error_output(self, message: str) -> None: """ Something bad happened. """ self.output(f"<p class='error'>{message}</p>") tpl = self.templates.get_template("footer.html") self.output(tpl.render(baseuri=self.config["ui_uri"])) def status(self, message: str) -> None: "Update the status bar of the browser" self.output( f""" <script> <!-- {thor.time() - self.start:3.3f} document.querySelector('#red_status').textContent = "{escape(message)}" --> </script> """ ) def debug(self, message: str) -> None: "Debug to console." self.output( f""" <script> <!-- console.log("{thor.time() - self.start:3.3f} {e_js(message)}"); --> </script> """ ) def final_status(self) -> None: # See issue #51 # self.status("REDbot made %(reqs)s requests in %(elapse)2.3f seconds." % { # 'reqs': fetch.total_requests, self.status("") self.output( f""" <div id="final_status">{thor.time() - self.start:2.2f} seconds</div> """ ) def format_extra(self, etype: str = ".html") -> Markup: """ Show extra content from the extra_dir, if any. MUST be UTF-8. Type controls the extension included; currently supported: - '.html': shown only on start page, after input block - '.js': javascript block (with script tag surrounding) included on every page view. 
""" o = [] if self.config.get("extra_dir", "") and os.path.isdir(self.config["extra_dir"]): extra_files = [ p for p in os.listdir(self.config["extra_dir"]) if os.path.splitext(p)[1] == etype ] for extra_file in extra_files: extra_path = os.path.join(self.config["extra_dir"], extra_file) try: o.append( codecs.open( extra_path, mode="r", encoding="utf-8", # type: ignore errors="replace", ).read() ) except IOError as why: o.append(f"<!-- error opening {extra_file}: {why} -->") return Markup(nl.join(o)) def redbot_link( self, link_value: str, link: str = None, check_name: str = None, res_format: str = None, use_stored: bool = True, descend: bool = False, referer: bool = False, css_class: str = "", title: str = "", ) -> Markup: """ Format an HTML form to refer to another REDbot resource. If it can be, it will be linked with a GET; otherwise a POST form will be written. "link_value" is the link text. "link" is the resource to test; it is evaluated relative to the current context If blank, it is the same resource. "check_name" is the request type to show; see active_check/__init__.py. If not specified, that of the current context will be used. "res_format" is the response format; see formatter/*.py. If not specified, HTML will be used. If "use_stored" is true, we'll refer to the test_id, rather than make a new request. If 'referer" is true, we'll strip any existing Referer and add our own. "css_class" adds css classes; 'title' adds a title. Request headers are copied over from the current context. 
""" uri = self.resource.request.uri args = [] # type: List[Tuple[str, str]] if use_stored and self.kw.get("test_id", None): args.append(("id", self.kw["test_id"])) if check_name: args.append(("check_name", check_name)) elif self.resource.check_name is not None: args.append(("check_name", self.resource.check_name)) return Markup( f"<a href='?{urlencode(args, doseq=True)}' class='{css_class}' title='{title}'>{link_value}</a>" ) else: args.append(("uri", urljoin(uri, link or ""))) for k, v in self.resource.request.headers: if referer and k.lower() == "referer": continue args.append(("req_hdr", f"{k}:{v}")) if referer: args.append(("req_hdr", f"Referer:{uri}")) if check_name: args.append(("check_name", check_name)) elif self.resource.check_name is not None: args.append(("check_name", self.resource.check_name)) if res_format: args.append(("format", res_format)) if descend: args.append(("descend", "1")) argstring = "".join( f"""<input type='hidden' name='{arg[0]}" value="{arg[1].replace(""", """)}' />""" for arg in args ) return Markup( f"""\ <form class="link" action="" method="POST"><input type="submit" value="{link_value}" class="post_link {css_class}" title="{title}" />{argstring}</form>""" )
import codecs from functools import partial import json import os from typing import Any, List, Match, Tuple, Union # pylint: disable=unused-import from urllib.parse import urljoin, urlencode, quote as urlquote from jinja2 import Environment, PackageLoader, select_autoescape, Markup, escape import thor from redbot import __version__ from redbot.formatter import Formatter, relative_time, f_num from redbot.resource import HttpResource from redbot.speak import Note, levels, categories # pylint: disable=unused-import nl = "\n" def unicode_url_escape(url: str, safe: str) -> str: """ URL escape a unicode string. Assume that anything already encoded is to be left alone. """ # also include "~" because it doesn't need to be encoded, # but Python does anyway :/ return urlquote(url, safe + r"%~") uri_gen_delims = r":/?#[]@" uri_sub_delims = r"!$&'()*+,;=" e_url = partial(unicode_url_escape, safe=uri_gen_delims + uri_sub_delims) e_authority = partial(unicode_url_escape, safe=uri_sub_delims + r"[]:@") e_path = partial(unicode_url_escape, safe=uri_sub_delims + r":@/") e_path_seg = partial(unicode_url_escape, safe=uri_sub_delims + r":@") e_query = partial(unicode_url_escape, safe=uri_sub_delims + r":@/?") e_query_arg = partial(unicode_url_escape, safe=r"!$'()*+,:@/?") e_fragment = partial(unicode_url_escape, safe=r"!$&'()*+,;:@=/?") def e_js(instr: str) -> Markup: """ Make sure instr is safe for writing into a double-quoted JavaScript string. 
""" if not instr: return Markup("") instr = instr.replace("\\", "\\\\") instr = instr.replace('"', r"\"") instr = instr.replace("<", r"\x3c") return Markup(instr) class BaseHtmlFormatter(Formatter): """ Base class for HTML formatters.""" media_type = "text/html" def __init__(self, *args: Any, **kw: Any) -> None: Formatter.__init__(self, *args, **kw) self.templates = Environment( loader=PackageLoader("redbot.formatter"), trim_blocks=True, autoescape=select_autoescape( enabled_extensions=("html", "xml"), default_for_string=True, ), ) self.templates.filters.update( { "f_num": f_num, "relative_time": relative_time, "redbot_link": self.redbot_link, } ) self.templates.globals.update( { "formatter": self, "version": __version__, "baseuri": self.config["ui_uri"], "static": self.config["static_root"], "hcaptcha": self.config.get("hcaptcha_sitekey", "") != "" and self.config.get("hcaptcha_secret", "") != "", } ) self.start = thor.time() def feed(self, chunk: bytes) -> None: pass def start_output(self) -> None: if self.resource: uri = self.resource.request.uri or "" req_headers = self.resource.request.headers else: uri = "" req_headers = [] extra_title = " <span class='save'>" if self.kw.get("is_saved", None): extra_title += " saved " if self.resource and self.resource.check_name != "default": extra_title += f"{escape(self.resource.check_name)} response" extra_title += "</span>" extra_body_class = "" if self.kw.get("is_blank", None): extra_body_class = "blank" descend = "" if self.kw.get("descend", False): descend = "&descend=True" tpl = self.templates.get_template("response_start.html") self.output( tpl.render( html_uri=uri, test_id=self.kw.get("test_id", ""), config=Markup( json.dumps( { "redbot_uri": e_js(uri), "redbot_req_hdrs": req_headers, "redbot_version": __version__, "hcaptcha_sitekey": self.config.get( "hcaptcha_sitekey", None ), }, ensure_ascii=True, ).replace("<", "\\u003c") ), extra_js=self.format_extra(".js"), extra_title=Markup(extra_title), 
extra_body_class=extra_body_class, descend=descend, ) ) def finish_output(self) -> None: """ The bottom bits. """ self.output(self.format_extra()) tpl = self.templates.get_template("footer.html") self.output(tpl.render()) def error_output(self, message: str) -> None: """ Something bad happened. """ self.output(f"<p class='error'>{message}</p>") tpl = self.templates.get_template("footer.html") self.output(tpl.render(baseuri=self.config["ui_uri"])) def status(self, message: str) -> None: "Update the status bar of the browser" self.output( f""" <script> <!-- {thor.time() - self.start:3.3f} document.querySelector('#red_status').textContent = "{escape(message)}" --> </script> """ ) def debug(self, message: str) -> None: "Debug to console." self.output( f""" <script> <!-- console.log("{thor.time() - self.start:3.3f} {e_js(message)}"); --> </script> """ ) def final_status(self) -> None: # See issue #51 # self.status("REDbot made %(reqs)s requests in %(elapse)2.3f seconds." % { # 'reqs': fetch.total_requests, self.status("") self.output( f""" <div id="final_status">{thor.time() - self.start:2.2f} seconds</div> """ ) def format_extra(self, etype: str = ".html") -> Markup: """ Show extra content from the extra_dir, if any. MUST be UTF-8. Type controls the extension included; currently supported: - '.html': shown only on start page, after input block - '.js': javascript block (with script tag surrounding) included on every page view. 
""" o = [] if self.config.get("extra_dir", "") and os.path.isdir(self.config["extra_dir"]): extra_files = [ p for p in os.listdir(self.config["extra_dir"]) if os.path.splitext(p)[1] == etype ] for extra_file in extra_files: extra_path = os.path.join(self.config["extra_dir"], extra_file) try: o.append( codecs.open( extra_path, mode="r", encoding="utf-8", # type: ignore errors="replace", ).read() ) except IOError as why: o.append(f"<!-- error opening {extra_file}: {why} -->") return Markup(nl.join(o)) def redbot_link( self, link_value: str, link: str = None, check_name: str = None, res_format: str = None, use_stored: bool = True, descend: bool = False, referer: bool = False, css_class: str = "", title: str = "", ) -> Markup: """ Format an HTML form to refer to another REDbot resource. If it can be, it will be linked with a GET; otherwise a POST form will be written. "link_value" is the link text. "link" is the resource to test; it is evaluated relative to the current context If blank, it is the same resource. "check_name" is the request type to show; see active_check/__init__.py. If not specified, that of the current context will be used. "res_format" is the response format; see formatter/*.py. If not specified, HTML will be used. If "use_stored" is true, we'll refer to the test_id, rather than make a new request. If 'referer" is true, we'll strip any existing Referer and add our own. "css_class" adds css classes; 'title' adds a title. Request headers are copied over from the current context. 
""" uri = self.resource.request.uri args = [] # type: List[Tuple[str, str]] if use_stored and self.kw.get("test_id", None): args.append(("id", self.kw["test_id"])) if check_name: args.append(("check_name", check_name)) elif self.resource.check_name is not None: args.append(("check_name", self.resource.check_name)) return Markup( f"<a href='?{urlencode(args, doseq=True)}' class='{css_class}' title='{title}'>{link_value}</a>" ) else: args.append(("uri", urljoin(uri, link or ""))) for k, v in self.resource.request.headers: if referer and k.lower() == "referer": continue args.append(("req_hdr", f"{k}:{v}")) if referer: args.append(("req_hdr", f"Referer:{uri}")) if check_name: args.append(("check_name", check_name)) elif self.resource.check_name is not None: args.append(("check_name", self.resource.check_name)) if res_format: args.append(("format", res_format)) if descend: args.append(("descend", "1")) argstring = "".join( f"""<input type='hidden' name='{arg[0]}' value='{arg[1].replace("'", '"')}' />""" for arg in args ) return Markup( f"""\ <form class="link" action="" method="POST"><input type="submit" value="{link_value}" class="post_link {css_class}" title="{title}" />{argstring}</form>""" )
import logging
import clamd
import sys
from rest_framework.exceptions import ValidationError
from django.conf import settings

logger = logging.getLogger(__name__)


def file_scan_validation(file):
    """
    This validator sends the file to ClamAV for scanning and returns the
    result to the form.

    By default, if the antivirus service is not available or there are errors,
    the validation will fail.

    Usage:

        class UploadForm(forms.Form):
            file = forms.FileField(validators=[file_scan_validation])

    :param file: file-like object; the stream is rewound before and after scanning
    :return: None; raises ValidationError on scan failure or infection
    """
    logger.debug("starting file scanning with clamav")

    if not settings.CLAMAV_ENABLED:
        logger.warning('File scanning has been disabled.')
        return

    # make sure we're at the beginning of the file stream
    file.seek(0)

    # we're just going to assume a network connection to clamav here .. no local unix socket support
    scanner = clamd.ClamdNetworkSocket(settings.CLAMAV_HOST, settings.CLAMAV_PORT)

    try:
        result = scanner.instream(file)
    except Exception:
        # it doesn't really matter what the actual error is .. log it and raise validation error
        logger.error('Error occurred while trying to scan file. %s', sys.exc_info()[0])
        raise ValidationError('Unable to scan file.', code='scanerror')
    finally:
        # reset file stream
        file.seek(0)

    if result and result['stream'][0] == 'FOUND':
        logger.warning('Virus found: %s', file.name)
        raise ValidationError('Infected file found.', code='infected')


def valid_file_extension(file):
    """Reject files whose extension is not an accepted document/image type."""
    extension = file.name.split('.')[-1]
    if extension.lower() not in ['pdf', 'png', 'gif', 'jpg', 'jpe', 'jpeg']:
        raise ValidationError(f'File type not supported: {extension}')


def valid_doc_type(value):
    """Reject document type codes outside the known set (case-insensitive)."""
    valid_codes = ['AAI', 'AFDO', 'AFTL', 'CSA', 'EFSS1', 'EFSS2', 'MC', 'NCV', 'OFI', 'RDP']
    if value.upper() not in valid_codes:
        # Fix: the join separator must use the opposite quote style from the
        # f-string delimiter (reusing ' is a SyntaxError on Python < 3.12).
        raise ValidationError(f'Doc type not supported: {value}. Valid codes: {", ".join(valid_codes)}')


def valid_rotation(value):
    """Only multiples of 90 degrees are valid rotations."""
    if value % 90 != 0:
        raise ValidationError('Rotation must be 0, 90, 180, or 270')
import logging
import clamd
import sys
from rest_framework.exceptions import ValidationError
from django.conf import settings

logger = logging.getLogger(__name__)


def file_scan_validation(file):
    """Stream the uploaded file to a ClamAV daemon and reject it if infected.

    Validation also fails when the antivirus service is unreachable or
    errors out, so a scan problem never lets a file slip through.

    Usage:
        class UploadForm(forms.Form):
            file = forms.FileField(validators=[file_scan_validation])

    :param file:
    :return:
    """
    logger.debug("starting file scanning with clamav")

    if not settings.CLAMAV_ENABLED:
        logger.warning('File scanning has been disabled.')
        return

    # rewind so the scanner sees the whole stream
    file.seek(0)

    # network connection to clamav only -- no local unix socket support
    scanner = clamd.ClamdNetworkSocket(settings.CLAMAV_HOST, settings.CLAMAV_PORT)
    try:
        result = scanner.instream(file)
    except Exception:
        # the specific failure is irrelevant: record it and fail validation
        logger.error('Error occurred while trying to scan file. %s', sys.exc_info()[0])
        raise ValidationError('Unable to scan file.', code='scanerror')
    finally:
        # leave the stream rewound for whoever reads it next
        file.seek(0)

    infected = bool(result) and result['stream'][0] == 'FOUND'
    if infected:
        logger.warning('Virus found: %s', file.name)
        raise ValidationError('Infected file found.', code='infected')


def valid_file_extension(file):
    """Allow only pdf/png/gif/jpg/jpe/jpeg uploads."""
    extension = file.name.split('.')[-1]
    allowed = ('pdf', 'png', 'gif', 'jpg', 'jpe', 'jpeg')
    if extension.lower() not in allowed:
        raise ValidationError(f'File type not supported: {extension}')


def valid_doc_type(value):
    """Allow only the known document-type codes (compared upper-cased)."""
    valid_codes = ['AAI', 'AFDO', 'AFTL', 'CSA', 'EFSS1', 'EFSS2', 'MC', 'NCV', 'OFI', 'RDP']
    if value.upper() in valid_codes:
        return
    raise ValidationError(f'Doc type not supported: {value}. Valid codes: {", ".join(valid_codes)}')


def valid_rotation(value):
    """Only quarter-turn rotations are meaningful for page images."""
    if value % 90:
        raise ValidationError('Rotation must be 0, 90, 180, or 270')
#!/usr/bin/python3
# NOTE: f-strings in this module previously reused the outer quote character
# inside replacement fields (e.g. f"{abi["name"]}"), which is a SyntaxError on
# every Python release before 3.12, and two __repr__ strings were malformed on
# any release.  All interpolations now use the opposite quote style inside the
# braces; no runtime behavior is otherwise changed.

import json
import re
from pathlib import Path
from textwrap import TextWrapper
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union

import eth_abi
from eth_hash.auto import keccak
from eth_utils import remove_0x_prefix
from hexbytes import HexBytes

from brownie._config import ARGV, CONFIG
from brownie.convert.datatypes import Wei
from brownie.convert.normalize import format_input, format_output
from brownie.convert.utils import get_type_strings
from brownie.exceptions import (
    ContractExists,
    ContractNotFound,
    UndeployedLibrary,
    VirtualMachineError,
)
from brownie.project import ethpm
from brownie.typing import AccountsType, TransactionReceiptType
from brownie.utils import color

from . import accounts, rpc
from .event import _get_topics
from .rpc import _revert_register
from .state import _add_contract, _find_contract, _remove_contract
from .web3 import _resolve_address, web3

__tracebackhide__ = True


class _ContractBase:
    """Shared state for deployed and undeployed contract objects."""

    _dir_color = "bright magenta"

    def __init__(self, project: Any, build: Dict, name: str, abi: List) -> None:
        self._project = project
        self._build = build
        self._name = name
        self.abi = abi
        self.topics = _get_topics(abi)
        # function name -> bytes4 selector, for reverse calldata lookups
        self.signatures = dict((i["name"], _selector(i)) for i in abi if i["type"] == "function")

    def info(self) -> None:
        """
        Display NatSpec documentation for this contract.
        """
        _print_natspec(self._build["natspec"])

    def get_method(self, calldata: str) -> Optional[str]:
        """Return the name of the function matching the calldata selector, if known."""
        sig = calldata[:10].lower()
        return next((k for k, v in self.signatures.items() if v == sig), None)


class ContractContainer(_ContractBase):
    """List-like container class that holds all Contract instances of the same
    type, and is used to deploy new instances of that contract.

    Attributes:
        abi: Complete contract ABI.
        bytecode: Bytecode used to deploy the contract.
        signatures: Dictionary of {'function name': "bytes4 signature"}
        topics: Dictionary of {'event name': "bytes32 topic"}"""

    def __init__(self, project: Any, build: Dict) -> None:
        self.tx = None
        self.bytecode = build["bytecode"]
        self._contracts: List["ProjectContract"] = []
        super().__init__(project, build, build["contractName"], build["abi"])
        self.deploy = ContractConstructor(self, self._name)
        _revert_register(self)

    def __iter__(self) -> Iterator:
        return iter(self._contracts)

    def __getitem__(self, i: Any) -> "ProjectContract":
        return self._contracts[i]

    def __delitem__(self, key: Any) -> None:
        item = self._contracts[key]
        self.remove(item)

    def __len__(self) -> int:
        return len(self._contracts)

    def __repr__(self) -> str:
        return str(self._contracts)

    def _reset(self) -> None:
        # drop every tracked deployment (chain was fully reset)
        for contract in self._contracts:
            contract._delete_deployment()
            _remove_contract(contract)
            contract._reverted = True
        self._contracts.clear()

    def _revert(self, height: int) -> None:
        # drop deployments created after `height` or whose code vanished
        reverted = [
            i
            for i in self._contracts
            if (i.tx and i.tx.block_number > height)
            or len(web3.eth.getCode(i.address).hex()) <= 4
        ]
        for contract in reverted:
            self.remove(contract)
            contract._reverted = True

    def remove(self, contract: "ProjectContract") -> None:
        """Removes a contract from the container.

        Args:
            contract: Contract instance of address string of the contract."""
        if contract not in self._contracts:
            raise TypeError("Object is not in container.")
        self._contracts.remove(contract)
        contract._delete_deployment()
        _remove_contract(contract)

    def at(
        self,
        address: str,
        owner: Optional[AccountsType] = None,
        tx: Optional[TransactionReceiptType] = None,
    ) -> "ProjectContract":
        """Returns a contract address.

        Raises ValueError if no bytecode exists at the address.

        Args:
            address: Address string of the contract.
            owner: Default Account instance to send contract transactions from.
            tx: Transaction ID of the contract creation."""
        contract = _find_contract(address)
        if contract:
            if contract._name == self._name and contract._project == self._project:
                return contract
            raise ContractExists(
                f"'{contract._name}' declared at {address} in project '{contract._project._name}'"
            )
        if _verify_deployed_code(address, self._build["deployedBytecode"]):
            contract = ProjectContract(self._project, self._build, address, owner, tx)
        else:
            # bytecode does not match this container's build -- fall back to a
            # generic Contract wrapper using only the ABI
            contract = Contract(self._name, address, self.abi, owner=owner)
            contract._project = self._project
        contract._save_deployment()
        _add_contract(contract)
        self._contracts.append(contract)
        return contract

    def _add_from_tx(self, tx: TransactionReceiptType) -> None:
        tx._confirmed.wait()
        if tx.status:
            self.at(tx.contract_address, tx.sender, tx)


class ContractConstructor:

    _dir_color = "bright magenta"

    def __init__(self, parent: "ContractContainer", name: str) -> None:
        self._parent = parent
        try:
            self.abi = next(i for i in parent.abi if i["type"] == "constructor")
            self.abi["name"] = "constructor"
        except Exception:
            # contract has no explicit constructor -- synthesize an empty one
            self.abi = {"inputs": [], "name": "constructor", "type": "constructor"}
        self._name = name

    def __repr__(self) -> str:
        return f"<{type(self).__name__} '{self._name}.constructor({_inputs(self.abi)})'>"

    def __call__(self, *args: Tuple) -> Union["Contract", TransactionReceiptType]:
        """Deploys a contract.

        Args:
            *args: Constructor arguments. The last argument MUST be a dictionary
                   of transaction values containing at minimum a 'from' key to
                   specify which account to deploy this contract from.

        Returns:
            * Contract instance if the transaction confirms
            * TransactionReceipt if the transaction is pending or reverts"""
        args, tx = _get_tx(None, args)
        if not tx["from"]:
            raise AttributeError(
                "No deployer address given. You must supply a tx dict"
                " with a 'from' field as the last argument."
            )
        return tx["from"].deploy(
            self._parent, *args, amount=tx["value"], gas_limit=tx["gas"], gas_price=tx["gasPrice"]
        )

    def encode_input(self, *args: tuple) -> str:
        bytecode = self._parent.bytecode
        # find and replace unlinked library pointers in bytecode
        for marker in re.findall("_{1,}[^_]*_{1,}", bytecode):
            library = marker.strip("_")
            if not self._parent._project[library]:
                raise UndeployedLibrary(
                    f"Contract requires '{library}' library, but it has not been deployed yet"
                )
            address = self._parent._project[library][-1].address[-40:]
            bytecode = bytecode.replace(marker, address)
        data = format_input(self.abi, args)
        types_list = get_type_strings(self.abi["inputs"])
        return bytecode + eth_abi.encode_abi(types_list, data).hex()


class _DeployedContractBase(_ContractBase):
    """Methods for interacting with a deployed contract.

    Each public contract method is available as a ContractCall or ContractTx
    instance, created when this class is instantiated.

    Attributes:
        bytecode: Bytecode of the deployed contract, including constructor args.
        tx: TransactionReceipt of the of the tx that deployed the contract."""

    _reverted = False

    def __init__(
        self, address: str, owner: Optional[AccountsType] = None, tx: TransactionReceiptType = None
    ) -> None:
        address = _resolve_address(address)
        self.bytecode = web3.eth.getCode(address).hex()[2:]
        if not self.bytecode:
            raise ContractNotFound(f"No contract deployed at {address}")
        self._owner = owner
        self.tx = tx
        self.address = address
        fn_names = [i["name"] for i in self.abi if i["type"] == "function"]
        for abi in [i for i in self.abi if i["type"] == "function"]:
            name = f"{self._name}.{abi['name']}"
            sig = _signature(abi)
            natspec: Dict = {}
            if "natspec" in self._build:
                natspec = self._build["natspec"]["methods"].get(sig, {})

            if fn_names.count(abi["name"]) == 1:
                fn = _get_method_object(address, abi, name, owner, natspec)
                self._check_and_set(abi["name"], fn)
                continue

            # overloaded name: group the variants under an OverloadedMethod,
            # keyed by a normalized comma-joined input type string
            if not hasattr(self, abi["name"]):
                overloaded = OverloadedMethod(address, name, owner)
                self._check_and_set(abi["name"], overloaded)
            key = ",".join(i["type"] for i in abi["inputs"]).replace("256", "")
            fn = _get_method_object(address, abi, name, owner, natspec)
            getattr(self, abi["name"]).methods[key] = fn

    def _check_and_set(self, name: str, obj: Any) -> None:
        if hasattr(self, name):
            raise AttributeError(f"Namespace collision: '{self._name}.{name}'")
        setattr(self, name, obj)

    def __hash__(self) -> int:
        return hash(f"{self._name}{self.address}{self._project}")

    def __str__(self) -> str:
        return self.address

    def __repr__(self) -> str:
        return f"<{self._name} Contract '{color('bright magenta')}{self.address}{color}'>"

    def __eq__(self, other: object) -> bool:
        if isinstance(other, _DeployedContractBase):
            return self.address == other.address and self.bytecode == other.bytecode
        if isinstance(other, str):
            try:
                address = _resolve_address(other)
                return address == self.address
            except ValueError:
                return False
        return super().__eq__(other)

    def __getattribute__(self, name: str) -> Any:
        # reverted contracts must not be interacted with at all
        if super().__getattribute__("_reverted"):
            raise ContractNotFound("This contract no longer exists.")
        return super().__getattribute__(name)

    def balance(self) -> Wei:
        """Returns the current ether balance of the contract, in wei."""
        balance = web3.eth.getBalance(self.address)
        return Wei(balance)

    def _deployment_path(self) -> Optional[Path]:
        if not CONFIG["active_network"].get("persist", None) or not self._project._path:
            return None
        network = CONFIG["active_network"]["name"]
        path = self._project._path.joinpath(f"build/deployments/{network}")
        path.mkdir(exist_ok=True)
        return path.joinpath(f"{self.address}.json")

    def _save_deployment(self) -> None:
        path = self._deployment_path()
        if path and not path.exists():
            with path.open("w") as fp:
                json.dump(self._build, fp)

    def _delete_deployment(self) -> None:
        path = self._deployment_path()
        if path and path.exists():
            path.unlink()


class Contract(_DeployedContractBase):
    def __init__(
        self,
        name: str,
        address: Optional[str] = None,
        abi: Optional[List] = None,
        manifest_uri: Optional[str] = None,
        owner: Optional[AccountsType] = None,
    ) -> None:
        if manifest_uri and abi:
            raise ValueError("Contract requires either abi or manifest_uri, but not both")
        if manifest_uri is not None:
            manifest = ethpm.get_manifest(manifest_uri)
            abi = manifest["contract_types"][name]["abi"]
            if address is None:
                address_list = ethpm.get_deployment_addresses(manifest, name)
                if not address_list:
                    raise ContractNotFound(
                        f"'{manifest['package_name']}' manifest does not contain"
                        f" a deployment of '{name}' on this chain"
                    )
                if len(address_list) > 1:
                    raise ValueError(
                        f"'{manifest['package_name']}' manifest contains more than one "
                        f"deployment of '{name}' on this chain, you must specify an address:"
                        f" {', '.join(address_list)}"
                    )
                address = address_list[0]
            name = manifest["contract_types"][name]["contract_name"]
        elif not address:
            raise TypeError("Address cannot be None unless creating object from manifest")

        build = {"abi": abi, "contractName": name, "type": "contract"}
        _ContractBase.__init__(self, None, build, name, abi)  # type: ignore
        _DeployedContractBase.__init__(self, address, owner, None)
        contract = _find_contract(address)
        if not contract:
            return
        # another object already tracks this address -- if its bytecode no
        # longer matches, mark it as reverted
        if contract.bytecode != self.bytecode:
            contract._reverted = True


class ProjectContract(_DeployedContractBase):
    """Methods for interacting with a deployed contract as part of a Brownie project."""

    def __init__(
        self,
        project: Any,
        build: Dict,
        address: str,
        owner: Optional[AccountsType] = None,
        tx: TransactionReceiptType = None,
    ) -> None:
        _ContractBase.__init__(self, project, build, build["contractName"], build["abi"])
        _DeployedContractBase.__init__(self, address, owner, tx)


class OverloadedMethod:
    """Dispatches between same-named contract methods by input-type signature."""

    def __init__(self, address: str, name: str, owner: Optional[AccountsType]):
        self._address = address
        self._name = name
        self._owner = owner
        self.methods: Dict = {}

    def __getitem__(self, key: Union[Tuple, str]) -> "_ContractMethod":
        if isinstance(key, tuple):
            key = ",".join(key)
        # normalize to the same key format used when methods were registered
        key = key.replace("256", "").replace(", ", ",")
        return self.methods[key]

    def __repr__(self) -> str:
        return f"<OverloadedMethod '{self._name}'>"

    def __len__(self) -> int:
        return len(self.methods)


class _ContractMethod:

    _dir_color = "bright magenta"

    def __init__(
        self,
        address: str,
        abi: Dict,
        name: str,
        owner: Optional[AccountsType],
        natspec: Optional[Dict] = None,
    ) -> None:
        self._address = address
        self._name = name
        self.abi = abi
        self._owner = owner
        self.signature = _selector(abi)
        self.natspec = natspec or {}

    def __repr__(self) -> str:
        # old ABIs expose 'payable' directly; newer ones use 'stateMutability'
        if "payable" in self.abi:
            pay_bool = self.abi["payable"]
        else:
            pay_bool = self.abi["stateMutability"] == "payable"
        pay = "payable " if pay_bool else ""
        return f"<{type(self).__name__} {pay}'{self.abi['name']}({_inputs(self.abi)})'>"

    def info(self) -> None:
        """
        Display NatSpec documentation for this method.
        """
        print(f"{self.abi['name']}({_inputs(self.abi)})")
        _print_natspec(self.natspec)

    def call(self, *args: Tuple) -> Any:
        """Calls the contract method without broadcasting a transaction.

        Args:
            *args: Contract method inputs. You can optionally provide a
                   dictionary of transaction properties as the last arg.

        Returns:
            Contract method return value(s)."""
        args, tx = _get_tx(self._owner, args)
        if tx["from"]:
            tx["from"] = str(tx["from"])
        tx.update({"to": self._address, "data": self.encode_input(*args)})
        try:
            data = web3.eth.call(dict((k, v) for k, v in tx.items() if v))
        except ValueError as e:
            raise VirtualMachineError(e) from None
        return self.decode_output(data)

    def transact(self, *args: Tuple) -> TransactionReceiptType:
        """Broadcasts a transaction that calls this contract method.

        Args:
            *args: Contract method inputs. You can optionally provide a
                   dictionary of transaction properties as the last arg.

        Returns:
            TransactionReceipt instance."""
        args, tx = _get_tx(self._owner, args)
        if not tx["from"]:
            raise AttributeError(
                "Contract has no owner, you must supply a tx dict"
                " with a 'from' field as the last argument."
            )
        return tx["from"].transfer(
            self._address,
            tx["value"],
            gas_limit=tx["gas"],
            gas_price=tx["gasPrice"],
            data=self.encode_input(*args),
        )

    def encode_input(self, *args: Tuple) -> str:
        """Returns encoded ABI data to call the method with the given arguments.

        Args:
            *args: Contract method inputs

        Returns:
            Hexstring of encoded ABI data."""
        data = format_input(self.abi, args)
        types_list = get_type_strings(self.abi["inputs"])
        return self.signature + eth_abi.encode_abi(types_list, data).hex()

    def decode_output(self, hexstr: str) -> Tuple:
        """Decodes hexstring data returned by this method.

        Args:
            hexstr: Hexstring of returned call data

        Returns:
            Decoded values."""
        types_list = get_type_strings(self.abi["outputs"])
        result = eth_abi.decode_abi(types_list, HexBytes(hexstr))
        result = format_output(self.abi, result)
        if len(result) == 1:
            result = result[0]
        return result


class ContractTx(_ContractMethod):
    """A public payable or non-payable contract method.

    Args:
        abi: Contract ABI specific to this method.
        signature: Bytes4 method signature."""

    def __call__(self, *args: Tuple) -> TransactionReceiptType:
        """Broadcasts a transaction that calls this contract method.

        Args:
            *args: Contract method inputs. You can optionally provide a
                   dictionary of transaction properties as the last arg.

        Returns:
            TransactionReceipt instance."""
        return self.transact(*args)


class ContractCall(_ContractMethod):
    """A public view or pure contract method.

    Args:
        abi: Contract ABI specific to this method.
        signature: Bytes4 method signature."""

    def __call__(self, *args: Tuple) -> Callable:
        """Calls the contract method without broadcasting a transaction.

        Args:
            *args: Contract method inputs. You can optionally provide a
                   dictionary of transaction properties as the last arg.

        Returns:
            Contract method return value(s)."""
        if not ARGV["always_transact"]:
            return self.call(*args)
        # 'always_transact' mode: execute as a tx against a snapshot so that
        # debugging tools can inspect the call, then revert the chain
        rpc._internal_snap()
        args, tx = _get_tx(self._owner, args)
        tx["gas_price"] = 0
        try:
            tx = self.transact(*args, tx)
            return tx.return_value
        finally:
            rpc._internal_revert()


def _get_tx(owner: Optional[AccountsType], args: Tuple) -> Tuple:
    # set / remove default sender
    if owner is None:
        owner = accounts.default
    default_owner = CONFIG["active_network"].get("default_contract_owner", True)
    if ARGV["cli"] == "test" and default_owner is False:
        owner = None

    # seperate contract inputs from tx dict and set default tx values
    tx = {"from": owner, "value": 0, "gas": None, "gasPrice": None}
    if args and isinstance(args[-1], dict):
        tx.update(args[-1])
        args = args[:-1]
        for key, target in [("amount", "value"), ("gas_limit", "gas"), ("gas_price", "gasPrice")]:
            if key in tx:
                tx[target] = tx[key]
    return args, tx


def _get_method_object(
    address: str, abi: Dict, name: str, owner: Optional[AccountsType], natspec: Dict
) -> Union["ContractCall", "ContractTx"]:
    # old ABIs expose 'constant' directly; newer ones use 'stateMutability'
    if "constant" in abi:
        constant = abi["constant"]
    else:
        constant = abi["stateMutability"] in ("view", "pure")

    if constant:
        return ContractCall(address, abi, name, owner, natspec)
    return ContractTx(address, abi, name, owner, natspec)


def _inputs(abi: Dict) -> str:
    types_list = get_type_strings(abi["inputs"], {"fixed168x10": "decimal"})
    params = zip([i["name"] for i in abi["inputs"]], types_list)
    return ", ".join(
        f"{i[1]}{color('bright blue')}{' '+i[0] if i[0] else ''}{color}" for i in params
    )


def _signature(abi: Dict) -> str:
    types_list = get_type_strings(abi["inputs"])
    return f"{abi['name']}({','.join(types_list)})"


def _selector(abi: Dict) -> str:
    sig = _signature(abi)
    return "0x" + keccak(sig.encode()).hex()[:8]


def _verify_deployed_code(address: str, expected_bytecode: str) -> bool:
    actual_bytecode = web3.eth.getCode(address).hex()[2:]
    expected_bytecode = remove_0x_prefix(expected_bytecode)  # type: ignore

    if expected_bytecode.startswith("730000000000000000000000000000000000000000"):
        # special case for Solidity libraries
        return (
            actual_bytecode.startswith(f"73{address[2:].lower()}")
            and actual_bytecode[42:] == expected_bytecode[42:]
        )

    if "_" in expected_bytecode:
        # strip each unlinked-library placeholder and the corresponding 40
        # bytecode chars before comparing
        for marker in re.findall("_{1,}[^_]*_{1,}", expected_bytecode):
            idx = expected_bytecode.index(marker)
            actual_bytecode = actual_bytecode[:idx] + actual_bytecode[idx + 40 :]
            expected_bytecode = expected_bytecode[:idx] + expected_bytecode[idx + 40 :]

    return actual_bytecode == expected_bytecode


def _print_natspec(natspec: Dict) -> None:
    wrapper = TextWrapper(initial_indent=f" {color('bright magenta')}")
    for key in [i for i in ("title", "notice", "author", "details") if i in natspec]:
        wrapper.subsequent_indent = " " * (len(key) + 4)
        print(wrapper.fill(f"@{key} {color}{natspec[key]}"))

    for key, value in natspec.get("params", {}).items():
        print(wrapper.fill(f"@param {color('bright blue')}{key}{color} {value}"))

    if "return" in natspec:
        print(wrapper.fill(f"@return {color}{natspec['return']}"))

    for key in sorted(natspec.get("returns", [])):
        print(wrapper.fill(f"@return {color}{natspec['returns'][key]}"))

    print()
#!/usr/bin/python3 import json import re from pathlib import Path from textwrap import TextWrapper from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union import eth_abi from eth_hash.auto import keccak from eth_utils import remove_0x_prefix from hexbytes import HexBytes from brownie._config import ARGV, CONFIG from brownie.convert.datatypes import Wei from brownie.convert.normalize import format_input, format_output from brownie.convert.utils import get_type_strings from brownie.exceptions import ( ContractExists, ContractNotFound, UndeployedLibrary, VirtualMachineError, ) from brownie.project import ethpm from brownie.typing import AccountsType, TransactionReceiptType from brownie.utils import color from . import accounts, rpc from .event import _get_topics from .rpc import _revert_register from .state import _add_contract, _find_contract, _remove_contract from .web3 import _resolve_address, web3 __tracebackhide__ = True class _ContractBase: _dir_color = "bright magenta" def __init__(self, project: Any, build: Dict, name: str, abi: List) -> None: self._project = project self._build = build self._name = name self.abi = abi self.topics = _get_topics(abi) self.signatures = dict((i["name"], _selector(i)) for i in abi if i["type"] == "function") def info(self) -> None: """ Display NatSpec documentation for this contract. """ _print_natspec(self._build["natspec"]) def get_method(self, calldata: str) -> Optional[str]: sig = calldata[:10].lower() return next((k for k, v in self.signatures.items() if v == sig), None) class ContractContainer(_ContractBase): """List-like container class that holds all Contract instances of the same type, and is used to deploy new instances of that contract. Attributes: abi: Complete contract ABI. bytecode: Bytecode used to deploy the contract. 
signatures: Dictionary of {'function name': "bytes4 signature"} topics: Dictionary of {'event name': "bytes32 topic"}""" def __init__(self, project: Any, build: Dict) -> None: self.tx = None self.bytecode = build["bytecode"] self._contracts: List["ProjectContract"] = [] super().__init__(project, build, build["contractName"], build["abi"]) self.deploy = ContractConstructor(self, self._name) _revert_register(self) def __iter__(self) -> Iterator: return iter(self._contracts) def __getitem__(self, i: Any) -> "ProjectContract": return self._contracts[i] def __delitem__(self, key: Any) -> None: item = self._contracts[key] self.remove(item) def __len__(self) -> int: return len(self._contracts) def __repr__(self) -> str: return str(self._contracts) def _reset(self) -> None: for contract in self._contracts: contract._delete_deployment() _remove_contract(contract) contract._reverted = True self._contracts.clear() def _revert(self, height: int) -> None: reverted = [ i for i in self._contracts if (i.tx and i.tx.block_number > height) or len(web3.eth.getCode(i.address).hex()) <= 4 ] for contract in reverted: self.remove(contract) contract._reverted = True def remove(self, contract: "ProjectContract") -> None: """Removes a contract from the container. Args: contract: Contract instance of address string of the contract.""" if contract not in self._contracts: raise TypeError("Object is not in container.") self._contracts.remove(contract) contract._delete_deployment() _remove_contract(contract) def at( self, address: str, owner: Optional[AccountsType] = None, tx: Optional[TransactionReceiptType] = None, ) -> "ProjectContract": """Returns a contract address. Raises ValueError if no bytecode exists at the address. Args: address: Address string of the contract. owner: Default Account instance to send contract transactions from. 
tx: Transaction ID of the contract creation.""" contract = _find_contract(address) if contract: if contract._name == self._name and contract._project == self._project: return contract raise ContractExists( f"'{contract._name}' declared at {address} in project '{contract._project._name}'" ) if _verify_deployed_code(address, self._build["deployedBytecode"]): contract = ProjectContract(self._project, self._build, address, owner, tx) else: contract = Contract(self._name, address, self.abi, owner=owner) contract._project = self._project contract._save_deployment() _add_contract(contract) self._contracts.append(contract) return contract def _add_from_tx(self, tx: TransactionReceiptType) -> None: tx._confirmed.wait() if tx.status: self.at(tx.contract_address, tx.sender, tx) class ContractConstructor: _dir_color = "bright magenta" def __init__(self, parent: "ContractContainer", name: str) -> None: self._parent = parent try: self.abi = next(i for i in parent.abi if i["type"] == "constructor") self.abi["name"] = "constructor" except Exception: self.abi = {"inputs": [], "name": "constructor", "type": "constructor"} self._name = name def __repr__(self) -> str: return f"<{type(self).__name__} '{self._name}.constructor({_inputs(self.abi)})'>" def __call__(self, *args: Tuple) -> Union["Contract", TransactionReceiptType]: """Deploys a contract. Args: *args: Constructor arguments. The last argument MUST be a dictionary of transaction values containing at minimum a 'from' key to specify which account to deploy this contract from. Returns: * Contract instance if the transaction confirms * TransactionReceipt if the transaction is pending or reverts""" args, tx = _get_tx(None, args) if not tx["from"]: raise AttributeError( "No deployer address given. You must supply a tx dict" " with a 'from' field as the last argument." 
) return tx["from"].deploy( self._parent, *args, amount=tx["value"], gas_limit=tx["gas"], gas_price=tx["gasPrice"] ) def encode_input(self, *args: tuple) -> str: bytecode = self._parent.bytecode # find and replace unlinked library pointers in bytecode for marker in re.findall("_{1,}[^_]*_{1,}", bytecode): library = marker.strip("_") if not self._parent._project[library]: raise UndeployedLibrary( f"Contract requires '{library}' library, but it has not been deployed yet" ) address = self._parent._project[library][-1].address[-40:] bytecode = bytecode.replace(marker, address) data = format_input(self.abi, args) types_list = get_type_strings(self.abi["inputs"]) return bytecode + eth_abi.encode_abi(types_list, data).hex() class _DeployedContractBase(_ContractBase): """Methods for interacting with a deployed contract. Each public contract method is available as a ContractCall or ContractTx instance, created when this class is instantiated. Attributes: bytecode: Bytecode of the deployed contract, including constructor args. 
tx: TransactionReceipt of the of the tx that deployed the contract.""" _reverted = False def __init__( self, address: str, owner: Optional[AccountsType] = None, tx: TransactionReceiptType = None ) -> None: address = _resolve_address(address) self.bytecode = web3.eth.getCode(address).hex()[2:] if not self.bytecode: raise ContractNotFound(f"No contract deployed at {address}") self._owner = owner self.tx = tx self.address = address fn_names = [i["name"] for i in self.abi if i["type"] == "function"] for abi in [i for i in self.abi if i["type"] == "function"]: name = f"{self._name}.{abi['name']}" sig = _signature(abi) natspec: Dict = {} if "natspec" in self._build: natspec = self._build["natspec"]["methods"].get(sig, {}) if fn_names.count(abi["name"]) == 1: fn = _get_method_object(address, abi, name, owner, natspec) self._check_and_set(abi["name"], fn) continue if not hasattr(self, abi["name"]): overloaded = OverloadedMethod(address, name, owner) self._check_and_set(abi["name"], overloaded) key = ",".join(i["type"] for i in abi["inputs"]).replace("256", "") fn = _get_method_object(address, abi, name, owner, natspec) getattr(self, abi["name"]).methods[key] = fn def _check_and_set(self, name: str, obj: Any) -> None: if hasattr(self, name): raise AttributeError(f"Namespace collision: '{self._name}.{name}'") setattr(self, name, obj) def __hash__(self) -> int: return hash(f"{self._name}{self.address}{self._project}") def __str__(self) -> str: return self.address def __repr__(self) -> str: return f"<{self._name} Contract '{color('bright magenta')}{self.address}{color}'>" def __eq__(self, other: object) -> bool: if isinstance(other, _DeployedContractBase): return self.address == other.address and self.bytecode == other.bytecode if isinstance(other, str): try: address = _resolve_address(other) return address == self.address except ValueError: return False return super().__eq__(other) def __getattribute__(self, name: str) -> Any: if super().__getattribute__("_reverted"): raise 
ContractNotFound("This contract no longer exists.") return super().__getattribute__(name) def balance(self) -> Wei: """Returns the current ether balance of the contract, in wei.""" balance = web3.eth.getBalance(self.address) return Wei(balance) def _deployment_path(self) -> Optional[Path]: if not CONFIG["active_network"].get("persist", None) or not self._project._path: return None network = CONFIG["active_network"]["name"] path = self._project._path.joinpath(f"build/deployments/{network}") path.mkdir(exist_ok=True) return path.joinpath(f"{self.address}.json") def _save_deployment(self) -> None: path = self._deployment_path() if path and not path.exists(): with path.open("w") as fp: json.dump(self._build, fp) def _delete_deployment(self) -> None: path = self._deployment_path() if path and path.exists(): path.unlink() class Contract(_DeployedContractBase): def __init__( self, name: str, address: Optional[str] = None, abi: Optional[List] = None, manifest_uri: Optional[str] = None, owner: Optional[AccountsType] = None, ) -> None: if manifest_uri and abi: raise ValueError("Contract requires either abi or manifest_uri, but not both") if manifest_uri is not None: manifest = ethpm.get_manifest(manifest_uri) abi = manifest["contract_types"][name]["abi"] if address is None: address_list = ethpm.get_deployment_addresses(manifest, name) if not address_list: raise ContractNotFound( f"'{manifest['package_name']}' manifest does not contain" f" a deployment of '{name}' on this chain" ) if len(address_list) > 1: raise ValueError( f"'{manifest['package_name']}' manifest contains more than one " f"deployment of '{name}' on this chain, you must specify an address:" f" {', '.join(address_list)}" ) address = address_list[0] name = manifest["contract_types"][name]["contract_name"] elif not address: raise TypeError("Address cannot be None unless creating object from manifest") build = {"abi": abi, "contractName": name, "type": "contract"} _ContractBase.__init__(self, None, build, name, 
abi) # type: ignore _DeployedContractBase.__init__(self, address, owner, None) contract = _find_contract(address) if not contract: return if contract.bytecode != self.bytecode: contract._reverted = True class ProjectContract(_DeployedContractBase): """Methods for interacting with a deployed contract as part of a Brownie project.""" def __init__( self, project: Any, build: Dict, address: str, owner: Optional[AccountsType] = None, tx: TransactionReceiptType = None, ) -> None: _ContractBase.__init__(self, project, build, build["contractName"], build["abi"]) _DeployedContractBase.__init__(self, address, owner, tx) class OverloadedMethod: def __init__(self, address: str, name: str, owner: Optional[AccountsType]): self._address = address self._name = name self._owner = owner self.methods: Dict = {} def __getitem__(self, key: Union[Tuple, str]) -> "_ContractMethod": if isinstance(key, tuple): key = ",".join(key) key = key.replace("256", "").replace(", ", ",") return self.methods[key] def __repr__(self) -> str: return f"<OverloadedMethod '{self._name}'>" def __len__(self) -> int: return len(self.methods) class _ContractMethod: _dir_color = "bright magenta" def __init__( self, address: str, abi: Dict, name: str, owner: Optional[AccountsType], natspec: Optional[Dict] = None, ) -> None: self._address = address self._name = name self.abi = abi self._owner = owner self.signature = _selector(abi) self.natspec = natspec or {} def __repr__(self) -> str: if "payable" in self.abi: pay_bool = self.abi["payable"] else: pay_bool = self.abi["stateMutability"] == "payable" pay = "payable " if pay_bool else "" return f"<{type(self).__name__} {pay}'{self.abi['name']}({_inputs(self.abi)})'>" def info(self) -> None: """ Display NatSpec documentation for this method. """ print(f"{self.abi['name']}({_inputs(self.abi)})") _print_natspec(self.natspec) def call(self, *args: Tuple) -> Any: """Calls the contract method without broadcasting a transaction. Args: *args: Contract method inputs. 
You can optionally provide a dictionary of transaction properties as the last arg. Returns: Contract method return value(s).""" args, tx = _get_tx(self._owner, args) if tx["from"]: tx["from"] = str(tx["from"]) tx.update({"to": self._address, "data": self.encode_input(*args)}) try: data = web3.eth.call(dict((k, v) for k, v in tx.items() if v)) except ValueError as e: raise VirtualMachineError(e) from None return self.decode_output(data) def transact(self, *args: Tuple) -> TransactionReceiptType: """Broadcasts a transaction that calls this contract method. Args: *args: Contract method inputs. You can optionally provide a dictionary of transaction properties as the last arg. Returns: TransactionReceipt instance.""" args, tx = _get_tx(self._owner, args) if not tx["from"]: raise AttributeError( "Contract has no owner, you must supply a tx dict" " with a 'from' field as the last argument." ) return tx["from"].transfer( self._address, tx["value"], gas_limit=tx["gas"], gas_price=tx["gasPrice"], data=self.encode_input(*args), ) def encode_input(self, *args: Tuple) -> str: """Returns encoded ABI data to call the method with the given arguments. Args: *args: Contract method inputs Returns: Hexstring of encoded ABI data.""" data = format_input(self.abi, args) types_list = get_type_strings(self.abi["inputs"]) return self.signature + eth_abi.encode_abi(types_list, data).hex() def decode_output(self, hexstr: str) -> Tuple: """Decodes hexstring data returned by this method. Args: hexstr: Hexstring of returned call data Returns: Decoded values.""" types_list = get_type_strings(self.abi["outputs"]) result = eth_abi.decode_abi(types_list, HexBytes(hexstr)) result = format_output(self.abi, result) if len(result) == 1: result = result[0] return result class ContractTx(_ContractMethod): """A public payable or non-payable contract method. Args: abi: Contract ABI specific to this method. 
signature: Bytes4 method signature.""" def __call__(self, *args: Tuple) -> TransactionReceiptType: """Broadcasts a transaction that calls this contract method. Args: *args: Contract method inputs. You can optionally provide a dictionary of transaction properties as the last arg. Returns: TransactionReceipt instance.""" return self.transact(*args) class ContractCall(_ContractMethod): """A public view or pure contract method. Args: abi: Contract ABI specific to this method. signature: Bytes4 method signature.""" def __call__(self, *args: Tuple) -> Callable: """Calls the contract method without broadcasting a transaction. Args: *args: Contract method inputs. You can optionally provide a dictionary of transaction properties as the last arg. Returns: Contract method return value(s).""" if not ARGV["always_transact"]: return self.call(*args) rpc._internal_snap() args, tx = _get_tx(self._owner, args) tx["gas_price"] = 0 try: tx = self.transact(*args, tx) return tx.return_value finally: rpc._internal_revert() def _get_tx(owner: Optional[AccountsType], args: Tuple) -> Tuple: # set / remove default sender if owner is None: owner = accounts.default default_owner = CONFIG["active_network"].get("default_contract_owner", True) if ARGV["cli"] == "test" and default_owner is False: owner = None # seperate contract inputs from tx dict and set default tx values tx = {"from": owner, "value": 0, "gas": None, "gasPrice": None} if args and isinstance(args[-1], dict): tx.update(args[-1]) args = args[:-1] for key, target in [("amount", "value"), ("gas_limit", "gas"), ("gas_price", "gasPrice")]: if key in tx: tx[target] = tx[key] return args, tx def _get_method_object( address: str, abi: Dict, name: str, owner: Optional[AccountsType], natspec: Dict ) -> Union["ContractCall", "ContractTx"]: if "constant" in abi: constant = abi["constant"] else: constant = abi["stateMutability"] in ("view", "pure") if constant: return ContractCall(address, abi, name, owner, natspec) return ContractTx(address, 
abi, name, owner, natspec)


def _inputs(abi: Dict) -> str:
    """Return a color-coded "type name, type name, ..." listing of a method's inputs."""
    # "fixed168x10" is rendered under the friendlier alias "decimal"
    types_list = get_type_strings(abi["inputs"], {"fixed168x10": "decimal"})
    params = zip([i["name"] for i in abi["inputs"]], types_list)
    return ", ".join(
        f"{i[1]}{color('bright blue')}{' '+i[0] if i[0] else ''}{color}" for i in params
    )


def _signature(abi: Dict) -> str:
    """Return the canonical signature string, e.g. "transfer(address,uint256)"."""
    types_list = get_type_strings(abi["inputs"])
    return f"{abi['name']}({','.join(types_list)})"


def _selector(abi: Dict) -> str:
    """Return the 4-byte method selector as a 0x-prefixed hexstring.

    The selector is the first 4 bytes of the keccak hash of the canonical signature.
    """
    sig = _signature(abi)
    return "0x" + keccak(sig.encode()).hex()[:8]


def _verify_deployed_code(address: str, expected_bytecode: str) -> bool:
    """Compare the on-chain bytecode at `address` against the expected runtime bytecode.

    Returns True when they match. Handles two special cases: deployed
    Solidity libraries, and unresolved library link placeholders in the
    expected bytecode.
    """
    actual_bytecode = web3.eth.getCode(address).hex()[2:]
    expected_bytecode = remove_0x_prefix(expected_bytecode)  # type: ignore
    if expected_bytecode.startswith("730000000000000000000000000000000000000000"):
        # special case for Solidity libraries
        # NOTE(review): deployed library code appears to embed the library's own
        # address right after the leading 0x73 byte, so compare that prefix
        # against the deployment address and the remainder of the code verbatim.
        return (
            actual_bytecode.startswith(f"73{address[2:].lower()}")
            and actual_bytecode[42:] == expected_bytecode[42:]
        )
    if "_" in expected_bytecode:
        # Underscore-delimited markers are unresolved link placeholders; drop
        # the 40 hex chars (a 20-byte address) at each placeholder position
        # from BOTH strings before comparing.
        for marker in re.findall("_{1,}[^_]*_{1,}", expected_bytecode):
            idx = expected_bytecode.index(marker)
            actual_bytecode = actual_bytecode[:idx] + actual_bytecode[idx + 40 :]
            expected_bytecode = expected_bytecode[:idx] + expected_bytecode[idx + 40 :]
    return actual_bytecode == expected_bytecode


def _print_natspec(natspec: Dict) -> None:
    """Pretty-print a NatSpec documentation dict (@title/@notice/@param/@return...)."""
    wrapper = TextWrapper(initial_indent=f" {color('bright magenta')}")
    for key in [i for i in ("title", "notice", "author", "details") if i in natspec]:
        # align wrapped continuation lines under the value text
        wrapper.subsequent_indent = " " * (len(key) + 4)
        print(wrapper.fill(f"@{key} {color}{natspec[key]}"))
    for key, value in natspec.get("params", {}).items():
        print(wrapper.fill(f"@param {color('bright blue')}{key}{color} {value}"))
    if "return" in natspec:
        print(wrapper.fill(f"@return {color}{natspec['return']}"))
    for key in sorted(natspec.get("returns", [])):
        print(wrapper.fill(f"@return {color}{natspec['returns'][key]}"))
    print()
import datetime import functools import json import os import re import tempfile from collections import OrderedDict, namedtuple from django.conf import settings from django.contrib import messages from django.http import HttpResponse, HttpResponseRedirect from django.http.response import Http404, JsonResponse from django.utils.decorators import method_decorator from django.utils.http import urlencode from django.utils.translation import ugettext as _ from django.utils.translation import ugettext_lazy from django.views.decorators.http import require_POST from django.views.generic import View import six.moves.urllib.error import six.moves.urllib.parse import six.moves.urllib.request from couchdbkit.exceptions import ResourceNotFound from memoized import memoized from sqlalchemy import exc, types from sqlalchemy.exc import ProgrammingError from couchexport.export import export_from_tables from couchexport.files import Temp from couchexport.models import Format from couchexport.shortcuts import export_response from dimagi.utils.couch.undo import ( get_deleted_doc_type, is_deleted, soft_delete, undo_delete, ) from dimagi.utils.logging import notify_exception from dimagi.utils.web import json_response from pillowtop.dao.exceptions import DocumentNotFoundError from corehq import toggles from corehq.apps.accounting.models import Subscription from corehq.apps.analytics.tasks import ( HUBSPOT_SAVED_UCR_FORM_ID, send_hubspot_form, track_workflow, update_hubspot_properties, ) from corehq.apps.app_manager.models import Application from corehq.apps.app_manager.util import purge_report_from_mobile_ucr from corehq.apps.change_feed.data_sources import ( get_document_store_for_doc_type, ) from corehq.apps.domain.decorators import api_auth, login_and_domain_required, domain_admin_required from corehq.apps.domain.models import Domain from corehq.apps.domain.views.base import BaseDomainView from corehq.apps.hqwebapp.decorators import ( use_datatables, use_daterangepicker, 
use_jquery_ui, use_multiselect, use_nvd3, ) from corehq.apps.hqwebapp.tasks import send_mail_async from corehq.apps.hqwebapp.templatetags.hq_shared_tags import toggle_enabled from corehq.apps.linked_domain.models import DomainLink, ReportLinkDetail from corehq.apps.linked_domain.ucr import create_linked_ucr, linked_downstream_reports_by_domain from corehq.apps.linked_domain.util import is_linked_report from corehq.apps.locations.permissions import conditionally_location_safe from corehq.apps.reports.daterange import get_simple_dateranges from corehq.apps.reports.dispatcher import cls_to_view_login_and_domain from corehq.apps.saved_reports.models import ReportConfig from corehq.apps.userreports.app_manager.data_source_meta import ( DATA_SOURCE_TYPE_RAW, ) from corehq.apps.userreports.app_manager.helpers import ( get_case_data_source, get_form_data_source, ) from corehq.apps.userreports.const import ( DATA_SOURCE_MISSING_APP_ERROR_MESSAGE, DATA_SOURCE_NOT_FOUND_ERROR_MESSAGE, NAMED_EXPRESSION_PREFIX, NAMED_FILTER_PREFIX, REPORT_BUILDER_EVENTS_KEY, ) from corehq.apps.userreports.dbaccessors import get_datasources_for_domain from corehq.apps.userreports.exceptions import ( BadBuilderConfigError, BadSpecError, DataSourceConfigurationNotFoundError, ReportConfigurationNotFoundError, TableNotFoundWarning, UserQueryError, translate_programming_error, ) from corehq.apps.userreports.expressions.factory import ExpressionFactory from corehq.apps.userreports.filters.factory import FilterFactory from corehq.apps.userreports.indicators.factory import IndicatorFactory from corehq.apps.userreports.models import ( DataSourceConfiguration, ReportConfiguration, StaticDataSourceConfiguration, StaticReportConfiguration, get_datasource_config, get_report_config, id_is_static, report_config_id_is_static, ) from corehq.apps.userreports.rebuild import DataSourceResumeHelper from corehq.apps.userreports.reports.builder.forms import ( ConfigureListReportForm, ConfigureMapReportForm, 
ConfigureTableReportForm, DataSourceForm, get_data_source_interface, ) from corehq.apps.userreports.reports.builder.sources import ( get_source_type_from_report_config, ) from corehq.apps.userreports.reports.filters.choice_providers import ( ChoiceQueryContext, ) from corehq.apps.userreports.reports.util import report_has_location_filter from corehq.apps.userreports.reports.view import ConfigurableReportView from corehq.apps.userreports.specs import EvaluationContext, FactoryContext from corehq.apps.userreports.tasks import ( rebuild_indicators, rebuild_indicators_in_place, resume_building_indicators, ) from corehq.apps.userreports.ui.forms import ( ConfigurableDataSourceEditForm, ConfigurableDataSourceFromAppForm, ConfigurableReportEditForm, ) from corehq.apps.userreports.util import ( add_event, allowed_report_builder_reports, get_referring_apps, get_indicator_adapter, has_report_builder_access, has_report_builder_add_on_privilege, number_of_report_builder_reports, ) from corehq.apps.users.decorators import require_permission from corehq.apps.users.models import Permissions from corehq.tabs.tabclasses import ProjectReportsTab from corehq.util import reverse from corehq.util.couch import get_document_or_404 from corehq.util.quickcache import quickcache from corehq.util.soft_assert import soft_assert TEMP_REPORT_PREFIX = '__tmp' def get_datasource_config_or_404(config_id, domain): try: return get_datasource_config(config_id, domain) except DataSourceConfigurationNotFoundError: raise Http404 def get_report_config_or_404(config_id, domain): try: return get_report_config(config_id, domain) except ReportConfigurationNotFoundError: raise Http404 def swallow_programming_errors(fn): @functools.wraps(fn) def decorated(request, domain, *args, **kwargs): try: return fn(request, domain, *args, **kwargs) except ProgrammingError as e: if settings.DEBUG: raise messages.error( request, _('There was a problem processing your request. 
' 'If you have recently modified your report data source please try again in a few minutes.' '<br><br>Technical details:<br>{}'.format(e)), extra_tags='html', ) return HttpResponseRedirect(reverse('configurable_reports_home', args=[domain])) return decorated @method_decorator(toggles.USER_CONFIGURABLE_REPORTS.required_decorator(), name='dispatch') class BaseUserConfigReportsView(BaseDomainView): section_name = ugettext_lazy("Configurable Reports") @property def main_context(self): static_reports = list(StaticReportConfiguration.by_domain(self.domain)) context = super(BaseUserConfigReportsView, self).main_context context.update({ 'reports': ReportConfiguration.by_domain(self.domain) + static_reports, 'data_sources': get_datasources_for_domain(self.domain, include_static=True) }) if toggle_enabled(self.request, toggles.AGGREGATE_UCRS): from corehq.apps.aggregate_ucrs.models import AggregateTableDefinition context['aggregate_data_sources'] = AggregateTableDefinition.objects.filter(domain=self.domain) return context @property def section_url(self): return reverse(UserConfigReportsHomeView.urlname, args=(self.domain,)) @property def page_url(self): return reverse(self.urlname, args=(self.domain,)) class UserConfigReportsHomeView(BaseUserConfigReportsView): urlname = 'configurable_reports_home' template_name = 'userreports/configurable_reports_home.html' page_title = ugettext_lazy("Reports Home") class BaseEditConfigReportView(BaseUserConfigReportsView): template_name = 'userreports/edit_report_config.html' @use_multiselect def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) @property def report_id(self): return self.kwargs.get('report_id') @property def page_url(self): if self.report_id: return reverse(self.urlname, args=(self.domain, self.report_id,)) return super(BaseEditConfigReportView, self).page_url @property def page_context(self): return { 'form': self.edit_form, 'report': self.config, 'referring_apps': get_referring_apps(self.domain, 
self.report_id), 'linked_report_domain_list': linked_downstream_reports_by_domain( self.domain, self.report_id ), } @property @memoized def config(self): if self.report_id is None: return ReportConfiguration(domain=self.domain) return get_report_config_or_404(self.report_id, self.domain)[0] @property def read_only(self): if self.report_id is not None: return (report_config_id_is_static(self.report_id) or is_linked_report(self.config)) return False @property @memoized def edit_form(self): if self.request.method == 'POST': return ConfigurableReportEditForm( self.domain, self.config, self.read_only, data=self.request.POST) return ConfigurableReportEditForm(self.domain, self.config, self.read_only) def post(self, request, *args, **kwargs): if self.edit_form.is_valid(): self.edit_form.save(commit=True) messages.success(request, _('Report "{}" saved!').format(self.config.title)) return HttpResponseRedirect(reverse( 'edit_configurable_report', args=[self.domain, self.config._id]) ) return self.get(request, *args, **kwargs) class EditConfigReportView(BaseEditConfigReportView): urlname = 'edit_configurable_report' page_title = ugettext_lazy("Edit Report") class CreateConfigReportView(BaseEditConfigReportView): urlname = 'create_configurable_report' page_title = ugettext_lazy("Create Report") class ReportBuilderView(BaseDomainView): @method_decorator(require_permission(Permissions.edit_reports)) @cls_to_view_login_and_domain @use_daterangepicker @use_datatables def dispatch(self, request, *args, **kwargs): return super(ReportBuilderView, self).dispatch(request, *args, **kwargs) @property def main_context(self): main_context = super(ReportBuilderView, self).main_context allowed_num_reports = allowed_report_builder_reports(self.request) main_context.update({ 'has_report_builder_access': has_report_builder_access(self.request), 'at_report_limit': number_of_report_builder_reports(self.domain) >= allowed_num_reports, 'report_limit': allowed_num_reports, 'paywall_url': 
paywall_home(self.domain), 'pricing_page_url': settings.PRICING_PAGE_URL, }) return main_context @property def section_name(self): return _("Report Builder") @property def section_url(self): return reverse(ReportBuilderDataSourceSelect.urlname, args=[self.domain]) @quickcache(["domain"], timeout=0, memoize_timeout=4) def paywall_home(domain): """ Return the url for the page in the report builder paywall that users in the given domain should be directed to upon clicking "+ Create new report" """ domain_obj = Domain.get_by_name(domain, strict=True) if domain_obj.requested_report_builder_subscription: return reverse(ReportBuilderPaywallActivatingSubscription.urlname, args=[domain]) else: return reverse(ReportBuilderPaywallPricing.urlname, args=[domain]) class ReportBuilderPaywallBase(BaseDomainView): page_title = ugettext_lazy('Subscribe') @property def section_name(self): return _("Report Builder") @property def section_url(self): return paywall_home(self.domain) @property @memoized def plan_name(self): return Subscription.get_subscribed_plan_by_domain(self.domain).plan.name class ReportBuilderPaywallPricing(ReportBuilderPaywallBase): template_name = "userreports/paywall/pricing.html" urlname = 'report_builder_paywall_pricing' page_title = ugettext_lazy('Pricing') @property def page_context(self): context = super(ReportBuilderPaywallPricing, self).page_context max_allowed_reports = allowed_report_builder_reports(self.request) num_builder_reports = number_of_report_builder_reports(self.domain) context.update({ 'has_report_builder_access': has_report_builder_access(self.request), 'at_report_limit': num_builder_reports >= max_allowed_reports, 'max_allowed_reports': max_allowed_reports, 'pricing_page_url': settings.PRICING_PAGE_URL, }) return context class ReportBuilderPaywallActivatingSubscription(ReportBuilderPaywallBase): template_name = "userreports/paywall/activating_subscription.html" urlname = 'report_builder_paywall_activating_subscription' def post(self, 
request, domain, *args, **kwargs): self.domain_object.requested_report_builder_subscription.append(request.user.username) self.domain_object.save() send_mail_async.delay( "Report Builder Subscription Request: {}".format(domain), "User {} in the {} domain has requested a report builder subscription." " Current subscription is '{}'.".format( request.user.username, domain, self.plan_name ), settings.DEFAULT_FROM_EMAIL, [settings.SALES_EMAIL], ) update_hubspot_properties.delay(request.couch_user, {'report_builder_subscription_request': 'yes'}) return self.get(request, domain, *args, **kwargs) class ReportBuilderDataSourceSelect(ReportBuilderView): template_name = 'userreports/reportbuilder/data_source_select.html' page_title = ugettext_lazy('Create Report') urlname = 'report_builder_select_source' @property def page_context(self): context = { "sources_map": self.form.sources_map, "domain": self.domain, 'report': {"title": _("Create New Report")}, 'form': self.form, } return context @property @memoized def form(self): max_allowed_reports = allowed_report_builder_reports(self.request) if self.request.method == 'POST': return DataSourceForm(self.domain, max_allowed_reports, self.request.POST) return DataSourceForm(self.domain, max_allowed_reports) def post(self, request, *args, **kwargs): if self.form.is_valid(): app_source = self.form.get_selected_source() track_workflow( request.user.email, "Successfully submitted the first part of the Report Builder " "wizard where you give your report a name and choose a data source" ) add_event(request, [ "Report Builder", "Successful Click on Next (Data Source)", app_source.source_type, ]) get_params = { 'report_name': self.form.cleaned_data['report_name'], 'application': app_source.application, 'source_type': app_source.source_type, 'source': app_source.source, } return HttpResponseRedirect( reverse(ConfigureReport.urlname, args=[self.domain], params=get_params) ) else: return self.get(request, *args, **kwargs) class 
EditReportInBuilder(View): def dispatch(self, request, *args, **kwargs): report_id = kwargs['report_id'] report = get_document_or_404(ReportConfiguration, request.domain, report_id) if report.report_meta.created_by_builder: try: return ConfigureReport.as_view(existing_report=report)(request, *args, **kwargs) except BadBuilderConfigError as e: messages.error(request, str(e)) return HttpResponseRedirect(reverse(ConfigurableReportView.slug, args=[request.domain, report_id])) raise Http404("Report was not created by the report builder") class ConfigureReport(ReportBuilderView): urlname = 'configure_report' page_title = ugettext_lazy("Configure Report") template_name = "userreports/reportbuilder/configure_report.html" report_title = '{}' existing_report = None @use_jquery_ui @use_datatables @use_nvd3 @use_multiselect def dispatch(self, request, *args, **kwargs): if self.existing_report: self.source_type = get_source_type_from_report_config(self.existing_report) if self.source_type != DATA_SOURCE_TYPE_RAW: self.source_id = self.existing_report.config.meta.build.source_id self.app_id = self.existing_report.config.meta.build.app_id self.app = Application.get(self.app_id) if self.app_id else None else: self.source_id = self.existing_report.config_id self.app_id = self.app = None else: self.app_id = self.request.GET['application'] self.app = Application.get(self.app_id) self.source_type = self.request.GET['source_type'] self.source_id = self.request.GET['source'] if not self.app_id and self.source_type != DATA_SOURCE_TYPE_RAW: raise BadBuilderConfigError(DATA_SOURCE_MISSING_APP_ERROR_MESSAGE) try: data_source_interface = get_data_source_interface( self.domain, self.app, self.source_type, self.source_id ) except ResourceNotFound: self.template_name = 'userreports/report_error.html' if self.existing_report: context = {'report_id': self.existing_report.get_id, 'is_static': self.existing_report.is_static} else: context = {} context['error_message'] = 
DATA_SOURCE_NOT_FOUND_ERROR_MESSAGE context.update(self.main_context) return self.render_to_response(context) self._populate_data_source_properties_from_interface(data_source_interface) return super(ConfigureReport, self).dispatch(request, *args, **kwargs) @property def page_name(self): title = self._get_report_name() return _(self.report_title).format(title) @property def report_description(self): if self.existing_report: return self.existing_report.description or None return None def _populate_data_source_properties_from_interface(self, data_source_interface): self._properties_by_column_id = {} for p in data_source_interface.data_source_properties.values(): column = p.to_report_column_option() for agg in column.aggregation_options: indicators = column.get_indicators(agg) for i in indicators: self._properties_by_column_id[i['column_id']] = p def _get_report_name(self, request=None): if self.existing_report: return self.existing_report.title else: request = request or self.request return request.GET.get('report_name', '') def _get_existing_report_type(self): if self.existing_report: type_ = "list" if self.existing_report.aggregation_columns != ["doc_id"]: type_ = "table" if self.existing_report.map_config: type_ = "map" return type_ def _get_property_id_by_indicator_id(self, indicator_column_id): """ Return the data source property id corresponding to the given data source indicator column id. :param indicator_column_id: The column_id field of a data source indicator configuration dictionary :return: A DataSourceProperty property id, e.g. 
"/data/question1" """ data_source_property = self._properties_by_column_id.get(indicator_column_id) if data_source_property: return data_source_property.get_id() def _get_initial_location(self, report_form): if self.existing_report: cols = [col for col in self.existing_report.report_columns if col.type == 'location'] if cols: indicator_id = cols[0].field return report_form._get_property_id_by_indicator_id(indicator_id) def _get_initial_chart_type(self): if self.existing_report: if self.existing_report.configured_charts: type_ = self.existing_report.configured_charts[0]['type'] if type_ == "multibar": return "bar" if type_ == "pie": return "pie" def _get_column_options(self, report_form): options = OrderedDict() for option in report_form.report_column_options.values(): key = option.get_uniquenss_key() if key in options: options[key].append(option) else: options[key] = [option] @property def page_context(self): form_type = _get_form_type(self._get_existing_report_type()) report_form = form_type( self.domain, self.page_name, self.app_id, self.source_type, self.source_id, self.existing_report ) temp_ds_id = report_form.create_temp_data_source_if_necessary(self.request.user.username) return { 'existing_report': self.existing_report, 'report_description': self.report_description, 'report_title': self.page_name, 'existing_report_type': self._get_existing_report_type(), 'column_options': [p.to_view_model() for p in report_form.report_column_options.values()], # TODO: Consider renaming this because it's more like "possible" data source props 'data_source_properties': [p.to_view_model() for p in report_form.data_source_properties.values()], 'initial_user_filters': [f._asdict() for f in report_form.initial_user_filters], 'initial_default_filters': [f._asdict() for f in report_form.initial_default_filters], 'initial_columns': [c._asdict() for c in report_form.initial_columns], 'initial_location': self._get_initial_location(report_form), 'initial_chart_type': 
self._get_initial_chart_type(), 'source_type': self.source_type, 'source_id': self.source_id, 'application': self.app_id, 'report_preview_url': reverse(ReportPreview.urlname, args=[self.domain, temp_ds_id]), 'preview_datasource_id': temp_ds_id, 'report_builder_events': self.request.session.pop(REPORT_BUILDER_EVENTS_KEY, []), 'MAPBOX_ACCESS_TOKEN': settings.MAPBOX_ACCESS_TOKEN, 'date_range_options': [r._asdict() for r in get_simple_dateranges()], 'linked_report_domain_list': linked_downstream_reports_by_domain( self.domain, self.existing_report.get_id ) if self.existing_report else {} } def _get_bound_form(self, report_data): form_class = _get_form_type(report_data['report_type']) return form_class( self.domain, self._get_report_name(), self.app._id, self.source_type, self.source_id, self.existing_report, report_data ) def post(self, request, domain, *args, **kwargs): if not has_report_builder_access(request): raise Http404 report_data = json.loads(request.body.decode('utf-8')) if report_data['existing_report'] and not self.existing_report: # This is the case if the user has clicked "Save" for a second time from the new report page # i.e. 
the user created a report with the first click, but didn't navigate to the report view page self.existing_report = ReportConfiguration.get(report_data['existing_report']) _munge_report_data(report_data) bound_form = self._get_bound_form(report_data) if bound_form.is_valid(): if self.existing_report: report_configuration = bound_form.update_report() else: self._confirm_report_limit() try: report_configuration = bound_form.create_report() except BadSpecError as err: messages.error(self.request, str(err)) notify_exception(self.request, str(err), details={ 'domain': self.domain, 'report_form_class': bound_form.__class__.__name__, 'report_type': bound_form.report_type, 'group_by': getattr(bound_form, 'group_by', 'Not set'), 'user_filters': getattr(bound_form, 'user_filters', 'Not set'), 'default_filters': getattr(bound_form, 'default_filters', 'Not set'), }) return self.get(request, domain, *args, **kwargs) else: ProjectReportsTab.clear_dropdown_cache(domain, request.couch_user.get_id) self._delete_temp_data_source(report_data) send_hubspot_form(HUBSPOT_SAVED_UCR_FORM_ID, request) return json_response({ 'report_url': reverse(ConfigurableReportView.slug, args=[self.domain, report_configuration._id]), 'report_id': report_configuration._id, }) def _delete_temp_data_source(self, report_data): if report_data.get("delete_temp_data_source", False): delete_data_source_shared(self.domain, report_data["preview_data_source_id"]) def _confirm_report_limit(self): """ This method is used to confirm that the user is not creating more reports than they are allowed. The user is normally turned back earlier in the process, but this check is necessary in case they navigated directly to this view either maliciously or with a bookmark perhaps. 
""" if (number_of_report_builder_reports(self.domain) >= allowed_report_builder_reports(self.request)): raise Http404() def update_report_description(request, domain, report_id): new_description = request.POST['value'] report = get_document_or_404(ReportConfiguration, domain, report_id) report.description = new_description report.save() return json_response({}) def _get_form_type(report_type): assert report_type in (None, "list", "table", "chart", "map") if report_type == "list" or report_type is None: return ConfigureListReportForm if report_type == "table": return ConfigureTableReportForm if report_type == "map": return ConfigureMapReportForm def _munge_report_data(report_data): """ Split aggregation columns out of report_data and into :param report_data: :return: """ report_data['columns'] = json.dumps(report_data['columns']) report_data['user_filters'] = json.dumps(report_data['user_filters']) report_data['default_filters'] = json.dumps(report_data['default_filters']) class ReportPreview(BaseDomainView): urlname = 'report_preview' def post(self, request, domain, data_source): report_data = json.loads(six.moves.urllib.parse.unquote(request.body.decode('utf-8'))) form_class = _get_form_type(report_data['report_type']) # ignore user filters report_data['user_filters'] = [] _munge_report_data(report_data) bound_form = form_class( domain, '{}_{}_{}'.format(TEMP_REPORT_PREFIX, self.domain, data_source), report_data['app'], report_data['source_type'], report_data['source_id'], None, report_data ) if bound_form.is_valid(): try: temp_report = bound_form.create_temp_report(data_source, self.request.user.username) response_data = ConfigurableReportView.report_preview_data(self.domain, temp_report) if response_data: return json_response(response_data) except BadBuilderConfigError as e: return json_response({'status': 'error', 'message': str(e)}, status_code=400) else: return json_response({ 'status': 'error', 'message': 'Invalid report configuration', 'errors': 
bound_form.errors, }, status_code=400) def _assert_report_delete_privileges(request): if not (toggle_enabled(request, toggles.USER_CONFIGURABLE_REPORTS) or toggle_enabled(request, toggles.REPORT_BUILDER) or toggle_enabled(request, toggles.REPORT_BUILDER_BETA_GROUP) or has_report_builder_add_on_privilege(request)): raise Http404() @login_and_domain_required @require_permission(Permissions.edit_reports) def delete_report(request, domain, report_id): _assert_report_delete_privileges(request) config = get_document_or_404(ReportConfiguration, domain, report_id) # Delete the data source too if it's not being used by any other reports. try: data_source, __ = get_datasource_config(config.config_id, domain) except DataSourceConfigurationNotFoundError: # It's possible the data source has already been deleted, but that's fine with us. pass else: if data_source.get_report_count() <= 1: # No other reports reference this data source. data_source.deactivate(initiated_by=request.user.username) soft_delete(config) did_purge_something = purge_report_from_mobile_ucr(config) messages.success( request, _('Report "{name}" has been deleted. <a href="{url}" class="post-link">Undo</a>').format( name=config.title, url=reverse('undo_delete_configurable_report', args=[domain, config._id]), ), extra_tags='html' ) report_configs = ReportConfig.by_domain_and_owner( domain, request.couch_user.get_id, "configurable") for rc in report_configs: if rc.subreport_slug == config.get_id: rc.delete() if did_purge_something: messages.warning( request, _("This report was used in one or more applications. 
" "It has been removed from there too.") ) ProjectReportsTab.clear_dropdown_cache(domain, request.couch_user.get_id) redirect = request.GET.get("redirect", None) if not redirect: redirect = reverse('configurable_reports_home', args=[domain]) return HttpResponseRedirect(redirect) @login_and_domain_required @require_permission(Permissions.edit_reports) def undelete_report(request, domain, report_id): _assert_report_delete_privileges(request) config = get_document_or_404(ReportConfiguration, domain, report_id, additional_doc_types=[ get_deleted_doc_type(ReportConfiguration) ]) if config and is_deleted(config): undo_delete(config) messages.success( request, _('Successfully restored report "{name}"').format(name=config.title) ) else: messages.info(request, _('Report "{name}" not deleted.').format(name=config.title)) return HttpResponseRedirect(reverse(ConfigurableReportView.slug, args=[request.domain, report_id])) class ImportConfigReportView(BaseUserConfigReportsView): page_title = ugettext_lazy("Import Report") template_name = "userreports/import_report.html" urlname = 'import_configurable_report' @property def spec(self): if self.request.method == "POST": return self.request.POST['report_spec'] return '' def post(self, request, *args, **kwargs): try: json_spec = json.loads(self.spec) if '_id' in json_spec: del json_spec['_id'] json_spec['domain'] = self.domain report = ReportConfiguration.wrap(json_spec) report.validate() report.save() messages.success(request, _('Report created!')) return HttpResponseRedirect(reverse( EditConfigReportView.urlname, args=[self.domain, report._id] )) except (ValueError, BadSpecError) as e: messages.error(request, _('Bad report source: {}').format(e)) return self.get(request, *args, **kwargs) @property def page_context(self): return { 'spec': self.spec, } @login_and_domain_required @toggles.USER_CONFIGURABLE_REPORTS.required_decorator() def report_source_json(request, domain, report_id): config, _ = get_report_config_or_404(report_id, 
domain) config._doc.pop('_rev', None) return json_response(config) class ExpressionDebuggerView(BaseUserConfigReportsView): urlname = 'expression_debugger' template_name = 'userreports/expression_debugger.html' page_title = ugettext_lazy("Expression Debugger") class DataSourceDebuggerView(BaseUserConfigReportsView): urlname = 'expression_debugger' template_name = 'userreports/data_source_debugger.html' page_title = ugettext_lazy("Data Source Debugger") @login_and_domain_required @toggles.USER_CONFIGURABLE_REPORTS.required_decorator() def evaluate_expression(request, domain): doc_type = request.POST['doc_type'] doc_id = request.POST['doc_id'] data_source_id = request.POST['data_source'] try: if data_source_id: data_source = get_datasource_config(data_source_id, domain)[0] factory_context = data_source.get_factory_context() else: factory_context = FactoryContext.empty() usable_type = { 'form': 'XFormInstance', 'case': 'CommCareCase', }.get(doc_type, 'Unknown') document_store = get_document_store_for_doc_type( domain, usable_type, load_source="eval_expression") doc = document_store.get_document(doc_id) expression_text = request.POST['expression'] expression_json = json.loads(expression_text) parsed_expression = ExpressionFactory.from_spec( expression_json, context=factory_context ) result = parsed_expression(doc, EvaluationContext(doc)) return json_response({ "result": result, }) except DataSourceConfigurationNotFoundError: return json_response( {"error": _("Data source with id {} not found in domain {}.").format( data_source_id, domain )}, status_code=404, ) except DocumentNotFoundError: return json_response( {"error": _("{} with id {} not found in domain {}.").format( doc_type, doc_id, domain )}, status_code=404, ) except BadSpecError as e: return json_response( {"error": _("Problem with expression: {}").format(e)}, status_code=400, ) except Exception as e: return json_response( {"error": str(e)}, status_code=500, ) @login_and_domain_required 
@toggles.USER_CONFIGURABLE_REPORTS.required_decorator() def evaluate_data_source(request, domain): data_source_id = request.POST['data_source'] docs_id = request.POST['docs_id'] try: data_source = get_datasource_config(data_source_id, domain)[0] except DataSourceConfigurationNotFoundError: return JsonResponse( {"error": _("Data source with id {} not found in domain {}.").format( data_source_id, domain )}, status=404, ) docs_id = [doc_id.strip() for doc_id in docs_id.split(',')] document_store = get_document_store_for_doc_type( domain, data_source.referenced_doc_type, load_source="eval_data_source") rows = [] docs = 0 for doc in document_store.iter_documents(docs_id): docs += 1 for row in data_source.get_all_values(doc): rows.append({i.column.database_column_name.decode(): i.value for i in row}) if not docs: return JsonResponse(data={'error': _('No documents found. Check the IDs and try again.')}, status=404) data = { 'rows': rows, 'db_rows': [], 'columns': [ column.database_column_name.decode() for column in data_source.get_columns() ], } try: adapter = get_indicator_adapter(data_source) table = adapter.get_table() query = adapter.get_query_object().filter(table.c.doc_id.in_(docs_id)) db_rows = [ {column.name: getattr(row, column.name) for column in table.columns} for row in query ] data['db_rows'] = db_rows except ProgrammingError as e: err = translate_programming_error(e) if err and isinstance(err, TableNotFoundWarning): data['db_error'] = _("Datasource table does not exist. 
Try rebuilding the datasource.") else: data['db_error'] = _("Error querying database for data.") return JsonResponse(data=data) class CreateDataSourceFromAppView(BaseUserConfigReportsView): urlname = 'create_configurable_data_source_from_app' template_name = "userreports/data_source_from_app.html" page_title = ugettext_lazy("Create Data Source from Application") @property @memoized def form(self): if self.request.method == 'POST': return ConfigurableDataSourceFromAppForm(self.domain, self.request.POST) return ConfigurableDataSourceFromAppForm(self.domain) def post(self, request, *args, **kwargs): if self.form.is_valid(): app_source = self.form.app_source_helper.get_app_source(self.form.cleaned_data) app = Application.get(app_source.application) if app_source.source_type == 'case': data_source = get_case_data_source(app, app_source.source) data_source.save() messages.success(request, _("Data source created for '{}'".format(app_source.source))) else: assert app_source.source_type == 'form' xform = app.get_form(app_source.source) data_source = get_form_data_source(app, xform) data_source.save() messages.success(request, _("Data source created for '{}'".format(xform.default_name()))) return HttpResponseRedirect(reverse( EditDataSourceView.urlname, args=[self.domain, data_source._id] )) return self.get(request, *args, **kwargs) @property def page_context(self): return { 'sources_map': self.form.app_source_helper.all_sources, 'form': self.form, } class BaseEditDataSourceView(BaseUserConfigReportsView): template_name = 'userreports/edit_data_source.html' @property def page_context(self): return { 'form': self.edit_form, 'data_source': self.config, 'read_only': self.read_only, 'used_by_reports': self.get_reports(), } @property def page_url(self): if self.config_id: return reverse(self.urlname, args=(self.domain, self.config_id,)) return super(BaseEditDataSourceView, self).page_url @property def config_id(self): return self.kwargs.get('config_id') @property def 
read_only(self): return id_is_static(self.config_id) if self.config_id is not None else False @property @memoized def config(self): if self.config_id is None: return DataSourceConfiguration(domain=self.domain) return get_datasource_config_or_404(self.config_id, self.domain)[0] @property @memoized def edit_form(self): if self.request.method == 'POST': return ConfigurableDataSourceEditForm( self.domain, self.config, self.read_only, data=self.request.POST ) return ConfigurableDataSourceEditForm( self.domain, self.config, self.read_only ) def post(self, request, *args, **kwargs): if self.edit_form.is_valid(): config = self.edit_form.save(commit=True) messages.success(request, _('Data source "{}" saved!').format( config.display_name )) if self.config_id is None: return HttpResponseRedirect(reverse( EditDataSourceView.urlname, args=[self.domain, config._id]) ) return self.get(request, *args, **kwargs) def get_reports(self): reports = StaticReportConfiguration.by_domain(self.domain) reports += ReportConfiguration.by_domain(self.domain) ret = [] for report in reports: try: if report.table_id == self.config.table_id: ret.append(report) except DataSourceConfigurationNotFoundError: _soft_assert = soft_assert(to=[ '{}@{}'.format(name, 'dimagi.com') for name in ['cellowitz', 'frener'] ]) _soft_assert(False, "Report {} on domain {} attempted to reference deleted table".format( report._id, self.domain )) return ret def get(self, request, *args, **kwargs): if self.config.is_deactivated: messages.info( request, _( 'Data source "{}" has no associated table.\n' 'Click "Rebuild Data Source" to recreate the table.' 
).format(self.config.display_name) ) return super(BaseEditDataSourceView, self).get(request, *args, **kwargs) class CreateDataSourceView(BaseEditDataSourceView): urlname = 'create_configurable_data_source' page_title = ugettext_lazy("Create Data Source") class EditDataSourceView(BaseEditDataSourceView): urlname = 'edit_configurable_data_source' page_title = ugettext_lazy("Edit Data Source") @property def page_name(self): return "Edit {}".format(self.config.display_name) @toggles.USER_CONFIGURABLE_REPORTS.required_decorator() @require_POST def delete_data_source(request, domain, config_id): delete_data_source_shared(domain, config_id, request) return HttpResponseRedirect(reverse('configurable_reports_home', args=[domain])) def delete_data_source_shared(domain, config_id, request=None): config = get_document_or_404(DataSourceConfiguration, domain, config_id) adapter = get_indicator_adapter(config) username = request.user.username if request else None skip = not request # skip logging when we remove temporary tables adapter.drop_table(initiated_by=username, source='delete_data_source', skip_log=skip) soft_delete(config) if request: messages.success( request, _('Data source "{name}" has been deleted. 
<a href="{url}" class="post-link">Undo</a>').format( name=config.display_name, url=reverse('undo_delete_data_source', args=[domain, config._id]), ), extra_tags='html' ) @toggles.USER_CONFIGURABLE_REPORTS.required_decorator() @require_POST def undelete_data_source(request, domain, config_id): config = get_document_or_404(DataSourceConfiguration, domain, config_id, additional_doc_types=[ get_deleted_doc_type(DataSourceConfiguration) ]) if config and is_deleted(config): undo_delete(config) messages.success( request, _('Successfully restored data source "{name}"').format(name=config.display_name) ) else: messages.info(request, _('Data source "{name}" not deleted.').format(name=config.display_name)) return HttpResponseRedirect(reverse( EditDataSourceView.urlname, args=[domain, config._id] )) @toggles.USER_CONFIGURABLE_REPORTS.required_decorator() @require_POST def rebuild_data_source(request, domain, config_id): config, is_static = get_datasource_config_or_404(config_id, domain) if config.is_deactivated: config.is_deactivated = False config.save() messages.success( request, _('Table "{}" is now being rebuilt. Data should start showing up soon').format( config.display_name ) ) rebuild_indicators.delay(config_id, request.user.username) return HttpResponseRedirect(reverse( EditDataSourceView.urlname, args=[domain, config._id] )) @toggles.USER_CONFIGURABLE_REPORTS.required_decorator() @require_POST def resume_building_data_source(request, domain, config_id): config, is_static = get_datasource_config_or_404(config_id, domain) if not is_static and config.meta.build.finished: messages.warning( request, _('Table "{}" has already finished building. Rebuild table to start over.').format( config.display_name ) ) elif not DataSourceResumeHelper(config).has_resume_info(): messages.warning( request, _('Table "{}" did not finish building but resume information is not available. 
' 'Unfortunately, this means you need to rebuild the table.').format( config.display_name ) ) else: messages.success( request, _('Resuming rebuilding table "{}".').format(config.display_name) ) resume_building_indicators.delay(config_id, request.user.username) return HttpResponseRedirect(reverse( EditDataSourceView.urlname, args=[domain, config._id] )) @toggles.USER_CONFIGURABLE_REPORTS.required_decorator() @require_POST def build_data_source_in_place(request, domain, config_id): config, is_static = get_datasource_config_or_404(config_id, domain) if config.is_deactivated: config.is_deactivated = False config.save() messages.success( request, _('Table "{}" is now being rebuilt. Data should start showing up soon').format( config.display_name ) ) rebuild_indicators_in_place.delay(config_id, request.user.username, source='edit_data_source_build_in_place') return HttpResponseRedirect(reverse( EditDataSourceView.urlname, args=[domain, config._id] )) @login_and_domain_required @toggles.USER_CONFIGURABLE_REPORTS.required_decorator() def data_source_json(request, domain, config_id): config, _ = get_datasource_config_or_404(config_id, domain) config._doc.pop('_rev', None) return json_response(config) class PreviewDataSourceView(BaseUserConfigReportsView): urlname = 'preview_configurable_data_source' template_name = "userreports/preview_data.html" page_title = ugettext_lazy("Preview Data Source") @method_decorator(swallow_programming_errors) def dispatch(self, request, *args, **kwargs): return super(PreviewDataSourceView, self).dispatch(request, *args, **kwargs) @property def config_id(self): return self.kwargs['config_id'] @property def page_url(self): return reverse(self.urlname, args=(self.domain, self.config_id,)) @property def page_context(self): config, is_static = get_datasource_config_or_404(self.config_id, self.domain) adapter = get_indicator_adapter(config) q = adapter.get_query_object() return { 'data_source': config, 'columns': q.column_descriptions, 'data': 
[list(row) for row in q[:20]], 'total_rows': q.count(), } ExportParameters = namedtuple('ExportParameters', ['format', 'keyword_filters', 'sql_filters']) def _last_n_days(column, value): if not isinstance(column.type, (types.Date, types.DateTime)): raise UserQueryError(_("You can only use 'lastndays' on date columns")) end = datetime.date.today() start = end - datetime.timedelta(days=int(value)) return column.between(start, end) def _range_filter(column, value): try: start, end = value.split('..') except ValueError: raise UserQueryError(_('Ranges must have the format "start..end"')) return column.between(start, end) sql_directives = [ # (suffix matching url parameter, callable returning a filter), ('-lastndays', _last_n_days), ('-range', _range_filter), ] def process_url_params(params, columns): """ Converts a dictionary of parameters from the user to sql filters. If a parameter is of the form <field name>-<suffix>, where suffix is defined in `sql_directives`, the corresponding function is used to produce a filter. """ # support passing `format` instead of `$format` so we don't break people's # existing URLs. Let's remove this once we can. 
format_ = params.get('$format', params.get('format', Format.UNZIPPED_CSV)) keyword_filters = {} sql_filters = [] for key, value in params.items(): if key in ('$format', 'format'): continue for suffix, fn in sql_directives: if key.endswith(suffix): field = key[:-len(suffix)] if field not in columns: raise UserQueryError(_('No field named {}').format(field)) sql_filters.append(fn(columns[field], value)) break else: if key in columns: keyword_filters[key] = value else: raise UserQueryError(_('Invalid filter parameter: {}') .format(key)) return ExportParameters(format_, keyword_filters, sql_filters) @api_auth @require_permission(Permissions.view_reports) @swallow_programming_errors def export_data_source(request, domain, config_id): config, _ = get_datasource_config_or_404(config_id, domain) adapter = get_indicator_adapter(config, load_source='export_data_source') url = reverse('export_configurable_data_source', args=[domain, config._id]) return export_sql_adapter_view(request, domain, adapter, url) def export_sql_adapter_view(request, domain, adapter, too_large_redirect_url): q = adapter.get_query_object() table = adapter.get_table() try: params = process_url_params(request.GET, table.columns) allowed_formats = [ Format.CSV, Format.HTML, Format.XLS, Format.XLS_2007, ] if params.format not in allowed_formats: msg = ugettext_lazy('format must be one of the following: {}').format(', '.join(allowed_formats)) return HttpResponse(msg, status=400) except UserQueryError as e: return HttpResponse(str(e), status=400) q = q.filter_by(**params.keyword_filters) for sql_filter in params.sql_filters: q = q.filter(sql_filter) # xls format has limit of 65536 rows # First row is taken up by headers if params.format == Format.XLS and q.count() >= 65535: keyword_params = dict(**request.GET) # use default format if 'format' in keyword_params: del keyword_params['format'] return HttpResponseRedirect( '%s?%s' % ( too_large_redirect_url, urlencode(keyword_params) ) ) # build export def 
get_table(q): yield list(table.columns.keys()) for row in q: adapter.track_load() yield row fd, path = tempfile.mkstemp() with os.fdopen(fd, 'wb') as tmpfile: try: tables = [[adapter.table_id, get_table(q)]] export_from_tables(tables, tmpfile, params.format) except exc.DataError: msg = ugettext_lazy( "There was a problem executing your query, " "please make sure your parameters are valid." ) return HttpResponse(msg, status=400) return export_response(Temp(path), params.format, adapter.display_name) def _get_report_filter(domain, report_id, filter_id): report = get_report_config_or_404(report_id, domain)[0] report_filter = report.get_ui_filter(filter_id) if report_filter is None: raise Http404(_('Filter {} not found!').format(filter_id)) return report_filter def _is_location_safe_choice_list(view_fn, request, domain, report_id, filter_id, **view_kwargs): return report_has_location_filter(config_id=report_id, domain=domain) @login_and_domain_required @conditionally_location_safe(_is_location_safe_choice_list) def choice_list_api(request, domain, report_id, filter_id): report_filter = _get_report_filter(domain, report_id, filter_id) if hasattr(report_filter, 'choice_provider'): query_context = ChoiceQueryContext( query=request.GET.get('q', None), limit=int(request.GET.get('limit', 20)), page=int(request.GET.get('page', 1)) - 1, user=request.couch_user ) return json_response([ choice._asdict() for choice in report_filter.choice_provider.query(query_context) ]) else: # mobile UCR hits this API for invalid filters. Just return no choices. 
return json_response([]) def _shared_context(domain): static_reports = list(StaticReportConfiguration.by_domain(domain)) static_data_sources = list(StaticDataSourceConfiguration.by_domain(domain)) return { 'domain': domain, 'reports': ReportConfiguration.by_domain(domain) + static_reports, 'data_sources': DataSourceConfiguration.by_domain(domain) + static_data_sources, } class DataSourceSummaryView(BaseUserConfigReportsView): urlname = 'summary_configurable_data_source' template_name = "userreports/summary_data_source.html" page_title = ugettext_lazy("Data Source Summary") @property def config_id(self): return self.kwargs['config_id'] @property @memoized def config(self): return get_datasource_config_or_404(self.config_id, self.domain)[0] @property def page_url(self): return reverse(self.urlname, args=(self.domain, self.config_id,)) @property def page_name(self): return "Summary - {}".format(self.config.display_name) @property def page_context(self): return { 'datasource_display_name': self.config.display_name, 'filter_summary': self.configured_filter_summary(), 'indicator_summary': self._add_links_to_output(self.indicator_summary()), 'named_expression_summary': self._add_links_to_output(self.named_expression_summary()), 'named_filter_summary': self._add_links_to_output(self.named_filter_summary()), 'named_filter_prefix': NAMED_FILTER_PREFIX, 'named_expression_prefix': NAMED_EXPRESSION_PREFIX, } def indicator_summary(self): context = self.config.get_factory_context() wrapped_specs = [ IndicatorFactory.from_spec(spec, context).wrapped_spec for spec in self.config.configured_indicators ] return [ { "column_id": wrapped.column_id, "comment": wrapped.comment, "readable_output": wrapped.readable_output(context) } for wrapped in wrapped_specs if wrapped ] def named_expression_summary(self): return [ { "name": name, "comment": self.config.named_expressions[name].get('comment'), "readable_output": str(exp) } for name, exp in self.config.named_expression_objects.items() ] 
def named_filter_summary(self): return [ { "name": name, "comment": self.config.named_filters[name].get('comment'), "readable_output": str(filter) } for name, filter in self.config.named_filter_objects.items() ] def configured_filter_summary(self): return str(FilterFactory.from_spec(self.config.configured_filter, context=self.config.get_factory_context())) def _add_links_to_output(self, items): def make_link(match): value = match.group() return '<a href="#{value}">{value}</a>'.format(value=value) def add_links(content): content = re.sub(r"{}:[A-Za-z0-9_-]+".format(NAMED_FILTER_PREFIX), make_link, content) content = re.sub(r"{}:[A-Za-z0-9_-]+".format(NAMED_EXPRESSION_PREFIX), make_link, content) return content list = [] for i in items: i['readable_output'] = add_links(i.get('readable_output')) list.append(i) return list @domain_admin_required def copy_report(request, domain): from_domain = domain to_domains = request.POST.getlist("to_domains") report_id = request.POST.get("report_id") successes = [] failures = [] for to_domain in to_domains: domain_link = DomainLink.objects.get(master_domain=from_domain, linked_domain=to_domain) try: link_info = create_linked_ucr(domain_link, report_id) domain_link.update_last_pull( 'report', request.couch_user._id, model_detail=ReportLinkDetail(report_id=link_info.report.get_id).to_json(), ) successes.append(to_domain) except Exception as err: failures.append(to_domain) notify_exception(request, message=str(err)) if successes: messages.success( request, _(f"Successfully linked and copied {link_info.report.title} to {", ".join(successes)}. ")) if failures: messages.error(request, _(f"Due to errors, the report was not copied to {", ".join(failures)}")) return HttpResponseRedirect( reverse(ConfigurableReportView.slug, args=[from_domain, report_id]) )
import datetime import functools import json import os import re import tempfile from collections import OrderedDict, namedtuple from django.conf import settings from django.contrib import messages from django.http import HttpResponse, HttpResponseRedirect from django.http.response import Http404, JsonResponse from django.utils.decorators import method_decorator from django.utils.http import urlencode from django.utils.translation import ugettext as _ from django.utils.translation import ugettext_lazy from django.views.decorators.http import require_POST from django.views.generic import View import six.moves.urllib.error import six.moves.urllib.parse import six.moves.urllib.request from couchdbkit.exceptions import ResourceNotFound from memoized import memoized from sqlalchemy import exc, types from sqlalchemy.exc import ProgrammingError from couchexport.export import export_from_tables from couchexport.files import Temp from couchexport.models import Format from couchexport.shortcuts import export_response from dimagi.utils.couch.undo import ( get_deleted_doc_type, is_deleted, soft_delete, undo_delete, ) from dimagi.utils.logging import notify_exception from dimagi.utils.web import json_response from pillowtop.dao.exceptions import DocumentNotFoundError from corehq import toggles from corehq.apps.accounting.models import Subscription from corehq.apps.analytics.tasks import ( HUBSPOT_SAVED_UCR_FORM_ID, send_hubspot_form, track_workflow, update_hubspot_properties, ) from corehq.apps.app_manager.models import Application from corehq.apps.app_manager.util import purge_report_from_mobile_ucr from corehq.apps.change_feed.data_sources import ( get_document_store_for_doc_type, ) from corehq.apps.domain.decorators import api_auth, login_and_domain_required, domain_admin_required from corehq.apps.domain.models import Domain from corehq.apps.domain.views.base import BaseDomainView from corehq.apps.hqwebapp.decorators import ( use_datatables, use_daterangepicker, 
use_jquery_ui, use_multiselect, use_nvd3, ) from corehq.apps.hqwebapp.tasks import send_mail_async from corehq.apps.hqwebapp.templatetags.hq_shared_tags import toggle_enabled from corehq.apps.linked_domain.models import DomainLink, ReportLinkDetail from corehq.apps.linked_domain.ucr import create_linked_ucr, linked_downstream_reports_by_domain from corehq.apps.linked_domain.util import is_linked_report from corehq.apps.locations.permissions import conditionally_location_safe from corehq.apps.reports.daterange import get_simple_dateranges from corehq.apps.reports.dispatcher import cls_to_view_login_and_domain from corehq.apps.saved_reports.models import ReportConfig from corehq.apps.userreports.app_manager.data_source_meta import ( DATA_SOURCE_TYPE_RAW, ) from corehq.apps.userreports.app_manager.helpers import ( get_case_data_source, get_form_data_source, ) from corehq.apps.userreports.const import ( DATA_SOURCE_MISSING_APP_ERROR_MESSAGE, DATA_SOURCE_NOT_FOUND_ERROR_MESSAGE, NAMED_EXPRESSION_PREFIX, NAMED_FILTER_PREFIX, REPORT_BUILDER_EVENTS_KEY, ) from corehq.apps.userreports.dbaccessors import get_datasources_for_domain from corehq.apps.userreports.exceptions import ( BadBuilderConfigError, BadSpecError, DataSourceConfigurationNotFoundError, ReportConfigurationNotFoundError, TableNotFoundWarning, UserQueryError, translate_programming_error, ) from corehq.apps.userreports.expressions.factory import ExpressionFactory from corehq.apps.userreports.filters.factory import FilterFactory from corehq.apps.userreports.indicators.factory import IndicatorFactory from corehq.apps.userreports.models import ( DataSourceConfiguration, ReportConfiguration, StaticDataSourceConfiguration, StaticReportConfiguration, get_datasource_config, get_report_config, id_is_static, report_config_id_is_static, ) from corehq.apps.userreports.rebuild import DataSourceResumeHelper from corehq.apps.userreports.reports.builder.forms import ( ConfigureListReportForm, ConfigureMapReportForm, 
ConfigureTableReportForm, DataSourceForm, get_data_source_interface, ) from corehq.apps.userreports.reports.builder.sources import ( get_source_type_from_report_config, ) from corehq.apps.userreports.reports.filters.choice_providers import ( ChoiceQueryContext, ) from corehq.apps.userreports.reports.util import report_has_location_filter from corehq.apps.userreports.reports.view import ConfigurableReportView from corehq.apps.userreports.specs import EvaluationContext, FactoryContext from corehq.apps.userreports.tasks import ( rebuild_indicators, rebuild_indicators_in_place, resume_building_indicators, ) from corehq.apps.userreports.ui.forms import ( ConfigurableDataSourceEditForm, ConfigurableDataSourceFromAppForm, ConfigurableReportEditForm, ) from corehq.apps.userreports.util import ( add_event, allowed_report_builder_reports, get_referring_apps, get_indicator_adapter, has_report_builder_access, has_report_builder_add_on_privilege, number_of_report_builder_reports, ) from corehq.apps.users.decorators import require_permission from corehq.apps.users.models import Permissions from corehq.tabs.tabclasses import ProjectReportsTab from corehq.util import reverse from corehq.util.couch import get_document_or_404 from corehq.util.quickcache import quickcache from corehq.util.soft_assert import soft_assert TEMP_REPORT_PREFIX = '__tmp' def get_datasource_config_or_404(config_id, domain): try: return get_datasource_config(config_id, domain) except DataSourceConfigurationNotFoundError: raise Http404 def get_report_config_or_404(config_id, domain): try: return get_report_config(config_id, domain) except ReportConfigurationNotFoundError: raise Http404 def swallow_programming_errors(fn): @functools.wraps(fn) def decorated(request, domain, *args, **kwargs): try: return fn(request, domain, *args, **kwargs) except ProgrammingError as e: if settings.DEBUG: raise messages.error( request, _('There was a problem processing your request. 
' 'If you have recently modified your report data source please try again in a few minutes.' '<br><br>Technical details:<br>{}'.format(e)), extra_tags='html', ) return HttpResponseRedirect(reverse('configurable_reports_home', args=[domain])) return decorated @method_decorator(toggles.USER_CONFIGURABLE_REPORTS.required_decorator(), name='dispatch') class BaseUserConfigReportsView(BaseDomainView): section_name = ugettext_lazy("Configurable Reports") @property def main_context(self): static_reports = list(StaticReportConfiguration.by_domain(self.domain)) context = super(BaseUserConfigReportsView, self).main_context context.update({ 'reports': ReportConfiguration.by_domain(self.domain) + static_reports, 'data_sources': get_datasources_for_domain(self.domain, include_static=True) }) if toggle_enabled(self.request, toggles.AGGREGATE_UCRS): from corehq.apps.aggregate_ucrs.models import AggregateTableDefinition context['aggregate_data_sources'] = AggregateTableDefinition.objects.filter(domain=self.domain) return context @property def section_url(self): return reverse(UserConfigReportsHomeView.urlname, args=(self.domain,)) @property def page_url(self): return reverse(self.urlname, args=(self.domain,)) class UserConfigReportsHomeView(BaseUserConfigReportsView): urlname = 'configurable_reports_home' template_name = 'userreports/configurable_reports_home.html' page_title = ugettext_lazy("Reports Home") class BaseEditConfigReportView(BaseUserConfigReportsView): template_name = 'userreports/edit_report_config.html' @use_multiselect def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) @property def report_id(self): return self.kwargs.get('report_id') @property def page_url(self): if self.report_id: return reverse(self.urlname, args=(self.domain, self.report_id,)) return super(BaseEditConfigReportView, self).page_url @property def page_context(self): return { 'form': self.edit_form, 'report': self.config, 'referring_apps': get_referring_apps(self.domain, 
self.report_id), 'linked_report_domain_list': linked_downstream_reports_by_domain( self.domain, self.report_id ), } @property @memoized def config(self): if self.report_id is None: return ReportConfiguration(domain=self.domain) return get_report_config_or_404(self.report_id, self.domain)[0] @property def read_only(self): if self.report_id is not None: return (report_config_id_is_static(self.report_id) or is_linked_report(self.config)) return False @property @memoized def edit_form(self): if self.request.method == 'POST': return ConfigurableReportEditForm( self.domain, self.config, self.read_only, data=self.request.POST) return ConfigurableReportEditForm(self.domain, self.config, self.read_only) def post(self, request, *args, **kwargs): if self.edit_form.is_valid(): self.edit_form.save(commit=True) messages.success(request, _('Report "{}" saved!').format(self.config.title)) return HttpResponseRedirect(reverse( 'edit_configurable_report', args=[self.domain, self.config._id]) ) return self.get(request, *args, **kwargs) class EditConfigReportView(BaseEditConfigReportView): urlname = 'edit_configurable_report' page_title = ugettext_lazy("Edit Report") class CreateConfigReportView(BaseEditConfigReportView): urlname = 'create_configurable_report' page_title = ugettext_lazy("Create Report") class ReportBuilderView(BaseDomainView): @method_decorator(require_permission(Permissions.edit_reports)) @cls_to_view_login_and_domain @use_daterangepicker @use_datatables def dispatch(self, request, *args, **kwargs): return super(ReportBuilderView, self).dispatch(request, *args, **kwargs) @property def main_context(self): main_context = super(ReportBuilderView, self).main_context allowed_num_reports = allowed_report_builder_reports(self.request) main_context.update({ 'has_report_builder_access': has_report_builder_access(self.request), 'at_report_limit': number_of_report_builder_reports(self.domain) >= allowed_num_reports, 'report_limit': allowed_num_reports, 'paywall_url': 
paywall_home(self.domain), 'pricing_page_url': settings.PRICING_PAGE_URL, }) return main_context @property def section_name(self): return _("Report Builder") @property def section_url(self): return reverse(ReportBuilderDataSourceSelect.urlname, args=[self.domain]) @quickcache(["domain"], timeout=0, memoize_timeout=4) def paywall_home(domain): """ Return the url for the page in the report builder paywall that users in the given domain should be directed to upon clicking "+ Create new report" """ domain_obj = Domain.get_by_name(domain, strict=True) if domain_obj.requested_report_builder_subscription: return reverse(ReportBuilderPaywallActivatingSubscription.urlname, args=[domain]) else: return reverse(ReportBuilderPaywallPricing.urlname, args=[domain]) class ReportBuilderPaywallBase(BaseDomainView): page_title = ugettext_lazy('Subscribe') @property def section_name(self): return _("Report Builder") @property def section_url(self): return paywall_home(self.domain) @property @memoized def plan_name(self): return Subscription.get_subscribed_plan_by_domain(self.domain).plan.name class ReportBuilderPaywallPricing(ReportBuilderPaywallBase): template_name = "userreports/paywall/pricing.html" urlname = 'report_builder_paywall_pricing' page_title = ugettext_lazy('Pricing') @property def page_context(self): context = super(ReportBuilderPaywallPricing, self).page_context max_allowed_reports = allowed_report_builder_reports(self.request) num_builder_reports = number_of_report_builder_reports(self.domain) context.update({ 'has_report_builder_access': has_report_builder_access(self.request), 'at_report_limit': num_builder_reports >= max_allowed_reports, 'max_allowed_reports': max_allowed_reports, 'pricing_page_url': settings.PRICING_PAGE_URL, }) return context class ReportBuilderPaywallActivatingSubscription(ReportBuilderPaywallBase): template_name = "userreports/paywall/activating_subscription.html" urlname = 'report_builder_paywall_activating_subscription' def post(self, 
request, domain, *args, **kwargs): self.domain_object.requested_report_builder_subscription.append(request.user.username) self.domain_object.save() send_mail_async.delay( "Report Builder Subscription Request: {}".format(domain), "User {} in the {} domain has requested a report builder subscription." " Current subscription is '{}'.".format( request.user.username, domain, self.plan_name ), settings.DEFAULT_FROM_EMAIL, [settings.SALES_EMAIL], ) update_hubspot_properties.delay(request.couch_user, {'report_builder_subscription_request': 'yes'}) return self.get(request, domain, *args, **kwargs) class ReportBuilderDataSourceSelect(ReportBuilderView): template_name = 'userreports/reportbuilder/data_source_select.html' page_title = ugettext_lazy('Create Report') urlname = 'report_builder_select_source' @property def page_context(self): context = { "sources_map": self.form.sources_map, "domain": self.domain, 'report': {"title": _("Create New Report")}, 'form': self.form, } return context @property @memoized def form(self): max_allowed_reports = allowed_report_builder_reports(self.request) if self.request.method == 'POST': return DataSourceForm(self.domain, max_allowed_reports, self.request.POST) return DataSourceForm(self.domain, max_allowed_reports) def post(self, request, *args, **kwargs): if self.form.is_valid(): app_source = self.form.get_selected_source() track_workflow( request.user.email, "Successfully submitted the first part of the Report Builder " "wizard where you give your report a name and choose a data source" ) add_event(request, [ "Report Builder", "Successful Click on Next (Data Source)", app_source.source_type, ]) get_params = { 'report_name': self.form.cleaned_data['report_name'], 'application': app_source.application, 'source_type': app_source.source_type, 'source': app_source.source, } return HttpResponseRedirect( reverse(ConfigureReport.urlname, args=[self.domain], params=get_params) ) else: return self.get(request, *args, **kwargs) class 
EditReportInBuilder(View):

    def dispatch(self, request, *args, **kwargs):
        # Only reports created by the builder can be edited here; hand off to
        # ConfigureReport bound to the existing report.
        report_id = kwargs['report_id']
        report = get_document_or_404(ReportConfiguration, request.domain, report_id)
        if report.report_meta.created_by_builder:
            try:
                return ConfigureReport.as_view(existing_report=report)(request, *args, **kwargs)
            except BadBuilderConfigError as e:
                messages.error(request, str(e))
                return HttpResponseRedirect(reverse(ConfigurableReportView.slug, args=[request.domain, report_id]))
        raise Http404("Report was not created by the report builder")


class ConfigureReport(ReportBuilderView):
    urlname = 'configure_report'
    page_title = ugettext_lazy("Configure Report")
    template_name = "userreports/reportbuilder/configure_report.html"
    report_title = '{}'
    # Set (via as_view kwargs) when editing an existing builder report.
    existing_report = None

    @use_jquery_ui
    @use_datatables
    @use_nvd3
    @use_multiselect
    def dispatch(self, request, *args, **kwargs):
        if self.existing_report:
            self.source_type = get_source_type_from_report_config(self.existing_report)
            if self.source_type != DATA_SOURCE_TYPE_RAW:
                self.source_id = self.existing_report.config.meta.build.source_id
                self.app_id = self.existing_report.config.meta.build.app_id
                self.app = Application.get(self.app_id) if self.app_id else None
            else:
                self.source_id = self.existing_report.config_id
                self.app_id = self.app = None
        else:
            # New report: the data-source-select step passed these as GET params.
            self.app_id = self.request.GET['application']
            self.app = Application.get(self.app_id)
            self.source_type = self.request.GET['source_type']
            self.source_id = self.request.GET['source']

        if not self.app_id and self.source_type != DATA_SOURCE_TYPE_RAW:
            raise BadBuilderConfigError(DATA_SOURCE_MISSING_APP_ERROR_MESSAGE)

        try:
            data_source_interface = get_data_source_interface(
                self.domain, self.app, self.source_type, self.source_id
            )
        except ResourceNotFound:
            # Underlying app/form/case type is gone: render an error page instead.
            self.template_name = 'userreports/report_error.html'
            if self.existing_report:
                context = {'report_id': self.existing_report.get_id,
                           'is_static': self.existing_report.is_static}
            else:
                context = {}
            context['error_message'] = 
DATA_SOURCE_NOT_FOUND_ERROR_MESSAGE
            context.update(self.main_context)
            return self.render_to_response(context)

        self._populate_data_source_properties_from_interface(data_source_interface)
        return super(ConfigureReport, self).dispatch(request, *args, **kwargs)

    @property
    def page_name(self):
        title = self._get_report_name()
        return _(self.report_title).format(title)

    @property
    def report_description(self):
        if self.existing_report:
            return self.existing_report.description or None
        return None

    def _populate_data_source_properties_from_interface(self, data_source_interface):
        # Build a reverse index: indicator column_id -> DataSourceProperty
        # that produced it, across every aggregation option.
        self._properties_by_column_id = {}
        for p in data_source_interface.data_source_properties.values():
            column = p.to_report_column_option()
            for agg in column.aggregation_options:
                indicators = column.get_indicators(agg)
                for i in indicators:
                    self._properties_by_column_id[i['column_id']] = p

    def _get_report_name(self, request=None):
        if self.existing_report:
            return self.existing_report.title
        else:
            request = request or self.request
            return request.GET.get('report_name', '')

    def _get_existing_report_type(self):
        # Returns "list", "table", or "map" for an existing report;
        # implicitly None when there is no existing report.
        if self.existing_report:
            type_ = "list"
            if self.existing_report.aggregation_columns != ["doc_id"]:
                type_ = "table"
            if self.existing_report.map_config:
                type_ = "map"
            return type_

    def _get_property_id_by_indicator_id(self, indicator_column_id):
        """
        Return the data source property id corresponding to the given data
        source indicator column id.
        :param indicator_column_id: The column_id field of a data source
            indicator configuration dictionary
        :return: A DataSourceProperty property id, e.g. 
"/data/question1" """ data_source_property = self._properties_by_column_id.get(indicator_column_id) if data_source_property: return data_source_property.get_id() def _get_initial_location(self, report_form): if self.existing_report: cols = [col for col in self.existing_report.report_columns if col.type == 'location'] if cols: indicator_id = cols[0].field return report_form._get_property_id_by_indicator_id(indicator_id) def _get_initial_chart_type(self): if self.existing_report: if self.existing_report.configured_charts: type_ = self.existing_report.configured_charts[0]['type'] if type_ == "multibar": return "bar" if type_ == "pie": return "pie" def _get_column_options(self, report_form): options = OrderedDict() for option in report_form.report_column_options.values(): key = option.get_uniquenss_key() if key in options: options[key].append(option) else: options[key] = [option] @property def page_context(self): form_type = _get_form_type(self._get_existing_report_type()) report_form = form_type( self.domain, self.page_name, self.app_id, self.source_type, self.source_id, self.existing_report ) temp_ds_id = report_form.create_temp_data_source_if_necessary(self.request.user.username) return { 'existing_report': self.existing_report, 'report_description': self.report_description, 'report_title': self.page_name, 'existing_report_type': self._get_existing_report_type(), 'column_options': [p.to_view_model() for p in report_form.report_column_options.values()], # TODO: Consider renaming this because it's more like "possible" data source props 'data_source_properties': [p.to_view_model() for p in report_form.data_source_properties.values()], 'initial_user_filters': [f._asdict() for f in report_form.initial_user_filters], 'initial_default_filters': [f._asdict() for f in report_form.initial_default_filters], 'initial_columns': [c._asdict() for c in report_form.initial_columns], 'initial_location': self._get_initial_location(report_form), 'initial_chart_type': 
self._get_initial_chart_type(),
            'source_type': self.source_type,
            'source_id': self.source_id,
            'application': self.app_id,
            'report_preview_url': reverse(ReportPreview.urlname, args=[self.domain, temp_ds_id]),
            'preview_datasource_id': temp_ds_id,
            'report_builder_events': self.request.session.pop(REPORT_BUILDER_EVENTS_KEY, []),
            'MAPBOX_ACCESS_TOKEN': settings.MAPBOX_ACCESS_TOKEN,
            'date_range_options': [r._asdict() for r in get_simple_dateranges()],
            'linked_report_domain_list': linked_downstream_reports_by_domain(
                self.domain, self.existing_report.get_id
            ) if self.existing_report else {}
        }

    def _get_bound_form(self, report_data):
        # Bind the submitted report definition to the form class matching
        # its report_type.
        form_class = _get_form_type(report_data['report_type'])
        return form_class(
            self.domain,
            self._get_report_name(),
            self.app._id,
            self.source_type,
            self.source_id,
            self.existing_report,
            report_data
        )

    def post(self, request, domain, *args, **kwargs):
        if not has_report_builder_access(request):
            raise Http404

        report_data = json.loads(request.body.decode('utf-8'))
        if report_data['existing_report'] and not self.existing_report:
            # This is the case if the user has clicked "Save" for a second time from the new report page
            # i.e. 
the user created a report with the first click, but didn't navigate to the report view page
            self.existing_report = ReportConfiguration.get(report_data['existing_report'])

        _munge_report_data(report_data)
        bound_form = self._get_bound_form(report_data)
        if bound_form.is_valid():
            if self.existing_report:
                report_configuration = bound_form.update_report()
            else:
                self._confirm_report_limit()
                try:
                    report_configuration = bound_form.create_report()
                except BadSpecError as err:
                    messages.error(self.request, str(err))
                    notify_exception(self.request, str(err), details={
                        'domain': self.domain,
                        'report_form_class': bound_form.__class__.__name__,
                        'report_type': bound_form.report_type,
                        'group_by': getattr(bound_form, 'group_by', 'Not set'),
                        'user_filters': getattr(bound_form, 'user_filters', 'Not set'),
                        'default_filters': getattr(bound_form, 'default_filters', 'Not set'),
                    })
                    return self.get(request, domain, *args, **kwargs)
                else:
                    ProjectReportsTab.clear_dropdown_cache(domain, request.couch_user.get_id)
            self._delete_temp_data_source(report_data)
            send_hubspot_form(HUBSPOT_SAVED_UCR_FORM_ID, request)
            return json_response({
                'report_url': reverse(ConfigurableReportView.slug, args=[self.domain, report_configuration._id]),
                'report_id': report_configuration._id,
            })
        # NOTE(review): no explicit return when bound_form is invalid — the
        # view falls through returning None. Confirm this is intended.

    def _delete_temp_data_source(self, report_data):
        # Drop the preview data source created for the builder UI, if the
        # client asked us to.
        if report_data.get("delete_temp_data_source", False):
            delete_data_source_shared(self.domain, report_data["preview_data_source_id"])

    def _confirm_report_limit(self):
        """
        This method is used to confirm that the user is not creating more
        reports than they are allowed.
        The user is normally turned back earlier in the process, but this
        check is necessary in case they navigated directly to this view
        either maliciously or with a bookmark perhaps. 
"""
        if (number_of_report_builder_reports(self.domain) >=
                allowed_report_builder_reports(self.request)):
            raise Http404()


def update_report_description(request, domain, report_id):
    # Save an edited description for a report (AJAX endpoint).
    new_description = request.POST['value']
    report = get_document_or_404(ReportConfiguration, domain, report_id)
    report.description = new_description
    report.save()
    return json_response({})


def _get_form_type(report_type):
    # Map a builder report type to its configure-form class.
    # NOTE(review): "chart" passes the assert but has no branch below, so it
    # returns None — confirm whether chart reports are configured elsewhere.
    assert report_type in (None, "list", "table", "chart", "map")
    if report_type == "list" or report_type is None:
        return ConfigureListReportForm
    if report_type == "table":
        return ConfigureTableReportForm
    if report_type == "map":
        return ConfigureMapReportForm


def _munge_report_data(report_data):
    """
    JSON-serialize the 'columns', 'user_filters', and 'default_filters'
    entries of report_data in place, as the report forms expect them as
    JSON strings rather than structured data.

    :param report_data: dict of submitted report configuration (mutated)
    :return: None
    """
    report_data['columns'] = json.dumps(report_data['columns'])
    report_data['user_filters'] = json.dumps(report_data['user_filters'])
    report_data['default_filters'] = json.dumps(report_data['default_filters'])


class ReportPreview(BaseDomainView):
    # AJAX endpoint that renders preview data for an in-progress builder report.
    urlname = 'report_preview'

    def post(self, request, domain, data_source):
        report_data = json.loads(six.moves.urllib.parse.unquote(request.body.decode('utf-8')))
        form_class = _get_form_type(report_data['report_type'])

        # ignore user filters
        report_data['user_filters'] = []

        _munge_report_data(report_data)
        bound_form = form_class(
            domain,
            '{}_{}_{}'.format(TEMP_REPORT_PREFIX, self.domain, data_source),
            report_data['app'],
            report_data['source_type'],
            report_data['source_id'],
            None,
            report_data
        )
        if bound_form.is_valid():
            try:
                temp_report = bound_form.create_temp_report(data_source, self.request.user.username)
                response_data = ConfigurableReportView.report_preview_data(self.domain, temp_report)
                if response_data:
                    return json_response(response_data)
            except BadBuilderConfigError as e:
                return json_response({'status': 'error', 'message': str(e)}, status_code=400)
        else:
            return json_response({
                'status': 'error',
                'message': 'Invalid report configuration',
                'errors': 
bound_form.errors,
            }, status_code=400)


def _assert_report_delete_privileges(request):
    # Deleting reports requires at least one of the report-builder/UCR
    # privileges or feature flags.
    if not (toggle_enabled(request, toggles.USER_CONFIGURABLE_REPORTS)
            or toggle_enabled(request, toggles.REPORT_BUILDER)
            or toggle_enabled(request, toggles.REPORT_BUILDER_BETA_GROUP)
            or has_report_builder_add_on_privilege(request)):
        raise Http404()


@login_and_domain_required
@require_permission(Permissions.edit_reports)
def delete_report(request, domain, report_id):
    _assert_report_delete_privileges(request)
    config = get_document_or_404(ReportConfiguration, domain, report_id)

    # Delete the data source too if it's not being used by any other reports.
    try:
        data_source, __ = get_datasource_config(config.config_id, domain)
    except DataSourceConfigurationNotFoundError:
        # It's possible the data source has already been deleted, but that's fine with us.
        pass
    else:
        if data_source.get_report_count() <= 1:
            # No other reports reference this data source.
            data_source.deactivate(initiated_by=request.user.username)

    soft_delete(config)
    did_purge_something = purge_report_from_mobile_ucr(config)

    messages.success(
        request,
        _('Report "{name}" has been deleted. <a href="{url}" class="post-link">Undo</a>').format(
            name=config.title,
            url=reverse('undo_delete_configurable_report', args=[domain, config._id]),
        ),
        extra_tags='html'
    )

    # Clean up any saved report configs that pointed at the deleted report.
    report_configs = ReportConfig.by_domain_and_owner(
        domain, request.couch_user.get_id, "configurable")
    for rc in report_configs:
        if rc.subreport_slug == config.get_id:
            rc.delete()

    if did_purge_something:
        messages.warning(
            request,
            _("This report was used in one or more applications. 
" "It has been removed from there too.")
        )

    ProjectReportsTab.clear_dropdown_cache(domain, request.couch_user.get_id)

    redirect = request.GET.get("redirect", None)
    if not redirect:
        redirect = reverse('configurable_reports_home', args=[domain])
    return HttpResponseRedirect(redirect)


@login_and_domain_required
@require_permission(Permissions.edit_reports)
def undelete_report(request, domain, report_id):
    # Restore a soft-deleted report if it is currently marked deleted.
    _assert_report_delete_privileges(request)
    config = get_document_or_404(ReportConfiguration, domain, report_id, additional_doc_types=[
        get_deleted_doc_type(ReportConfiguration)
    ])
    if config and is_deleted(config):
        undo_delete(config)
        messages.success(
            request,
            _('Successfully restored report "{name}"').format(name=config.title)
        )
    else:
        messages.info(request, _('Report "{name}" not deleted.').format(name=config.title))
    return HttpResponseRedirect(reverse(ConfigurableReportView.slug, args=[request.domain, report_id]))


class ImportConfigReportView(BaseUserConfigReportsView):
    # Create a report by pasting a raw JSON report spec.
    page_title = ugettext_lazy("Import Report")
    template_name = "userreports/import_report.html"
    urlname = 'import_configurable_report'

    @property
    def spec(self):
        if self.request.method == "POST":
            return self.request.POST['report_spec']
        return ''

    def post(self, request, *args, **kwargs):
        try:
            json_spec = json.loads(self.spec)
            if '_id' in json_spec:
                del json_spec['_id']
            json_spec['domain'] = self.domain
            report = ReportConfiguration.wrap(json_spec)
            report.validate()
            report.save()
            messages.success(request, _('Report created!'))
            return HttpResponseRedirect(reverse(
                EditConfigReportView.urlname, args=[self.domain, report._id]
            ))
        except (ValueError, BadSpecError) as e:
            messages.error(request, _('Bad report source: {}').format(e))
            return self.get(request, *args, **kwargs)

    @property
    def page_context(self):
        return {
            'spec': self.spec,
        }


@login_and_domain_required
@toggles.USER_CONFIGURABLE_REPORTS.required_decorator()
def report_source_json(request, domain, report_id):
    config, _ = get_report_config_or_404(report_id, 
domain) config._doc.pop('_rev', None) return json_response(config) class ExpressionDebuggerView(BaseUserConfigReportsView): urlname = 'expression_debugger' template_name = 'userreports/expression_debugger.html' page_title = ugettext_lazy("Expression Debugger") class DataSourceDebuggerView(BaseUserConfigReportsView): urlname = 'expression_debugger' template_name = 'userreports/data_source_debugger.html' page_title = ugettext_lazy("Data Source Debugger") @login_and_domain_required @toggles.USER_CONFIGURABLE_REPORTS.required_decorator() def evaluate_expression(request, domain): doc_type = request.POST['doc_type'] doc_id = request.POST['doc_id'] data_source_id = request.POST['data_source'] try: if data_source_id: data_source = get_datasource_config(data_source_id, domain)[0] factory_context = data_source.get_factory_context() else: factory_context = FactoryContext.empty() usable_type = { 'form': 'XFormInstance', 'case': 'CommCareCase', }.get(doc_type, 'Unknown') document_store = get_document_store_for_doc_type( domain, usable_type, load_source="eval_expression") doc = document_store.get_document(doc_id) expression_text = request.POST['expression'] expression_json = json.loads(expression_text) parsed_expression = ExpressionFactory.from_spec( expression_json, context=factory_context ) result = parsed_expression(doc, EvaluationContext(doc)) return json_response({ "result": result, }) except DataSourceConfigurationNotFoundError: return json_response( {"error": _("Data source with id {} not found in domain {}.").format( data_source_id, domain )}, status_code=404, ) except DocumentNotFoundError: return json_response( {"error": _("{} with id {} not found in domain {}.").format( doc_type, doc_id, domain )}, status_code=404, ) except BadSpecError as e: return json_response( {"error": _("Problem with expression: {}").format(e)}, status_code=400, ) except Exception as e: return json_response( {"error": str(e)}, status_code=500, ) @login_and_domain_required 
@toggles.USER_CONFIGURABLE_REPORTS.required_decorator()
def evaluate_data_source(request, domain):
    """
    Debugger endpoint: run a data source over the given doc ids and return
    both the freshly-computed rows and the rows currently stored in the
    datasource table.
    """
    data_source_id = request.POST['data_source']
    docs_id = request.POST['docs_id']
    try:
        data_source = get_datasource_config(data_source_id, domain)[0]
    except DataSourceConfigurationNotFoundError:
        return JsonResponse(
            {"error": _("Data source with id {} not found in domain {}.").format(
                data_source_id, domain
            )},
            status=404,
        )

    docs_id = [doc_id.strip() for doc_id in docs_id.split(',')]
    document_store = get_document_store_for_doc_type(
        domain, data_source.referenced_doc_type, load_source="eval_data_source")
    rows = []
    docs = 0
    for doc in document_store.iter_documents(docs_id):
        docs += 1
        for row in data_source.get_all_values(doc):
            rows.append({i.column.database_column_name.decode(): i.value for i in row})

    if not docs:
        return JsonResponse(data={'error': _('No documents found. Check the IDs and try again.')}, status=404)

    data = {
        'rows': rows,
        'db_rows': [],
        'columns': [
            column.database_column_name.decode() for column in data_source.get_columns()
        ],
    }

    try:
        # Also fetch what is currently persisted for these docs.
        adapter = get_indicator_adapter(data_source)
        table = adapter.get_table()
        query = adapter.get_query_object().filter(table.c.doc_id.in_(docs_id))
        db_rows = [
            {column.name: getattr(row, column.name) for column in table.columns}
            for row in query
        ]
        data['db_rows'] = db_rows
    except ProgrammingError as e:
        err = translate_programming_error(e)
        if err and isinstance(err, TableNotFoundWarning):
            data['db_error'] = _("Datasource table does not exist. 
Try rebuilding the datasource.")
        else:
            data['db_error'] = _("Error querying database for data.")

    return JsonResponse(data=data)


class CreateDataSourceFromAppView(BaseUserConfigReportsView):
    # Generate a data source from an application's case type or form.
    urlname = 'create_configurable_data_source_from_app'
    template_name = "userreports/data_source_from_app.html"
    page_title = ugettext_lazy("Create Data Source from Application")

    @property
    @memoized
    def form(self):
        if self.request.method == 'POST':
            return ConfigurableDataSourceFromAppForm(self.domain, self.request.POST)
        return ConfigurableDataSourceFromAppForm(self.domain)

    def post(self, request, *args, **kwargs):
        if self.form.is_valid():
            app_source = self.form.app_source_helper.get_app_source(self.form.cleaned_data)
            app = Application.get(app_source.application)
            if app_source.source_type == 'case':
                data_source = get_case_data_source(app, app_source.source)
                data_source.save()
                messages.success(request, _("Data source created for '{}'".format(app_source.source)))
            else:
                assert app_source.source_type == 'form'
                xform = app.get_form(app_source.source)
                data_source = get_form_data_source(app, xform)
                data_source.save()
                messages.success(request, _("Data source created for '{}'".format(xform.default_name())))
            return HttpResponseRedirect(reverse(
                EditDataSourceView.urlname, args=[self.domain, data_source._id]
            ))
        return self.get(request, *args, **kwargs)

    @property
    def page_context(self):
        return {
            'sources_map': self.form.app_source_helper.all_sources,
            'form': self.form,
        }


class BaseEditDataSourceView(BaseUserConfigReportsView):
    # Shared create/edit view for data source configurations.
    template_name = 'userreports/edit_data_source.html'

    @property
    def page_context(self):
        return {
            'form': self.edit_form,
            'data_source': self.config,
            'read_only': self.read_only,
            'used_by_reports': self.get_reports(),
        }

    @property
    def page_url(self):
        if self.config_id:
            return reverse(self.urlname, args=(self.domain, self.config_id,))
        return super(BaseEditDataSourceView, self).page_url

    @property
    def config_id(self):
        # None when creating a new data source.
        return self.kwargs.get('config_id')

    @property
    def 
read_only(self):
        # Static (code-defined) data sources may not be edited.
        return id_is_static(self.config_id) if self.config_id is not None else False

    @property
    @memoized
    def config(self):
        if self.config_id is None:
            return DataSourceConfiguration(domain=self.domain)
        return get_datasource_config_or_404(self.config_id, self.domain)[0]

    @property
    @memoized
    def edit_form(self):
        if self.request.method == 'POST':
            return ConfigurableDataSourceEditForm(
                self.domain,
                self.config,
                self.read_only,
                data=self.request.POST
            )
        return ConfigurableDataSourceEditForm(
            self.domain, self.config, self.read_only
        )

    def post(self, request, *args, **kwargs):
        if self.edit_form.is_valid():
            config = self.edit_form.save(commit=True)
            messages.success(request, _('Data source "{}" saved!').format(
                config.display_name
            ))
            if self.config_id is None:
                # Newly created: jump to the edit page for the new id.
                return HttpResponseRedirect(reverse(
                    EditDataSourceView.urlname, args=[self.domain, config._id])
                )
        return self.get(request, *args, **kwargs)

    def get_reports(self):
        # All reports (static and dynamic) built on this data source's table.
        reports = StaticReportConfiguration.by_domain(self.domain)
        reports += ReportConfiguration.by_domain(self.domain)
        ret = []
        for report in reports:
            try:
                if report.table_id == self.config.table_id:
                    ret.append(report)
            except DataSourceConfigurationNotFoundError:
                _soft_assert = soft_assert(to=[
                    '{}@{}'.format(name, 'dimagi.com') for name in ['cellowitz', 'frener']
                ])
                _soft_assert(False, "Report {} on domain {} attempted to reference deleted table".format(
                    report._id, self.domain
                ))
        return ret

    def get(self, request, *args, **kwargs):
        if self.config.is_deactivated:
            messages.info(
                request,
                _(
                    'Data source "{}" has no associated table.\n'
                    'Click "Rebuild Data Source" to recreate the table.'
).format(self.config.display_name)
            )
        return super(BaseEditDataSourceView, self).get(request, *args, **kwargs)


class CreateDataSourceView(BaseEditDataSourceView):
    urlname = 'create_configurable_data_source'
    page_title = ugettext_lazy("Create Data Source")


class EditDataSourceView(BaseEditDataSourceView):
    urlname = 'edit_configurable_data_source'
    page_title = ugettext_lazy("Edit Data Source")

    @property
    def page_name(self):
        return "Edit {}".format(self.config.display_name)


@toggles.USER_CONFIGURABLE_REPORTS.required_decorator()
@require_POST
def delete_data_source(request, domain, config_id):
    delete_data_source_shared(domain, config_id, request)
    return HttpResponseRedirect(reverse('configurable_reports_home', args=[domain]))


def delete_data_source_shared(domain, config_id, request=None):
    # Drop the datasource table and soft-delete its configuration. When
    # called without a request (e.g. temp preview tables), audit logging
    # and user messaging are skipped.
    config = get_document_or_404(DataSourceConfiguration, domain, config_id)
    adapter = get_indicator_adapter(config)
    username = request.user.username if request else None
    skip = not request  # skip logging when we remove temporary tables
    adapter.drop_table(initiated_by=username, source='delete_data_source', skip_log=skip)
    soft_delete(config)
    if request:
        messages.success(
            request,
            _('Data source "{name}" has been deleted. 
<a href="{url}" class="post-link">Undo</a>').format(
                name=config.display_name,
                url=reverse('undo_delete_data_source', args=[domain, config._id]),
            ),
            extra_tags='html'
        )


@toggles.USER_CONFIGURABLE_REPORTS.required_decorator()
@require_POST
def undelete_data_source(request, domain, config_id):
    # Restore a soft-deleted data source configuration (table must be rebuilt
    # separately).
    config = get_document_or_404(DataSourceConfiguration, domain, config_id, additional_doc_types=[
        get_deleted_doc_type(DataSourceConfiguration)
    ])
    if config and is_deleted(config):
        undo_delete(config)
        messages.success(
            request,
            _('Successfully restored data source "{name}"').format(name=config.display_name)
        )
    else:
        messages.info(request, _('Data source "{name}" not deleted.').format(name=config.display_name))
    return HttpResponseRedirect(reverse(
        EditDataSourceView.urlname, args=[domain, config._id]
    ))


@toggles.USER_CONFIGURABLE_REPORTS.required_decorator()
@require_POST
def rebuild_data_source(request, domain, config_id):
    # Kick off an async full rebuild of the datasource table.
    config, is_static = get_datasource_config_or_404(config_id, domain)
    if config.is_deactivated:
        config.is_deactivated = False
        config.save()

    messages.success(
        request,
        _('Table "{}" is now being rebuilt. Data should start showing up soon').format(
            config.display_name
        )
    )

    rebuild_indicators.delay(config_id, request.user.username)
    return HttpResponseRedirect(reverse(
        EditDataSourceView.urlname, args=[domain, config._id]
    ))


@toggles.USER_CONFIGURABLE_REPORTS.required_decorator()
@require_POST
def resume_building_data_source(request, domain, config_id):
    # Continue an interrupted rebuild, if resume checkpoints exist.
    config, is_static = get_datasource_config_or_404(config_id, domain)
    if not is_static and config.meta.build.finished:
        messages.warning(
            request,
            _('Table "{}" has already finished building. Rebuild table to start over.').format(
                config.display_name
            )
        )
    elif not DataSourceResumeHelper(config).has_resume_info():
        messages.warning(
            request,
            _('Table "{}" did not finish building but resume information is not available. 
' 'Unfortunately, this means you need to rebuild the table.').format(
                config.display_name
            )
        )
    else:
        messages.success(
            request,
            _('Resuming rebuilding table "{}".').format(config.display_name)
        )
        resume_building_indicators.delay(config_id, request.user.username)
    return HttpResponseRedirect(reverse(
        EditDataSourceView.urlname, args=[domain, config._id]
    ))


@toggles.USER_CONFIGURABLE_REPORTS.required_decorator()
@require_POST
def build_data_source_in_place(request, domain, config_id):
    # Rebuild the datasource without dropping the existing table first.
    config, is_static = get_datasource_config_or_404(config_id, domain)
    if config.is_deactivated:
        config.is_deactivated = False
        config.save()

    messages.success(
        request,
        _('Table "{}" is now being rebuilt. Data should start showing up soon').format(
            config.display_name
        )
    )

    rebuild_indicators_in_place.delay(config_id, request.user.username,
                                      source='edit_data_source_build_in_place')
    return HttpResponseRedirect(reverse(
        EditDataSourceView.urlname, args=[domain, config._id]
    ))


@login_and_domain_required
@toggles.USER_CONFIGURABLE_REPORTS.required_decorator()
def data_source_json(request, domain, config_id):
    # Export the raw data source spec, minus the Couch revision.
    config, _ = get_datasource_config_or_404(config_id, domain)
    config._doc.pop('_rev', None)
    return json_response(config)


class PreviewDataSourceView(BaseUserConfigReportsView):
    # Show the first rows of a datasource table.
    urlname = 'preview_configurable_data_source'
    template_name = "userreports/preview_data.html"
    page_title = ugettext_lazy("Preview Data Source")

    @method_decorator(swallow_programming_errors)
    def dispatch(self, request, *args, **kwargs):
        return super(PreviewDataSourceView, self).dispatch(request, *args, **kwargs)

    @property
    def config_id(self):
        return self.kwargs['config_id']

    @property
    def page_url(self):
        return reverse(self.urlname, args=(self.domain, self.config_id,))

    @property
    def page_context(self):
        config, is_static = get_datasource_config_or_404(self.config_id, self.domain)
        adapter = get_indicator_adapter(config)
        q = adapter.get_query_object()
        return {
            'data_source': config,
            'columns': q.column_descriptions,
            'data': 
[list(row) for row in q[:20]],
            'total_rows': q.count(),
        }


ExportParameters = namedtuple('ExportParameters',
                              ['format', 'keyword_filters', 'sql_filters'])


def _last_n_days(column, value):
    # URL-filter directive: restrict a date column to the last `value` days.
    if not isinstance(column.type, (types.Date, types.DateTime)):
        raise UserQueryError(_("You can only use 'lastndays' on date columns"))
    end = datetime.date.today()
    start = end - datetime.timedelta(days=int(value))
    return column.between(start, end)


def _range_filter(column, value):
    # URL-filter directive: "start..end" inclusive range on a column.
    try:
        start, end = value.split('..')
    except ValueError:
        raise UserQueryError(_('Ranges must have the format "start..end"'))
    return column.between(start, end)


sql_directives = [
    # (suffix matching url parameter, callable returning a filter),
    ('-lastndays', _last_n_days),
    ('-range', _range_filter),
]


def process_url_params(params, columns):
    """
    Converts a dictionary of parameters from the user to sql filters.

    If a parameter is of the form <field name>-<suffix>, where suffix is
    defined in `sql_directives`, the corresponding function is used to
    produce a filter.
    """
    # support passing `format` instead of `$format` so we don't break people's
    # existing URLs. Let's remove this once we can. 
format_ = params.get('$format', params.get('format', Format.UNZIPPED_CSV))
    keyword_filters = {}
    sql_filters = []
    for key, value in params.items():
        if key in ('$format', 'format'):
            continue

        for suffix, fn in sql_directives:
            if key.endswith(suffix):
                field = key[:-len(suffix)]
                if field not in columns:
                    raise UserQueryError(_('No field named {}').format(field))
                sql_filters.append(fn(columns[field], value))
                break
        else:
            # No directive suffix matched: treat as an exact-match filter.
            if key in columns:
                keyword_filters[key] = value
            else:
                raise UserQueryError(_('Invalid filter parameter: {}')
                                     .format(key))
    return ExportParameters(format_, keyword_filters, sql_filters)


@api_auth
@require_permission(Permissions.view_reports)
@swallow_programming_errors
def export_data_source(request, domain, config_id):
    config, _ = get_datasource_config_or_404(config_id, domain)
    adapter = get_indicator_adapter(config, load_source='export_data_source')
    url = reverse('export_configurable_data_source', args=[domain, config._id])
    return export_sql_adapter_view(request, domain, adapter, url)


def export_sql_adapter_view(request, domain, adapter, too_large_redirect_url):
    # Stream a datasource table out as CSV/HTML/XLS, applying URL filters.
    q = adapter.get_query_object()
    table = adapter.get_table()

    try:
        params = process_url_params(request.GET, table.columns)
        allowed_formats = [
            Format.CSV,
            Format.HTML,
            Format.XLS,
            Format.XLS_2007,
        ]
        if params.format not in allowed_formats:
            msg = ugettext_lazy('format must be one of the following: {}').format(', '.join(allowed_formats))
            return HttpResponse(msg, status=400)
    except UserQueryError as e:
        return HttpResponse(str(e), status=400)

    q = q.filter_by(**params.keyword_filters)
    for sql_filter in params.sql_filters:
        q = q.filter(sql_filter)

    # xls format has limit of 65536 rows
    # First row is taken up by headers
    if params.format == Format.XLS and q.count() >= 65535:
        keyword_params = dict(**request.GET)
        # use default format
        if 'format' in keyword_params:
            del keyword_params['format']
        return HttpResponseRedirect(
            '%s?%s' % (
                too_large_redirect_url,
                urlencode(keyword_params)
            )
        )

    # build export
    def 
get_table(q):
        # Header row first, then the filtered table rows.
        yield list(table.columns.keys())
        for row in q:
            adapter.track_load()
            yield row

    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, 'wb') as tmpfile:
        try:
            tables = [[adapter.table_id, get_table(q)]]
            export_from_tables(tables, tmpfile, params.format)
        except exc.DataError:
            msg = ugettext_lazy(
                "There was a problem executing your query, "
                "please make sure your parameters are valid."
            )
            return HttpResponse(msg, status=400)
    return export_response(Temp(path), params.format, adapter.display_name)


def _get_report_filter(domain, report_id, filter_id):
    report = get_report_config_or_404(report_id, domain)[0]
    report_filter = report.get_ui_filter(filter_id)
    if report_filter is None:
        raise Http404(_('Filter {} not found!').format(filter_id))
    return report_filter


def _is_location_safe_choice_list(view_fn, request, domain, report_id, filter_id, **view_kwargs):
    # Used by conditionally_location_safe below.
    return report_has_location_filter(config_id=report_id, domain=domain)


@login_and_domain_required
@conditionally_location_safe(_is_location_safe_choice_list)
def choice_list_api(request, domain, report_id, filter_id):
    # Paginated choices for a report filter's select widget.
    report_filter = _get_report_filter(domain, report_id, filter_id)
    if hasattr(report_filter, 'choice_provider'):
        query_context = ChoiceQueryContext(
            query=request.GET.get('q', None),
            limit=int(request.GET.get('limit', 20)),
            page=int(request.GET.get('page', 1)) - 1,
            user=request.couch_user
        )
        return json_response([
            choice._asdict() for choice in
            report_filter.choice_provider.query(query_context)
        ])
    else:
        # mobile UCR hits this API for invalid filters. Just return no choices. 
return json_response([])


def _shared_context(domain):
    # Context shared by the UCR admin pages: all reports and data sources
    # (dynamic plus static) for the domain.
    static_reports = list(StaticReportConfiguration.by_domain(domain))
    static_data_sources = list(StaticDataSourceConfiguration.by_domain(domain))
    return {
        'domain': domain,
        'reports': ReportConfiguration.by_domain(domain) + static_reports,
        'data_sources': DataSourceConfiguration.by_domain(domain) + static_data_sources,
    }


class DataSourceSummaryView(BaseUserConfigReportsView):
    # Read-only, human-readable summary of a data source configuration.
    urlname = 'summary_configurable_data_source'
    template_name = "userreports/summary_data_source.html"
    page_title = ugettext_lazy("Data Source Summary")

    @property
    def config_id(self):
        return self.kwargs['config_id']

    @property
    @memoized
    def config(self):
        return get_datasource_config_or_404(self.config_id, self.domain)[0]

    @property
    def page_url(self):
        return reverse(self.urlname, args=(self.domain, self.config_id,))

    @property
    def page_name(self):
        return "Summary - {}".format(self.config.display_name)

    @property
    def page_context(self):
        return {
            'datasource_display_name': self.config.display_name,
            'filter_summary': self.configured_filter_summary(),
            'indicator_summary': self._add_links_to_output(self.indicator_summary()),
            'named_expression_summary': self._add_links_to_output(self.named_expression_summary()),
            'named_filter_summary': self._add_links_to_output(self.named_filter_summary()),
            'named_filter_prefix': NAMED_FILTER_PREFIX,
            'named_expression_prefix': NAMED_EXPRESSION_PREFIX,
        }

    def indicator_summary(self):
        # Readable rendering of each configured indicator.
        context = self.config.get_factory_context()
        wrapped_specs = [
            IndicatorFactory.from_spec(spec, context).wrapped_spec
            for spec in self.config.configured_indicators
        ]
        return [
            {
                "column_id": wrapped.column_id,
                "comment": wrapped.comment,
                "readable_output": wrapped.readable_output(context)
            }
            for wrapped in wrapped_specs if wrapped
        ]

    def named_expression_summary(self):
        return [
            {
                "name": name,
                "comment": self.config.named_expressions[name].get('comment'),
                "readable_output": str(exp)
            }
            for name, exp in self.config.named_expression_objects.items()
        ]
def named_filter_summary(self): return [ { "name": name, "comment": self.config.named_filters[name].get('comment'), "readable_output": str(filter) } for name, filter in self.config.named_filter_objects.items() ] def configured_filter_summary(self): return str(FilterFactory.from_spec(self.config.configured_filter, context=self.config.get_factory_context())) def _add_links_to_output(self, items): def make_link(match): value = match.group() return '<a href="#{value}">{value}</a>'.format(value=value) def add_links(content): content = re.sub(r"{}:[A-Za-z0-9_-]+".format(NAMED_FILTER_PREFIX), make_link, content) content = re.sub(r"{}:[A-Za-z0-9_-]+".format(NAMED_EXPRESSION_PREFIX), make_link, content) return content list = [] for i in items: i['readable_output'] = add_links(i.get('readable_output')) list.append(i) return list @domain_admin_required def copy_report(request, domain): from_domain = domain to_domains = request.POST.getlist("to_domains") report_id = request.POST.get("report_id") successes = [] failures = [] for to_domain in to_domains: domain_link = DomainLink.objects.get(master_domain=from_domain, linked_domain=to_domain) try: link_info = create_linked_ucr(domain_link, report_id) domain_link.update_last_pull( 'report', request.couch_user._id, model_detail=ReportLinkDetail(report_id=link_info.report.get_id).to_json(), ) successes.append(to_domain) except Exception as err: failures.append(to_domain) notify_exception(request, message=str(err)) if successes: messages.success( request, _(f"Successfully linked and copied {link_info.report.title} to {', '.join(successes)}. ")) if failures: messages.error(request, _(f"Due to errors, the report was not copied to {', '.join(failures)}")) return HttpResponseRedirect( reverse(ConfigurableReportView.slug, args=[from_domain, report_id]) )